[Midnightbsd-cvs] src [10120] trunk/sys/dev/cxgbe: update cxgbe

laffer1 at midnightbsd.org
Sun May 27 20:17:55 EDT 2018


Revision: 10120
          http://svnweb.midnightbsd.org/src/?rev=10120
Author:   laffer1
Date:     2018-05-27 20:17:55 -0400 (Sun, 27 May 2018)
Log Message:
-----------
update cxgbe

Modified Paths:
--------------
    trunk/sys/dev/cxgbe/adapter.h
    trunk/sys/dev/cxgbe/common/common.h
    trunk/sys/dev/cxgbe/common/t4_hw.c
    trunk/sys/dev/cxgbe/common/t4_hw.h
    trunk/sys/dev/cxgbe/common/t4_msg.h
    trunk/sys/dev/cxgbe/common/t4_regs.h
    trunk/sys/dev/cxgbe/common/t4_regs_values.h
    trunk/sys/dev/cxgbe/common/t4_tcb.h
    trunk/sys/dev/cxgbe/firmware/t4fw_cfg.txt
    trunk/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt
    trunk/sys/dev/cxgbe/firmware/t4fw_interface.h
    trunk/sys/dev/cxgbe/firmware/t5fw_cfg.txt
    trunk/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt
    trunk/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt
    trunk/sys/dev/cxgbe/offload.h
    trunk/sys/dev/cxgbe/osdep.h
    trunk/sys/dev/cxgbe/t4_ioctl.h
    trunk/sys/dev/cxgbe/t4_l2t.c
    trunk/sys/dev/cxgbe/t4_l2t.h
    trunk/sys/dev/cxgbe/t4_main.c
    trunk/sys/dev/cxgbe/t4_sge.c
    trunk/sys/dev/cxgbe/tom/t4_connect.c
    trunk/sys/dev/cxgbe/tom/t4_cpl_io.c
    trunk/sys/dev/cxgbe/tom/t4_ddp.c
    trunk/sys/dev/cxgbe/tom/t4_listen.c
    trunk/sys/dev/cxgbe/tom/t4_tom.c
    trunk/sys/dev/cxgbe/tom/t4_tom.h
    trunk/sys/dev/cxgbe/tom/t4_tom_l2t.c
    trunk/sys/dev/cxgbe/tom/t4_tom_l2t.h

Added Paths:
-----------
    trunk/sys/dev/cxgbe/common/t4vf_hw.c
    trunk/sys/dev/cxgbe/if_cc.c
    trunk/sys/dev/cxgbe/if_ccv.c
    trunk/sys/dev/cxgbe/if_cxl.c
    trunk/sys/dev/cxgbe/if_cxlv.c
    trunk/sys/dev/cxgbe/iw_cxgbe/
    trunk/sys/dev/cxgbe/iw_cxgbe/cm.c
    trunk/sys/dev/cxgbe/iw_cxgbe/cq.c
    trunk/sys/dev/cxgbe/iw_cxgbe/device.c
    trunk/sys/dev/cxgbe/iw_cxgbe/ev.c
    trunk/sys/dev/cxgbe/iw_cxgbe/id_table.c
    trunk/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
    trunk/sys/dev/cxgbe/iw_cxgbe/mem.c
    trunk/sys/dev/cxgbe/iw_cxgbe/provider.c
    trunk/sys/dev/cxgbe/iw_cxgbe/qp.c
    trunk/sys/dev/cxgbe/iw_cxgbe/resource.c
    trunk/sys/dev/cxgbe/iw_cxgbe/t4.h
    trunk/sys/dev/cxgbe/iw_cxgbe/user.h
    trunk/sys/dev/cxgbe/t4_mp_ring.c
    trunk/sys/dev/cxgbe/t4_mp_ring.h
    trunk/sys/dev/cxgbe/t4_netmap.c
    trunk/sys/dev/cxgbe/t4_sched.c
    trunk/sys/dev/cxgbe/t4_tracer.c
    trunk/sys/dev/cxgbe/t4_vf.c

Property Changed:
----------------
    trunk/sys/dev/cxgbe/firmware/t4fw_cfg.txt
    trunk/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt
    trunk/sys/dev/cxgbe/firmware/t5fw_cfg.txt
    trunk/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt
    trunk/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt

Modified: trunk/sys/dev/cxgbe/adapter.h
===================================================================
--- trunk/sys/dev/cxgbe/adapter.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/adapter.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2011 Chelsio Communications, Inc.
  * All rights reserved.
@@ -24,7 +25,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/adapter.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/adapter.h 318855 2017-05-25 02:00:37Z np $
  *
  */
 
@@ -48,11 +49,11 @@
 #include <netinet/tcp_lro.h>
 
 #include "offload.h"
+#include "t4_ioctl.h"
+#include "common/t4_msg.h"
 #include "firmware/t4fw_interface.h"
 
-#define T4_CFGNAME "t4fw_cfg"
-#define T4_FWNAME "t4fw"
-
+#define KTR_CXGBE	KTR_SPARE3
 MALLOC_DECLARE(M_CXGBE);
 #define CXGBE_UNIMPLEMENTED(s) \
     panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)
@@ -78,78 +79,41 @@
 #define SBUF_DRAIN 1
 #endif
 
-#ifdef __amd64__
-/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
-static __inline uint64_t
-t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
-    bus_size_t offset)
-{
-	KASSERT(tag == X86_BUS_SPACE_MEM,
-	    ("%s: can only handle mem space", __func__));
-
-	return (*(volatile uint64_t *)(handle + offset));
-}
-
-static __inline void
-t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
-    bus_size_t offset, uint64_t value)
-{
-	KASSERT(tag == X86_BUS_SPACE_MEM,
-	    ("%s: can only handle mem space", __func__));
-
-	*(volatile uint64_t *)(bsh + offset) = value;
-}
-#else
-static __inline uint64_t
-t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
-    bus_size_t offset)
-{
-	return (uint64_t)bus_space_read_4(tag, handle, offset) +
-	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
-}
-
-static __inline void
-t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
-    bus_size_t offset, uint64_t value)
-{
-	bus_space_write_4(tag, bsh, offset, value);
-	bus_space_write_4(tag, bsh, offset + 4, value >> 32);
-}
-#endif
-
 struct adapter;
 typedef struct adapter adapter_t;
 
 enum {
+	/*
+	 * All ingress queues use this entry size.  Note that the firmware event
+	 * queue and any iq expecting CPL_RX_PKT in the descriptor needs this to
+	 * be at least 64.
+	 */
+	IQ_ESIZE = 64,
+
+	/* Default queue sizes for all kinds of ingress queues */
 	FW_IQ_QSIZE = 256,
-	FW_IQ_ESIZE = 64,	/* At least 64 mandated by the firmware spec */
-
 	RX_IQ_QSIZE = 1024,
-	RX_IQ_ESIZE = 64,	/* At least 64 so CPL_RX_PKT will fit */
 
-	EQ_ESIZE = 64,		/* All egress queues use this entry size */
+	/* All egress queues use this entry size */
+	EQ_ESIZE = 64,
 
-	RX_FL_ESIZE = EQ_ESIZE,	/* 8 64bit addresses */
+	/* Default queue sizes for all kinds of egress queues */
+	CTRL_EQ_QSIZE = 128,
+	TX_EQ_QSIZE = 1024,
+
 #if MJUMPAGESIZE != MCLBYTES
-	FL_BUF_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
+	SW_ZONE_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
 #else
-	FL_BUF_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
+	SW_ZONE_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
 #endif
-	OFLD_BUF_SIZE = MJUM16BYTES,	/* size of fl buffer for TOE rxq */
+	CL_METADATA_SIZE = CACHE_LINE_SIZE,
 
-	CTRL_EQ_QSIZE = 128,
-
-	TX_EQ_QSIZE = 1024,
-	TX_SGL_SEGS = 36,
+	SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
+	TX_SGL_SEGS = 39,
+	TX_SGL_SEGS_TSO = 38,
 	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
 };
 
-#ifdef T4_PKT_TIMESTAMP
-#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
-#else
-#define RX_COPY_THRESHOLD MINCLSIZE
-#endif
-
 enum {
 	/* adapter intr_type */
 	INTR_INTX	= (1 << 0),
@@ -158,6 +122,17 @@
 };
 
 enum {
+	XGMAC_MTU	= (1 << 0),
+	XGMAC_PROMISC	= (1 << 1),
+	XGMAC_ALLMULTI	= (1 << 2),
+	XGMAC_VLANEX	= (1 << 3),
+	XGMAC_UCADDR	= (1 << 4),
+	XGMAC_MCADDRS	= (1 << 5),
+
+	XGMAC_ALL	= 0xffff
+};
+
+enum {
 	/* flags understood by begin_synchronized_op */
 	HOLD_LOCK	= (1 << 0),
 	SLEEP_OK	= (1 << 1),
@@ -171,68 +146,80 @@
 	/* adapter flags */
 	FULL_INIT_DONE	= (1 << 0),
 	FW_OK		= (1 << 1),
-	INTR_DIRECT	= (1 << 2),	/* direct interrupts for everything */
+	/* INTR_DIRECT	= (1 << 2),	No longer used. */
 	MASTER_PF	= (1 << 3),
 	ADAP_SYSCTL_CTX	= (1 << 4),
-	TOM_INIT_DONE	= (1 << 5),
+	/* TOM_INIT_DONE= (1 << 5),	No longer used */
+	BUF_PACKING_OK	= (1 << 6),
+	IS_VF		= (1 << 7),
 
 	CXGBE_BUSY	= (1 << 9),
 
 	/* port flags */
+	HAS_TRACEQ	= (1 << 3),
+
+	/* VI flags */
 	DOOMED		= (1 << 0),
-	PORT_INIT_DONE	= (1 << 1),
-	PORT_SYSCTL_CTX	= (1 << 2),
+	VI_INIT_DONE	= (1 << 1),
+	VI_SYSCTL_CTX	= (1 << 2),
+	INTR_RXQ	= (1 << 4),	/* All NIC rxq's take interrupts */
+	INTR_OFLD_RXQ	= (1 << 5),	/* All TOE rxq's take interrupts */
+	INTR_ALL	= (INTR_RXQ | INTR_OFLD_RXQ),
+
+	/* adapter debug_flags */
+	DF_DUMP_MBOX	= (1 << 0),
 };
 
-#define IS_DOOMED(pi)	((pi)->flags & DOOMED)
-#define SET_DOOMED(pi)	do {(pi)->flags |= DOOMED;} while (0)
+#define IS_DOOMED(vi)	((vi)->flags & DOOMED)
+#define SET_DOOMED(vi)	do {(vi)->flags |= DOOMED;} while (0)
 #define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
 #define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
 #define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
 
-struct port_info {
+struct vi_info {
 	device_t dev;
-	struct adapter *adapter;
+	struct port_info *pi;
 
 	struct ifnet *ifp;
 	struct ifmedia media;
 
-	struct mtx pi_lock;
-	char lockname[16];
 	unsigned long flags;
 	int if_flags;
 
+	uint16_t *rss, *nm_rss;
+	int smt_idx;		/* for convenience */
 	uint16_t viid;
 	int16_t  xact_addr_filt;/* index of exact MAC address filter */
 	uint16_t rss_size;	/* size of VI's RSS table slice */
-	uint8_t  lport;		/* associated offload logical port */
-	int8_t   mdio_addr;
-	uint8_t  port_type;
-	uint8_t  mod_type;
-	uint8_t  port_id;
-	uint8_t  tx_chan;
+	uint16_t rss_base;	/* start of VI's RSS table slice */
 
+	eventhandler_tag vlan_c;
+
+	int nintr;
+	int first_intr;
+
 	/* These need to be int as they are used in sysctl */
-	int ntxq;	/* # of tx queues */
-	int first_txq;	/* index of first tx queue */
-	int nrxq;	/* # of rx queues */
-	int first_rxq;	/* index of first rx queue */
-#ifdef TCP_OFFLOAD
+	int ntxq;		/* # of tx queues */
+	int first_txq;		/* index of first tx queue */
+	int rsrv_noflowq; 	/* Reserve queue 0 for non-flowid packets */
+	int nrxq;		/* # of rx queues */
+	int first_rxq;		/* index of first rx queue */
 	int nofldtxq;		/* # of offload tx queues */
 	int first_ofld_txq;	/* index of first offload tx queue */
 	int nofldrxq;		/* # of offload rx queues */
 	int first_ofld_rxq;	/* index of first offload rx queue */
-#endif
+	int nnmtxq;
+	int first_nm_txq;
+	int nnmrxq;
+	int first_nm_rxq;
 	int tmr_idx;
 	int pktc_idx;
 	int qsize_rxq;
 	int qsize_txq;
 
-	struct link_config link_cfg;
-	struct port_stats stats;
+	struct timeval last_refreshed;
+	struct fw_vi_stats_vf stats;
 
-	eventhandler_tag vlan_c;
-
 	struct callout tick;
 	struct sysctl_ctx_list ctx;	/* from ifconfig up to driver detach */
 
@@ -239,14 +226,90 @@
 	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
 };
 
+struct tx_ch_rl_params {
+	enum fw_sched_params_rate ratemode;	/* %port (REL) or kbps (ABS) */
+	uint32_t maxrate;
+};
+
+enum {
+	TX_CLRL_REFRESH	= (1 << 0),	/* Need to update hardware state. */
+	TX_CLRL_ERROR	= (1 << 1),	/* Error, hardware state unknown. */
+};
+
+struct tx_cl_rl_params {
+	int refcount;
+	u_int flags;
+	enum fw_sched_params_rate ratemode;	/* %port REL or ABS value */
+	enum fw_sched_params_unit rateunit;	/* kbps or pps (when ABS) */
+	enum fw_sched_params_mode mode;		/* aggr or per-flow */
+	uint32_t maxrate;
+	uint16_t pktsize;
+};
+
+/* Tx scheduler parameters for a channel/port */
+struct tx_sched_params {
+	/* Channel Rate Limiter */
+	struct tx_ch_rl_params ch_rl;
+
+	/* Class WRR */
+	/* XXX */
+
+	/* Class Rate Limiter */
+	struct tx_cl_rl_params cl_rl[];
+};
+
+struct port_info {
+	device_t dev;
+	struct adapter *adapter;
+
+	struct vi_info *vi;
+	int nvi;
+	int up_vis;
+	int uld_vis;
+
+	struct tx_sched_params *sched_params;
+
+	struct mtx pi_lock;
+	char lockname[16];
+	unsigned long flags;
+
+	uint8_t  lport;		/* associated offload logical port */
+	int8_t   mdio_addr;
+	uint8_t  port_type;
+	uint8_t  mod_type;
+	uint8_t  port_id;
+	uint8_t  tx_chan;
+	uint8_t  rx_chan_map;	/* rx MPS channel bitmap */
+
+	struct link_config link_cfg;
+
+	struct timeval last_refreshed;
+ 	struct port_stats stats;
+	u_int tx_parse_error;
+
+	struct callout tick;
+};
+
+#define	IS_MAIN_VI(vi)		((vi) == &((vi)->pi->vi[0]))
+
+/* Where the cluster came from, how it has been carved up. */
+struct cluster_layout {
+	int8_t zidx;
+	int8_t hwidx;
+	uint16_t region1;	/* mbufs laid out within this region */
+				/* region2 is the DMA region */
+	uint16_t region3;	/* cluster_metadata within this region */
+};
+
+struct cluster_metadata {
+	u_int refcount;
+	struct fl_sdesc *sd;	/* For debug only.  Could easily be stale */
+};
+
 struct fl_sdesc {
-	struct mbuf *m;
-	bus_dmamap_t map;
 	caddr_t cl;
-	uint8_t tag_idx;	/* the sc->fl_tag this map comes from */
-#ifdef INVARIANTS
-	__be64 ba_tag;
-#endif
+	uint16_t nmbuf;	/* # of driver originated mbufs with ref on cluster */
+	struct cluster_layout cll;
 };
 
 struct tx_desc {
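
(Aside: an illustrative sketch of the three-region carving that struct
cluster_layout above describes.  All sizes here are hypothetical; only
the idea, driver-owned mbufs at the front, DMA-able payload in the
middle, a refcounted metadata record at the tail, mirrors the driver.)

    #include <stdint.h>
    #include <stdio.h>

    struct meta { unsigned refcount; }; /* stands in for cluster_metadata */

    int
    main(void)
    {
            const size_t clsize = 4096;            /* hypothetical cluster */
            const size_t r1 = 256;                 /* region1: mbufs */
            const size_t r3 = sizeof(struct meta); /* region3: metadata */
            const size_t r2 = clsize - r1 - r3;    /* region2: NIC DMA */

            printf("region1 %zu, region2 %zu, region3 %zu\n", r1, r2, r3);
            return (0);
    }
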
@@ -253,24 +316,20 @@
 	__be64 flit[8];
 };
 
-struct tx_map {
-	struct mbuf *m;
-	bus_dmamap_t map;
+struct tx_sdesc {
+	struct mbuf *m;		/* m_nextpkt linked chain of frames */
+	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
 };
 
-/* DMA maps used for tx */
-struct tx_maps {
-	struct tx_map *maps;
-	uint32_t map_total;	/* # of DMA maps */
-	uint32_t map_pidx;	/* next map to be used */
-	uint32_t map_cidx;	/* reclaimed up to this index */
-	uint32_t map_avail;	/* # of available maps */
-};
 
-struct tx_sdesc {
-	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
-	uint8_t credits;	/* NIC txq: # of frames sent out in the WR */
+#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
+struct iq_desc {
+	struct rss_header rss;
+	uint8_t cpl[IQ_PAD];
+	struct rsp_ctrl rsp;
 };
+#undef IQ_PAD
+CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);
 
 enum {
 	/* iq flags */
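
(Aside: a minimal, self-contained sketch of the compile-time sizing
trick used by struct iq_desc above: a pad array sized by subtraction,
plus an assertion that fails the build if the arithmetic is ever wrong.
The types below are illustrative stand-ins, not the driver's real
rss_header/rsp_ctrl layouts.)

    #include <assert.h>   /* C11 static_assert plays the role of CTASSERT */
    #include <stdint.h>

    #define DESC_SIZE 64                    /* stands in for IQ_ESIZE */

    struct hdr  { uint8_t b[8]; };          /* stands in for rss_header */
    struct trlr { uint8_t b[8]; };          /* stands in for rsp_ctrl */

    #define PAD (DESC_SIZE - sizeof(struct hdr) - sizeof(struct trlr))
    struct desc {
            struct hdr  h;                  /* at offset 0 */
            uint8_t     payload[PAD];       /* CPL message lives here */
            struct trlr t;                  /* ends exactly at DESC_SIZE */
    };
    #undef PAD

    static_assert(sizeof(struct desc) == DESC_SIZE, "desc must be 64 bytes");

    int main(void) { return (0); }
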
@@ -283,50 +342,62 @@
 	IQS_DISABLED	= 0,
 	IQS_BUSY	= 1,
 	IQS_IDLE	= 2,
+
+	/* netmap related flags */
+	NM_OFF	= 0,
+	NM_ON	= 1,
+	NM_BUSY	= 2,
 };
 
+struct sge_iq;
+struct rss_header;
+typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
+    struct mbuf *);
+typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
+typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
+
 /*
  * Ingress Queue: T4 is producer, driver is consumer.
  */
 struct sge_iq {
-	bus_dma_tag_t desc_tag;
-	bus_dmamap_t desc_map;
-	bus_addr_t ba;		/* bus address of descriptor ring */
 	uint32_t flags;
-	uint16_t abs_id;	/* absolute SGE id for the iq */
-	int8_t   intr_pktc_idx;	/* packet count threshold index */
-	int8_t   pad0;
-	__be64  *desc;		/* KVA of descriptor ring */
-
 	volatile int state;
 	struct adapter *adapter;
-	const __be64 *cdesc;	/* current descriptor */
+	cpl_handler_t set_tcb_rpl;
+	cpl_handler_t l2t_write_rpl;
+	struct iq_desc  *desc;	/* KVA of descriptor ring */
+	int8_t   intr_pktc_idx;	/* packet count threshold index */
 	uint8_t  gen;		/* generation bit */
 	uint8_t  intr_params;	/* interrupt holdoff parameters */
 	uint8_t  intr_next;	/* XXX: holdoff for next interrupt */
-	uint8_t  esize;		/* size (bytes) of each entry in the queue */
 	uint16_t qsize;		/* size (# of entries) of the queue */
+	uint16_t sidx;		/* index of the entry with the status page */
 	uint16_t cidx;		/* consumer index */
 	uint16_t cntxt_id;	/* SGE context id for the iq */
+	uint16_t abs_id;	/* absolute SGE id for the iq */
 
 	STAILQ_ENTRY(sge_iq) link;
+
+	bus_dma_tag_t desc_tag;
+	bus_dmamap_t desc_map;
+	bus_addr_t ba;		/* bus address of descriptor ring */
 };
 
 enum {
 	EQ_CTRL		= 1,
 	EQ_ETH		= 2,
-#ifdef TCP_OFFLOAD
 	EQ_OFLD		= 3,
-#endif
 
 	/* eq flags */
-	EQ_TYPEMASK	= 7,		/* 3 lsbits hold the type */
-	EQ_ALLOCATED	= (1 << 3),	/* firmware resources allocated */
-	EQ_DOOMED	= (1 << 4),	/* about to be destroyed */
-	EQ_CRFLUSHED	= (1 << 5),	/* expecting an update from SGE */
-	EQ_STALLED	= (1 << 6),	/* out of hw descriptors or dmamaps */
+	EQ_TYPEMASK	= 0x3,		/* 2 lsbits hold the type (see above) */
+	EQ_ALLOCATED	= (1 << 2),	/* firmware resources allocated */
+	EQ_ENABLED	= (1 << 3),	/* open for business */
+	EQ_QFLUSH	= (1 << 4),	/* if_qflush in progress */
 };
 
+/* Listed in order of preference.  Update t4_sysctls too if you change these */
+enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};
+
 /*
  * Egress Queue: driver is producer, T4 is consumer.
  *
@@ -336,75 +407,134 @@
 struct sge_eq {
 	unsigned int flags;	/* MUST be first */
 	unsigned int cntxt_id;	/* SGE context id for the eq */
-	bus_dma_tag_t desc_tag;
-	bus_dmamap_t desc_map;
-	char lockname[16];
+	unsigned int abs_id;	/* absolute SGE id for the eq */
 	struct mtx eq_lock;
 
 	struct tx_desc *desc;	/* KVA of descriptor ring */
-	bus_addr_t ba;		/* bus address of descriptor ring */
-	struct sge_qstat *spg;	/* status page, for convenience */
-	uint16_t cap;		/* max # of desc, for convenience */
-	uint16_t avail;		/* available descriptors, for convenience */
-	uint16_t qsize;		/* size (# of entries) of the queue */
+	uint16_t doorbells;
+	volatile uint32_t *udb;	/* KVA of doorbell (lies within BAR2) */
+	u_int udb_qid;		/* relative qid within the doorbell page */
+	uint16_t sidx;		/* index of the entry with the status page */
 	uint16_t cidx;		/* consumer idx (desc idx) */
 	uint16_t pidx;		/* producer idx (desc idx) */
-	uint16_t pending;	/* # of descriptors used since last doorbell */
+	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
+	uint16_t dbidx;		/* pidx of the most recent doorbell */
 	uint16_t iqid;		/* iq that gets egr_update for the eq */
 	uint8_t tx_chan;	/* tx channel used by the eq */
-	struct task tx_task;
-	struct callout tx_callout;
+	volatile u_int equiq;	/* EQUIQ outstanding */
 
-	/* stats */
+	bus_dma_tag_t desc_tag;
+	bus_dmamap_t desc_map;
+	bus_addr_t ba;		/* bus address of descriptor ring */
+	char lockname[16];
+};
 
-	uint32_t egr_update;	/* # of SGE_EGR_UPDATE notifications for eq */
-	uint32_t unstalled;	/* recovered from stall */
+struct sw_zone_info {
+	uma_zone_t zone;	/* zone that this cluster comes from */
+	int size;		/* size of cluster: 2K, 4K, 9K, 16K, etc. */
+	int type;		/* EXT_xxx type of the cluster */
+	int8_t head_hwidx;
+	int8_t tail_hwidx;
 };
 
+struct hw_buf_info {
+	int8_t zidx;		/* backpointer to zone; -ve means unused */
+	int8_t next;		/* next hwidx for this zone; -1 means no more */
+	int size;
+};
+
 enum {
+	NUM_MEMWIN = 3,
+
+	MEMWIN0_APERTURE = 2048,
+	MEMWIN0_BASE     = 0x1b800,
+
+	MEMWIN1_APERTURE = 32768,
+	MEMWIN1_BASE     = 0x28000,
+
+	MEMWIN2_APERTURE_T4 = 65536,
+	MEMWIN2_BASE_T4     = 0x30000,
+
+	MEMWIN2_APERTURE_T5 = 128 * 1024,
+	MEMWIN2_BASE_T5     = 0x60000,
+};
+
+struct memwin {
+	struct rwlock mw_lock __aligned(CACHE_LINE_SIZE);
+	uint32_t mw_base;	/* constant after setup_memwin */
+	uint32_t mw_aperture;	/* ditto */
+	uint32_t mw_curpos;	/* protected by mw_lock */
+};
+
+enum {
 	FL_STARVING	= (1 << 0), /* on the adapter's list of starving fl's */
 	FL_DOOMED	= (1 << 1), /* about to be destroyed */
+	FL_BUF_PACKING	= (1 << 2), /* buffer packing enabled */
+	FL_BUF_RESUME	= (1 << 3), /* resume from the middle of the frame */
 };
 
-#define FL_RUNNING_LOW(fl)	(fl->cap - fl->needed <= fl->lowat)
-#define FL_NOT_RUNNING_LOW(fl)	(fl->cap - fl->needed >= 2 * fl->lowat)
+#define FL_RUNNING_LOW(fl) \
+    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) <= fl->lowat)
+#define FL_NOT_RUNNING_LOW(fl) \
+    (IDXDIFF(fl->dbidx * 8, fl->cidx, fl->sidx * 8) >= 2 * fl->lowat)
 
 struct sge_fl {
-	bus_dma_tag_t desc_tag;
-	bus_dmamap_t desc_map;
-	bus_dma_tag_t tag[FL_BUF_SIZES];
-	uint8_t tag_idx;
 	struct mtx fl_lock;
-	char lockname[16];
+	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
+	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
+	struct cluster_layout cll_def;	/* default refill zone, layout */
+	uint16_t lowat;		/* # of buffers <= this means fl needs help */
 	int flags;
+	uint16_t buf_boundary;
 
-	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
-	bus_addr_t ba;		/* bus address of descriptor ring */
-	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
-	uint32_t cap;		/* max # of buffers, for convenience */
-	uint16_t qsize;		/* size (# of entries) of the queue */
+	/* The 16b idx all deal with hw descriptors */
+	uint16_t dbidx;		/* hw pidx after last doorbell */
+	uint16_t sidx;		/* index of status page */
+	volatile uint16_t hw_cidx;
+
+	/* The 32b idx are all buffer idx, not hardware descriptor idx */
+	uint32_t cidx;		/* consumer index */
+	uint32_t pidx;		/* producer index */
+
+	uint32_t dbval;
+	u_int rx_offset;	/* offset in fl buf (when buffer packing) */
+	volatile uint32_t *udb;
+
+	uint64_t mbuf_allocated;/* # of mbuf allocated from zone_mbuf */
+	uint64_t mbuf_inlined;	/* # of mbuf created within clusters */
+	uint64_t cl_allocated;	/* # of clusters allocated */
+	uint64_t cl_recycled;	/* # of clusters recycled */
+	uint64_t cl_fast_recycled; /* # of clusters recycled (fast) */
+
+	/* These 3 are valid when FL_BUF_RESUME is set, stale otherwise. */
+	struct mbuf *m0;
+	struct mbuf **pnext;
+	u_int remaining;
+
+	uint16_t qsize;		/* # of hw descriptors (status page included) */
 	uint16_t cntxt_id;	/* SGE context id for the freelist */
-	uint32_t cidx;		/* consumer idx (buffer idx, NOT hw desc idx) */
-	uint32_t pidx;		/* producer idx (buffer idx, NOT hw desc idx) */
-	uint32_t needed;	/* # of buffers needed to fill up fl. */
-	uint32_t lowat;		/* # of buffers <= this means fl needs help */
-	uint32_t pending;	/* # of bufs allocated since last doorbell */
-	unsigned int dmamap_failed;
 	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
+	bus_dma_tag_t desc_tag;
+	bus_dmamap_t desc_map;
+	char lockname[16];
+	bus_addr_t ba;		/* bus address of descriptor ring */
+	struct cluster_layout cll_alt;	/* alternate refill zone, layout */
 };
 
+struct mp_ring;
+
 /* txq: SGE egress queue + what's needed for Ethernet NIC */
 struct sge_txq {
 	struct sge_eq eq;	/* MUST be first */
 
 	struct ifnet *ifp;	/* the interface this txq belongs to */
-	bus_dma_tag_t tx_tag;	/* tag for transmit buffers */
-	struct buf_ring *br;	/* tx buffer ring */
+	struct mp_ring *r;	/* tx software ring */
 	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
-	struct mbuf *m;		/* held up due to temporary resource shortage */
+	struct sglist *gl;
+	__be32 cpl_ctrl0;	/* for convenience */
+	int tc_idx;		/* traffic class */
 
-	struct tx_maps txmaps;
-
+	struct task tx_reclaim_task;
 	/* stats for common events first */
 
 	uint64_t txcsum;	/* # of times hardware assisted with checksum */
@@ -413,13 +543,12 @@
 	uint64_t imm_wrs;	/* # of work requests with immediate data */
 	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
 	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
-	uint64_t txpkts_wrs;	/* # of coalesced tx work requests */
-	uint64_t txpkts_pkts;	/* # of frames in coalesced tx work requests */
+	uint64_t txpkts0_wrs;	/* # of type0 coalesced tx work requests */
+	uint64_t txpkts1_wrs;	/* # of type1 coalesced tx work requests */
+	uint64_t txpkts0_pkts;	/* # of frames in type0 coalesced tx WRs */
+	uint64_t txpkts1_pkts;	/* # of frames in type1 coalesced tx WRs */
 
 	/* stats for not-that-common events */
-
-	uint32_t no_dmamap;	/* no DMA map to load the mbuf */
-	uint32_t no_desc;	/* out of hardware descriptors */
 } __aligned(CACHE_LINE_SIZE);
 
 /* rxq: SGE ingress queue + SGE free list + miscellaneous items */
@@ -449,7 +578,6 @@
 }
 
 
-#ifdef TCP_OFFLOAD
 /* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
 struct sge_ofld_rxq {
 	struct sge_iq iq;	/* MUST be first */
@@ -462,15 +590,20 @@
 
 	return (__containerof(iq, struct sge_ofld_rxq, iq));
 }
-#endif
 
 struct wrqe {
 	STAILQ_ENTRY(wrqe) link;
 	struct sge_wrq *wrq;
 	int wr_len;
-	uint64_t wr[] __aligned(16);
+	char wr[] __aligned(16);
 };
 
+struct wrq_cookie {
+	TAILQ_ENTRY(wrq_cookie) link;
+	int ndesc;
+	int pidx;
+};
+
 /*
  * wrq: SGE egress queue that is given prebuilt work requests.  Both the control
  * and offload tx queues are of this type.
@@ -479,30 +612,97 @@
 	struct sge_eq eq;	/* MUST be first */
 
 	struct adapter *adapter;
+	struct task wrq_tx_task;
 
-	/* List of WRs held up due to lack of tx descriptors */
+	/* Tx desc reserved but WR not "committed" yet. */
+	TAILQ_HEAD(wrq_incomplete_wrs, wrq_cookie) incomplete_wrs;
+
+	/* List of WRs ready to go out as soon as descriptors are available. */
 	STAILQ_HEAD(, wrqe) wr_list;
+	u_int nwr_pending;
+	u_int ndesc_needed;
 
 	/* stats for common events first */
 
-	uint64_t tx_wrs;	/* # of tx work requests */
+	uint64_t tx_wrs_direct;	/* # of WRs written directly to desc ring. */
+	uint64_t tx_wrs_ss;	/* # of WRs copied from scratch space. */
+	uint64_t tx_wrs_copied;	/* # of WRs queued and copied to desc ring. */
 
 	/* stats for not-that-common events */
 
-	uint32_t no_desc;	/* out of hardware descriptors */
+	/*
+	 * Scratch space for work requests that wrap around after reaching the
+	 * status page, and some information about the last WR that used it.
+	 */
+	uint16_t ss_pidx;
+	uint16_t ss_len;
+	uint8_t ss[SGE_MAX_WR_LEN];
+
 } __aligned(CACHE_LINE_SIZE);
 
+
+struct sge_nm_rxq {
+	struct vi_info *vi;
+
+	struct iq_desc *iq_desc;
+	uint16_t iq_abs_id;
+	uint16_t iq_cntxt_id;
+	uint16_t iq_cidx;
+	uint16_t iq_sidx;
+	uint8_t iq_gen;
+
+	__be64  *fl_desc;
+	uint16_t fl_cntxt_id;
+	uint32_t fl_cidx;
+	uint32_t fl_pidx;
+	uint32_t fl_sidx;
+	uint32_t fl_db_val;
+	u_int fl_hwidx:4;
+
+	u_int nid;		/* netmap ring # for this queue */
+
+	/* infrequently used items after this */
+
+	bus_dma_tag_t iq_desc_tag;
+	bus_dmamap_t iq_desc_map;
+	bus_addr_t iq_ba;
+	int intr_idx;
+
+	bus_dma_tag_t fl_desc_tag;
+	bus_dmamap_t fl_desc_map;
+	bus_addr_t fl_ba;
+} __aligned(CACHE_LINE_SIZE);
+
+struct sge_nm_txq {
+	struct tx_desc *desc;
+	uint16_t cidx;
+	uint16_t pidx;
+	uint16_t sidx;
+	uint16_t equiqidx;	/* EQUIQ last requested at this pidx */
+	uint16_t equeqidx;	/* EQUEQ last requested at this pidx */
+	uint16_t dbidx;		/* pidx of the most recent doorbell */
+	uint16_t doorbells;
+	volatile uint32_t *udb;
+	u_int udb_qid;
+	u_int cntxt_id;
+	__be32 cpl_ctrl0;	/* for convenience */
+	u_int nid;		/* netmap ring # for this queue */
+
+	/* infrequently used items after this */
+
+	bus_dma_tag_t desc_tag;
+	bus_dmamap_t desc_map;
+	bus_addr_t ba;
+	int iqidx;
+} __aligned(CACHE_LINE_SIZE);
+
 struct sge {
-	int timer_val[SGE_NTIMERS];
-	int counter_val[SGE_NCOUNTERS];
-	int fl_starve_threshold;
-
 	int nrxq;	/* total # of Ethernet rx queues */
-	int ntxq;	/* total # of Ethernet tx tx queues */
-#ifdef TCP_OFFLOAD
+	int ntxq;	/* total # of Ethernet tx queues */
 	int nofldrxq;	/* total # of TOE rx queues */
 	int nofldtxq;	/* total # of TOE tx queues */
-#endif
+	int nnmrxq;	/* total # of netmap rx queues */
+	int nnmtxq;	/* total # of netmap tx queues */
 	int niq;	/* total # of ingress queues */
 	int neq;	/* total # of egress queues */
 
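
(Aside: a generic sketch of the scratch-space technique described in
struct sge_wrq above: a work request that would run past the end of
the descriptor ring is staged in a linear buffer first, then copied
out in two pieces.  The ring geometry is illustrative, and the real
driver also has to account for the status page at the end of the ring.)

    #include <stdint.h>
    #include <string.h>

    #define RING_NDESC 64                   /* hypothetical ring size */
    #define DESC_LEN   64

    static uint8_t ring[RING_NDESC][DESC_LEN];

    static void
    copy_to_ring(unsigned pidx, const uint8_t *wr, size_t len)
    {
            size_t room = (RING_NDESC - pidx) * DESC_LEN;

            if (len <= room) {
                    memcpy(ring[pidx], wr, len);    /* fits, no wrap */
            } else {
                    /* Wraps: fill the ring's tail, rest goes to slot 0. */
                    memcpy(ring[pidx], wr, room);
                    memcpy(ring[0], wr + room, len - room);
            }
    }

    int
    main(void)
    {
            uint8_t wr[3 * DESC_LEN] = { 0 };

            copy_to_ring(62, wr, sizeof(wr));   /* slots 62, 63, then 0 */
            return (0);
    }
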
@@ -511,27 +711,38 @@
 	struct sge_wrq *ctrlq;	/* Control queues */
 	struct sge_txq *txq;	/* NIC tx queues */
 	struct sge_rxq *rxq;	/* NIC rx queues */
-#ifdef TCP_OFFLOAD
 	struct sge_wrq *ofld_txq;	/* TOE tx queues */
 	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
-#endif
+	struct sge_nm_txq *nm_txq;	/* netmap tx queues */
+	struct sge_nm_rxq *nm_rxq;	/* netmap rx queues */
 
-	uint16_t iq_start;
-	int eq_start;
+	uint16_t iq_start;	/* first cntxt_id */
+	uint16_t iq_base;	/* first abs_id */
+	int eq_start;		/* first cntxt_id */
+	int eq_base;		/* first abs_id */
 	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
 	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */
+
+	int8_t safe_hwidx1;	/* may not have room for metadata */
+	int8_t safe_hwidx2;	/* with room for metadata and maybe more */
+	struct sw_zone_info sw_zone_info[SW_ZONE_SIZES];
+	struct hw_buf_info hw_buf_info[SGE_FLBUF_SIZES];
 };
 
-struct rss_header;
-typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
-    struct mbuf *);
-typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
-typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
+struct devnames {
+	const char *nexus_name;
+	const char *ifnet_name;
+	const char *vi_ifnet_name;
+	const char *pf03_drv_name;
+	const char *vf_nexus_name;
+	const char *vf_ifnet_name;
+};
 
 struct adapter {
 	SLIST_ENTRY(adapter) link;
 	device_t dev;
 	struct cdev *cdev;
+	const struct devnames *names;
 
 	/* PCIe register resources */
 	int regs_rid;
@@ -541,9 +752,14 @@
 	bus_space_handle_t bh;
 	bus_space_tag_t bt;
 	bus_size_t mmio_len;
+	int udbs_rid;
+	struct resource *udbs_res;
+	volatile uint8_t *udbs_base;
 
 	unsigned int pf;
 	unsigned int mbox;
+	unsigned int vpd_busy;
+	unsigned int vpd_flag;
 
 	/* Interrupt information */
 	int intr_type;
@@ -551,41 +767,62 @@
 	struct irq {
 		struct resource *res;
 		int rid;
+		volatile int nm_state;	/* NM_OFF, NM_ON, or NM_BUSY */
 		void *tag;
-	} *irq;
+		struct sge_rxq *rxq;
+		struct sge_nm_rxq *nm_rxq;
+	} __aligned(CACHE_LINE_SIZE) *irq;
+	int sge_gts_reg;
+	int sge_kdoorbell_reg;
 
 	bus_dma_tag_t dmat;	/* Parent DMA tag */
 
 	struct sge sge;
+	int lro_timeout;
+	int sc_do_rxcopy;
 
-	struct taskqueue *tq[NCHAN];	/* taskqueues that flush data out */
+	struct taskqueue *tq[MAX_NCHAN];	/* General purpose taskqueues */
 	struct port_info *port[MAX_NPORTS];
-	uint8_t chan_map[NCHAN];
-	uint32_t filter_mode;
+	uint8_t chan_map[MAX_NCHAN];
 
-#ifdef TCP_OFFLOAD
 	void *tom_softc;	/* (struct tom_data *) */
 	struct tom_tunables tt;
-#endif
+	void *iwarp_softc;	/* (struct c4iw_dev *) */
+	void *iscsi_ulp_softc;	/* (struct cxgbei_data *) */
 	struct l2t_data *l2t;	/* L2 table */
 	struct tid_info tids;
 
-	int open_device_map;
-#ifdef TCP_OFFLOAD
-	int offload_map;
-#endif
+	uint16_t doorbells;
+	int offload_map;	/* ports with IFCAP_TOE enabled */
+	int active_ulds;	/* ULDs activated on this adapter */
 	int flags;
+	int debug_flags;
 
-	char fw_version[32];
+	char ifp_lockname[16];
+	struct mtx ifp_lock;
+	struct ifnet *ifp;	/* tracer ifp */
+	struct ifmedia media;
+	int traceq;		/* iq used by all tracers, -1 if none */
+	int tracer_valid;	/* bitmap of valid tracers */
+	int tracer_enabled;	/* bitmap of enabled tracers */
+
+	char fw_version[16];
+	char tp_version[16];
+	char er_version[16];
+	char bs_version[16];
 	char cfg_file[32];
 	u_int cfcsum;
 	struct adapter_params params;
+	const struct chip_params *chip_params;
 	struct t4_virt_res vres;
 
+	uint16_t nbmcaps;
 	uint16_t linkcaps;
+	uint16_t switchcaps;
 	uint16_t niccaps;
 	uint16_t toecaps;
 	uint16_t rdmacaps;
+	uint16_t cryptocaps;
 	uint16_t iscsicaps;
 	uint16_t fcoecaps;
 
@@ -599,14 +836,16 @@
 	TAILQ_HEAD(, sge_fl) sfl;
 	struct callout sfl_callout;
 
-	an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
-	fw_msg_handler_t fw_msg_handler[5];	/* NUM_FW6_TYPES */
-	cpl_handler_t cpl_handler[0xef];	/* NUM_CPL_CMDS */
+	struct mtx reg_lock;	/* for indirect register access */
 
-#ifdef INVARIANTS
+	struct memwin memwin[NUM_MEMWIN];	/* memory windows */
+
+	struct mtx tc_lock;
+	struct task tc_task;
+
 	const char *last_op;
 	const void *last_op_thr;
-#endif
+	int last_op_flags;
 };
 
 #define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
@@ -614,7 +853,6 @@
 #define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
 #define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
 
-/* XXX: not bulletproof, but much better than nothing */
 #define ASSERT_SYNCHRONIZED_OP(sc)	\
     KASSERT(IS_BUSY(sc) && \
 	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
@@ -648,22 +886,58 @@
 #define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
 #define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
 
-#define for_each_txq(pi, iter, q) \
-	for (q = &pi->adapter->sge.txq[pi->first_txq], iter = 0; \
-	    iter < pi->ntxq; ++iter, ++q)
-#define for_each_rxq(pi, iter, q) \
-	for (q = &pi->adapter->sge.rxq[pi->first_rxq], iter = 0; \
-	    iter < pi->nrxq; ++iter, ++q)
-#define for_each_ofld_txq(pi, iter, q) \
-	for (q = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq], iter = 0; \
-	    iter < pi->nofldtxq; ++iter, ++q)
-#define for_each_ofld_rxq(pi, iter, q) \
-	for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
-	    iter < pi->nofldrxq; ++iter, ++q)
+#define CH_DUMP_MBOX(sc, mbox, data_reg) \
+	do { \
+		if (sc->debug_flags & DF_DUMP_MBOX) { \
+			log(LOG_NOTICE, \
+			    "%s mbox %u: %016llx %016llx %016llx %016llx " \
+			    "%016llx %016llx %016llx %016llx\n", \
+			    device_get_nameunit(sc->dev), mbox, \
+			    (unsigned long long)t4_read_reg64(sc, data_reg), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 8), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 16), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 24), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 32), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 40), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 48), \
+			    (unsigned long long)t4_read_reg64(sc, data_reg + 56)); \
+		} \
+	} while (0)
 
+#define for_each_txq(vi, iter, q) \
+	for (q = &vi->pi->adapter->sge.txq[vi->first_txq], iter = 0; \
+	    iter < vi->ntxq; ++iter, ++q)
+#define for_each_rxq(vi, iter, q) \
+	for (q = &vi->pi->adapter->sge.rxq[vi->first_rxq], iter = 0; \
+	    iter < vi->nrxq; ++iter, ++q)
+#define for_each_ofld_txq(vi, iter, q) \
+	for (q = &vi->pi->adapter->sge.ofld_txq[vi->first_ofld_txq], iter = 0; \
+	    iter < vi->nofldtxq; ++iter, ++q)
+#define for_each_ofld_rxq(vi, iter, q) \
+	for (q = &vi->pi->adapter->sge.ofld_rxq[vi->first_ofld_rxq], iter = 0; \
+	    iter < vi->nofldrxq; ++iter, ++q)
+#define for_each_nm_txq(vi, iter, q) \
+	for (q = &vi->pi->adapter->sge.nm_txq[vi->first_nm_txq], iter = 0; \
+	    iter < vi->nnmtxq; ++iter, ++q)
+#define for_each_nm_rxq(vi, iter, q) \
+	for (q = &vi->pi->adapter->sge.nm_rxq[vi->first_nm_rxq], iter = 0; \
+	    iter < vi->nnmrxq; ++iter, ++q)
+#define for_each_vi(_pi, _iter, _vi) \
+	for ((_vi) = (_pi)->vi, (_iter) = 0; (_iter) < (_pi)->nvi; \
+	     ++(_iter), ++(_vi))
+
+#define IDXINCR(idx, incr, wrap) do { \
+	idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
+} while (0)
+#define IDXDIFF(head, tail, wrap) \
+	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
+
 /* One for errors, one for firmware events */
 #define T4_EXTRA_INTR 2
 
+/* One for firmware events */
+#define T4VF_EXTRA_INTR 1
+
 static inline uint32_t
 t4_read_reg(struct adapter *sc, uint32_t reg)
 {
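
(Aside: a quick standalone check of the IDXINCR/IDXDIFF ring-index
helpers added above, exercised on a hypothetical 64-entry ring.)

    #include <assert.h>

    #define IDXINCR(idx, incr, wrap) do { \
            idx = wrap - idx > incr ? idx + incr : incr - (wrap - idx); \
    } while (0)
    #define IDXDIFF(head, tail, wrap) \
            ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

    int
    main(void)
    {
            unsigned idx = 60;

            IDXINCR(idx, 10, 64);             /* 60 + 10 on a 64-entry ring */
            assert(idx == 6);                 /* wrapped around to 6 */
            assert(IDXDIFF(6, 60, 64) == 10); /* head 6 is 10 ahead of tail 60 */
            assert(IDXDIFF(60, 6, 64) == 54);
            return (0);
    }
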
@@ -682,7 +956,13 @@
 t4_read_reg64(struct adapter *sc, uint32_t reg)
 {
 
-	return t4_bus_space_read_8(sc->bt, sc->bh, reg);
+#if defined(__LP64__) && !defined(__ia64__)
+	return bus_space_read_8(sc->bt, sc->bh, reg);
+#else
+	return (uint64_t)bus_space_read_4(sc->bt, sc->bh, reg) +
+	    ((uint64_t)bus_space_read_4(sc->bt, sc->bh, reg + 4) << 32);
+
+#endif
 }
 
 static inline void
@@ -689,7 +969,12 @@
 t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
 {
 
-	t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
+#if defined(__LP64__) && !defined(__ia64__)
+	bus_space_write_8(sc->bt, sc->bh, reg, val);
+#else
+	bus_space_write_4(sc->bt, sc->bh, reg, val);
+	bus_space_write_4(sc->bt, sc->bh, reg + 4, val >> 32);
+#endif
 }
 
 static inline void
@@ -745,24 +1030,106 @@
 t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
 {
 
-	bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
+	bcopy(hw_addr, sc->port[idx]->vi[0].hw_addr, ETHER_ADDR_LEN);
 }
 
-static inline bool is_10G_port(const struct port_info *pi)
+static inline bool
+is_10G_port(const struct port_info *pi)
 {
 
 	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
 }
 
-static inline int tx_resume_threshold(struct sge_eq *eq)
+static inline bool
+is_25G_port(const struct port_info *pi)
 {
 
-	return (eq->qsize / 4);
+	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) != 0);
 }
 
+static inline bool
+is_40G_port(const struct port_info *pi)
+{
+
+	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
+}
+
+static inline bool
+is_100G_port(const struct port_info *pi)
+{
+
+	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) != 0);
+}
+
+static inline int
+port_top_speed(const struct port_info *pi)
+{
+
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+		return (100);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+		return (40);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+		return (25);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+		return (10);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+		return (1);
+
+	return (0);
+}
+
+static inline int
+port_top_speed_raw(const struct port_info *pi)
+{
+
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+		return (FW_PORT_CAP_SPEED_100G);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+		return (FW_PORT_CAP_SPEED_40G);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+		return (FW_PORT_CAP_SPEED_25G);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+		return (FW_PORT_CAP_SPEED_10G);
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+		return (FW_PORT_CAP_SPEED_1G);
+
+	return (0);
+}
+
+static inline int
+tx_resume_threshold(struct sge_eq *eq)
+{
+
+	/* not quite the same as qsize / 4, but this will do. */
+	return (eq->sidx / 4);
+}
+
+static inline int
+t4_use_ldst(struct adapter *sc)
+{
+
+#ifdef notyet
+	return (sc->flags & FW_OK || !sc->use_bd);
+#else
+	return (0);
+#endif
+}
+
 /* t4_main.c */
-void t4_tx_task(void *, int);
-void t4_tx_callout(void *);
+extern int t4_ntxq10g;
+extern int t4_nrxq10g;
+extern int t4_ntxq1g;
+extern int t4_nrxq1g;
+extern int t4_intr_types;
+extern int t4_tmr_idx_10g;
+extern int t4_pktc_idx_10g;
+extern int t4_tmr_idx_1g;
+extern int t4_pktc_idx_1g;
+extern unsigned int t4_qsize_rxq;
+extern unsigned int t4_qsize_txq;
+extern device_method_t cxgbe_methods[];
+
 int t4_os_find_pci_capability(struct adapter *, int);
 int t4_os_pci_save_state(struct adapter *);
 int t4_os_pci_restore_state(struct adapter *);
@@ -769,33 +1136,81 @@
 void t4_os_portmod_changed(const struct adapter *, int);
 void t4_os_link_changed(struct adapter *, int, int);
 void t4_iterate(void (*)(struct adapter *, void *), void *);
-int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
-int t4_register_an_handler(struct adapter *, an_handler_t);
-int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
+void t4_init_devnames(struct adapter *);
+void t4_add_adapter(struct adapter *);
+int t4_detach_common(device_t);
 int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
-int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
+int t4_map_bars_0_and_4(struct adapter *);
+int t4_map_bar_2(struct adapter *);
+int t4_setup_intr_handlers(struct adapter *);
+void t4_sysctls(struct adapter *);
+int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
+void doom_vi(struct adapter *, struct vi_info *);
 void end_synchronized_op(struct adapter *, int);
+int update_mac_settings(struct ifnet *, int);
+int adapter_full_init(struct adapter *);
+int adapter_full_uninit(struct adapter *);
+uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
+int vi_full_init(struct vi_info *);
+int vi_full_uninit(struct vi_info *);
+void vi_sysctls(struct vi_info *);
+void vi_tick(void *);
 
+#ifdef DEV_NETMAP
+/* t4_netmap.c */
+void cxgbe_nm_attach(struct vi_info *);
+void cxgbe_nm_detach(struct vi_info *);
+void t4_nm_intr(void *);
+#endif
+
 /* t4_sge.c */
 void t4_sge_modload(void);
-int t4_sge_init(struct adapter *);
+void t4_sge_modunload(void);
+uint64_t t4_sge_extfree_refs(void);
+void t4_tweak_chip_settings(struct adapter *);
+int t4_read_chip_settings(struct adapter *);
 int t4_create_dma_tag(struct adapter *);
+void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
+    struct sysctl_oid_list *);
 int t4_destroy_dma_tag(struct adapter *);
 int t4_setup_adapter_queues(struct adapter *);
 int t4_teardown_adapter_queues(struct adapter *);
-int t4_setup_port_queues(struct port_info *);
-int t4_teardown_port_queues(struct port_info *);
-int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
-void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
+int t4_setup_vi_queues(struct vi_info *);
+int t4_teardown_vi_queues(struct vi_info *);
 void t4_intr_all(void *);
 void t4_intr(void *);
+void t4_vi_intr(void *);
 void t4_intr_err(void *);
 void t4_intr_evt(void *);
 void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
-int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
 void t4_update_fl_bufsize(struct ifnet *);
-int can_resume_tx(struct sge_eq *);
+int parse_pkt(struct adapter *, struct mbuf **);
+void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
+void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
+int tnl_cong(struct port_info *, int);
+int t4_register_an_handler(an_handler_t);
+int t4_register_fw_msg_handler(int, fw_msg_handler_t);
+int t4_register_cpl_handler(int, cpl_handler_t);
 
+/* t4_tracer.c */
+struct t4_tracer;
+void t4_tracer_modload(void);
+void t4_tracer_modunload(void);
+void t4_tracer_port_detach(struct adapter *);
+int t4_get_tracer(struct adapter *, struct t4_tracer *);
+int t4_set_tracer(struct adapter *, struct t4_tracer *);
+int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
+int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
+
+/* t4_sched.c */
+int t4_set_sched_class(struct adapter *, struct t4_sched_params *);
+int t4_set_sched_queue(struct adapter *, struct t4_sched_queue *);
+int t4_init_tx_sched(struct adapter *);
+int t4_free_tx_sched(struct adapter *);
+void t4_update_tx_sched(struct adapter *);
+int t4_reserve_cl_rl_kbps(struct adapter *, int, u_int, int *);
+void t4_release_cl_rl_kbps(struct adapter *, int, int);
+
 static inline struct wrqe *
 alloc_wrqe(int wr_len, struct sge_wrq *wrq)
 {

Modified: trunk/sys/dev/cxgbe/common/common.h
===================================================================
--- trunk/sys/dev/cxgbe/common/common.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/common.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2011 Chelsio Communications, Inc.
  * All rights reserved.
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/common/common.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/common/common.h 318851 2017-05-25 01:43:28Z np $
  *
  */
 
@@ -32,6 +33,9 @@
 
 #include "t4_hw.h"
 
+#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC0 | F_EDC0 | \
+		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+		F_CPL_SWITCH | F_SGE | F_ULP_TX)
 
 enum {
 	MAX_NPORTS     = 4,     /* max # of ports */
@@ -42,17 +46,13 @@
 	MACADDR_LEN    = 12,    /* MAC Address length */
 };
 
-enum { MEM_EDC0, MEM_EDC1, MEM_MC };
-
 enum {
-	MEMWIN0_APERTURE = 2048,
-	MEMWIN0_BASE     = 0x1b800,
-	MEMWIN1_APERTURE = 32768,
-	MEMWIN1_BASE     = 0x28000,
-	MEMWIN2_APERTURE = 65536,
-	MEMWIN2_BASE     = 0x30000,
+	T4_REGMAP_SIZE = (160 * 1024),
+	T5_REGMAP_SIZE = (332 * 1024),
 };
 
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
 enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
 
 enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
@@ -63,16 +63,12 @@
 	PAUSE_AUTONEG = 1 << 2
 };
 
-#define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 8
-#define FW_VERSION_MICRO 4
-#define FW_VERSION_BUILD 0
+enum {
+	FEC_RS        = 1 << 0,
+	FEC_BASER_RS  = 1 << 1,
+	FEC_RESERVED  = 1 << 2,
+};
 
-#define FW_VERSION (V_FW_HDR_FW_VER_MAJOR(FW_VERSION_MAJOR) | \
-    V_FW_HDR_FW_VER_MINOR(FW_VERSION_MINOR) | \
-    V_FW_HDR_FW_VER_MICRO(FW_VERSION_MICRO) | \
-    V_FW_HDR_FW_VER_BUILD(FW_VERSION_BUILD))
-
 struct port_stats {
 	u64 tx_octets;            /* total # of octets in good frames */
 	u64 tx_frames;            /* all good frames */
@@ -169,10 +165,10 @@
 };
 
 struct tp_tcp_stats {
-	u32 tcpOutRsts;
-	u64 tcpInSegs;
-	u64 tcpOutSegs;
-	u64 tcpRetransSegs;
+	u32 tcp_out_rsts;
+	u64 tcp_in_segs;
+	u64 tcp_out_segs;
+	u64 tcp_retrans_segs;
 };
 
 struct tp_usm_stats {
@@ -182,44 +178,74 @@
 };
 
 struct tp_fcoe_stats {
-	u32 framesDDP;
-	u32 framesDrop;
-	u64 octetsDDP;
+	u32 frames_ddp;
+	u32 frames_drop;
+	u64 octets_ddp;
 };
 
 struct tp_err_stats {
-	u32 macInErrs[4];
-	u32 hdrInErrs[4];
-	u32 tcpInErrs[4];
-	u32 tnlCongDrops[4];
-	u32 ofldChanDrops[4];
-	u32 tnlTxDrops[4];
-	u32 ofldVlanDrops[4];
-	u32 tcp6InErrs[4];
-	u32 ofldNoNeigh;
-	u32 ofldCongDefer;
+	u32 mac_in_errs[MAX_NCHAN];
+	u32 hdr_in_errs[MAX_NCHAN];
+	u32 tcp_in_errs[MAX_NCHAN];
+	u32 tnl_cong_drops[MAX_NCHAN];
+	u32 ofld_chan_drops[MAX_NCHAN];
+	u32 tnl_tx_drops[MAX_NCHAN];
+	u32 ofld_vlan_drops[MAX_NCHAN];
+	u32 tcp6_in_errs[MAX_NCHAN];
+	u32 ofld_no_neigh;
+	u32 ofld_cong_defer;
 };
 
 struct tp_proxy_stats {
-	u32 proxy[4];
+	u32 proxy[MAX_NCHAN];
 };
 
 struct tp_cpl_stats {
-	u32 req[4];
-	u32 rsp[4];
+	u32 req[MAX_NCHAN];
+	u32 rsp[MAX_NCHAN];
 };
 
 struct tp_rdma_stats {
+	u32 rqe_dfr_pkt;
 	u32 rqe_dfr_mod;
-	u32 rqe_dfr_pkt;
 };
 
+struct sge_params {
+	int timer_val[SGE_NTIMERS];	/* final, scaled values */
+	int counter_val[SGE_NCOUNTERS];
+	int fl_starve_threshold;
+	int fl_starve_threshold2;
+	int page_shift;
+	int eq_s_qpp;
+	int iq_s_qpp;
+	int spg_len;
+	int pad_boundary;
+	int pack_boundary;
+	int fl_pktshift;
+	u32 sge_control;
+	u32 sge_fl_buffer_size[SGE_FLBUF_SIZES];
+};
+
 struct tp_params {
-	unsigned int ntxchan;        /* # of Tx channels */
 	unsigned int tre;            /* log2 of core clocks per TP tick */
 	unsigned int dack_re;        /* DACK timer resolution */
 	unsigned int la_mask;        /* what events are recorded by TP LA */
-	unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */
+	unsigned short tx_modq[MAX_NCHAN];  /* channel to modulation queue map */
+
+	uint32_t vlan_pri_map;
+	uint32_t ingress_config;
+	__be16 err_vec_mask;
+
+	int8_t fcoe_shift;
+	int8_t port_shift;
+	int8_t vnic_shift;
+	int8_t vlan_shift;
+	int8_t tos_shift;
+	int8_t protocol_shift;
+	int8_t ethertype_shift;
+	int8_t macmatch_shift;
+	int8_t matchtype_shift;
+	int8_t frag_shift;
 };
 
 struct vpd_params {
@@ -233,6 +259,7 @@
 
 struct pci_params {
 	unsigned int vpd_cap_addr;
+	unsigned int mps;
 	unsigned short speed;
 	unsigned short width;
 };
@@ -241,43 +268,124 @@
  * Firmware device log.
  */
 struct devlog_params {
-	u32 memtype;			/* which memory (EDC0, EDC1, MC) */
+	u32 memtype;			/* which memory (FW_MEMTYPE_* ) */
 	u32 start;			/* start of log in firmware memory */
 	u32 size;			/* size of log */
+	u32 addr;			/* start address in flat addr space */
 };
 
+/* Stores chip specific parameters */
+struct chip_params {
+	u8 nchan;
+	u8 pm_stats_cnt;
+	u8 cng_ch_bits_log;		/* congestion channel map bits width */
+	u8 nsched_cls;
+	u8 cim_num_obq;
+	u16 mps_rplc_size;
+	u16 vfcount;
+	u32 sge_fl_db;
+	u16 mps_tcam_size;
+};
+
+/* VF-only parameters. */
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+	unsigned int mode;		/* RSS mode */
+	union {
+	    struct {
+		u_int synmapen:1;	/* SYN Map Enable */
+		u_int syn4tupenipv6:1;	/* enable hashing 4-tuple IPv6 SYNs */
+		u_int syn2tupenipv6:1;	/* enable hashing 2-tuple IPv6 SYNs */
+		u_int syn4tupenipv4:1;	/* enable hashing 4-tuple IPv4 SYNs */
+		u_int syn2tupenipv4:1;	/* enable hashing 2-tuple IPv4 SYNs */
+		u_int ofdmapen:1;	/* Offload Map Enable */
+		u_int tnlmapen:1;	/* Tunnel Map Enable */
+		u_int tnlalllookup:1;	/* Tunnel All Lookup */
+		u_int hashtoeplitz:1;	/* use Toeplitz hash */
+	    } basicvirtual;
+	} u;
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+	unsigned int nvi;		/* N virtual interfaces */
+	unsigned int neq;		/* N egress Qs */
+	unsigned int nethctrl;		/* N egress ETH or CTRL Qs */
+	unsigned int niqflint;		/* N ingress Qs/w free list(s) & intr */
+	unsigned int niq;		/* N ingress Qs */
+	unsigned int tc;		/* PCI-E traffic class */
+	unsigned int pmask;		/* port access rights mask */
+	unsigned int nexactf;		/* N exact MPS filters */
+	unsigned int r_caps;		/* read capabilities */
+	unsigned int wx_caps;		/* write/execute capabilities */
+};
+
 struct adapter_params {
-	struct tp_params  tp;
+	struct sge_params sge;
+	struct tp_params  tp;		/* PF-only */
 	struct vpd_params vpd;
 	struct pci_params pci;
-	struct devlog_params devlog;
+	struct devlog_params devlog;	/* PF-only */
+	struct rss_params rss;		/* VF-only */
+	struct vf_resources vfres;	/* VF-only */
 
 	unsigned int sf_size;             /* serial flash size in bytes */
 	unsigned int sf_nsec;             /* # of flash sectors */
 
-	unsigned int fw_vers;
-	unsigned int tp_vers;
+	unsigned int fw_vers;		/* firmware version */
+	unsigned int bs_vers;		/* bootstrap version */
+	unsigned int tp_vers;		/* TP microcode version */
+	unsigned int er_vers;		/* expansion ROM version */
+	unsigned int scfg_vers;		/* Serial Configuration version */
+	unsigned int vpd_vers;		/* VPD version */
 
 	unsigned short mtus[NMTUS];
 	unsigned short a_wnd[NCCTRL_WIN];
 	unsigned short b_wnd[NCCTRL_WIN];
 
-	unsigned int mc_size;		/* MC memory size */
-	unsigned int nfilters;		/* size of filter region */
+	u_int ftid_min;
+	u_int ftid_max;
+	u_int etid_min;
+	u_int netids;
 
 	unsigned int cim_la_size;
 
-	/* Used as int in sysctls, do not reduce size */
-	unsigned int nports;		/* # of ethernet ports */
-	unsigned int portvec;
-	unsigned int rev;		/* chip revision */
-	unsigned int offload;
+	uint8_t nports;		/* # of ethernet ports */
+	uint8_t portvec;
+	unsigned int chipid:4;	/* chip ID.  T4 = 4, T5 = 5, ... */
+	unsigned int rev:4;	/* chip revision */
+	unsigned int fpga:1;	/* this is an FPGA */
+	unsigned int offload:1;	/* hw is TOE capable, fw has divvied up card
+				   resources for TOE operation. */
+	unsigned int bypass:1;	/* this is a bypass card */
+	unsigned int ethoffload:1;
 
 	unsigned int ofldq_wr_cred;
+	unsigned int eo_wr_cred;
+
+	unsigned int max_ordird_qp;
+	unsigned int max_ird_adapter;
 };
 
-enum {					    /* chip revisions */
-	T4_REV_A  = 0,
+#define CHELSIO_T4		0x4
+#define CHELSIO_T5		0x5
+#define CHELSIO_T6		0x6
+
+/*
+ * State needed to monitor the forward progress of SGE Ingress DMA activities
+ * and possible hangs.
+ */
+struct sge_idma_monitor_state {
+	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
+	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
+	unsigned int idma_state[2];	/* IDMA Hang detect state */
+	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
+	unsigned int idma_warn[2];	/* time to warning in HZ */
 };
 
 struct trace_params {
@@ -294,12 +402,16 @@
 struct link_config {
 	unsigned short supported;        /* link capabilities */
 	unsigned short advertising;      /* advertised capabilities */
-	unsigned short requested_speed;  /* speed user has requested */
-	unsigned short speed;            /* actual link speed */
+	unsigned short lp_advertising;   /* peer advertised capabilities */
+	unsigned int   requested_speed;  /* speed user has requested */
+	unsigned int   speed;            /* actual link speed */
 	unsigned char  requested_fc;     /* flow control user has requested */
 	unsigned char  fc;               /* actual link flow control */
+	unsigned char  requested_fec;    /* FEC user has requested */
+	unsigned char  fec;              /* actual FEC */
 	unsigned char  autoneg;          /* autonegotiating? */
 	unsigned char  link_ok;          /* link up? */
+	unsigned char  link_down_rc;     /* link down reason */
 };
 
 #include "adapter.h"
@@ -311,11 +423,58 @@
 #define for_each_port(adapter, iter) \
 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
 
+static inline int is_ftid(const struct adapter *sc, u_int tid)
+{
+
+	return (tid >= sc->params.ftid_min && tid <= sc->params.ftid_max);
+}
+
+static inline int is_etid(const struct adapter *sc, u_int tid)
+{
+
+	return (tid >= sc->params.etid_min);
+}
+
 static inline int is_offload(const struct adapter *adap)
 {
 	return adap->params.offload;
 }
 
+static inline int is_ethoffload(const struct adapter *adap)
+{
+	return adap->params.ethoffload;
+}
+
+static inline int chip_id(struct adapter *adap)
+{
+	return adap->params.chipid;
+}
+
+static inline int chip_rev(struct adapter *adap)
+{
+	return adap->params.rev;
+}
+
+static inline int is_t4(struct adapter *adap)
+{
+	return adap->params.chipid == CHELSIO_T4;
+}
+
+static inline int is_t5(struct adapter *adap)
+{
+	return adap->params.chipid == CHELSIO_T5;
+}
+
+static inline int is_t6(struct adapter *adap)
+{
+	return adap->params.chipid == CHELSIO_T6;
+}
+
+static inline int is_fpga(struct adapter *adap)
+{
+	 return adap->params.fpga;
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
 	return adap->params.vpd.cclk / 1000;
@@ -327,6 +486,14 @@
 	return (us * adap->params.vpd.cclk) / 1000;
 }
 
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+					    unsigned int ticks)
+{
+	/* add Core Clock / 2 to round ticks to nearest us */
+	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+		adapter->params.vpd.cclk);
+}
+
 static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
 					      unsigned int ticks)
 {
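
(Aside: a worked example of the rounding in core_ticks_to_us() above.
With a hypothetical 250000 kHz core clock, 1125 ticks are 4.5 us;
adding cclk/2 before dividing rounds that to 5, where plain integer
division would truncate to 4.)

    #include <assert.h>

    int
    main(void)
    {
            unsigned cclk = 250000;     /* core clock in kHz, illustrative */
            unsigned ticks = 1125;

            assert((ticks * 1000 + cclk / 2) / cclk == 5);  /* rounded */
            assert((ticks * 1000) / cclk == 4);             /* truncated */
            return (0);
    }
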
@@ -334,19 +501,20 @@
 }
 
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val);
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, int polarity,
-			int attempts, int delay, u32 *valp);
 
-static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
-				  int polarity, int attempts, int delay)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+			    int size, void *rpl, bool sleep_ok, int timeout);
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+				     const void *cmd, int size, void *rpl,
+				     int timeout)
 {
-	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
-				   delay, NULL);
+	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+				       timeout);
 }
 
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
-		    void *rpl, bool sleep_ok);
-
 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
 			     int size, void *rpl)
 {
@@ -376,7 +544,7 @@
 int t4_slow_intr_handler(struct adapter *adapter);
 
 int t4_hash_mac_addr(const u8 *addr);
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
@@ -385,19 +553,36 @@
 int t4_seeprom_wp(struct adapter *adapter, int enable);
 int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords,
 		  u32 *data, int byte_oriented);
+int t4_write_flash(struct adapter *adapter, unsigned int addr,
+		   unsigned int n, const u8 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
+int t5_fw_init_extern_mem(struct adapter *adap);
+int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
 int t4_load_boot(struct adapter *adap, u8 *boot_data,
                  unsigned int boot_addr, unsigned int size);
-unsigned int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_flash_erase_sectors(struct adapter *adapter, int start, int end);
+int t4_flash_cfg_addr(struct adapter *adapter);
 int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_bs_version(struct adapter *adapter, u32 *vers);
 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
-int t4_check_fw_version(struct adapter *adapter);
+int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
+int t4_get_scfg_version(struct adapter *adapter, u32 *vers);
+int t4_get_vpd_version(struct adapter *adapter, u32 *vers);
+int t4_get_version_info(struct adapter *adapter);
 int t4_init_hw(struct adapter *adapter, u32 fw_params);
-int t4_prep_adapter(struct adapter *adapter);
-int t4_port_init(struct port_info *p, int mbox, int pf, int vf);
-int t4_reinit_adapter(struct adapter *adap);
+const struct chip_params *t4_get_chip_params(int chipid);
+int t4_prep_adapter(struct adapter *adapter, u8 *buf);
+int t4_shutdown_adapter(struct adapter *adapter);
+int t4_init_devlog_params(struct adapter *adapter, int fw_attach);
+int t4_init_sge_params(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id);
 void t4_fatal_err(struct adapter *adapter);
+void t4_db_full(struct adapter *adapter);
+void t4_db_dropped(struct adapter *adapter);
 int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
 			int filter_index, int enable);
 void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
@@ -407,10 +592,13 @@
 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
 		       unsigned int flags);
 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
-		     unsigned int flags, unsigned int defq);
+		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
+		     unsigned int skey);
 int t4_read_rss(struct adapter *adapter, u16 *entries);
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+		  unsigned int start_index, unsigned int rw);
 void t4_read_rss_key(struct adapter *adapter, u32 *key);
-void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx);
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx);
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp);
 void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val);
 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
@@ -437,11 +625,24 @@
 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
 		unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_get_flash_params(struct adapter *adapter);
+
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach);
+int t4_mc_read(struct adapter *adap, int idx, u32 addr,
+	       __be32 *data, u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *parity);
 int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 size,
 		__be32 *data);
+void t4_idma_monitor_init(struct adapter *adapter,
+			  struct sge_idma_monitor_state *idma);
+void t4_idma_monitor(struct adapter *adapter,
+		     struct sge_idma_monitor_state *idma,
+		     int hz, int ticks);
 
+unsigned int t4_get_regs_len(struct adapter *adapter);
+void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size);
+
+const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_get_port_stats_offset(struct adapter *adap, int idx,
 		struct port_stats *stats,
@@ -495,6 +696,13 @@
 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		    unsigned int vf, unsigned int nparams, const u32 *params,
 		    u32 *val);
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		       unsigned int vf, unsigned int nparams, const u32 *params,
+		       u32 *val, int rw);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val, int timeout);
 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 		  unsigned int vf, unsigned int nparams, const u32 *params,
 		  const u32 *val);
@@ -505,11 +713,11 @@
 		unsigned int exactf, unsigned int rcaps, unsigned int wxcaps);
 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
 		     unsigned int port, unsigned int pf, unsigned int vf,
-		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
+		     unsigned int nmac, u8 *mac, u16 *rss_size,
 		     unsigned int portfunc, unsigned int idstype);
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
-		unsigned int *rss_size);
+		u16 *rss_size);
 int t4_free_vi(struct adapter *adap, unsigned int mbox,
 	       unsigned int pf, unsigned int vf,
 	       unsigned int viid);
@@ -523,19 +731,27 @@
 		  int idx, const u8 *addr, bool persist, bool add_smt);
 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		     bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		 bool rx_en, bool tx_en);
 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		     unsigned int nblinks);
-int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
-	      u8 dev_addr, u8 offset, u8 *valp);
 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, unsigned int *valp);
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, unsigned int val);
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
-		     unsigned int pf, unsigned int vf, unsigned int iqid,
-		     unsigned int fl0id, unsigned int fl1id);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
+	      int port, unsigned int devid,
+	      unsigned int offset, unsigned int len,
+	      u8 *buf);
+int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
+	      int port, unsigned int devid,
+	      unsigned int offset, unsigned int len,
+	      u8 *buf);
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
 	       unsigned int fl0id, unsigned int fl1id);
@@ -550,6 +766,54 @@
 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
 		      u32 *data);
 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+const char *t4_link_down_rc_str(unsigned char link_down_rc);
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val);
+int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
+		    int sleep_ok);
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+		    int rateunit, int ratemode, int channel, int cl,
+		    int minrate, int maxrate, int weight, int pktsize,
+		    int sleep_ok);
+int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
+			  unsigned int maxrate, int sleep_ok);
+int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
+			   int weight, int sleep_ok);
+int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
+			       int mode, unsigned int maxrate, int pktsize,
+			       int sleep_ok);
+int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
+		       unsigned int pf, unsigned int vf,
+		       unsigned int timeout, unsigned int action);
+int t4_get_devlog_level(struct adapter *adapter, unsigned int *level);
+int t4_set_devlog_level(struct adapter *adapter, unsigned int level);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
+
+static inline int t4vf_query_params(struct adapter *adapter,
+				    unsigned int nparams, const u32 *params,
+				    u32 *vals)
+{
+	return t4_query_params(adapter, 0, 0, 0, nparams, params, vals);
+}
+
+static inline int t4vf_set_params(struct adapter *adapter,
+				  unsigned int nparams, const u32 *params,
+				  const u32 *vals)
+{
+	return t4_set_params(adapter, 0, 0, 0, nparams, params, vals);
+}
+
+static inline int t4vf_wr_mbox(struct adapter *adap, const void *cmd,
+			       int size, void *rpl)
+{
+	return t4_wr_mbox(adap, adap->mbox, cmd, size, rpl);
+}
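
A minimal caller-side sketch for the VF wrappers above, assuming the
FW_PARAMS_* encoding macros from t4fw_interface.h are in scope; the
parameter chosen (the device port vector) and the helper name are
illustrative only:

static int example_vf_get_portvec(struct adapter *adapter, u32 *portvec)
{
	/* Encode the device-level PORTVEC parameter (illustrative). */
	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);

	/* mbox/pf/vf are all 0 for a VF; see t4vf_query_params() above. */
	return t4vf_query_params(adapter, 1, &param, portvec);
}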
+
+int t4vf_wait_dev_ready(struct adapter *adapter);
+int t4vf_fw_reset(struct adapter *adapter);
+int t4vf_get_sge_params(struct adapter *adapter);
+int t4vf_get_rss_glb_config(struct adapter *adapter);
+int t4vf_get_vfres(struct adapter *adapter);
+int t4vf_prep_adapter(struct adapter *adapter);
+
 #endif /* __CHELSIO_COMMON_H */

Modified: trunk/sys/dev/cxgbe/common/t4_hw.c
===================================================================
--- trunk/sys/dev/cxgbe/common/t4_hw.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/t4_hw.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2012 Chelsio Communications, Inc.
+ * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/common/t4_hw.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/common/t4_hw.c 318851 2017-05-25 01:43:28Z np $");
 
 #include "opt_inet.h"
 
@@ -57,8 +58,8 @@
  *	at the time it indicated completion is stored there.  Returns 0 if the
  *	operation completes and	-EAGAIN	otherwise.
  */
-int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
-		        int polarity, int attempts, int delay, u32 *valp)
+static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			       int polarity, int attempts, int delay, u32 *valp)
 {
 	while (1) {
 		u32 val = t4_read_reg(adapter, reg);
@@ -75,6 +76,13 @@
 	}
 }
 
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+				  int polarity, int attempts, int delay)
+{
+	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+				   delay, NULL);
+}
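
The BIST helpers later in this file poll exactly this way; a minimal
sketch (the register choice mirrors the T4 path of t4_mc_read() below):

static int example_wait_bist_idle(struct adapter *adap)
{
	/*
	 * Poll A_MC_BIST_CMD until F_START_BIST deasserts (polarity 0),
	 * trying 10 times with a 1 ms delay between reads.  Returns 0 on
	 * success and -EAGAIN if the bit never clears.
	 */
	return t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
}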
+
 /**
  *	t4_set_reg_field - set a register field to a value
  *	@adapter: the adapter to program
@@ -107,8 +115,8 @@
  *	register pair.
  */
 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
-		      unsigned int data_reg, u32 *vals, unsigned int nregs,
-		      unsigned int start_idx)
+			     unsigned int data_reg, u32 *vals,
+			     unsigned int nregs, unsigned int start_idx)
 {
 	while (nregs--) {
 		t4_write_reg(adap, addr_reg, start_idx);
@@ -144,26 +152,49 @@
  * mechanism.  This guarantees that we get the real value even if we're
  * operating within a Virtual Machine and the Hypervisor is trapping our
  * Configuration Space accesses.
+ *
+ * N.B. This routine should only be used as a last resort: the firmware uses
+ *      the backdoor registers on a regular basis and we can end up
+ *      conflicting with its uses!
  */
 u32 t4_hw_pci_read_cfg4(adapter_t *adap, int reg)
 {
-	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ,
-		     F_ENABLE | F_LOCALCFG | V_FUNCTION(adap->pf) |
-		     V_REGISTER(reg));
-	return t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
+	u32 req = V_FUNCTION(adap->pf) | V_REGISTER(reg);
+	u32 val;
+
+	if (chip_id(adap) <= CHELSIO_T5)
+		req |= F_ENABLE;
+	else
+		req |= F_T6_ENABLE;
+
+	if (is_t4(adap))
+		req |= F_LOCALCFG;
+
+	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, req);
+	val = t4_read_reg(adap, A_PCIE_CFG_SPACE_DATA);
+
+	/*
+	 * Reset F_ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
+	 * Configuration Space read.  (None of the other fields matter when
+	 * F_ENABLE is 0 so a simple register write is easier than a
+	 * read-modify-write via t4_set_reg_field().)
+	 */
+	t4_write_reg(adap, A_PCIE_CFG_SPACE_REQ, 0);
+
+	return val;
 }
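
A hedged sketch of a last-resort backdoor read; config space offset 0
(vendor/device ID) is illustrative, and t4_read_pcie_cfg4() further down
is the preferred entry point since it lets the firmware own the access:

static void example_backdoor_id_read(struct adapter *adap)
{
	/* Offset 0: vendor ID in the low 16 bits, device ID in the high. */
	u32 id = t4_hw_pci_read_cfg4(adap, 0);

	CH_WARN(adap, "backdoor cfg0: vendor %#x device %#x\n",
	    id & 0xffff, id >> 16);
}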
 
 /*
- *	t4_report_fw_error - report firmware error
- *	@adap: the adapter
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
  *
- *	The adapter firmware can indicate error conditions to the host.
- *	This routine prints out the reason for the firmware error (as
- *	reported by the firmware).
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
  */
 static void t4_report_fw_error(struct adapter *adap)
 {
-	static const char *reason[] = {
+	static const char *const reason[] = {
 		"Crash",			/* PCIE_FW_EVAL_CRASH */
 		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
 		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
@@ -176,11 +207,9 @@
 	u32 pcie_fw;
 
 	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
-	if (!(pcie_fw & F_PCIE_FW_ERR))
-		CH_ERR(adap, "Firmware error report called with no error\n");
-	else
+	if (pcie_fw & F_PCIE_FW_ERR)
 		CH_ERR(adap, "Firmware reports adapter error: %s\n",
-		       reason[G_PCIE_FW_EVAL(pcie_fw)]);
+			reason[G_PCIE_FW_EVAL(pcie_fw)]);
 }
 
 /*
@@ -196,19 +225,19 @@
 /*
  * Handle a FW assertion reported in a mailbox.
  */
-static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+static void fw_asrt(struct adapter *adap, struct fw_debug_cmd *asrt)
 {
-	struct fw_debug_cmd asrt;
-
-	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
-	CH_ALERT(adap, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
-		 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
-		 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+	CH_ALERT(adap,
+		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+		  asrt->u.assert.filename_0_7,
+		  be32_to_cpu(asrt->u.assert.line),
+		  be32_to_cpu(asrt->u.assert.x),
+		  be32_to_cpu(asrt->u.assert.y));
 }
 
 #define X_CIM_PF_NOACCESS 0xeeeeeeee
 /**
- *	t4_wr_mbox_meat - send a command to FW through the given mailbox
+ *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
  *	@adap: the adapter
  *	@mbox: index of the mailbox to use
  *	@cmd: the command to write
@@ -215,6 +244,8 @@
  *	@size: command length in bytes
  *	@rpl: where to optionally store the reply
  *	@sleep_ok: if true we may sleep while awaiting command completion
+ *	@timeout: time to wait for command to finish before timing out
+ *		(negative implies @sleep_ok=false)
  *
  *	Sends the given command to FW through the selected mailbox and waits
  *	for the FW to execute the command.  If @rpl is not %NULL it is used to
@@ -223,6 +254,9 @@
  *	INITIALIZE can take a considerable amount of time to execute.
  *	@sleep_ok determines whether we may sleep while awaiting the response.
  *	If sleeping is allowed we use progressive backoff otherwise we spin.
+ *	Note that passing in a negative @timeout is an alternate mechanism
+ *	for specifying @sleep_ok=false.  This is useful when a higher level
+ *	interface allows for specification of @timeout but not @sleep_ok ...
  *
  *	The return value is 0 on success or a negative errno on failure.  A
  *	failure can happen either because we are not able to execute the
@@ -229,8 +263,8 @@
  *	command or FW executes it but signals an error.  In the latter case
  *	the return value is the error code indicated by FW (negated).
  */
-int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
-		    void *rpl, bool sleep_ok)
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+			    int size, void *rpl, bool sleep_ok, int timeout)
 {
 	/*
 	 * We delay in small increments at first in an effort to maintain
@@ -240,41 +274,124 @@
 	static const int delay[] = {
 		1, 1, 3, 5, 10, 10, 20, 50, 100
 	};
-
 	u32 v;
 	u64 res;
-	int i, ms, delay_idx;
+	int i, ms, delay_idx, ret;
 	const __be64 *p = cmd;
 	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
 	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+	u32 ctl;
+	__be64 cmd_rpl[MBOX_LEN/8];
+	u32 pcie_fw;
 
 	if ((size & 15) || size > MBOX_LEN)
 		return -EINVAL;
 
-	v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
-	for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
-		v = G_MBOWNER(t4_read_reg(adap, ctl_reg));
+	if (adap->flags & IS_VF) {
+		if (is_t6(adap))
+			data_reg = FW_T6VF_MBDATA_BASE_ADDR;
+		else
+			data_reg = FW_T4VF_MBDATA_BASE_ADDR;
+		ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
+	}
 
-	if (v != X_MBOWNER_PL)
-		return v ? -EBUSY : -ETIMEDOUT;
+	/*
+	 * If we have a negative timeout, that implies that we can't sleep.
+	 */
+	if (timeout < 0) {
+		sleep_ok = false;
+		timeout = -timeout;
+	}
 
+	/*
+	 * Attempt to gain access to the mailbox.
+	 */
+	for (i = 0; i < 4; i++) {
+		ctl = t4_read_reg(adap, ctl_reg);
+		v = G_MBOWNER(ctl);
+		if (v != X_MBOWNER_NONE)
+			break;
+	}
+
+	/*
+	 * If we were unable to gain ownership of the mailbox, report the
+	 * error to our caller.
+	 */
+	if (v != X_MBOWNER_PL) {
+		t4_report_fw_error(adap);
+		ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+		return ret;
+	}
+
+	/*
+	 * If we gain ownership of the mailbox and there's a "valid" message
+	 * in it, this is likely an asynchronous error message from the
+	 * firmware.  So we'll report that and then proceed on with attempting
+	 * to issue our own command ... which may well fail if the error
+	 * presaged the firmware crashing ...
+	 */
+	if (ctl & F_MBMSGVALID) {
+		CH_ERR(adap, "found VALID command in mbox %u: "
+		       "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+		       (unsigned long long)t4_read_reg64(adap, data_reg),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+		       (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+	}
+
+	/*
+	 * Copy in the new mailbox command and send it on its way ...
+	 */
 	for (i = 0; i < size; i += 8, p++)
 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
 
+	if (adap->flags & IS_VF) {
+		/*
+		 * For the VFs, the Mailbox Data "registers" are
+		 * actually backed by T4's "MA" interface rather than
+		 * PL Registers (as is the case for the PFs).  Because
+		 * these are in different coherency domains, the write
+		 * to the VF's PL-register-backed Mailbox Control can
+		 * race in front of the writes to the MA-backed VF
+		 * Mailbox Data "registers".  So we need to do a
+		 * read-back on at least one byte of the VF Mailbox
+		 * Data registers before doing the write to the VF
+		 * Mailbox Control register.
+		 */
+		t4_read_reg(adap, data_reg);
+	}
+
+	CH_DUMP_MBOX(adap, mbox, data_reg);
+
 	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
-	t4_read_reg(adap, ctl_reg);          /* flush write */
+	t4_read_reg(adap, ctl_reg);	/* flush write */
 
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+	/*
+	 * Loop waiting for the reply; bail out if we time out or the firmware
+	 * reports an error.
+	 */
+	pcie_fw = 0;
+	for (i = 0; i < timeout; i += ms) {
+		if (!(adap->flags & IS_VF)) {
+			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+			if (pcie_fw & F_PCIE_FW_ERR)
+				break;
+		}
 		if (sleep_ok) {
 			ms = delay[delay_idx];  /* last element may repeat */
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
 				delay_idx++;
 			msleep(ms);
-		} else
+		} else {
 			mdelay(ms);
+		}
 
 		v = t4_read_reg(adap, ctl_reg);
 		if (v == X_CIM_PF_NOACCESS)
@@ -286,13 +403,20 @@
 				continue;
 			}
 
-			res = t4_read_reg64(adap, data_reg);
+			/*
+			 * Retrieve the command reply and release the mailbox.
+			 */
+			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN/8, data_reg);
+			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+
+			CH_DUMP_MBOX(adap, mbox, data_reg);
+
+			res = be64_to_cpu(cmd_rpl[0]);
 			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
-				fw_asrt(adap, data_reg);
+				fw_asrt(adap, (struct fw_debug_cmd *)cmd_rpl);
 				res = V_FW_CMD_RETVAL(EIO);
 			} else if (rpl)
-				get_mbox_rpl(adap, rpl, size / 8, data_reg);
-			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+				memcpy(rpl, cmd_rpl, size);
 			return -G_FW_CMD_RETVAL((int)res);
 		}
 	}
@@ -302,16 +426,79 @@
 	 * the error and also check to see if the firmware reported any
 	 * errors ...
 	 */
+	ret = (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
 	CH_ERR(adap, "command %#x in mailbox %d timed out\n",
 	       *(const u8 *)cmd, mbox);
-	if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
-		t4_report_fw_error(adap);
-	return -ETIMEDOUT;
+
+	/* If DUMP_MBOX is set, the mbox has already been dumped. */
+	if ((adap->debug_flags & DF_DUMP_MBOX) == 0) {
+		p = cmd;
+		CH_ERR(adap, "mbox: %016llx %016llx %016llx %016llx "
+		    "%016llx %016llx %016llx %016llx\n",
+		    (unsigned long long)be64_to_cpu(p[0]),
+		    (unsigned long long)be64_to_cpu(p[1]),
+		    (unsigned long long)be64_to_cpu(p[2]),
+		    (unsigned long long)be64_to_cpu(p[3]),
+		    (unsigned long long)be64_to_cpu(p[4]),
+		    (unsigned long long)be64_to_cpu(p[5]),
+		    (unsigned long long)be64_to_cpu(p[6]),
+		    (unsigned long long)be64_to_cpu(p[7]));
+	}
+
+	t4_report_fw_error(adap);
+	t4_fatal_err(adap);
+	return ret;
 }
 
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok)
+{
+	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
+				       sleep_ok, FW_CMD_MAX_TIMEOUT);
+}
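
A short sketch of the negative-timeout convention documented above,
assuming a prebuilt, already-byte-swapped command image; the helper name
and the 10-second budget are illustrative:

static int example_issue_cmd_spin(struct adapter *adap, const void *cmd,
				  int size, void *rpl)
{
	/*
	 * The negative timeout forces sleep_ok = false inside
	 * t4_wr_mbox_meat_timeout(), so this spins (mdelay) for up to
	 * 10000 ms even though sleep_ok is passed as true.
	 */
	return t4_wr_mbox_meat_timeout(adap, adap->mbox, cmd, size, rpl,
				       true, -10000);
}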
+
+static int t4_edc_err_read(struct adapter *adap, int idx)
+{
+	u32 edc_ecc_err_addr_reg;
+	u32 edc_bist_status_rdata_reg;
+
+	if (is_t4(adap)) {
+		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
+		return 0;
+	}
+	if (idx != 0 && idx != 1) {
+		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
+		return 0;
+	}
+
+	edc_ecc_err_addr_reg = EDC_T5_REG(A_EDC_H_ECC_ERR_ADDR, idx);
+	edc_bist_status_rdata_reg = EDC_T5_REG(A_EDC_H_BIST_STATUS_RDATA, idx);
+
+	CH_WARN(adap,
+		"edc%d err addr 0x%x: 0x%x.\n",
+		idx, edc_ecc_err_addr_reg,
+		t4_read_reg(adap, edc_ecc_err_addr_reg));
+	CH_WARN(adap,
+		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
+		edc_bist_status_rdata_reg,
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 56),
+		(unsigned long long)t4_read_reg64(adap, edc_bist_status_rdata_reg + 64));
+
+	return 0;
+}
+
 /**
  *	t4_mc_read - read from MC through backdoor accesses
  *	@adap: the adapter
+ *	@idx: which MC to access
  *	@addr: address of first byte requested
  *	@data: 64 bytes of data containing the requested address
  *	@ecc: where to store the corresponding 64-bit ECC word
@@ -320,22 +507,40 @@
  *	that covers the requested address @addr.  If @parity is not %NULL it
  *	is assigned the 64-bit ECC word for the read data.
  */
-int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
 {
 	int i;
+	u32 mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
+	u32 mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
 
-	if (t4_read_reg(adap, A_MC_BIST_CMD) & F_START_BIST)
+	if (is_t4(adap)) {
+		mc_bist_cmd_reg = A_MC_BIST_CMD;
+		mc_bist_cmd_addr_reg = A_MC_BIST_CMD_ADDR;
+		mc_bist_cmd_len_reg = A_MC_BIST_CMD_LEN;
+		mc_bist_status_rdata_reg = A_MC_BIST_STATUS_RDATA;
+		mc_bist_data_pattern_reg = A_MC_BIST_DATA_PATTERN;
+	} else {
+		mc_bist_cmd_reg = MC_REG(A_MC_P_BIST_CMD, idx);
+		mc_bist_cmd_addr_reg = MC_REG(A_MC_P_BIST_CMD_ADDR, idx);
+		mc_bist_cmd_len_reg = MC_REG(A_MC_P_BIST_CMD_LEN, idx);
+		mc_bist_status_rdata_reg = MC_REG(A_MC_P_BIST_STATUS_RDATA,
+						  idx);
+		mc_bist_data_pattern_reg = MC_REG(A_MC_P_BIST_DATA_PATTERN,
+						  idx);
+	}
+
+	if (t4_read_reg(adap, mc_bist_cmd_reg) & F_START_BIST)
 		return -EBUSY;
-	t4_write_reg(adap, A_MC_BIST_CMD_ADDR, addr & ~0x3fU);
-	t4_write_reg(adap, A_MC_BIST_CMD_LEN, 64);
-	t4_write_reg(adap, A_MC_BIST_DATA_PATTERN, 0xc);
-	t4_write_reg(adap, A_MC_BIST_CMD, V_BIST_OPCODE(1) | F_START_BIST |
-		     V_BIST_CMD_GAP(1));
-	i = t4_wait_op_done(adap, A_MC_BIST_CMD, F_START_BIST, 0, 10, 1);
+	t4_write_reg(adap, mc_bist_cmd_addr_reg, addr & ~0x3fU);
+	t4_write_reg(adap, mc_bist_cmd_len_reg, 64);
+	t4_write_reg(adap, mc_bist_data_pattern_reg, 0xc);
+	t4_write_reg(adap, mc_bist_cmd_reg, V_BIST_OPCODE(1) |
+		     F_START_BIST | V_BIST_CMD_GAP(1));
+	i = t4_wait_op_done(adap, mc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
 	if (i)
 		return i;
 
-#define MC_DATA(i) MC_BIST_STATUS_REG(A_MC_BIST_STATUS_RDATA, i)
+#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata_reg, i)
 
 	for (i = 15; i >= 0; i--)
 		*data++ = ntohl(t4_read_reg(adap, MC_DATA(i)));
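
A hedged caller-side sketch for the updated t4_mc_read() signature; MC
index 0 is illustrative, and the 64-byte block that covers @addr is
returned along with its ECC word:

static int example_read_mc_line(struct adapter *adap, u32 addr)
{
	__be32 data[16];	/* one 64-byte block of read data */
	u64 ecc;

	/* addr is rounded down to a 64-byte boundary by t4_mc_read(). */
	return t4_mc_read(adap, 0, addr, data, &ecc);
}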
@@ -360,20 +565,47 @@
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
 {
 	int i;
+	u32 edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
+	u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
 
-	idx *= EDC_STRIDE;
-	if (t4_read_reg(adap, A_EDC_BIST_CMD + idx) & F_START_BIST)
+	if (is_t4(adap)) {
+		edc_bist_cmd_reg = EDC_REG(A_EDC_BIST_CMD, idx);
+		edc_bist_cmd_addr_reg = EDC_REG(A_EDC_BIST_CMD_ADDR, idx);
+		edc_bist_cmd_len_reg = EDC_REG(A_EDC_BIST_CMD_LEN, idx);
+		edc_bist_cmd_data_pattern = EDC_REG(A_EDC_BIST_DATA_PATTERN,
+						    idx);
+		edc_bist_status_rdata_reg = EDC_REG(A_EDC_BIST_STATUS_RDATA,
+						    idx);
+	} else {
+/*
+ * These macros are missing from t4_regs.h.
+ * They are defined here temporarily for testing.
+ */
+#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+		edc_bist_cmd_reg = EDC_REG_T5(A_EDC_H_BIST_CMD, idx);
+		edc_bist_cmd_addr_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_ADDR, idx);
+		edc_bist_cmd_len_reg = EDC_REG_T5(A_EDC_H_BIST_CMD_LEN, idx);
+		edc_bist_cmd_data_pattern = EDC_REG_T5(A_EDC_H_BIST_DATA_PATTERN,
+						    idx);
+		edc_bist_status_rdata_reg = EDC_REG_T5(A_EDC_H_BIST_STATUS_RDATA,
+						    idx);
+#undef EDC_REG_T5
+#undef EDC_STRIDE_T5
+	}
+
+	if (t4_read_reg(adap, edc_bist_cmd_reg) & F_START_BIST)
 		return -EBUSY;
-	t4_write_reg(adap, A_EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
-	t4_write_reg(adap, A_EDC_BIST_CMD_LEN + idx, 64);
-	t4_write_reg(adap, A_EDC_BIST_DATA_PATTERN + idx, 0xc);
-	t4_write_reg(adap, A_EDC_BIST_CMD + idx,
+	t4_write_reg(adap, edc_bist_cmd_addr_reg, addr & ~0x3fU);
+	t4_write_reg(adap, edc_bist_cmd_len_reg, 64);
+	t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
+	t4_write_reg(adap, edc_bist_cmd_reg,
 		     V_BIST_OPCODE(1) | V_BIST_CMD_GAP(1) | F_START_BIST);
-	i = t4_wait_op_done(adap, A_EDC_BIST_CMD + idx, F_START_BIST, 0, 10, 1);
+	i = t4_wait_op_done(adap, edc_bist_cmd_reg, F_START_BIST, 0, 10, 1);
 	if (i)
 		return i;
 
-#define EDC_DATA(i) (EDC_BIST_STATUS_REG(A_EDC_BIST_STATUS_RDATA, i) + idx)
+#define EDC_DATA(i) EDC_BIST_STATUS_REG(edc_bist_status_rdata_reg, i)
 
 	for (i = 15; i >= 0; i--)
 		*data++ = ntohl(t4_read_reg(adap, EDC_DATA(i)));
@@ -425,8 +657,8 @@
 		/*
 		 * Read the chip's memory block and bail if there's an error.
 		 */
-		if (mtype == MEM_MC)
-			ret = t4_mc_read(adap, pos, data, NULL);
+		if ((mtype == MEM_MC) || (mtype == MEM_MC1))
+			ret = t4_mc_read(adap, mtype - MEM_MC, pos, data, NULL);
 		else
 			ret = t4_edc_read(adap, mtype, pos, data, NULL);
 		if (ret)
@@ -445,8 +677,2011 @@
 }
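
A minimal sketch of the MEM_MC1 dispatch added above; the caller must
supply a buffer for at least 64 bytes (16 __be32 words), and the memory
type and length here are illustrative:

static int example_read_mc1(struct adapter *adap, u32 addr, __be32 *buf)
{
	/*
	 * MEM_MC1 is routed to t4_mc_read(adap, 1, ...) by the dispatch
	 * above; a single 64-byte read keeps the framing simple.
	 */
	return t4_mem_read(adap, MEM_MC1, addr, 64, buf);
}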
 
 /*
+ * Return the specified PCI-E Configuration Space register from our Physical
+ * Function.  We try first via a Firmware LDST Command (if drv_fw_attach != 0)
+ * since we prefer to let the firmware own all of these registers, but if that
+ * fails we go for it directly ourselves.
+ */
+u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach)
+{
+
+	/*
+	 * If drv_fw_attach != 0, construct and send the Firmware LDST Command to
+	 * retrieve the specified PCI-E Configuration Space register.
+	 */
+	if (drv_fw_attach != 0) {
+		struct fw_ldst_cmd ldst_cmd;
+		int ret;
+
+		memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+		ldst_cmd.op_to_addrspace =
+			cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+				    F_FW_CMD_REQUEST |
+				    F_FW_CMD_READ |
+				    V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+		ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+		ldst_cmd.u.pcie.select_naccess = V_FW_LDST_CMD_NACCESS(1);
+		ldst_cmd.u.pcie.ctrl_to_fn =
+			(F_FW_LDST_CMD_LC | V_FW_LDST_CMD_FN(adap->pf));
+		ldst_cmd.u.pcie.r = reg;
+
+		/*
+		 * If the LDST Command succeeds, return the result, otherwise
+		 * fall through to reading it directly ourselves ...
+		 */
+		ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+				 &ldst_cmd);
+		if (ret == 0)
+			return be32_to_cpu(ldst_cmd.u.pcie.data[0]);
+
+		CH_WARN(adap, "Firmware failed to return "
+			"Configuration Space register %d, err = %d\n",
+			reg, -ret);
+	}
+
+	/*
+	 * Read the desired Configuration Space register via the PCI-E
+	 * Backdoor mechanism.
+	 */
+	return t4_hw_pci_read_cfg4(adap, reg);
+}
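
A brief sketch contrasting the two access paths; offset 0 is again
illustrative:

static void example_pcie_cfg_reads(struct adapter *adap)
{
	/* Preferred: the firmware performs the access on our behalf
	 * (and on failure this falls back to the backdoor anyway). */
	u32 via_fw = t4_read_pcie_cfg4(adap, 0, 1);

	/* drv_fw_attach == 0 goes straight to the backdoor registers. */
	u32 via_backdoor = t4_read_pcie_cfg4(adap, 0, 0);

	CH_WARN(adap, "cfg0: fw %#x backdoor %#x\n", via_fw, via_backdoor);
}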
+
+/**
+ *	t4_get_regs_len - return the size of the chip's register set
+ *	@adapter: the adapter
+ *
+ *	Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+	unsigned int chip_version = chip_id(adapter);
+
+	switch (chip_version) {
+	case CHELSIO_T4:
+		if (adapter->flags & IS_VF)
+			return FW_T4VF_REGMAP_SIZE;
+		return T4_REGMAP_SIZE;
+
+	case CHELSIO_T5:
+	case CHELSIO_T6:
+		if (adapter->flags & IS_VF)
+			return FW_T4VF_REGMAP_SIZE;
+		return T5_REGMAP_SIZE;
+	}
+
+	CH_ERR(adapter,
+		"Unsupported chip version %d\n", chip_version);
+	return 0;
+}
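
A hedged consumer-side sketch pairing t4_get_regs_len() with
t4_get_regs() below; the M_DEVBUF malloc type is an assumption (the
driver may well use its own malloc type):

static void example_dump_regs(struct adapter *adap)
{
	unsigned int len = t4_get_regs_len(adap);
	u8 *buf;

	if (len == 0)
		return;		/* unsupported chip version */

	buf = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	t4_get_regs(adap, buf, len);	/* truncates if buf were smaller */
	/* ... hand buf/len to the consumer, e.g. an ioctl copyout ... */
	free(buf, M_DEVBUF);
}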
+
+/**
+ *	t4_get_regs - read chip registers into provided buffer
+ *	@adap: the adapter
+ *	@buf: register buffer
+ *	@buf_size: size (in bytes) of register buffer
+ *
+ *	If the provided register buffer isn't large enough for the chip's
+ *	full register range, the register dump will be truncated to the
+ *	register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
+{
+	static const unsigned int t4_reg_ranges[] = {
+		0x1008, 0x1108,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
+		0x11fc, 0x123c,
+		0x1300, 0x173c,
+		0x1800, 0x18fc,
+		0x3000, 0x30d8,
+		0x30e0, 0x30e4,
+		0x30ec, 0x5910,
+		0x5920, 0x5924,
+		0x5960, 0x5960,
+		0x5968, 0x5968,
+		0x5970, 0x5970,
+		0x5978, 0x5978,
+		0x5980, 0x5980,
+		0x5988, 0x5988,
+		0x5990, 0x5990,
+		0x5998, 0x5998,
+		0x59a0, 0x59d4,
+		0x5a00, 0x5ae0,
+		0x5ae8, 0x5ae8,
+		0x5af0, 0x5af0,
+		0x5af8, 0x5af8,
+		0x6000, 0x6098,
+		0x6100, 0x6150,
+		0x6200, 0x6208,
+		0x6240, 0x6248,
+		0x6280, 0x62b0,
+		0x62c0, 0x6338,
+		0x6370, 0x638c,
+		0x6400, 0x643c,
+		0x6500, 0x6524,
+		0x6a00, 0x6a04,
+		0x6a14, 0x6a38,
+		0x6a60, 0x6a70,
+		0x6a78, 0x6a78,
+		0x6b00, 0x6b0c,
+		0x6b1c, 0x6b84,
+		0x6bf0, 0x6bf8,
+		0x6c00, 0x6c0c,
+		0x6c1c, 0x6c84,
+		0x6cf0, 0x6cf8,
+		0x6d00, 0x6d0c,
+		0x6d1c, 0x6d84,
+		0x6df0, 0x6df8,
+		0x6e00, 0x6e0c,
+		0x6e1c, 0x6e84,
+		0x6ef0, 0x6ef8,
+		0x6f00, 0x6f0c,
+		0x6f1c, 0x6f84,
+		0x6ff0, 0x6ff8,
+		0x7000, 0x700c,
+		0x701c, 0x7084,
+		0x70f0, 0x70f8,
+		0x7100, 0x710c,
+		0x711c, 0x7184,
+		0x71f0, 0x71f8,
+		0x7200, 0x720c,
+		0x721c, 0x7284,
+		0x72f0, 0x72f8,
+		0x7300, 0x730c,
+		0x731c, 0x7384,
+		0x73f0, 0x73f8,
+		0x7400, 0x7450,
+		0x7500, 0x7530,
+		0x7600, 0x760c,
+		0x7614, 0x761c,
+		0x7680, 0x76cc,
+		0x7700, 0x7798,
+		0x77c0, 0x77fc,
+		0x7900, 0x79fc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c38,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7ea4,
+		0x7eac, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8e04,
+		0x8e10, 0x8e1c,
+		0x8e30, 0x8e78,
+		0x8ea0, 0x8eb8,
+		0x8ec0, 0x8f6c,
+		0x8fc0, 0x9008,
+		0x9010, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x9074,
+		0x90fc, 0x90fc,
+		0x9400, 0x9408,
+		0x9410, 0x9458,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96bc,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0x9fec,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0xea7c,
+		0xf000, 0x11190,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x190e4,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x1924c,
+		0x193f8, 0x1943c,
+		0x1944c, 0x19474,
+		0x19490, 0x194e0,
+		0x194f0, 0x194f8,
+		0x19800, 0x19c08,
+		0x19c10, 0x19c90,
+		0x19ca0, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e40,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f4c,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f4,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e040, 0x1e04c,
+		0x1e284, 0x1e28c,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e440, 0x1e44c,
+		0x1e684, 0x1e68c,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e840, 0x1e84c,
+		0x1ea84, 0x1ea8c,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec40, 0x1ec4c,
+		0x1ee84, 0x1ee8c,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f040, 0x1f04c,
+		0x1f284, 0x1f28c,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f440, 0x1f44c,
+		0x1f684, 0x1f68c,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f840, 0x1f84c,
+		0x1fa84, 0x1fa8c,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc40, 0x1fc4c,
+		0x1fe84, 0x1fe8c,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x20000, 0x2002c,
+		0x20100, 0x2013c,
+		0x20190, 0x201a0,
+		0x201a8, 0x201b8,
+		0x201c4, 0x201c8,
+		0x20200, 0x20318,
+		0x20400, 0x204b4,
+		0x204c0, 0x20528,
+		0x20540, 0x20614,
+		0x21000, 0x21040,
+		0x2104c, 0x21060,
+		0x210c0, 0x210ec,
+		0x21200, 0x21268,
+		0x21270, 0x21284,
+		0x212fc, 0x21388,
+		0x21400, 0x21404,
+		0x21500, 0x21500,
+		0x21510, 0x21518,
+		0x2152c, 0x21530,
+		0x2153c, 0x2153c,
+		0x21550, 0x21554,
+		0x21600, 0x21600,
+		0x21608, 0x2161c,
+		0x21624, 0x21628,
+		0x21630, 0x21634,
+		0x2163c, 0x2163c,
+		0x21700, 0x2171c,
+		0x21780, 0x2178c,
+		0x21800, 0x21818,
+		0x21820, 0x21828,
+		0x21830, 0x21848,
+		0x21850, 0x21854,
+		0x21860, 0x21868,
+		0x21870, 0x21870,
+		0x21878, 0x21898,
+		0x218a0, 0x218a8,
+		0x218b0, 0x218c8,
+		0x218d0, 0x218d4,
+		0x218e0, 0x218e8,
+		0x218f0, 0x218f0,
+		0x218f8, 0x21a18,
+		0x21a20, 0x21a28,
+		0x21a30, 0x21a48,
+		0x21a50, 0x21a54,
+		0x21a60, 0x21a68,
+		0x21a70, 0x21a70,
+		0x21a78, 0x21a98,
+		0x21aa0, 0x21aa8,
+		0x21ab0, 0x21ac8,
+		0x21ad0, 0x21ad4,
+		0x21ae0, 0x21ae8,
+		0x21af0, 0x21af0,
+		0x21af8, 0x21c18,
+		0x21c20, 0x21c20,
+		0x21c28, 0x21c30,
+		0x21c38, 0x21c38,
+		0x21c80, 0x21c98,
+		0x21ca0, 0x21ca8,
+		0x21cb0, 0x21cc8,
+		0x21cd0, 0x21cd4,
+		0x21ce0, 0x21ce8,
+		0x21cf0, 0x21cf0,
+		0x21cf8, 0x21d7c,
+		0x21e00, 0x21e04,
+		0x22000, 0x2202c,
+		0x22100, 0x2213c,
+		0x22190, 0x221a0,
+		0x221a8, 0x221b8,
+		0x221c4, 0x221c8,
+		0x22200, 0x22318,
+		0x22400, 0x224b4,
+		0x224c0, 0x22528,
+		0x22540, 0x22614,
+		0x23000, 0x23040,
+		0x2304c, 0x23060,
+		0x230c0, 0x230ec,
+		0x23200, 0x23268,
+		0x23270, 0x23284,
+		0x232fc, 0x23388,
+		0x23400, 0x23404,
+		0x23500, 0x23500,
+		0x23510, 0x23518,
+		0x2352c, 0x23530,
+		0x2353c, 0x2353c,
+		0x23550, 0x23554,
+		0x23600, 0x23600,
+		0x23608, 0x2361c,
+		0x23624, 0x23628,
+		0x23630, 0x23634,
+		0x2363c, 0x2363c,
+		0x23700, 0x2371c,
+		0x23780, 0x2378c,
+		0x23800, 0x23818,
+		0x23820, 0x23828,
+		0x23830, 0x23848,
+		0x23850, 0x23854,
+		0x23860, 0x23868,
+		0x23870, 0x23870,
+		0x23878, 0x23898,
+		0x238a0, 0x238a8,
+		0x238b0, 0x238c8,
+		0x238d0, 0x238d4,
+		0x238e0, 0x238e8,
+		0x238f0, 0x238f0,
+		0x238f8, 0x23a18,
+		0x23a20, 0x23a28,
+		0x23a30, 0x23a48,
+		0x23a50, 0x23a54,
+		0x23a60, 0x23a68,
+		0x23a70, 0x23a70,
+		0x23a78, 0x23a98,
+		0x23aa0, 0x23aa8,
+		0x23ab0, 0x23ac8,
+		0x23ad0, 0x23ad4,
+		0x23ae0, 0x23ae8,
+		0x23af0, 0x23af0,
+		0x23af8, 0x23c18,
+		0x23c20, 0x23c20,
+		0x23c28, 0x23c30,
+		0x23c38, 0x23c38,
+		0x23c80, 0x23c98,
+		0x23ca0, 0x23ca8,
+		0x23cb0, 0x23cc8,
+		0x23cd0, 0x23cd4,
+		0x23ce0, 0x23ce8,
+		0x23cf0, 0x23cf0,
+		0x23cf8, 0x23d7c,
+		0x23e00, 0x23e04,
+		0x24000, 0x2402c,
+		0x24100, 0x2413c,
+		0x24190, 0x241a0,
+		0x241a8, 0x241b8,
+		0x241c4, 0x241c8,
+		0x24200, 0x24318,
+		0x24400, 0x244b4,
+		0x244c0, 0x24528,
+		0x24540, 0x24614,
+		0x25000, 0x25040,
+		0x2504c, 0x25060,
+		0x250c0, 0x250ec,
+		0x25200, 0x25268,
+		0x25270, 0x25284,
+		0x252fc, 0x25388,
+		0x25400, 0x25404,
+		0x25500, 0x25500,
+		0x25510, 0x25518,
+		0x2552c, 0x25530,
+		0x2553c, 0x2553c,
+		0x25550, 0x25554,
+		0x25600, 0x25600,
+		0x25608, 0x2561c,
+		0x25624, 0x25628,
+		0x25630, 0x25634,
+		0x2563c, 0x2563c,
+		0x25700, 0x2571c,
+		0x25780, 0x2578c,
+		0x25800, 0x25818,
+		0x25820, 0x25828,
+		0x25830, 0x25848,
+		0x25850, 0x25854,
+		0x25860, 0x25868,
+		0x25870, 0x25870,
+		0x25878, 0x25898,
+		0x258a0, 0x258a8,
+		0x258b0, 0x258c8,
+		0x258d0, 0x258d4,
+		0x258e0, 0x258e8,
+		0x258f0, 0x258f0,
+		0x258f8, 0x25a18,
+		0x25a20, 0x25a28,
+		0x25a30, 0x25a48,
+		0x25a50, 0x25a54,
+		0x25a60, 0x25a68,
+		0x25a70, 0x25a70,
+		0x25a78, 0x25a98,
+		0x25aa0, 0x25aa8,
+		0x25ab0, 0x25ac8,
+		0x25ad0, 0x25ad4,
+		0x25ae0, 0x25ae8,
+		0x25af0, 0x25af0,
+		0x25af8, 0x25c18,
+		0x25c20, 0x25c20,
+		0x25c28, 0x25c30,
+		0x25c38, 0x25c38,
+		0x25c80, 0x25c98,
+		0x25ca0, 0x25ca8,
+		0x25cb0, 0x25cc8,
+		0x25cd0, 0x25cd4,
+		0x25ce0, 0x25ce8,
+		0x25cf0, 0x25cf0,
+		0x25cf8, 0x25d7c,
+		0x25e00, 0x25e04,
+		0x26000, 0x2602c,
+		0x26100, 0x2613c,
+		0x26190, 0x261a0,
+		0x261a8, 0x261b8,
+		0x261c4, 0x261c8,
+		0x26200, 0x26318,
+		0x26400, 0x264b4,
+		0x264c0, 0x26528,
+		0x26540, 0x26614,
+		0x27000, 0x27040,
+		0x2704c, 0x27060,
+		0x270c0, 0x270ec,
+		0x27200, 0x27268,
+		0x27270, 0x27284,
+		0x272fc, 0x27388,
+		0x27400, 0x27404,
+		0x27500, 0x27500,
+		0x27510, 0x27518,
+		0x2752c, 0x27530,
+		0x2753c, 0x2753c,
+		0x27550, 0x27554,
+		0x27600, 0x27600,
+		0x27608, 0x2761c,
+		0x27624, 0x27628,
+		0x27630, 0x27634,
+		0x2763c, 0x2763c,
+		0x27700, 0x2771c,
+		0x27780, 0x2778c,
+		0x27800, 0x27818,
+		0x27820, 0x27828,
+		0x27830, 0x27848,
+		0x27850, 0x27854,
+		0x27860, 0x27868,
+		0x27870, 0x27870,
+		0x27878, 0x27898,
+		0x278a0, 0x278a8,
+		0x278b0, 0x278c8,
+		0x278d0, 0x278d4,
+		0x278e0, 0x278e8,
+		0x278f0, 0x278f0,
+		0x278f8, 0x27a18,
+		0x27a20, 0x27a28,
+		0x27a30, 0x27a48,
+		0x27a50, 0x27a54,
+		0x27a60, 0x27a68,
+		0x27a70, 0x27a70,
+		0x27a78, 0x27a98,
+		0x27aa0, 0x27aa8,
+		0x27ab0, 0x27ac8,
+		0x27ad0, 0x27ad4,
+		0x27ae0, 0x27ae8,
+		0x27af0, 0x27af0,
+		0x27af8, 0x27c18,
+		0x27c20, 0x27c20,
+		0x27c28, 0x27c30,
+		0x27c38, 0x27c38,
+		0x27c80, 0x27c98,
+		0x27ca0, 0x27ca8,
+		0x27cb0, 0x27cc8,
+		0x27cd0, 0x27cd4,
+		0x27ce0, 0x27ce8,
+		0x27cf0, 0x27cf0,
+		0x27cf8, 0x27d7c,
+		0x27e00, 0x27e04,
+	};
+
+	static const unsigned int t4vf_reg_ranges[] = {
+		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
+		VF_MPS_REG(A_MPS_VF_CTL),
+		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
+		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
+		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
+		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
+		FW_T4VF_MBDATA_BASE_ADDR,
+		FW_T4VF_MBDATA_BASE_ADDR +
+		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
+	};
+
+	static const unsigned int t5_reg_ranges[] = {
+		0x1008, 0x10c0,
+		0x10cc, 0x10f8,
+		0x1100, 0x1100,
+		0x110c, 0x1148,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
+		0x11fc, 0x123c,
+		0x1280, 0x173c,
+		0x1800, 0x18fc,
+		0x3000, 0x3028,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
+		0x30e0, 0x30fc,
+		0x3140, 0x357c,
+		0x35a8, 0x35cc,
+		0x35ec, 0x35ec,
+		0x3600, 0x5624,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
+		0x580c, 0x5814,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
+		0x5940, 0x59c8,
+		0x59d0, 0x59dc,
+		0x59fc, 0x5a18,
+		0x5a60, 0x5a70,
+		0x5a80, 0x5a9c,
+		0x5b94, 0x5bfc,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x614c,
+		0x7700, 0x7798,
+		0x77c0, 0x78fc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d80,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8de0,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
+		0x8ea0, 0x8f84,
+		0x8fc0, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9400, 0x9408,
+		0x9410, 0x9470,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x96f4,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0xa020,
+		0xd004, 0xd004,
+		0xd010, 0xd03c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0x1106c,
+		0x11074, 0x11088,
+		0x1109c, 0x1117c,
+		0x11190, 0x11204,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x19290,
+		0x193f8, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
+		0x19490, 0x194cc,
+		0x194f0, 0x194f8,
+		0x19c00, 0x19c08,
+		0x19c10, 0x19c60,
+		0x19c94, 0x19ce4,
+		0x19cf0, 0x19d40,
+		0x19d50, 0x19d94,
+		0x19da0, 0x19de8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e90,
+		0x19ea0, 0x19f24,
+		0x19f34, 0x19f34,
+		0x19f40, 0x19f50,
+		0x19f90, 0x19fb4,
+		0x19fc4, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e008, 0x1e00c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
+		0x1e284, 0x1e290,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e408, 0x1e40c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
+		0x1e684, 0x1e690,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e808, 0x1e80c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
+		0x1ea84, 0x1ea90,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec08, 0x1ec0c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
+		0x1ee84, 0x1ee90,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f008, 0x1f00c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
+		0x1f284, 0x1f290,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f408, 0x1f40c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
+		0x1f684, 0x1f690,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f808, 0x1f80c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
+		0x1fa84, 0x1fa90,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc08, 0x1fc0c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
+		0x1fe84, 0x1fe90,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
+		0x30100, 0x30144,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
+		0x30200, 0x30318,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
+		0x30540, 0x3061c,
+		0x30800, 0x30828,
+		0x30834, 0x30834,
+		0x308c0, 0x30908,
+		0x30910, 0x309ac,
+		0x30a00, 0x30a14,
+		0x30a1c, 0x30a2c,
+		0x30a44, 0x30a50,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
+		0x30d00, 0x30d00,
+		0x30d08, 0x30d14,
+		0x30d1c, 0x30d20,
+		0x30d3c, 0x30d3c,
+		0x30d48, 0x30d50,
+		0x31200, 0x3120c,
+		0x31220, 0x31220,
+		0x31240, 0x31240,
+		0x31600, 0x3160c,
+		0x31a00, 0x31a1c,
+		0x31e00, 0x31e20,
+		0x31e38, 0x31e3c,
+		0x31e80, 0x31e80,
+		0x31e88, 0x31ea8,
+		0x31eb0, 0x31eb4,
+		0x31ec8, 0x31ed4,
+		0x31fb8, 0x32004,
+		0x32200, 0x32200,
+		0x32208, 0x32240,
+		0x32248, 0x32280,
+		0x32288, 0x322c0,
+		0x322c8, 0x322fc,
+		0x32600, 0x32630,
+		0x32a00, 0x32abc,
+		0x32b00, 0x32b10,
+		0x32b20, 0x32b30,
+		0x32b40, 0x32b50,
+		0x32b60, 0x32b70,
+		0x33000, 0x33028,
+		0x33030, 0x33048,
+		0x33060, 0x33068,
+		0x33070, 0x3309c,
+		0x330f0, 0x33128,
+		0x33130, 0x33148,
+		0x33160, 0x33168,
+		0x33170, 0x3319c,
+		0x331f0, 0x33238,
+		0x33240, 0x33240,
+		0x33248, 0x33250,
+		0x3325c, 0x33264,
+		0x33270, 0x332b8,
+		0x332c0, 0x332e4,
+		0x332f8, 0x33338,
+		0x33340, 0x33340,
+		0x33348, 0x33350,
+		0x3335c, 0x33364,
+		0x33370, 0x333b8,
+		0x333c0, 0x333e4,
+		0x333f8, 0x33428,
+		0x33430, 0x33448,
+		0x33460, 0x33468,
+		0x33470, 0x3349c,
+		0x334f0, 0x33528,
+		0x33530, 0x33548,
+		0x33560, 0x33568,
+		0x33570, 0x3359c,
+		0x335f0, 0x33638,
+		0x33640, 0x33640,
+		0x33648, 0x33650,
+		0x3365c, 0x33664,
+		0x33670, 0x336b8,
+		0x336c0, 0x336e4,
+		0x336f8, 0x33738,
+		0x33740, 0x33740,
+		0x33748, 0x33750,
+		0x3375c, 0x33764,
+		0x33770, 0x337b8,
+		0x337c0, 0x337e4,
+		0x337f8, 0x337fc,
+		0x33814, 0x33814,
+		0x3382c, 0x3382c,
+		0x33880, 0x3388c,
+		0x338e8, 0x338ec,
+		0x33900, 0x33928,
+		0x33930, 0x33948,
+		0x33960, 0x33968,
+		0x33970, 0x3399c,
+		0x339f0, 0x33a38,
+		0x33a40, 0x33a40,
+		0x33a48, 0x33a50,
+		0x33a5c, 0x33a64,
+		0x33a70, 0x33ab8,
+		0x33ac0, 0x33ae4,
+		0x33af8, 0x33b10,
+		0x33b28, 0x33b28,
+		0x33b3c, 0x33b50,
+		0x33bf0, 0x33c10,
+		0x33c28, 0x33c28,
+		0x33c3c, 0x33c50,
+		0x33cf0, 0x33cfc,
+		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
+		0x34100, 0x34144,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
+		0x34200, 0x34318,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
+		0x34540, 0x3461c,
+		0x34800, 0x34828,
+		0x34834, 0x34834,
+		0x348c0, 0x34908,
+		0x34910, 0x349ac,
+		0x34a00, 0x34a14,
+		0x34a1c, 0x34a2c,
+		0x34a44, 0x34a50,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
+		0x34d00, 0x34d00,
+		0x34d08, 0x34d14,
+		0x34d1c, 0x34d20,
+		0x34d3c, 0x34d3c,
+		0x34d48, 0x34d50,
+		0x35200, 0x3520c,
+		0x35220, 0x35220,
+		0x35240, 0x35240,
+		0x35600, 0x3560c,
+		0x35a00, 0x35a1c,
+		0x35e00, 0x35e20,
+		0x35e38, 0x35e3c,
+		0x35e80, 0x35e80,
+		0x35e88, 0x35ea8,
+		0x35eb0, 0x35eb4,
+		0x35ec8, 0x35ed4,
+		0x35fb8, 0x36004,
+		0x36200, 0x36200,
+		0x36208, 0x36240,
+		0x36248, 0x36280,
+		0x36288, 0x362c0,
+		0x362c8, 0x362fc,
+		0x36600, 0x36630,
+		0x36a00, 0x36abc,
+		0x36b00, 0x36b10,
+		0x36b20, 0x36b30,
+		0x36b40, 0x36b50,
+		0x36b60, 0x36b70,
+		0x37000, 0x37028,
+		0x37030, 0x37048,
+		0x37060, 0x37068,
+		0x37070, 0x3709c,
+		0x370f0, 0x37128,
+		0x37130, 0x37148,
+		0x37160, 0x37168,
+		0x37170, 0x3719c,
+		0x371f0, 0x37238,
+		0x37240, 0x37240,
+		0x37248, 0x37250,
+		0x3725c, 0x37264,
+		0x37270, 0x372b8,
+		0x372c0, 0x372e4,
+		0x372f8, 0x37338,
+		0x37340, 0x37340,
+		0x37348, 0x37350,
+		0x3735c, 0x37364,
+		0x37370, 0x373b8,
+		0x373c0, 0x373e4,
+		0x373f8, 0x37428,
+		0x37430, 0x37448,
+		0x37460, 0x37468,
+		0x37470, 0x3749c,
+		0x374f0, 0x37528,
+		0x37530, 0x37548,
+		0x37560, 0x37568,
+		0x37570, 0x3759c,
+		0x375f0, 0x37638,
+		0x37640, 0x37640,
+		0x37648, 0x37650,
+		0x3765c, 0x37664,
+		0x37670, 0x376b8,
+		0x376c0, 0x376e4,
+		0x376f8, 0x37738,
+		0x37740, 0x37740,
+		0x37748, 0x37750,
+		0x3775c, 0x37764,
+		0x37770, 0x377b8,
+		0x377c0, 0x377e4,
+		0x377f8, 0x377fc,
+		0x37814, 0x37814,
+		0x3782c, 0x3782c,
+		0x37880, 0x3788c,
+		0x378e8, 0x378ec,
+		0x37900, 0x37928,
+		0x37930, 0x37948,
+		0x37960, 0x37968,
+		0x37970, 0x3799c,
+		0x379f0, 0x37a38,
+		0x37a40, 0x37a40,
+		0x37a48, 0x37a50,
+		0x37a5c, 0x37a64,
+		0x37a70, 0x37ab8,
+		0x37ac0, 0x37ae4,
+		0x37af8, 0x37b10,
+		0x37b28, 0x37b28,
+		0x37b3c, 0x37b50,
+		0x37bf0, 0x37c10,
+		0x37c28, 0x37c28,
+		0x37c3c, 0x37c50,
+		0x37cf0, 0x37cfc,
+		0x38000, 0x38030,
+		0x38038, 0x38038,
+		0x38040, 0x38040,
+		0x38100, 0x38144,
+		0x38190, 0x381a0,
+		0x381a8, 0x381b8,
+		0x381c4, 0x381c8,
+		0x381d0, 0x381d0,
+		0x38200, 0x38318,
+		0x38400, 0x384b4,
+		0x384c0, 0x3852c,
+		0x38540, 0x3861c,
+		0x38800, 0x38828,
+		0x38834, 0x38834,
+		0x388c0, 0x38908,
+		0x38910, 0x389ac,
+		0x38a00, 0x38a14,
+		0x38a1c, 0x38a2c,
+		0x38a44, 0x38a50,
+		0x38a74, 0x38a74,
+		0x38a7c, 0x38afc,
+		0x38b08, 0x38c24,
+		0x38d00, 0x38d00,
+		0x38d08, 0x38d14,
+		0x38d1c, 0x38d20,
+		0x38d3c, 0x38d3c,
+		0x38d48, 0x38d50,
+		0x39200, 0x3920c,
+		0x39220, 0x39220,
+		0x39240, 0x39240,
+		0x39600, 0x3960c,
+		0x39a00, 0x39a1c,
+		0x39e00, 0x39e20,
+		0x39e38, 0x39e3c,
+		0x39e80, 0x39e80,
+		0x39e88, 0x39ea8,
+		0x39eb0, 0x39eb4,
+		0x39ec8, 0x39ed4,
+		0x39fb8, 0x3a004,
+		0x3a200, 0x3a200,
+		0x3a208, 0x3a240,
+		0x3a248, 0x3a280,
+		0x3a288, 0x3a2c0,
+		0x3a2c8, 0x3a2fc,
+		0x3a600, 0x3a630,
+		0x3aa00, 0x3aabc,
+		0x3ab00, 0x3ab10,
+		0x3ab20, 0x3ab30,
+		0x3ab40, 0x3ab50,
+		0x3ab60, 0x3ab70,
+		0x3b000, 0x3b028,
+		0x3b030, 0x3b048,
+		0x3b060, 0x3b068,
+		0x3b070, 0x3b09c,
+		0x3b0f0, 0x3b128,
+		0x3b130, 0x3b148,
+		0x3b160, 0x3b168,
+		0x3b170, 0x3b19c,
+		0x3b1f0, 0x3b238,
+		0x3b240, 0x3b240,
+		0x3b248, 0x3b250,
+		0x3b25c, 0x3b264,
+		0x3b270, 0x3b2b8,
+		0x3b2c0, 0x3b2e4,
+		0x3b2f8, 0x3b338,
+		0x3b340, 0x3b340,
+		0x3b348, 0x3b350,
+		0x3b35c, 0x3b364,
+		0x3b370, 0x3b3b8,
+		0x3b3c0, 0x3b3e4,
+		0x3b3f8, 0x3b428,
+		0x3b430, 0x3b448,
+		0x3b460, 0x3b468,
+		0x3b470, 0x3b49c,
+		0x3b4f0, 0x3b528,
+		0x3b530, 0x3b548,
+		0x3b560, 0x3b568,
+		0x3b570, 0x3b59c,
+		0x3b5f0, 0x3b638,
+		0x3b640, 0x3b640,
+		0x3b648, 0x3b650,
+		0x3b65c, 0x3b664,
+		0x3b670, 0x3b6b8,
+		0x3b6c0, 0x3b6e4,
+		0x3b6f8, 0x3b738,
+		0x3b740, 0x3b740,
+		0x3b748, 0x3b750,
+		0x3b75c, 0x3b764,
+		0x3b770, 0x3b7b8,
+		0x3b7c0, 0x3b7e4,
+		0x3b7f8, 0x3b7fc,
+		0x3b814, 0x3b814,
+		0x3b82c, 0x3b82c,
+		0x3b880, 0x3b88c,
+		0x3b8e8, 0x3b8ec,
+		0x3b900, 0x3b928,
+		0x3b930, 0x3b948,
+		0x3b960, 0x3b968,
+		0x3b970, 0x3b99c,
+		0x3b9f0, 0x3ba38,
+		0x3ba40, 0x3ba40,
+		0x3ba48, 0x3ba50,
+		0x3ba5c, 0x3ba64,
+		0x3ba70, 0x3bab8,
+		0x3bac0, 0x3bae4,
+		0x3baf8, 0x3bb10,
+		0x3bb28, 0x3bb28,
+		0x3bb3c, 0x3bb50,
+		0x3bbf0, 0x3bc10,
+		0x3bc28, 0x3bc28,
+		0x3bc3c, 0x3bc50,
+		0x3bcf0, 0x3bcfc,
+		0x3c000, 0x3c030,
+		0x3c038, 0x3c038,
+		0x3c040, 0x3c040,
+		0x3c100, 0x3c144,
+		0x3c190, 0x3c1a0,
+		0x3c1a8, 0x3c1b8,
+		0x3c1c4, 0x3c1c8,
+		0x3c1d0, 0x3c1d0,
+		0x3c200, 0x3c318,
+		0x3c400, 0x3c4b4,
+		0x3c4c0, 0x3c52c,
+		0x3c540, 0x3c61c,
+		0x3c800, 0x3c828,
+		0x3c834, 0x3c834,
+		0x3c8c0, 0x3c908,
+		0x3c910, 0x3c9ac,
+		0x3ca00, 0x3ca14,
+		0x3ca1c, 0x3ca2c,
+		0x3ca44, 0x3ca50,
+		0x3ca74, 0x3ca74,
+		0x3ca7c, 0x3cafc,
+		0x3cb08, 0x3cc24,
+		0x3cd00, 0x3cd00,
+		0x3cd08, 0x3cd14,
+		0x3cd1c, 0x3cd20,
+		0x3cd3c, 0x3cd3c,
+		0x3cd48, 0x3cd50,
+		0x3d200, 0x3d20c,
+		0x3d220, 0x3d220,
+		0x3d240, 0x3d240,
+		0x3d600, 0x3d60c,
+		0x3da00, 0x3da1c,
+		0x3de00, 0x3de20,
+		0x3de38, 0x3de3c,
+		0x3de80, 0x3de80,
+		0x3de88, 0x3dea8,
+		0x3deb0, 0x3deb4,
+		0x3dec8, 0x3ded4,
+		0x3dfb8, 0x3e004,
+		0x3e200, 0x3e200,
+		0x3e208, 0x3e240,
+		0x3e248, 0x3e280,
+		0x3e288, 0x3e2c0,
+		0x3e2c8, 0x3e2fc,
+		0x3e600, 0x3e630,
+		0x3ea00, 0x3eabc,
+		0x3eb00, 0x3eb10,
+		0x3eb20, 0x3eb30,
+		0x3eb40, 0x3eb50,
+		0x3eb60, 0x3eb70,
+		0x3f000, 0x3f028,
+		0x3f030, 0x3f048,
+		0x3f060, 0x3f068,
+		0x3f070, 0x3f09c,
+		0x3f0f0, 0x3f128,
+		0x3f130, 0x3f148,
+		0x3f160, 0x3f168,
+		0x3f170, 0x3f19c,
+		0x3f1f0, 0x3f238,
+		0x3f240, 0x3f240,
+		0x3f248, 0x3f250,
+		0x3f25c, 0x3f264,
+		0x3f270, 0x3f2b8,
+		0x3f2c0, 0x3f2e4,
+		0x3f2f8, 0x3f338,
+		0x3f340, 0x3f340,
+		0x3f348, 0x3f350,
+		0x3f35c, 0x3f364,
+		0x3f370, 0x3f3b8,
+		0x3f3c0, 0x3f3e4,
+		0x3f3f8, 0x3f428,
+		0x3f430, 0x3f448,
+		0x3f460, 0x3f468,
+		0x3f470, 0x3f49c,
+		0x3f4f0, 0x3f528,
+		0x3f530, 0x3f548,
+		0x3f560, 0x3f568,
+		0x3f570, 0x3f59c,
+		0x3f5f0, 0x3f638,
+		0x3f640, 0x3f640,
+		0x3f648, 0x3f650,
+		0x3f65c, 0x3f664,
+		0x3f670, 0x3f6b8,
+		0x3f6c0, 0x3f6e4,
+		0x3f6f8, 0x3f738,
+		0x3f740, 0x3f740,
+		0x3f748, 0x3f750,
+		0x3f75c, 0x3f764,
+		0x3f770, 0x3f7b8,
+		0x3f7c0, 0x3f7e4,
+		0x3f7f8, 0x3f7fc,
+		0x3f814, 0x3f814,
+		0x3f82c, 0x3f82c,
+		0x3f880, 0x3f88c,
+		0x3f8e8, 0x3f8ec,
+		0x3f900, 0x3f928,
+		0x3f930, 0x3f948,
+		0x3f960, 0x3f968,
+		0x3f970, 0x3f99c,
+		0x3f9f0, 0x3fa38,
+		0x3fa40, 0x3fa40,
+		0x3fa48, 0x3fa50,
+		0x3fa5c, 0x3fa64,
+		0x3fa70, 0x3fab8,
+		0x3fac0, 0x3fae4,
+		0x3faf8, 0x3fb10,
+		0x3fb28, 0x3fb28,
+		0x3fb3c, 0x3fb50,
+		0x3fbf0, 0x3fc10,
+		0x3fc28, 0x3fc28,
+		0x3fc3c, 0x3fc50,
+		0x3fcf0, 0x3fcfc,
+		0x40000, 0x4000c,
+		0x40040, 0x40050,
+		0x40060, 0x40068,
+		0x4007c, 0x4008c,
+		0x40094, 0x400b0,
+		0x400c0, 0x40144,
+		0x40180, 0x4018c,
+		0x40200, 0x40254,
+		0x40260, 0x40264,
+		0x40270, 0x40288,
+		0x40290, 0x40298,
+		0x402ac, 0x402c8,
+		0x402d0, 0x402e0,
+		0x402f0, 0x402f0,
+		0x40300, 0x4033c,
+		0x403f8, 0x403fc,
+		0x41304, 0x413c4,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
+		0x41480, 0x414d0,
+		0x44000, 0x44054,
+		0x4405c, 0x44078,
+		0x440c0, 0x44174,
+		0x44180, 0x441ac,
+		0x441b4, 0x441b8,
+		0x441c0, 0x44254,
+		0x4425c, 0x44278,
+		0x442c0, 0x44374,
+		0x44380, 0x443ac,
+		0x443b4, 0x443b8,
+		0x443c0, 0x44454,
+		0x4445c, 0x44478,
+		0x444c0, 0x44574,
+		0x44580, 0x445ac,
+		0x445b4, 0x445b8,
+		0x445c0, 0x44654,
+		0x4465c, 0x44678,
+		0x446c0, 0x44774,
+		0x44780, 0x447ac,
+		0x447b4, 0x447b8,
+		0x447c0, 0x44854,
+		0x4485c, 0x44878,
+		0x448c0, 0x44974,
+		0x44980, 0x449ac,
+		0x449b4, 0x449b8,
+		0x449c0, 0x449fc,
+		0x45000, 0x45004,
+		0x45010, 0x45030,
+		0x45040, 0x45060,
+		0x45068, 0x45068,
+		0x45080, 0x45084,
+		0x450a0, 0x450b0,
+		0x45200, 0x45204,
+		0x45210, 0x45230,
+		0x45240, 0x45260,
+		0x45268, 0x45268,
+		0x45280, 0x45284,
+		0x452a0, 0x452b0,
+		0x460c0, 0x460e4,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
+		0x47200, 0x47250,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
+		0x47600, 0x47618,
+		0x47800, 0x47814,
+		0x48000, 0x4800c,
+		0x48040, 0x48050,
+		0x48060, 0x48068,
+		0x4807c, 0x4808c,
+		0x48094, 0x480b0,
+		0x480c0, 0x48144,
+		0x48180, 0x4818c,
+		0x48200, 0x48254,
+		0x48260, 0x48264,
+		0x48270, 0x48288,
+		0x48290, 0x48298,
+		0x482ac, 0x482c8,
+		0x482d0, 0x482e0,
+		0x482f0, 0x482f0,
+		0x48300, 0x4833c,
+		0x483f8, 0x483fc,
+		0x49304, 0x493c4,
+		0x49400, 0x4940c,
+		0x49414, 0x4941c,
+		0x49480, 0x494d0,
+		0x4c000, 0x4c054,
+		0x4c05c, 0x4c078,
+		0x4c0c0, 0x4c174,
+		0x4c180, 0x4c1ac,
+		0x4c1b4, 0x4c1b8,
+		0x4c1c0, 0x4c254,
+		0x4c25c, 0x4c278,
+		0x4c2c0, 0x4c374,
+		0x4c380, 0x4c3ac,
+		0x4c3b4, 0x4c3b8,
+		0x4c3c0, 0x4c454,
+		0x4c45c, 0x4c478,
+		0x4c4c0, 0x4c574,
+		0x4c580, 0x4c5ac,
+		0x4c5b4, 0x4c5b8,
+		0x4c5c0, 0x4c654,
+		0x4c65c, 0x4c678,
+		0x4c6c0, 0x4c774,
+		0x4c780, 0x4c7ac,
+		0x4c7b4, 0x4c7b8,
+		0x4c7c0, 0x4c854,
+		0x4c85c, 0x4c878,
+		0x4c8c0, 0x4c974,
+		0x4c980, 0x4c9ac,
+		0x4c9b4, 0x4c9b8,
+		0x4c9c0, 0x4c9fc,
+		0x4d000, 0x4d004,
+		0x4d010, 0x4d030,
+		0x4d040, 0x4d060,
+		0x4d068, 0x4d068,
+		0x4d080, 0x4d084,
+		0x4d0a0, 0x4d0b0,
+		0x4d200, 0x4d204,
+		0x4d210, 0x4d230,
+		0x4d240, 0x4d260,
+		0x4d268, 0x4d268,
+		0x4d280, 0x4d284,
+		0x4d2a0, 0x4d2b0,
+		0x4e0c0, 0x4e0e4,
+		0x4f000, 0x4f03c,
+		0x4f044, 0x4f08c,
+		0x4f200, 0x4f250,
+		0x4f400, 0x4f408,
+		0x4f414, 0x4f420,
+		0x4f600, 0x4f618,
+		0x4f800, 0x4f814,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
+		0x50400, 0x50400,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
+		0x50c00, 0x50c00,
+		0x51000, 0x5101c,
+		0x51300, 0x51308,
+	};
+
+	static const unsigned int t5vf_reg_ranges[] = {
+		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
+		VF_MPS_REG(A_MPS_VF_CTL),
+		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
+		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
+		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
+		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
+		FW_T4VF_MBDATA_BASE_ADDR,
+		FW_T4VF_MBDATA_BASE_ADDR +
+		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
+	};
+
+	static const unsigned int t6_reg_ranges[] = {
+		0x1008, 0x101c,
+		0x1024, 0x10a8,
+		0x10b4, 0x10f8,
+		0x1100, 0x1114,
+		0x111c, 0x112c,
+		0x1138, 0x113c,
+		0x1144, 0x114c,
+		0x1180, 0x1184,
+		0x1190, 0x1194,
+		0x11a0, 0x11a4,
+		0x11b0, 0x11b4,
+		0x11fc, 0x1274,
+		0x1280, 0x133c,
+		0x1800, 0x18fc,
+		0x3000, 0x302c,
+		0x3060, 0x30b0,
+		0x30b8, 0x30d8,
+		0x30e0, 0x30fc,
+		0x3140, 0x357c,
+		0x35a8, 0x35cc,
+		0x35ec, 0x35ec,
+		0x3600, 0x5624,
+		0x56cc, 0x56ec,
+		0x56f4, 0x5720,
+		0x5728, 0x575c,
+		0x580c, 0x5814,
+		0x5890, 0x589c,
+		0x58a4, 0x58ac,
+		0x58b8, 0x58bc,
+		0x5940, 0x595c,
+		0x5980, 0x598c,
+		0x59b0, 0x59c8,
+		0x59d0, 0x59dc,
+		0x59fc, 0x5a18,
+		0x5a60, 0x5a6c,
+		0x5a80, 0x5a8c,
+		0x5a94, 0x5a9c,
+		0x5b94, 0x5bfc,
+		0x5c10, 0x5e48,
+		0x5e50, 0x5e94,
+		0x5ea0, 0x5eb0,
+		0x5ec0, 0x5ec0,
+		0x5ec8, 0x5ed0,
+		0x5ee0, 0x5ee0,
+		0x5ef0, 0x5ef0,
+		0x5f00, 0x5f00,
+		0x6000, 0x6020,
+		0x6028, 0x6040,
+		0x6058, 0x609c,
+		0x60a8, 0x619c,
+		0x7700, 0x7798,
+		0x77c0, 0x7880,
+		0x78cc, 0x78fc,
+		0x7b00, 0x7b58,
+		0x7b60, 0x7b84,
+		0x7b8c, 0x7c54,
+		0x7d00, 0x7d38,
+		0x7d40, 0x7d84,
+		0x7d8c, 0x7ddc,
+		0x7de4, 0x7e04,
+		0x7e10, 0x7e1c,
+		0x7e24, 0x7e38,
+		0x7e40, 0x7e44,
+		0x7e4c, 0x7e78,
+		0x7e80, 0x7edc,
+		0x7ee8, 0x7efc,
+		0x8dc0, 0x8de4,
+		0x8df8, 0x8e04,
+		0x8e10, 0x8e84,
+		0x8ea0, 0x8f88,
+		0x8fb8, 0x9058,
+		0x9060, 0x9060,
+		0x9068, 0x90f8,
+		0x9100, 0x9124,
+		0x9400, 0x9470,
+		0x9600, 0x9600,
+		0x9608, 0x9638,
+		0x9640, 0x9704,
+		0x9710, 0x971c,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0xa020,
+		0xd004, 0xd03c,
+		0xd100, 0xd118,
+		0xd200, 0xd214,
+		0xd220, 0xd234,
+		0xd240, 0xd254,
+		0xd260, 0xd274,
+		0xd280, 0xd294,
+		0xd2a0, 0xd2b4,
+		0xd2c0, 0xd2d4,
+		0xd2e0, 0xd2f4,
+		0xd300, 0xd31c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0xf008,
+		0xf010, 0xf018,
+		0xf020, 0xf028,
+		0x11000, 0x11014,
+		0x11048, 0x1106c,
+		0x11074, 0x11088,
+		0x11098, 0x11120,
+		0x1112c, 0x1117c,
+		0x11190, 0x112e0,
+		0x11300, 0x1130c,
+		0x12000, 0x1206c,
+		0x19040, 0x1906c,
+		0x19078, 0x19080,
+		0x1908c, 0x190e8,
+		0x190f0, 0x190f8,
+		0x19100, 0x19110,
+		0x19120, 0x19124,
+		0x19150, 0x19194,
+		0x1919c, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x19290,
+		0x192a4, 0x192b0,
+		0x192bc, 0x192bc,
+		0x19348, 0x1934c,
+		0x193f8, 0x19418,
+		0x19420, 0x19428,
+		0x19430, 0x19444,
+		0x1944c, 0x1946c,
+		0x19474, 0x19474,
+		0x19490, 0x194cc,
+		0x194f0, 0x194f8,
+		0x19c00, 0x19c48,
+		0x19c50, 0x19c80,
+		0x19c94, 0x19c98,
+		0x19ca0, 0x19cbc,
+		0x19ce4, 0x19ce4,
+		0x19cf0, 0x19cf8,
+		0x19d00, 0x19d28,
+		0x19d50, 0x19d78,
+		0x19d94, 0x19d98,
+		0x19da0, 0x19dc8,
+		0x19df0, 0x19e10,
+		0x19e50, 0x19e6c,
+		0x19ea0, 0x19ebc,
+		0x19ec4, 0x19ef4,
+		0x19f04, 0x19f2c,
+		0x19f34, 0x19f34,
+		0x19f40, 0x19f50,
+		0x19f90, 0x19fac,
+		0x19fc4, 0x19fc8,
+		0x19fd0, 0x19fe4,
+		0x1a000, 0x1a004,
+		0x1a010, 0x1a06c,
+		0x1a0b0, 0x1a0e4,
+		0x1a0ec, 0x1a0f8,
+		0x1a100, 0x1a108,
+		0x1a114, 0x1a120,
+		0x1a128, 0x1a130,
+		0x1a138, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e008, 0x1e00c,
+		0x1e040, 0x1e044,
+		0x1e04c, 0x1e04c,
+		0x1e284, 0x1e290,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e408, 0x1e40c,
+		0x1e440, 0x1e444,
+		0x1e44c, 0x1e44c,
+		0x1e684, 0x1e690,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e808, 0x1e80c,
+		0x1e840, 0x1e844,
+		0x1e84c, 0x1e84c,
+		0x1ea84, 0x1ea90,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec08, 0x1ec0c,
+		0x1ec40, 0x1ec44,
+		0x1ec4c, 0x1ec4c,
+		0x1ee84, 0x1ee90,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f008, 0x1f00c,
+		0x1f040, 0x1f044,
+		0x1f04c, 0x1f04c,
+		0x1f284, 0x1f290,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f408, 0x1f40c,
+		0x1f440, 0x1f444,
+		0x1f44c, 0x1f44c,
+		0x1f684, 0x1f690,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f808, 0x1f80c,
+		0x1f840, 0x1f844,
+		0x1f84c, 0x1f84c,
+		0x1fa84, 0x1fa90,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc08, 0x1fc0c,
+		0x1fc40, 0x1fc44,
+		0x1fc4c, 0x1fc4c,
+		0x1fe84, 0x1fe90,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x30000, 0x30030,
+		0x30038, 0x30038,
+		0x30040, 0x30040,
+		0x30048, 0x30048,
+		0x30050, 0x30050,
+		0x3005c, 0x30060,
+		0x30068, 0x30068,
+		0x30070, 0x30070,
+		0x30100, 0x30168,
+		0x30190, 0x301a0,
+		0x301a8, 0x301b8,
+		0x301c4, 0x301c8,
+		0x301d0, 0x301d0,
+		0x30200, 0x30320,
+		0x30400, 0x304b4,
+		0x304c0, 0x3052c,
+		0x30540, 0x3061c,
+		0x30800, 0x308a0,
+		0x308c0, 0x30908,
+		0x30910, 0x309b8,
+		0x30a00, 0x30a04,
+		0x30a0c, 0x30a14,
+		0x30a1c, 0x30a2c,
+		0x30a44, 0x30a50,
+		0x30a74, 0x30a74,
+		0x30a7c, 0x30afc,
+		0x30b08, 0x30c24,
+		0x30d00, 0x30d14,
+		0x30d1c, 0x30d3c,
+		0x30d44, 0x30d4c,
+		0x30d54, 0x30d74,
+		0x30d7c, 0x30d7c,
+		0x30de0, 0x30de0,
+		0x30e00, 0x30ed4,
+		0x30f00, 0x30fa4,
+		0x30fc0, 0x30fc4,
+		0x31000, 0x31004,
+		0x31080, 0x310fc,
+		0x31208, 0x31220,
+		0x3123c, 0x31254,
+		0x31300, 0x31300,
+		0x31308, 0x3131c,
+		0x31338, 0x3133c,
+		0x31380, 0x31380,
+		0x31388, 0x313a8,
+		0x313b4, 0x313b4,
+		0x31400, 0x31420,
+		0x31438, 0x3143c,
+		0x31480, 0x31480,
+		0x314a8, 0x314a8,
+		0x314b0, 0x314b4,
+		0x314c8, 0x314d4,
+		0x31a40, 0x31a4c,
+		0x31af0, 0x31b20,
+		0x31b38, 0x31b3c,
+		0x31b80, 0x31b80,
+		0x31ba8, 0x31ba8,
+		0x31bb0, 0x31bb4,
+		0x31bc8, 0x31bd4,
+		0x32140, 0x3218c,
+		0x321f0, 0x321f4,
+		0x32200, 0x32200,
+		0x32218, 0x32218,
+		0x32400, 0x32400,
+		0x32408, 0x3241c,
+		0x32618, 0x32620,
+		0x32664, 0x32664,
+		0x326a8, 0x326a8,
+		0x326ec, 0x326ec,
+		0x32a00, 0x32abc,
+		0x32b00, 0x32b38,
+		0x32b40, 0x32b58,
+		0x32b60, 0x32b78,
+		0x32c00, 0x32c00,
+		0x32c08, 0x32c3c,
+		0x32e00, 0x32e2c,
+		0x32f00, 0x32f2c,
+		0x33000, 0x3302c,
+		0x33034, 0x33050,
+		0x33058, 0x33058,
+		0x33060, 0x3308c,
+		0x3309c, 0x330ac,
+		0x330c0, 0x330c0,
+		0x330c8, 0x330d0,
+		0x330d8, 0x330e0,
+		0x330ec, 0x3312c,
+		0x33134, 0x33150,
+		0x33158, 0x33158,
+		0x33160, 0x3318c,
+		0x3319c, 0x331ac,
+		0x331c0, 0x331c0,
+		0x331c8, 0x331d0,
+		0x331d8, 0x331e0,
+		0x331ec, 0x33290,
+		0x33298, 0x332c4,
+		0x332e4, 0x33390,
+		0x33398, 0x333c4,
+		0x333e4, 0x3342c,
+		0x33434, 0x33450,
+		0x33458, 0x33458,
+		0x33460, 0x3348c,
+		0x3349c, 0x334ac,
+		0x334c0, 0x334c0,
+		0x334c8, 0x334d0,
+		0x334d8, 0x334e0,
+		0x334ec, 0x3352c,
+		0x33534, 0x33550,
+		0x33558, 0x33558,
+		0x33560, 0x3358c,
+		0x3359c, 0x335ac,
+		0x335c0, 0x335c0,
+		0x335c8, 0x335d0,
+		0x335d8, 0x335e0,
+		0x335ec, 0x33690,
+		0x33698, 0x336c4,
+		0x336e4, 0x33790,
+		0x33798, 0x337c4,
+		0x337e4, 0x337fc,
+		0x33814, 0x33814,
+		0x33854, 0x33868,
+		0x33880, 0x3388c,
+		0x338c0, 0x338d0,
+		0x338e8, 0x338ec,
+		0x33900, 0x3392c,
+		0x33934, 0x33950,
+		0x33958, 0x33958,
+		0x33960, 0x3398c,
+		0x3399c, 0x339ac,
+		0x339c0, 0x339c0,
+		0x339c8, 0x339d0,
+		0x339d8, 0x339e0,
+		0x339ec, 0x33a90,
+		0x33a98, 0x33ac4,
+		0x33ae4, 0x33b10,
+		0x33b24, 0x33b28,
+		0x33b38, 0x33b50,
+		0x33bf0, 0x33c10,
+		0x33c24, 0x33c28,
+		0x33c38, 0x33c50,
+		0x33cf0, 0x33cfc,
+		0x34000, 0x34030,
+		0x34038, 0x34038,
+		0x34040, 0x34040,
+		0x34048, 0x34048,
+		0x34050, 0x34050,
+		0x3405c, 0x34060,
+		0x34068, 0x34068,
+		0x34070, 0x34070,
+		0x34100, 0x34168,
+		0x34190, 0x341a0,
+		0x341a8, 0x341b8,
+		0x341c4, 0x341c8,
+		0x341d0, 0x341d0,
+		0x34200, 0x34320,
+		0x34400, 0x344b4,
+		0x344c0, 0x3452c,
+		0x34540, 0x3461c,
+		0x34800, 0x348a0,
+		0x348c0, 0x34908,
+		0x34910, 0x349b8,
+		0x34a00, 0x34a04,
+		0x34a0c, 0x34a14,
+		0x34a1c, 0x34a2c,
+		0x34a44, 0x34a50,
+		0x34a74, 0x34a74,
+		0x34a7c, 0x34afc,
+		0x34b08, 0x34c24,
+		0x34d00, 0x34d14,
+		0x34d1c, 0x34d3c,
+		0x34d44, 0x34d4c,
+		0x34d54, 0x34d74,
+		0x34d7c, 0x34d7c,
+		0x34de0, 0x34de0,
+		0x34e00, 0x34ed4,
+		0x34f00, 0x34fa4,
+		0x34fc0, 0x34fc4,
+		0x35000, 0x35004,
+		0x35080, 0x350fc,
+		0x35208, 0x35220,
+		0x3523c, 0x35254,
+		0x35300, 0x35300,
+		0x35308, 0x3531c,
+		0x35338, 0x3533c,
+		0x35380, 0x35380,
+		0x35388, 0x353a8,
+		0x353b4, 0x353b4,
+		0x35400, 0x35420,
+		0x35438, 0x3543c,
+		0x35480, 0x35480,
+		0x354a8, 0x354a8,
+		0x354b0, 0x354b4,
+		0x354c8, 0x354d4,
+		0x35a40, 0x35a4c,
+		0x35af0, 0x35b20,
+		0x35b38, 0x35b3c,
+		0x35b80, 0x35b80,
+		0x35ba8, 0x35ba8,
+		0x35bb0, 0x35bb4,
+		0x35bc8, 0x35bd4,
+		0x36140, 0x3618c,
+		0x361f0, 0x361f4,
+		0x36200, 0x36200,
+		0x36218, 0x36218,
+		0x36400, 0x36400,
+		0x36408, 0x3641c,
+		0x36618, 0x36620,
+		0x36664, 0x36664,
+		0x366a8, 0x366a8,
+		0x366ec, 0x366ec,
+		0x36a00, 0x36abc,
+		0x36b00, 0x36b38,
+		0x36b40, 0x36b58,
+		0x36b60, 0x36b78,
+		0x36c00, 0x36c00,
+		0x36c08, 0x36c3c,
+		0x36e00, 0x36e2c,
+		0x36f00, 0x36f2c,
+		0x37000, 0x3702c,
+		0x37034, 0x37050,
+		0x37058, 0x37058,
+		0x37060, 0x3708c,
+		0x3709c, 0x370ac,
+		0x370c0, 0x370c0,
+		0x370c8, 0x370d0,
+		0x370d8, 0x370e0,
+		0x370ec, 0x3712c,
+		0x37134, 0x37150,
+		0x37158, 0x37158,
+		0x37160, 0x3718c,
+		0x3719c, 0x371ac,
+		0x371c0, 0x371c0,
+		0x371c8, 0x371d0,
+		0x371d8, 0x371e0,
+		0x371ec, 0x37290,
+		0x37298, 0x372c4,
+		0x372e4, 0x37390,
+		0x37398, 0x373c4,
+		0x373e4, 0x3742c,
+		0x37434, 0x37450,
+		0x37458, 0x37458,
+		0x37460, 0x3748c,
+		0x3749c, 0x374ac,
+		0x374c0, 0x374c0,
+		0x374c8, 0x374d0,
+		0x374d8, 0x374e0,
+		0x374ec, 0x3752c,
+		0x37534, 0x37550,
+		0x37558, 0x37558,
+		0x37560, 0x3758c,
+		0x3759c, 0x375ac,
+		0x375c0, 0x375c0,
+		0x375c8, 0x375d0,
+		0x375d8, 0x375e0,
+		0x375ec, 0x37690,
+		0x37698, 0x376c4,
+		0x376e4, 0x37790,
+		0x37798, 0x377c4,
+		0x377e4, 0x377fc,
+		0x37814, 0x37814,
+		0x37854, 0x37868,
+		0x37880, 0x3788c,
+		0x378c0, 0x378d0,
+		0x378e8, 0x378ec,
+		0x37900, 0x3792c,
+		0x37934, 0x37950,
+		0x37958, 0x37958,
+		0x37960, 0x3798c,
+		0x3799c, 0x379ac,
+		0x379c0, 0x379c0,
+		0x379c8, 0x379d0,
+		0x379d8, 0x379e0,
+		0x379ec, 0x37a90,
+		0x37a98, 0x37ac4,
+		0x37ae4, 0x37b10,
+		0x37b24, 0x37b28,
+		0x37b38, 0x37b50,
+		0x37bf0, 0x37c10,
+		0x37c24, 0x37c28,
+		0x37c38, 0x37c50,
+		0x37cf0, 0x37cfc,
+		0x40040, 0x40040,
+		0x40080, 0x40084,
+		0x40100, 0x40100,
+		0x40140, 0x401bc,
+		0x40200, 0x40214,
+		0x40228, 0x40228,
+		0x40240, 0x40258,
+		0x40280, 0x40280,
+		0x40304, 0x40304,
+		0x40330, 0x4033c,
+		0x41304, 0x413c8,
+		0x413d0, 0x413dc,
+		0x413f0, 0x413f0,
+		0x41400, 0x4140c,
+		0x41414, 0x4141c,
+		0x41480, 0x414d0,
+		0x44000, 0x4407c,
+		0x440c0, 0x441ac,
+		0x441b4, 0x4427c,
+		0x442c0, 0x443ac,
+		0x443b4, 0x4447c,
+		0x444c0, 0x445ac,
+		0x445b4, 0x4467c,
+		0x446c0, 0x447ac,
+		0x447b4, 0x4487c,
+		0x448c0, 0x449ac,
+		0x449b4, 0x44a7c,
+		0x44ac0, 0x44bac,
+		0x44bb4, 0x44c7c,
+		0x44cc0, 0x44dac,
+		0x44db4, 0x44e7c,
+		0x44ec0, 0x44fac,
+		0x44fb4, 0x4507c,
+		0x450c0, 0x451ac,
+		0x451b4, 0x451fc,
+		0x45800, 0x45804,
+		0x45810, 0x45830,
+		0x45840, 0x45860,
+		0x45868, 0x45868,
+		0x45880, 0x45884,
+		0x458a0, 0x458b0,
+		0x45a00, 0x45a04,
+		0x45a10, 0x45a30,
+		0x45a40, 0x45a60,
+		0x45a68, 0x45a68,
+		0x45a80, 0x45a84,
+		0x45aa0, 0x45ab0,
+		0x460c0, 0x460e4,
+		0x47000, 0x4703c,
+		0x47044, 0x4708c,
+		0x47200, 0x47250,
+		0x47400, 0x47408,
+		0x47414, 0x47420,
+		0x47600, 0x47618,
+		0x47800, 0x47814,
+		0x47820, 0x4782c,
+		0x50000, 0x50084,
+		0x50090, 0x500cc,
+		0x50300, 0x50384,
+		0x50400, 0x50400,
+		0x50800, 0x50884,
+		0x50890, 0x508cc,
+		0x50b00, 0x50b84,
+		0x50c00, 0x50c00,
+		0x51000, 0x51020,
+		0x51028, 0x510b0,
+		0x51300, 0x51324,
+	};
+
+	static const unsigned int t6vf_reg_ranges[] = {
+		VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
+		VF_MPS_REG(A_MPS_VF_CTL),
+		VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
+		VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
+		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
+		VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
+		FW_T6VF_MBDATA_BASE_ADDR,
+		FW_T6VF_MBDATA_BASE_ADDR +
+		((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
+	};
+
+	u32 *buf_end = (u32 *)(buf + buf_size);
+	const unsigned int *reg_ranges;
+	int reg_ranges_size, range;
+	unsigned int chip_version = chip_id(adap);
+
+	/*
+	 * Select the right set of register ranges to dump depending on the
+	 * adapter chip type.
+	 */
+	switch (chip_version) {
+	case CHELSIO_T4:
+		if (adap->flags & IS_VF) {
+			reg_ranges = t4vf_reg_ranges;
+			reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
+		} else {
+			reg_ranges = t4_reg_ranges;
+			reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
+		}
+		break;
+
+	case CHELSIO_T5:
+		if (adap->flags & IS_VF) {
+			reg_ranges = t5vf_reg_ranges;
+			reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
+		} else {
+			reg_ranges = t5_reg_ranges;
+			reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+		}
+		break;
+
+	case CHELSIO_T6:
+		if (adap->flags & IS_VF) {
+			reg_ranges = t6vf_reg_ranges;
+			reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
+		} else {
+			reg_ranges = t6_reg_ranges;
+			reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+		}
+		break;
+
+	default:
+		CH_ERR(adap,
+			"Unsupported chip version %d\n", chip_version);
+		return;
+	}
+
+	/*
+	 * Clear the register buffer and insert the appropriate register
+	 * values selected by the above register ranges.
+	 */
+	memset(buf, 0, buf_size);
+	for (range = 0; range < reg_ranges_size; range += 2) {
+		unsigned int reg = reg_ranges[range];
+		unsigned int last_reg = reg_ranges[range + 1];
+		u32 *bufp = (u32 *)(buf + reg);
+
+		/*
+		 * Iterate across the register range filling in the register
+		 * buffer but don't write past the end of the register buffer.
+		 */
+		while (reg <= last_reg && bufp < buf_end) {
+			*bufp++ = t4_read_reg(adap, reg);
+			reg += sizeof(u32);
+		}
+	}
+}
+
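The register-range tables above are flat arrays of {first, last} pairs, and the dump loop below walks each pair in 4-byte steps while clamping at the end of the caller's buffer. A minimal user-space sketch of that walk, with a hypothetical read_reg_stub() standing in for t4_read_reg():

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for t4_read_reg(); returns a dummy value. */
static uint32_t read_reg_stub(unsigned int reg)
{
	return reg;
}

/* Walk {first, last} register pairs exactly as the dump loop below does. */
static void dump_ranges(const unsigned int *ranges, size_t nranges,
    uint8_t *buf, size_t buf_size)
{
	uint32_t *end = (uint32_t *)(buf + buf_size);
	size_t i;

	for (i = 0; i + 1 < nranges; i += 2) {
		unsigned int reg = ranges[i];
		unsigned int last = ranges[i + 1];
		uint32_t *p = (uint32_t *)(buf + reg);

		while (reg <= last && p < end) {
			*p++ = read_reg_stub(reg);
			reg += sizeof(uint32_t);
		}
	}
}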
+/*
  * Partial EEPROM Vital Product Data structure.  Includes only the ID and
- * VPD-R header.
+ * VPD-R sections.
  */
 struct t4_vpd_hdr {
 	u8  id_tag;
@@ -459,14 +2694,66 @@
 /*
  * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
  */
-#define EEPROM_MAX_RD_POLL 40
-#define EEPROM_MAX_WR_POLL 6
-#define EEPROM_STAT_ADDR   0x7bfc
-#define VPD_BASE           0x400
-#define VPD_BASE_OLD       0
-#define VPD_LEN            512
+#define EEPROM_DELAY		10		/* 10us per poll spin */
+#define EEPROM_MAX_POLL		5000		/* x 5000 == 50ms */
+
+#define EEPROM_STAT_ADDR	0x7bfc
+#define VPD_BASE		0x400
+#define VPD_BASE_OLD		0
+#define VPD_LEN			1024
 #define VPD_INFO_FLD_HDR_SIZE	3
+#define CHELSIO_VPD_UNIQUE_ID	0x82
 
+/*
+ * Small utility function to wait till any outstanding VPD Access is complete.
+ * We have a per-adapter state variable "VPD Busy" to indicate when we have a
+ * VPD Access in flight.  This allows us to handle the problem of having a
+ * previous VPD Access time out and prevent an attempt to inject a new VPD
+ * Request before any in-flight VPD request has completed.
+ */
+static int t4_seeprom_wait(struct adapter *adapter)
+{
+	unsigned int base = adapter->params.pci.vpd_cap_addr;
+	int max_poll;
+
+	/*
+	 * If no VPD Access is in flight, we can just return success right
+	 * away.
+	 */
+	if (!adapter->vpd_busy)
+		return 0;
+
+	/*
+	 * Poll the VPD Capability Address/Flag register waiting for it
+	 * to indicate that the operation is complete.
+	 */
+	max_poll = EEPROM_MAX_POLL;
+	do {
+		u16 val;
+
+		udelay(EEPROM_DELAY);
+		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+
+		/*
+		 * If the operation is complete, mark the VPD as no longer
+		 * busy and return success.
+		 */
+		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
+			adapter->vpd_busy = 0;
+			return 0;
+		}
+	} while (--max_poll);
+
+	/*
+	 * Failure!  Note that we leave the VPD Busy status set in order to
+	 * avoid pushing a new VPD Access request into the VPD Capability till
+	 * the current operation eventually succeeds.  It's a bug to issue a
+	 * new request when an existing request is in flight and will result
+	 * in corrupt hardware state.
+	 */
+	return -ETIMEDOUT;
+}
+
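The busy/flag bookkeeping follows the standard PCI VPD capability handshake: a read is started with the flag bit clear and completes when hardware sets it, while a write is started with the flag set and completes when hardware clears it, which is exactly what adapter->vpd_flag records for the poll above. A standalone sketch of that poll, assuming the usual 0x8000 flag bit and a hypothetical cfg_read16() accessor:

#include <stdint.h>

#define PCI_VPD_ADDR_F	0x8000	/* completion flag in the VPD address reg */

/* Hypothetical stand-in for t4_os_pci_read_cfg2(). */
extern uint16_t cfg_read16(unsigned int off);

/*
 * Poll the VPD address register until the flag bit reaches the expected
 * final state: PCI_VPD_ADDR_F after a read, 0 after a write.  Returns 0
 * on completion, -1 on timeout.
 */
static int vpd_poll(unsigned int vpd_addr_off, uint16_t want_flag,
    int max_polls)
{
	while (max_polls-- > 0) {
		if ((cfg_read16(vpd_addr_off) & PCI_VPD_ADDR_F) == want_flag)
			return 0;
		/* the driver proper sleeps EEPROM_DELAY microseconds here */
	}
	return -1;
}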
 /**
  *	t4_seeprom_read - read a serial EEPROM location
  *	@adapter: adapter to read
@@ -479,23 +2766,44 @@
  */
 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
 {
-	u16 val;
-	int attempts = EEPROM_MAX_RD_POLL;
 	unsigned int base = adapter->params.pci.vpd_cap_addr;
+	int ret;
 
+	/*
+	 * VPD Accesses must always be 4-byte aligned!
+	 */
 	if (addr >= EEPROMVSIZE || (addr & 3))
 		return -EINVAL;
 
+	/*
+	 * Wait for any previous operation which may still be in flight to
+	 * complete.
+	 */
+	ret = t4_seeprom_wait(adapter);
+	if (ret) {
+		CH_ERR(adapter, "VPD still busy from previous operation\n");
+		return ret;
+	}
+
+	/*
+	 * Issue our new VPD Read request, mark the VPD as being busy and wait
+	 * for our request to complete.  If it doesn't complete, note the
+	 * error and return it to our caller.  Note that we do not reset the
+	 * VPD Busy status!
+	 */
 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
-	do {
-		udelay(10);
-		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
-	} while (!(val & PCI_VPD_ADDR_F) && --attempts);
+	adapter->vpd_busy = 1;
+	adapter->vpd_flag = PCI_VPD_ADDR_F;
+	ret = t4_seeprom_wait(adapter);
+	if (ret) {
+		CH_ERR(adapter, "VPD read of address %#x failed\n", addr);
+		return ret;
+	}
 
-	if (!(val & PCI_VPD_ADDR_F)) {
-		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
-		return -EIO;
-	}
+	/*
+	 * Grab the returned data, swizzle it into our endianness and
+	 * return success.
+	 */
 	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
 	*data = le32_to_cpu(*data);
 	return 0;
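A sketch of a typical caller: get_vpd_params() below reads the whole VPD region exactly this way, one aligned 32-bit word at a time (unaligned offsets fail with -EINVAL). Uses only types and prototypes already in this file.

/*
 * Read VPD_LEN bytes of VPD starting at 'base' into a caller buffer,
 * 4 bytes at a time -- the same pattern get_vpd_params() uses.
 */
static int read_vpd_region(struct adapter *adapter, u32 base, u8 *vpd)
{
	int i, ret;

	for (i = 0; i < VPD_LEN; i += 4) {
		ret = t4_seeprom_read(adapter, base + i, (u32 *)(vpd + i));
		if (ret)
			return ret;
	}
	return 0;
}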
@@ -513,26 +2821,59 @@
  */
 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
 {
-	u16 val;
-	int attempts = EEPROM_MAX_WR_POLL;
 	unsigned int base = adapter->params.pci.vpd_cap_addr;
+	int ret;
+	u32 stats_reg;
+	int max_poll;
 
+	/*
+	 * VPD Accesses must always be 4-byte aligned!
+	 */
 	if (addr >= EEPROMVSIZE || (addr & 3))
 		return -EINVAL;
 
+	/*
+	 * Wait for any previous operation which may still be in flight to
+	 * complete.
+	 */
+	ret = t4_seeprom_wait(adapter);
+	if (ret) {
+		CH_ERR(adapter, "VPD still busy from previous operation\n");
+		return ret;
+	}
+
+	/*
+	 * Issue our new VPD Write request, mark the VPD as being busy and wait
+	 * for our request to complete.  If it doesn't complete, note the
+	 * error and return it to our caller.  Note that we do not reset the
+	 * VPD Busy status!
+	 */
 	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
 				 cpu_to_le32(data));
 	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
 				 (u16)addr | PCI_VPD_ADDR_F);
+	adapter->vpd_busy = 1;
+	adapter->vpd_flag = 0;
+	ret = t4_seeprom_wait(adapter);
+	if (ret) {
+		CH_ERR(adapter, "VPD write of address %#x failed\n", addr);
+		return ret;
+	}
+
+	/*
+	 * Reset PCI_VPD_DATA register after a transaction and wait for our
+	 * request to complete. If it doesn't complete, return an error.
+	 */
+	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
+	max_poll = EEPROM_MAX_POLL;
 	do {
-		msleep(1);
-		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
-	} while ((val & PCI_VPD_ADDR_F) && --attempts);
+		udelay(EEPROM_DELAY);
+		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
+	} while ((stats_reg & 0x1) && --max_poll);
+	if (!max_poll)
+		return -ETIMEDOUT;
 
-	if (val & PCI_VPD_ADDR_F) {
-		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
-		return -EIO;
-	}
+	/* Return success! */
 	return 0;
 }
 
@@ -581,33 +2922,33 @@
  *	get_vpd_keyword_val - Locates an information field keyword in the VPD
  *	@v: Pointer to buffered vpd data structure
  *	@kw: The keyword to search for
- *	
+ *
 *	Returns the offset of the information field keyword's value, or
 *	-ENOENT if the keyword is not found.
  */
 static int get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
 {
-         int i;
-	 unsigned int offset , len;
-	 const u8 *buf = &v->id_tag;
-	 const u8 *vpdr_len = &v->vpdr_tag; 
-	 offset = sizeof(struct t4_vpd_hdr);
-	 len =  (u16)vpdr_len[1] + ((u16)vpdr_len[2] << 8);
-	 
-	 if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
-		 return -ENOENT;
-	 }
+	int i;
+	unsigned int offset, len;
+	const u8 *buf = (const u8 *)v;
+	const u8 *vpdr_len = &v->vpdr_len[0];
+	offset = sizeof(struct t4_vpd_hdr);
+	len = (u16)vpdr_len[0] + ((u16)vpdr_len[1] << 8);
 
-         for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
-		 if(memcmp(buf + i , kw , 2) == 0){
-			 i += VPD_INFO_FLD_HDR_SIZE;
-                         return i;
-		  }
+	if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
+		return -ENOENT;
+	}
 
-                 i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
-         }
+	for (i = offset; i + VPD_INFO_FLD_HDR_SIZE <= offset + len;) {
+		if (memcmp(buf + i, kw, 2) == 0) {
+			i += VPD_INFO_FLD_HDR_SIZE;
+			return i;
+		}
 
-         return -ENOENT;
+		i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+	}
+
+	return -ENOENT;
 }
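Each VPD-R information field is a 3-byte header (two keyword bytes plus a length byte) followed by the value, which is why the scan above advances by VPD_INFO_FLD_HDR_SIZE + buf[i+2]. A self-contained sketch of the same walk without the driver types:

#include <stdint.h>
#include <string.h>

/*
 * Walk a VPD-R section: each field is a 2-byte keyword, a 1-byte length,
 * then the value.  Returns the offset of the value for 'kw', or -1 if the
 * keyword is not found.  Mirrors get_vpd_keyword_val() above.
 */
static int find_keyword(const uint8_t *vpdr, unsigned int len,
    const char kw[2])
{
	unsigned int i = 0;

	while (i + 3 <= len) {
		if (memcmp(vpdr + i, kw, 2) == 0)
			return (int)(i + 3);	/* value starts after header */
		i += 3 + vpdr[i + 2];
	}
	return -1;
}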
 
 
@@ -615,14 +2956,16 @@
  *	get_vpd_params - read VPD parameters from VPD EEPROM
  *	@adapter: adapter to read
  *	@p: where to store the parameters
+ *	@vpd: caller provided temporary space to read the VPD into
  *
  *	Reads card parameters stored in VPD EEPROM.
  */
-static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
+    u8 *vpd)
 {
 	int i, ret, addr;
 	int ec, sn, pn, na;
-	u8 vpd[VPD_LEN], csum;
+	u8 csum;
 	const struct t4_vpd_hdr *v;
 
 	/*
@@ -630,15 +2973,25 @@
 	 * it at 0.
 	 */
 	ret = t4_seeprom_read(adapter, VPD_BASE, (u32 *)(vpd));
-	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 
+	if (ret)
+		return (ret);
 
-	for (i = 0; i < sizeof(vpd); i += 4) {
+	/*
+	 * The VPD shall start with a unique identifier specified by the PCI
+	 * SIG.  For Chelsio adapters that identifier is CHELSIO_VPD_UNIQUE_ID
+	 * (0x82), and the VPD programming software is expected to place it
+	 * automatically at the beginning of the VPD.
+	 */
+	addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
+
+	for (i = 0; i < VPD_LEN; i += 4) {
 		ret = t4_seeprom_read(adapter, addr + i, (u32 *)(vpd + i));
 		if (ret)
 			return ret;
 	}
  	v = (const struct t4_vpd_hdr *)vpd;
-	
+
 #define FIND_VPD_KW(var,name) do { \
 	var = get_vpd_keyword_val(v , name); \
 	if (var < 0) { \
@@ -645,7 +2998,7 @@
 		CH_ERR(adapter, "missing VPD keyword " name "\n"); \
 		return -EINVAL; \
 	} \
-} while (0)	
+} while (0)
 
 	FIND_VPD_KW(i, "RV");
 	for (csum = 0; i >= 0; i--)
@@ -652,9 +3005,11 @@
 		csum += vpd[i];
 
 	if (csum) {
-		CH_ERR(adapter, "corrupted VPD EEPROM, actual csum %u\n", csum);
+		CH_ERR(adapter,
+			"corrupted VPD EEPROM, actual csum %u\n", csum);
 		return -EINVAL;
 	}
+
 	FIND_VPD_KW(ec, "EC");
 	FIND_VPD_KW(sn, "SN");
 	FIND_VPD_KW(pn, "PN");
@@ -668,8 +3023,10 @@
 	i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
 	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
 	strstrip(p->sn);
+	i = vpd[pn - VPD_INFO_FLD_HDR_SIZE + 2];
 	memcpy(p->pn, vpd + pn, min(i, PN_LEN));
 	strstrip((char *)p->pn);
+	i = vpd[na - VPD_INFO_FLD_HDR_SIZE + 2];
 	memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
 	strstrip((char *)p->na);
 
@@ -678,16 +3035,16 @@
 
 /* serial flash and firmware constants and flash config file constants */
 enum {
-	SF_ATTEMPTS = 10,             /* max retries for SF operations */
+	SF_ATTEMPTS = 10,	/* max retries for SF operations */
 
 	/* flash command opcodes */
-	SF_PROG_PAGE    = 2,          /* program page */
-	SF_WR_DISABLE   = 4,          /* disable writes */
-	SF_RD_STATUS    = 5,          /* read status register */
-	SF_WR_ENABLE    = 6,          /* enable writes */
-	SF_RD_DATA_FAST = 0xb,        /* read flash */
-	SF_RD_ID        = 0x9f,       /* read ID */
-	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
+	SF_PROG_PAGE    = 2,	/* program page */
+	SF_WR_DISABLE   = 4,	/* disable writes */
+	SF_RD_STATUS    = 5,	/* read status register */
+	SF_WR_ENABLE    = 6,	/* enable writes */
+	SF_RD_DATA_FAST = 0xb,	/* read flash */
+	SF_RD_ID	= 0x9f,	/* read ID */
+	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
 };
 
 /**
@@ -781,7 +3138,7 @@
  *	Read the specified number of 32-bit words from the serial flash.
  *	If @byte_oriented is set the read data is stored as a byte array
  *	(i.e., big-endian), otherwise as 32-bit words in the platform's
- *	natural endianess.
+ *	natural endianness.
  */
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
 		  unsigned int nwords, u32 *data, int byte_oriented)
@@ -804,7 +3161,7 @@
 		if (ret)
 			return ret;
 		if (byte_oriented)
-			*data = htonl(*data);
+			*data = (__force __u32)(cpu_to_be32(*data));
 	}
 	return 0;
 }
@@ -819,10 +3176,10 @@
  *
  *	Writes up to a page of data (256 bytes) to the serial flash starting
  *	at the given address.  All the data must be written to the same page.
- *	If @byte_oriented is set the write data is stored as byte stream 
+ *	If @byte_oriented is set the write data is stored as a byte stream
 *	(i.e. matches what is on disk), otherwise in big-endian.
  */
-static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+int t4_write_flash(struct adapter *adapter, unsigned int addr,
 			  unsigned int n, const u8 *data, int byte_oriented)
 {
 	int ret;
@@ -844,7 +3201,7 @@
 			val = (val << 8) + *data++;
 
 		if (!byte_oriented)
-			val = htonl(val);
+			val = cpu_to_be32(val);
 
 		ret = sf1_write(adapter, c, c != left, 1, val);
 		if (ret)
@@ -863,8 +3220,9 @@
 		return ret;
 
 	if (memcmp(data - n, (u8 *)buf + offset, n)) {
-		CH_ERR(adapter, "failed to correctly write the flash page "
-		       "at %#x\n", addr);
+		CH_ERR(adapter,
+			"failed to correctly write the flash page at %#x\n",
+			addr);
 		return -EIO;
 	}
 	return 0;
@@ -883,12 +3241,26 @@
  */
 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
 {
-	return t4_read_flash(adapter,
-			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver), 1,
+	return t4_read_flash(adapter, FLASH_FW_START +
+			     offsetof(struct fw_hdr, fw_ver), 1,
 			     vers, 0);
 }
 
 /**
+ *	t4_get_bs_version - read the firmware bootstrap version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW Bootstrap version from flash.
+ */
+int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+			     offsetof(struct fw_hdr, fw_ver), 1,
+			     vers, 0);
+}
+
+/**
  *	t4_get_tp_version - read the TP microcode version
  *	@adapter: the adapter
  *	@vers: where to place the version
@@ -897,48 +3269,153 @@
  */
 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
 {
-	return t4_read_flash(adapter, FLASH_FW_START + offsetof(struct fw_hdr,
-							      tp_microcode_ver),
+	return t4_read_flash(adapter, FLASH_FW_START +
+			     offsetof(struct fw_hdr, tp_microcode_ver),
 			     1, vers, 0);
 }
 
 /**
- *	t4_check_fw_version - check if the FW is compatible with this driver
+ *	t4_get_exprom_version - return the Expansion ROM version (if any)
  *	@adapter: the adapter
+ *	@vers: where to place the version
  *
- *	Checks if an adapter's FW is compatible with the driver.  Returns 0
- *	if there's exact match, a negative error if the version could not be
- *	read or there's a major version mismatch, and a positive value if the
- *	expected major version is found but there's a minor version mismatch.
+ *	Reads the Expansion ROM header from FLASH and returns the version
+ *	number (if present) through the @vers return value pointer.  We return
+ *	this in the Firmware Version Format since it's convenient.  Return
+ *	0 on success, -ENOENT if no Expansion ROM is present.
  */
-int t4_check_fw_version(struct adapter *adapter)
+int t4_get_exprom_version(struct adapter *adap, u32 *vers)
 {
-	int ret, major, minor, micro;
+	struct exprom_header {
+		unsigned char hdr_arr[16];	/* must start with 0x55aa */
+		unsigned char hdr_ver[4];	/* Expansion ROM version */
+	} *hdr;
+	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+					   sizeof(u32))];
+	int ret;
 
-	ret = t4_get_fw_version(adapter, &adapter->params.fw_vers);
-	if (!ret)
-		ret = t4_get_tp_version(adapter, &adapter->params.tp_vers);
+	ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
+			    ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
+			    0);
 	if (ret)
 		return ret;
 
-	major = G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers);
-	minor = G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers);
-	micro = G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers);
+	hdr = (struct exprom_header *)exprom_header_buf;
+	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+		return -ENOENT;
 
-	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
-		CH_ERR(adapter, "card FW has major version %u, driver wants "
-		       "%u\n", major, FW_VERSION_MAJOR);
-		return -EINVAL;
-	}
+	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
+		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
+		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
+		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
+	return 0;
+}
 
-	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
-		return 0;                                   /* perfect match */
+/**
+ *	t4_get_scfg_version - return the Serial Configuration version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the Serial Configuration Version via the Firmware interface
+ *	(thus this can only be called once we're ready to issue Firmware
+ *	commands).  The format of the Serial Configuration version is
+ *	adapter specific.  Returns 0 on success, an error on failure.
+ *
+ *	Note that early versions of the Firmware didn't include the ability
+ *	to retrieve the Serial Configuration version, so we zero-out the
+ *	return-value parameter in that case to avoid leaving it with
+ *	garbage in it.
+ *
+ *	Also note that the Firmware will return its cached copy of the Serial
+ *	Initialization Revision ID, not the actual Revision ID as written in
+ *	the Serial EEPROM.  This is only an issue if a new VPD has been written
+ *	and the Firmware/Chip haven't yet gone through a RESET sequence.  So
+ *	it's best to defer calling this routine till after a FW_RESET_CMD has
+ *	been issued if the Host Driver will be performing a full adapter
+ *	initialization.
+ */
+int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
+{
+	u32 scfgrev_param;
+	int ret;
 
-	/* Minor/micro version mismatch.  Report it but often it's OK. */
-	return 1;
+	scfgrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_SCFGREV));
+	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+			      1, &scfgrev_param, vers);
+	if (ret)
+		*vers = 0;
+	return ret;
 }
 
 /**
+ *	t4_get_vpd_version - return the VPD version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the VPD via the Firmware interface (thus this can only be called
+ *	once we're ready to issue Firmware commands).  The format of the
+ *	VPD version is adapter specific.  Returns 0 on success, an error on
+ *	failure.
+ *
+ *	Note that early versions of the Firmware didn't include the ability
+ *	to retrieve the VPD version, so we zero-out the return-value parameter
+ *	in that case to avoid leaving it with garbage in it.
+ *
+ *	Also note that the Firmware will return its cached copy of the VPD
+ *	Revision ID, not the actual Revision ID as written in the Serial
+ *	EEPROM.  This is only an issue if a new VPD has been written and the
+ *	Firmware/Chip haven't yet gone through a RESET sequence.  So it's best
+ *	to defer calling this routine till after a FW_RESET_CMD has been issued
+ *	if the Host Driver will be performing a full adapter initialization.
+ */
+int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
+{
+	u32 vpdrev_param;
+	int ret;
+
+	vpdrev_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_VPDREV));
+	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+			      1, &vpdrev_param, vers);
+	if (ret)
+		*vers = 0;
+	return ret;
+}
+
+/**
+ *	t4_get_version_info - extract various chip/firmware version information
+ *	@adapter: the adapter
+ *
+ *	Reads various chip/firmware version numbers and stores them into the
+ *	Adapter Parameters structure.  If any of the reads fails, the first
+ *	error will be returned, but all of the version numbers will still
+ *	be read.
+ */
+int t4_get_version_info(struct adapter *adapter)
+{
+	int ret = 0;
+
+	#define FIRST_RET(__getvinfo) \
+	do { \
+		int __ret = __getvinfo; \
+		if (__ret && !ret) \
+			ret = __ret; \
+	} while (0)
+
+	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
+	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
+	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
+	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
+	FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
+	FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
+
+	#undef FIRST_RET
+
+	return ret;
+}
+
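The version words collected above pack one byte each of major, minor, micro and build, which the G_FW_HDR_FW_VER_* macros extract. A small sketch that pretty-prints such a word; the shift positions follow the conventional layout and should be checked against firmware/t4fw_interface.h rather than taken as authoritative:

#include <stdint.h>
#include <stdio.h>

/* Decode a packed firmware version word (assumed byte layout). */
static void print_fw_version(uint32_t vers)
{
	printf("%u.%u.%u.%u\n",
	    (vers >> 24) & 0xff,	/* major */
	    (vers >> 16) & 0xff,	/* minor */
	    (vers >> 8) & 0xff,		/* micro */
	    vers & 0xff);		/* build */
}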
+/**
  *	t4_flash_erase_sectors - erase a range of flash sectors
  *	@adapter: the adapter
  *	@start: the first sector to erase
@@ -946,17 +3423,21 @@
  *
  *	Erases the sectors in the given inclusive range.
  */
-static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
 {
 	int ret = 0;
 
+	if (end >= adapter->params.sf_nsec)
+		return -EINVAL;
+
 	while (start <= end) {
 		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
 		    (ret = sf1_write(adapter, 4, 0, 1,
 				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
 		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
-			CH_ERR(adapter, "erase of flash sector %d failed, "
-			       "error %d\n", start, ret);
+			CH_ERR(adapter,
+				"erase of flash sector %d failed, error %d\n",
+				start, ret);
 			break;
 		}
 		start++;
@@ -970,73 +3451,45 @@
  *	@adapter: the adapter
  *
  *	Return the address within the flash where the Firmware Configuration
- *	File is stored.
+ *	File is stored, or an error if the device FLASH is too small to contain
+ *	a Firmware Configuration File.
  */
-unsigned int t4_flash_cfg_addr(struct adapter *adapter)
+int t4_flash_cfg_addr(struct adapter *adapter)
 {
-	if (adapter->params.sf_size == 0x100000)
-		return FLASH_FPGA_CFG_START;
-	else
-		return FLASH_CFG_START;
+	/*
+	 * If the device FLASH isn't large enough to hold a Firmware
+	 * Configuration File, return an error.
+	 */
+	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+		return -ENOSPC;
+
+	return FLASH_CFG_START;
 }
 
-/**
- *	t4_load_cfg - download config file
- *	@adap: the adapter
- *	@cfg_data: the cfg text file to write
- *	@size: text file size
- *
- *	Write the supplied config text file to the card's serial flash.
+/*
+ * Return TRUE if the specified firmware matches the adapter.  I.e. T4
+ * firmware for T4 adapters, T5 firmware for T5 adapters, etc.  We go ahead
+ * and emit an error message for mismatched firmware to save our caller the
+ * effort ...
  */
-int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+static int t4_fw_matches_chip(struct adapter *adap,
+			      const struct fw_hdr *hdr)
 {
-	int ret, i, n;
-	unsigned int addr;
-	unsigned int flash_cfg_start_sec;
-	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
-	addr = t4_flash_cfg_addr(adap);
-	flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
-	if (size > FLASH_CFG_MAX_SIZE) {
-		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
-		       FLASH_CFG_MAX_SIZE);
-		return -EFBIG;
-	}
-
-	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
-			 sf_sec_size);
-	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
-				     flash_cfg_start_sec + i - 1);
 	/*
-	 * If size == 0 then we're simply erasing the FLASH sectors associated
-	 * with the on-adapter Firmware Configuration File.
+	 * The expression below will return FALSE for any unsupported adapter,
+	 * which will keep us "honest" in the future ...
 	 */
-	if (ret || size == 0)
-		goto out;
+	if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
+	    (is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
+	    (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
+		return 1;
 
-	/* this will write to the flash up to SF_PAGE_SIZE at a time */
-	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
-		if ( (size - i) <  SF_PAGE_SIZE) 
-			n = size - i;
-		else 
-			n = SF_PAGE_SIZE;
-		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
-		if (ret)
-			goto out;
-		
-		addr += SF_PAGE_SIZE;
-		cfg_data += SF_PAGE_SIZE;
-	} 
-                
-out:
-	if (ret)
-		CH_ERR(adap, "config file %s failed %d\n",
-		       (size == 0 ? "clear" : "download"), ret);
-	return ret;
+	CH_ERR(adap,
+		"FW image (%d) is not suitable for this adapter (%d)\n",
+		hdr->chip, chip_id(adap));
+	return 0;
 }
 
-
 /**
  *	t4_load_fw - download firmware
  *	@adap: the adapter
@@ -1054,37 +3507,53 @@
 	const u32 *p = (const u32 *)fw_data;
 	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
 	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+	unsigned int fw_start_sec;
+	unsigned int fw_start;
+	unsigned int fw_size;
 
+	if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
+		fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
+		fw_start = FLASH_FWBOOTSTRAP_START;
+		fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
+	} else {
+		fw_start_sec = FLASH_FW_START_SEC;
+		fw_start = FLASH_FW_START;
+		fw_size = FLASH_FW_MAX_SIZE;
+	}
+
 	if (!size) {
 		CH_ERR(adap, "FW image has no data\n");
 		return -EINVAL;
 	}
 	if (size & 511) {
-		CH_ERR(adap, "FW image size not multiple of 512 bytes\n");
+		CH_ERR(adap,
+			"FW image size not multiple of 512 bytes\n");
 		return -EINVAL;
 	}
-	if (ntohs(hdr->len512) * 512 != size) {
-		CH_ERR(adap, "FW image size differs from size in FW header\n");
+	if ((unsigned int) be16_to_cpu(hdr->len512) * 512 != size) {
+		CH_ERR(adap,
+			"FW image size differs from size in FW header\n");
 		return -EINVAL;
 	}
-	if (size > FLASH_FW_MAX_SIZE) {
+	if (size > fw_size) {
 		CH_ERR(adap, "FW image too large, max is %u bytes\n",
-		       FLASH_FW_MAX_SIZE);
+			fw_size);
 		return -EFBIG;
 	}
+	if (!t4_fw_matches_chip(adap, hdr))
+		return -EINVAL;
 
 	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
-		csum += ntohl(p[i]);
+		csum += be32_to_cpu(p[i]);
 
 	if (csum != 0xffffffff) {
-		CH_ERR(adap, "corrupted firmware image, checksum %#x\n",
-		       csum);
+		CH_ERR(adap,
+			"corrupted firmware image, checksum %#x\n", csum);
 		return -EINVAL;
 	}
 
-	i = DIV_ROUND_UP(size, sf_sec_size);        /* # of sectors spanned */
-	ret = t4_flash_erase_sectors(adap, FLASH_FW_START_SEC,
-	    FLASH_FW_START_SEC + i - 1);
+	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
+	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
 	if (ret)
 		goto out;
 
@@ -1094,12 +3563,12 @@
 	 * first page with a bad version.
 	 */
 	memcpy(first_page, fw_data, SF_PAGE_SIZE);
-	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
-	ret = t4_write_flash(adap, FLASH_FW_START, SF_PAGE_SIZE, first_page, 1);
+	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
+	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, 1);
 	if (ret)
 		goto out;
 
-	addr = FLASH_FW_START;
+	addr = fw_start;
 	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
 		addr += SF_PAGE_SIZE;
 		fw_data += SF_PAGE_SIZE;
@@ -1109,541 +3578,39 @@
 	}
 
 	ret = t4_write_flash(adap,
-			     FLASH_FW_START + offsetof(struct fw_hdr, fw_ver),
+			     fw_start + offsetof(struct fw_hdr, fw_ver),
 			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver, 1);
 out:
 	if (ret)
-		CH_ERR(adap, "firmware download failed, error %d\n", ret);
+		CH_ERR(adap, "firmware download failed, error %d\n",
+			ret);
 	return ret;
 }
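The integrity check in t4_load_fw() sums the image as big-endian 32-bit words and requires the result to be 0xffffffff. A standalone sketch of the same check for a firmware image loaded from disk:

#include <stdint.h>
#include <stddef.h>

/*
 * Verify a firmware image the way t4_load_fw() does: sum all big-endian
 * 32-bit words; a valid image sums to 0xffffffff.  'size' must be a
 * multiple of 4 (the driver requires a multiple of 512).
 */
static int fw_csum_ok(const uint8_t *img, size_t size)
{
	uint32_t csum = 0;
	size_t i;

	for (i = 0; i + 4 <= size; i += 4)
		csum += (uint32_t)img[i] << 24 | (uint32_t)img[i + 1] << 16 |
		    (uint32_t)img[i + 2] << 8 | img[i + 3];

	return csum == 0xffffffffu;
}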
 
-/* BIOS boot headers */
-typedef struct pci_expansion_rom_header {
-	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
-	u8	reserved[22]; /* Reserved per processor Architecture data */
-	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
-} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
-
-/* Legacy PCI Expansion ROM Header */
-typedef struct legacy_pci_expansion_rom_header {
-	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
-	u8	size512; /* Current Image Size in units of 512 bytes */
-	u8	initentry_point[4];
-	u8	cksum; /* Checksum computed on the entire Image */
-	u8	reserved[16]; /* Reserved */
-	u8	pcir_offset[2]; /* Offset to PCI Data Struture */
-} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
-
-/* EFI PCI Expansion ROM Header */
-typedef struct efi_pci_expansion_rom_header {
-	u8	signature[2]; // ROM signature. The value 0xaa55
-	u8	initialization_size[2]; /* Units 512. Includes this header */
-	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
-	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
-	u8	efi_machine_type[2]; /* Machine type from EFI image header */
-	u8	compression_type[2]; /* Compression type. */
-		/* 
-		 * Compression type definition
-		 * 0x0: uncompressed
-		 * 0x1: Compressed
-		 * 0x2-0xFFFF: Reserved
-		 */
-	u8	reserved[8]; /* Reserved */
-	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
-	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
-} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
-
-/* PCI Data Structure Format */
-typedef struct pcir_data_structure { /* PCI Data Structure */
-	u8	signature[4]; /* Signature. The string "PCIR" */
-	u8	vendor_id[2]; /* Vendor Identification */
-	u8	device_id[2]; /* Device Identification */
-	u8	vital_product[2]; /* Pointer to Vital Product Data */
-	u8	length[2]; /* PCIR Data Structure Length */
-	u8	revision; /* PCIR Data Structure Revision */
-	u8	class_code[3]; /* Class Code */
-	u8	image_length[2]; /* Image Length. Multiple of 512B */
-	u8	code_revision[2]; /* Revision Level of Code/Data */
-	u8	code_type; /* Code Type. */
-		/*
-		 * PCI Expansion ROM Code Types
-		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
-		 * 0x01: Open Firmware standard for PCI. FCODE
-		 * 0x02: Hewlett-Packard PA RISC. HP reserved
-		 * 0x03: EFI Image. EFI
-		 * 0x04-0xFF: Reserved.
-		 */
-	u8	indicator; /* Indicator. Identifies the last image in the ROM */
-	u8	reserved[2]; /* Reserved */
-} pcir_data_t; /* PCI__DATA_STRUCTURE */
-
-/* BOOT constants */
-enum {
-	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
-	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
-	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
-	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
-	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1 byte * length increment  */
-	VENDOR_ID = 0x1425, /* Vendor ID */
-	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
-};
-
-/*
- *	modify_device_id - Modifies the device ID of the Boot BIOS image 
- *	@adatper: the device ID to write.
- *	@boot_data: the boot image to modify.
- *
- *	Write the supplied device ID to the boot BIOS image.
- */
-static void modify_device_id(int device_id, u8 *boot_data)
-{
-	legacy_pci_exp_rom_header_t *header;
-	pcir_data_t *pcir_header;
-	u32 cur_header = 0;
-
-	/*
-	 * Loop through all chained images and change the device ID's
-	 */
-	while (1) {
-		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
-		pcir_header = (pcir_data_t *) &boot_data[cur_header +
-		    le16_to_cpu(*(u16*)header->pcir_offset)];
-
-		/*
-		 * Only modify the Device ID if code type is Legacy or HP.
-		 * 0x00: Okay to modify
-		 * 0x01: FCODE. Do not be modify
-		 * 0x03: Okay to modify
-		 * 0x04-0xFF: Do not modify
-		 */
-		if (pcir_header->code_type == 0x00) {
-			u8 csum = 0;
-			int i;
-
-			/*
-			 * Modify Device ID to match current adatper
-			 */
-			*(u16*) pcir_header->device_id = device_id;
-
-			/*
-			 * Set checksum temporarily to 0.
-			 * We will recalculate it later.
-			 */
-			header->cksum = 0x0;
-
-			/*
-			 * Calculate and update checksum
-			 */
-			for (i = 0; i < (header->size512 * 512); i++)
-				csum += (u8)boot_data[cur_header + i];
-
-			/*
-			 * Invert summed value to create the checksum
-			 * Writing new checksum value directly to the boot data
-			 */
-			boot_data[cur_header + 7] = -csum;
-
-		} else if (pcir_header->code_type == 0x03) {
-
-			/*
-			 * Modify Device ID to match current adatper
-			 */
-			*(u16*) pcir_header->device_id = device_id;
-
-		}
-
-
-		/*
-		 * Check indicator element to identify if this is the last
-		 * image in the ROM.
-		 */
-		if (pcir_header->indicator & 0x80)
-			break;
-
-		/*
-		 * Move header pointer up to the next image in the ROM.
-		 */
-		cur_header += header->size512 * 512;
-	}
-}
-
-/*
- *	t4_load_boot - download boot flash
- *	@adapter: the adapter
- *	@boot_data: the boot image to write
- *	@boot_addr: offset in flash to write boot_data
- *	@size: image size
- *
- *	Write the supplied boot image to the card's serial flash.
- *	The boot image has the following sections: a 28-byte header and the
- *	boot image.
- */
-int t4_load_boot(struct adapter *adap, u8 *boot_data, 
-		 unsigned int boot_addr, unsigned int size)
-{
-	pci_exp_rom_header_t *header;
-	int pcir_offset ;
-	pcir_data_t *pcir_header;
-	int ret, addr;
-	uint16_t device_id;
-	unsigned int i;
-	unsigned int boot_sector = boot_addr * 1024;
-	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
-
-	/*
-	 * Make sure the boot image does not encroach on the firmware region
-	 */
-	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
-		CH_ERR(adap, "boot image encroaching on firmware region\n");
-		return -EFBIG;
-	}
-
-	/*
-	 * Number of sectors spanned
-	 */
-	i = DIV_ROUND_UP(size ? size : FLASH_BOOTCFG_MAX_SIZE,
-			sf_sec_size);
-	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
-				     (boot_sector >> 16) + i - 1);
-
-	/*
-	 * If size == 0 then we're simply erasing the FLASH sectors associated
-	 * with the on-adapter option ROM file
-	 */
-	if (ret || (size == 0))
-		goto out;
-
-	/* Get boot header */
-	header = (pci_exp_rom_header_t *)boot_data;
-	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
-	/* PCIR Data Structure */
-	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
-
-	/*
-	 * Perform some primitive sanity testing to avoid accidentally
-	 * writing garbage over the boot sectors.  We ought to check for
-	 * more but it's not worth it for now ...
-	 */
-	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
-		CH_ERR(adap, "boot image too small/large\n");
-		return -EFBIG;
-	}
-
-	/*
-	 * Check BOOT ROM header signature
-	 */
-	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE ) {
-		CH_ERR(adap, "Boot image missing signature\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Check PCI header signature
-	 */
-	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
-		CH_ERR(adap, "PCI header missing signature\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Check Vendor ID matches Chelsio ID
-	 */
-	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
-		CH_ERR(adap, "Vendor ID missing signature\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * Retrieve adapter's device ID
-	 */
-	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
-	/* Want to deal with PF 0 so I strip off PF 4 indicator */
-	device_id = (device_id & 0xff) | 0x4000;
-
-	/*
-	 * Check PCIE Device ID
-	 */
-	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
-		/*
-		 * Change the device ID in the Boot BIOS image to match
-		 * the Device ID of the current adapter.
-		 */
-		modify_device_id(device_id, boot_data);
-	}
-
-	/*
-	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
-	 * we finish copying the rest of the boot image. This will ensure
-	 * that the BIOS boot header will only be written if the boot image
-	 * was written in full.
-	 */
-	addr = boot_sector;
-	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
-		addr += SF_PAGE_SIZE; 
-		boot_data += SF_PAGE_SIZE;
-		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
-		if (ret)
-			goto out;
-	}
-
-	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, boot_data, 0);
-
-out:
-	if (ret)
-		CH_ERR(adap, "boot image download failed, error %d\n", ret);
-	return ret;
-}
-
 /**
- *	t4_read_cimq_cfg - read CIM queue configuration
+ *	t4_fwcache - firmware cache operation
  *	@adap: the adapter
- *	@base: holds the queue base addresses in bytes
- *	@size: holds the queue sizes in bytes
- *	@thres: holds the queue full thresholds in bytes
- *
- *	Returns the current configuration of the CIM queues, starting with
- *	the IBQs, then the OBQs.
+ *	@op  : the operation (flush or flush and invalidate)
  */
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
 {
-	unsigned int i, v;
+	struct fw_params_cmd c;
 
-	for (i = 0; i < CIM_NUM_IBQ; i++) {
-		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
-			     V_QUENUMSELECT(i));
-		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
-		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
-		*thres++ = G_QUEFULLTHRSH(v) * 8;   /* 8-byte unit */
-	}
-	for (i = 0; i < CIM_NUM_OBQ; i++) {
-		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
-			     V_QUENUMSELECT(i));
-		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-		*base++ = G_CIMQBASE(v) * 256; /* value is in 256-byte units */
-		*size++ = G_CIMQSIZE(v) * 256; /* value is in 256-byte units */
-	}
-}
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn =
+	    cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+			    F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				V_FW_PARAMS_CMD_PFN(adap->pf) |
+				V_FW_PARAMS_CMD_VFN(0));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.param[0].mnem =
+	    cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
+	c.param[0].val = (__force __be32)op;
 
-/**
- *	t4_read_cim_ibq - read the contents of a CIM inbound queue
- *	@adap: the adapter
- *	@qid: the queue index
- *	@data: where to store the queue contents
- *	@n: capacity of @data in 32-bit words
- *
- *	Reads the contents of the selected CIM queue starting at address 0 up
- *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
- *	error and the number of 32-bit words actually read on success.
- */
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
-{
-	int i, err;
-	unsigned int addr;
-	const unsigned int nwords = CIM_IBQ_SIZE * 4;
-
-	if (qid > 5 || (n & 3))
-		return -EINVAL;
-
-	addr = qid * nwords;
-	if (n > nwords)
-		n = nwords;
-
-	for (i = 0; i < n; i++, addr++) {
-		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
-			     F_IBQDBGEN);
-		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
-				      2, 1);
-		if (err)
-			return err;
-		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
-	}
-	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
-	return i;
+	return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
 }
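A hedged usage sketch for the new helper: flushing the firmware cache. The op constant name is assumed from enum fw_params_param_dev_fwcache in firmware/t4fw_interface.h; verify the exact spelling there before relying on it.

/* Flush the firmware cache (constant name assumed from the FW headers). */
static int flush_fw_cache(struct adapter *adap)
{
	return t4_fwcache(adap, FW_PARAMS_PARAM_DEV_FWCACHE_FLUSH);
}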
 
-/**
- *	t4_read_cim_obq - read the contents of a CIM outbound queue
- *	@adap: the adapter
- *	@qid: the queue index
- *	@data: where to store the queue contents
- *	@n: capacity of @data in 32-bit words
- *
- *	Reads the contents of the selected CIM queue starting at address 0 up
- *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
- *	error and the number of 32-bit words actually read on success.
- */
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
-{
-	int i, err;
-	unsigned int addr, v, nwords;
-
-	if (qid > 5 || (n & 3))
-		return -EINVAL;
-
-	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
-		     V_QUENUMSELECT(qid));
-	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-
-	addr = G_CIMQBASE(v) * 64;    /* muliple of 256 -> muliple of 4 */
-	nwords = G_CIMQSIZE(v) * 64;  /* same */
-	if (n > nwords)
-		n = nwords;
-
-	for (i = 0; i < n; i++, addr++) {
-		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
-			     F_OBQDBGEN);
-		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
-				      2, 1);
-		if (err)
-			return err;
-		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
-	}
-	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
-	return i;
-}
-
-enum {
-	CIM_QCTL_BASE     = 0,
-	CIM_CTL_BASE      = 0x2000,
-	CIM_PBT_ADDR_BASE = 0x2800,
-	CIM_PBT_LRF_BASE  = 0x3000,
-	CIM_PBT_DATA_BASE = 0x3800
-};
-
-/**
- *	t4_cim_read - read a block from CIM internal address space
- *	@adap: the adapter
- *	@addr: the start address within the CIM address space
- *	@n: number of words to read
- *	@valp: where to store the result
- *
- *	Reads a block of 4-byte words from the CIM intenal address space.
- */
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
-		unsigned int *valp)
-{
-	int ret = 0;
-
-	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
-		return -EBUSY;
-
-	for ( ; !ret && n--; addr += 4) {
-		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
-		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
-				      0, 5, 2);
-		if (!ret)
-			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
-	}
-	return ret;
-}
-
-/**
- *	t4_cim_write - write a block into CIM internal address space
- *	@adap: the adapter
- *	@addr: the start address within the CIM address space
- *	@n: number of words to write
- *	@valp: set of values to write
- *
- *	Writes a block of 4-byte words into the CIM intenal address space.
- */
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
-		 const unsigned int *valp)
-{
-	int ret = 0;
-
-	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
-		return -EBUSY;
-
-	for ( ; !ret && n--; addr += 4) {
-		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
-		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
-		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
-				      0, 5, 2);
-	}
-	return ret;
-}
-
-static int t4_cim_write1(struct adapter *adap, unsigned int addr, unsigned int val)
-{
-	return t4_cim_write(adap, addr, 1, &val);
-}
-
-/**
- *	t4_cim_ctl_read - read a block from CIM control region
- *	@adap: the adapter
- *	@addr: the start address within the CIM control region
- *	@n: number of words to read
- *	@valp: where to store the result
- *
- *	Reads a block of 4-byte words from the CIM control region.
- */
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
-		    unsigned int *valp)
-{
-	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
-}
-
-/**
- *	t4_cim_read_la - read CIM LA capture buffer
- *	@adap: the adapter
- *	@la_buf: where to store the LA data
- *	@wrptr: the HW write pointer within the capture buffer
- *
- *	Reads the contents of the CIM LA buffer with the most recent entry at
- *	the end	of the returned data and with the entry at @wrptr first.
- *	We try to leave the LA in the running state we find it in.
- */
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
-{
-	int i, ret;
-	unsigned int cfg, val, idx;
-
-	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
-	if (ret)
-		return ret;
-
-	if (cfg & F_UPDBGLAEN) {                /* LA is running, freeze it */
-		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
-		if (ret)
-			return ret;
-	}
-
-	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
-	if (ret)
-		goto restart;
-
-	idx = G_UPDBGLAWRPTR(val);
-	if (wrptr)
-		*wrptr = idx;
-
-	for (i = 0; i < adap->params.cim_la_size; i++) {
-		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
-				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
-		if (ret)
-			break;
-		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
-		if (ret)
-			break;
-		if (val & F_UPDBGLARDEN) {
-			ret = -ETIMEDOUT;
-			break;
-		}
-		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
-		if (ret)
-			break;
-		idx = (idx + 1) & M_UPDBGLARDPTR;
-	}
-restart:
-	if (cfg & F_UPDBGLAEN) {
-		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
-				      cfg & ~F_UPDBGLARDEN);
-		if (!ret)
-			ret = r;
-	}
-	return ret;
-}
-
 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
 			unsigned int *pif_req_wrptr,
 			unsigned int *pif_rsp_wrptr)
@@ -1699,53 +3666,6 @@
 	t4_write_reg(adap, A_CIM_DEBUGCFG, cfg);
 }
 
-/**
- *	t4_tp_read_la - read TP LA capture buffer
- *	@adap: the adapter
- *	@la_buf: where to store the LA data
- *	@wrptr: the HW write pointer within the capture buffer
- *
- *	Reads the contents of the TP LA buffer with the most recent entry at
- *	the end	of the returned data and with the entry at @wrptr first.
- *	We leave the LA in the running state we find it in.
- */
-void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
-{
-	bool last_incomplete;
-	unsigned int i, cfg, val, idx;
-
-	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
-	if (cfg & F_DBGLAENABLE)                    /* freeze LA */
-		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
-			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
-
-	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
-	idx = G_DBGLAWPTR(val);
-	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
-	if (last_incomplete)
-		idx = (idx + 1) & M_DBGLARPTR;
-	if (wrptr)
-		*wrptr = idx;
-
-	val &= 0xffff;
-	val &= ~V_DBGLARPTR(M_DBGLARPTR);
-	val |= adap->params.tp.la_mask;
-
-	for (i = 0; i < TPLA_SIZE; i++) {
-		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
-		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
-		idx = (idx + 1) & M_DBGLARPTR;
-	}
-
-	/* Wipe out last entry if it isn't valid */
-	if (last_incomplete)
-		la_buf[TPLA_SIZE - 1] = ~0ULL;
-
-	if (cfg & F_DBGLAENABLE)                    /* restore running state */
-		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
-			     cfg | adap->params.tp.la_mask);
-}
-
 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
 {
 	unsigned int i, j;
@@ -1761,11 +3681,11 @@
 	}
 }
 
-#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
-		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
+		     FW_PORT_CAP_ANEG)
 
 /**
- *	t4_link_start - apply link configuration to MAC/PHY
+ *	t4_link_l1cfg - apply link configuration to MAC/PHY
  *	@phy: the PHY to setup
  *	@mac: the MAC to setup
  *	@lc: the requested link configuration
@@ -1777,32 +3697,47 @@
  *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
  *	  otherwise do it later based on the outcome of auto-negotiation.
  */
-int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
 		  struct link_config *lc)
 {
 	struct fw_port_cmd c;
-	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+	unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+	unsigned int fc, fec;
 
-	lc->link_ok = 0;
+	fc = 0;
 	if (lc->requested_fc & PAUSE_RX)
 		fc |= FW_PORT_CAP_FC_RX;
 	if (lc->requested_fc & PAUSE_TX)
 		fc |= FW_PORT_CAP_FC_TX;
 
+	fec = 0;
+	if (lc->requested_fec & FEC_RS)
+		fec |= FW_PORT_CAP_FEC_RS;
+	if (lc->requested_fec & FEC_BASER_RS)
+		fec |= FW_PORT_CAP_FEC_BASER_RS;
+	if (lc->requested_fec & FEC_RESERVED)
+		fec |= FW_PORT_CAP_FEC_RESERVED;
+
 	memset(&c, 0, sizeof(c));
-	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
-			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
-	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
-				  FW_LEN16(c));
+	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				     V_FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 =
+		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+			    FW_LEN16(c));
 
 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
-		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
-		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+					     fc | fec);
+		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+		lc->fec = lc->requested_fec;
 	} else if (lc->autoneg == AUTONEG_DISABLE) {
-		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
-		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed |
+					     fc | fec | mdi);
+		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+		lc->fec = lc->requested_fec;
 	} else
-		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
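A usage sketch for the new FEC plumbing (the caller shape is hypothetical;
the link_config fields are the ones consumed above):

/* Request full flow control and RS FEC with auto-negotiation off. */
static int
example_set_link(struct adapter *sc, struct port_info *pi)
{
	struct link_config *lc = &pi->link_cfg;

	lc->requested_fc = PAUSE_RX | PAUSE_TX;
	lc->requested_fec = FEC_RS;
	lc->autoneg = AUTONEG_DISABLE;
	return t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
}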
@@ -1820,19 +3755,24 @@
 	struct fw_port_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
-			       F_FW_CMD_EXEC | V_FW_PORT_CMD_PORTID(port));
-	c.action_to_len16 = htonl(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
-				  FW_LEN16(c));
-	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				     V_FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 =
+		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+			    FW_LEN16(c));
+	c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
+typedef void (*int_handler_t)(struct adapter *adap);
+
 struct intr_info {
-	unsigned int mask;       /* bits to check in interrupt status */
-	const char *msg;         /* message to print or NULL */
-	short stat_idx;          /* stat counter to increment or -1 */
-	unsigned short fatal;    /* whether the condition reported is fatal */
+	unsigned int mask;	/* bits to check in interrupt status */
+	const char *msg;	/* message to print or NULL */
+	short stat_idx;		/* stat counter to increment or -1 */
+	unsigned short fatal;	/* whether the condition reported is fatal */
+	int_handler_t int_handler;	/* platform-specific int handler */
 };
 
 /**
@@ -1843,7 +3783,7 @@
  *
  *	A table driven interrupt handler that applies a set of masks to an
  *	interrupt status word and performs the corresponding actions if the
- *	interrupts described by the mask have occured.  The actions include
+ *	interrupts described by the mask have occurred.  The actions include
  *	optionally emitting a warning or alert message.  The table is terminated
  *	by an entry specifying mask 0.  Returns the number of fatal interrupt
  *	conditions.
@@ -1860,15 +3800,17 @@
 			continue;
 		if (acts->fatal) {
 			fatal++;
-			CH_ALERT(adapter, "%s (0x%x)\n",
-				 acts->msg, status & acts->mask);
+			CH_ALERT(adapter, "%s (0x%x)\n", acts->msg,
+				  status & acts->mask);
 		} else if (acts->msg)
-			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n",
-					  acts->msg, status & acts->mask);
+			CH_WARN_RATELIMIT(adapter, "%s (0x%x)\n", acts->msg,
+				 status & acts->mask);
+		if (acts->int_handler)
+			acts->int_handler(adapter);
 		mask |= acts->mask;
 	}
 	status &= mask;
-	if (status)                           /* clear processed interrupts */
+	if (status)	/* clear processed interrupts */
 		t4_write_reg(adapter, reg, status);
 	return fatal;
 }
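With the new int_handler member, a table entry can either log a condition
or route it to a callback.  An illustrative table (the 0x1 mask is a
placeholder; t4_db_full is used the same way in sge_intr_handler below):

static const struct intr_info example_intr_info[] = {
	/* Routed to a handler instead of being logged. */
	{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
	/* Logged via CH_ALERT and counted as fatal. */
	{ 0x1, "example fatal condition", -1, 1 },
	{ 0 }	/* terminator */
};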
@@ -1878,7 +3820,7 @@
  */
 static void pcie_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info sysbus_intr_info[] = {
+	static const struct intr_info sysbus_intr_info[] = {
 		{ F_RNPP, "RXNP array parity error", -1, 1 },
 		{ F_RPCP, "RXPC array parity error", -1, 1 },
 		{ F_RCIP, "RXCIF array parity error", -1, 1 },
@@ -1886,7 +3828,7 @@
 		{ F_RFTP, "RXFT array parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info pcie_port_intr_info[] = {
+	static const struct intr_info pcie_port_intr_info[] = {
 		{ F_TPCP, "TXPC array parity error", -1, 1 },
 		{ F_TNPP, "TXNP array parity error", -1, 1 },
 		{ F_TFTP, "TXFT array parity error", -1, 1 },
@@ -1898,7 +3840,7 @@
 		{ F_TDUE, "Tx uncorrectable data error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info pcie_intr_info[] = {
+	static const struct intr_info pcie_intr_info[] = {
 		{ F_MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
 		{ F_MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
 		{ F_MSIDATAPERR, "MSI data parity error", -1, 1 },
@@ -1933,15 +3875,61 @@
 		{ 0 }
 	};
 
+	static const struct intr_info t5_pcie_intr_info[] = {
+		{ F_MSTGRPPERR, "Master Response Read Queue parity error",
+		  -1, 1 },
+		{ F_MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
+		{ F_MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
+		{ F_MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+		{ F_MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+		{ F_MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+		{ F_MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+		{ F_PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
+		  -1, 1 },
+		{ F_PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
+		  -1, 1 },
+		{ F_TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+		{ F_MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
+		{ F_CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+		{ F_CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+		{ F_DREQWRPERR, "PCI DMA channel write request parity error",
+		  -1, 1 },
+		{ F_DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+		{ F_DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+		{ F_HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
+		{ F_HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+		{ F_HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+		{ F_CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+		{ F_FIDPERR, "PCI FID parity error", -1, 1 },
+		{ F_VFIDPERR, "PCI INTx clear parity error", -1, 1 },
+		{ F_MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
+		{ F_PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+		{ F_IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
+		  -1, 1 },
+		{ F_IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
+		  -1, 1 },
+		{ F_RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
+		{ F_IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
+		{ F_TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
+		{ F_READRSPERR, "Outbound read error", -1, 0 },
+		{ 0 }
+	};
+
 	int fat;
 
-	fat = t4_handle_intr_status(adapter,
-				    A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-				    sysbus_intr_info) +
-	      t4_handle_intr_status(adapter,
-				    A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
-				    pcie_port_intr_info) +
-	      t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE, pcie_intr_info);
+	if (is_t4(adapter))
+		fat = t4_handle_intr_status(adapter,
+				A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+				sysbus_intr_info) +
+			t4_handle_intr_status(adapter,
+					A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+					pcie_port_intr_info) +
+			t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
+					      pcie_intr_info);
+	else
+		fat = t4_handle_intr_status(adapter, A_PCIE_INT_CAUSE,
+					    t5_pcie_intr_info);
 	if (fat)
 		t4_fatal_err(adapter);
 }
@@ -1951,7 +3939,7 @@
  */
 static void tp_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info tp_intr_info[] = {
+	static const struct intr_info tp_intr_info[] = {
 		{ 0x3fffffff, "TP parity error", -1, 1 },
 		{ F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
 		{ 0 }
@@ -1969,13 +3957,13 @@
 	u64 v;
 	u32 err;
 
-	static struct intr_info sge_intr_info[] = {
+	static const struct intr_info sge_intr_info[] = {
 		{ F_ERR_CPL_EXCEED_IQE_SIZE,
 		  "SGE received CPL exceeding IQE size", -1, 1 },
 		{ F_ERR_INVALID_CIDX_INC,
 		  "SGE GTS CIDX increment too large", -1, 0 },
 		{ F_ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
-		{ F_ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+		{ F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
 		{ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_ERR_DATA_CPL_ON_HIGH_QID0,
 		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
 		{ F_ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
@@ -1988,23 +3976,47 @@
 		  0 },
 		{ F_ERR_ING_CTXT_PRIO,
 		  "SGE too many priority ingress contexts", -1, 0 },
-		{ F_ERR_EGR_CTXT_PRIO,
-		  "SGE too many priority egress contexts", -1, 0 },
 		{ F_INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
 		{ F_EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
 		{ 0 }
 	};
 
+	static const struct intr_info t4t5_sge_intr_info[] = {
+		{ F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
+		{ F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
+		{ F_ERR_EGR_CTXT_PRIO,
+		  "SGE too many priority egress contexts", -1, 0 },
+		{ 0 }
+	};
+
+	/*
+	 * For now, treat the interrupts below as fatal so that we disable SGE
+	 * and get better debug info.
+	 */
+	static const struct intr_info t6_sge_intr_info[] = {
+		{ F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1,
+		  "SGE PCIe error for a DBP thread", -1, 1 },
+		{ F_FATAL_WRE_LEN,
+		  "SGE Actual WRE packet is less than advertized length",
+		  -1, 1 },
+		{ 0 }
+	};
+
 	v = (u64)t4_read_reg(adapter, A_SGE_INT_CAUSE1) |
-	    ((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
+		((u64)t4_read_reg(adapter, A_SGE_INT_CAUSE2) << 32);
 	if (v) {
 		CH_ALERT(adapter, "SGE parity error (%#llx)\n",
-			 (unsigned long long)v);
+				(unsigned long long)v);
 		t4_write_reg(adapter, A_SGE_INT_CAUSE1, v);
 		t4_write_reg(adapter, A_SGE_INT_CAUSE2, v >> 32);
 	}
 
 	v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3, sge_intr_info);
+	if (chip_id(adapter) <= CHELSIO_T5)
+		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
+					   t4t5_sge_intr_info);
+	else
+		v |= t4_handle_intr_status(adapter, A_SGE_INT_CAUSE3,
+					   t6_sge_intr_info);
 
 	err = t4_read_reg(adapter, A_SGE_ERROR_STATS);
 	if (err & F_ERROR_QID_VALID) {
@@ -2029,7 +4041,7 @@
  */
 static void cim_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info cim_intr_info[] = {
+	static const struct intr_info cim_intr_info[] = {
 		{ F_PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
 		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
 		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
@@ -2039,7 +4051,7 @@
 		{ F_TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info cim_upintr_info[] = {
+	static const struct intr_info cim_upintr_info[] = {
 		{ F_RSVDSPACEINT, "CIM reserved space access", -1, 1 },
 		{ F_ILLTRANSINT, "CIM illegal transaction", -1, 1 },
 		{ F_ILLWRINT, "CIM illegal write", -1, 1 },
@@ -2088,7 +4100,7 @@
  */
 static void ulprx_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info ulprx_intr_info[] = {
+	static const struct intr_info ulprx_intr_info[] = {
 		{ F_CAUSE_CTX_1, "ULPRX channel 1 context error", -1, 1 },
 		{ F_CAUSE_CTX_0, "ULPRX channel 0 context error", -1, 1 },
 		{ 0x7fffff, "ULPRX parity error", -1, 1 },
@@ -2104,7 +4116,7 @@
  */
 static void ulptx_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info ulptx_intr_info[] = {
+	static const struct intr_info ulptx_intr_info[] = {
 		{ F_PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
 		  0 },
 		{ F_PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
@@ -2126,7 +4138,7 @@
  */
 static void pmtx_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info pmtx_intr_info[] = {
+	static const struct intr_info pmtx_intr_info[] = {
 		{ F_PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
 		{ F_PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
 		{ F_PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
@@ -2149,7 +4161,7 @@
  */
 static void pmrx_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info pmrx_intr_info[] = {
+	static const struct intr_info pmrx_intr_info[] = {
 		{ F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
 		{ 0x3ffff0, "PMRX framing error", -1, 1 },
 		{ F_OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
@@ -2169,7 +4181,7 @@
  */
 static void cplsw_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info cplsw_intr_info[] = {
+	static const struct intr_info cplsw_intr_info[] = {
 		{ F_CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
 		{ F_CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
 		{ F_TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
@@ -2188,7 +4200,8 @@
  */
 static void le_intr_handler(struct adapter *adap)
 {
-	static struct intr_info le_intr_info[] = {
+	unsigned int chip_ver = chip_id(adap);
+	static const struct intr_info le_intr_info[] = {
 		{ F_LIPMISS, "LE LIP miss", -1, 0 },
 		{ F_LIP0, "LE 0 LIP error", -1, 0 },
 		{ F_PARITYERR, "LE parity error", -1, 1 },
@@ -2197,7 +4210,18 @@
 		{ 0 }
 	};
 
-	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE, le_intr_info))
+	static const struct intr_info t6_le_intr_info[] = {
+		{ F_T6_LIPMISS, "LE LIP miss", -1, 0 },
+		{ F_T6_LIP0, "LE 0 LIP error", -1, 0 },
+		{ F_TCAMINTPERR, "LE parity error", -1, 1 },
+		{ F_T6_UNKNOWNCMD, "LE unknown command", -1, 1 },
+		{ F_SSRAMINTPERR, "LE request queue parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, A_LE_DB_INT_CAUSE,
+				  (chip_ver <= CHELSIO_T5) ?
+				  le_intr_info : t6_le_intr_info))
 		t4_fatal_err(adap);
 }
 
@@ -2206,11 +4230,11 @@
  */
 static void mps_intr_handler(struct adapter *adapter)
 {
-	static struct intr_info mps_rx_intr_info[] = {
+	static const struct intr_info mps_rx_intr_info[] = {
 		{ 0xffffff, "MPS Rx parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info mps_tx_intr_info[] = {
+	static const struct intr_info mps_tx_intr_info[] = {
 		{ V_TPFIFO(M_TPFIFO), "MPS Tx TP FIFO parity error", -1, 1 },
 		{ F_NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
 		{ V_TXDATAFIFO(M_TXDATAFIFO), "MPS Tx data FIFO parity error",
@@ -2222,7 +4246,7 @@
 		{ F_FRMERR, "MPS Tx framing error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info mps_trc_intr_info[] = {
+	static const struct intr_info mps_trc_intr_info[] = {
 		{ V_FILTMEM(M_FILTMEM), "MPS TRC filter parity error", -1, 1 },
 		{ V_PKTFIFO(M_PKTFIFO), "MPS TRC packet FIFO parity error", -1,
 		  1 },
@@ -2229,19 +4253,19 @@
 		{ F_MISCPERR, "MPS TRC misc parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info mps_stat_sram_intr_info[] = {
+	static const struct intr_info mps_stat_sram_intr_info[] = {
 		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info mps_stat_tx_intr_info[] = {
+	static const struct intr_info mps_stat_tx_intr_info[] = {
 		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info mps_stat_rx_intr_info[] = {
+	static const struct intr_info mps_stat_rx_intr_info[] = {
 		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
 		{ 0 }
 	};
-	static struct intr_info mps_cls_intr_info[] = {
+	static const struct intr_info mps_cls_intr_info[] = {
 		{ F_MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
 		{ F_MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
 		{ F_HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
@@ -2266,12 +4290,13 @@
 				    mps_cls_intr_info);
 
 	t4_write_reg(adapter, A_MPS_INT_CAUSE, 0);
-	t4_read_reg(adapter, A_MPS_INT_CAUSE);                    /* flush */
+	t4_read_reg(adapter, A_MPS_INT_CAUSE);	/* flush */
 	if (fat)
 		t4_fatal_err(adapter);
 }
 
-#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | F_ECC_UE_INT_CAUSE)
+#define MEM_INT_MASK (F_PERR_INT_CAUSE | F_ECC_CE_INT_CAUSE | \
+		      F_ECC_UE_INT_CAUSE)
 
 /*
  * EDC/MC interrupt handler.
@@ -2278,7 +4303,7 @@
  */
 static void mem_intr_handler(struct adapter *adapter, int idx)
 {
-	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
 
 	unsigned int addr, cnt_addr, v;
 
@@ -2285,17 +4310,29 @@
 	if (idx <= MEM_EDC1) {
 		addr = EDC_REG(A_EDC_INT_CAUSE, idx);
 		cnt_addr = EDC_REG(A_EDC_ECC_STATUS, idx);
+	} else if (idx == MEM_MC) {
+		if (is_t4(adapter)) {
+			addr = A_MC_INT_CAUSE;
+			cnt_addr = A_MC_ECC_STATUS;
+		} else {
+			addr = A_MC_P_INT_CAUSE;
+			cnt_addr = A_MC_P_ECC_STATUS;
+		}
 	} else {
-		addr = A_MC_INT_CAUSE;
-		cnt_addr = A_MC_ECC_STATUS;
+		addr = MC_REG(A_MC_P_INT_CAUSE, 1);
+		cnt_addr = MC_REG(A_MC_P_ECC_STATUS, 1);
 	}
 
 	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
 	if (v & F_PERR_INT_CAUSE)
-		CH_ALERT(adapter, "%s FIFO parity error\n", name[idx]);
+		CH_ALERT(adapter, "%s FIFO parity error\n",
+			  name[idx]);
 	if (v & F_ECC_CE_INT_CAUSE) {
 		u32 cnt = G_ECC_CECNT(t4_read_reg(adapter, cnt_addr));
 
+		if (idx <= MEM_EDC1)
+			t4_edc_err_read(adapter, idx);
+
 		t4_write_reg(adapter, cnt_addr, V_ECC_CECNT(M_ECC_CECNT));
 		CH_WARN_RATELIMIT(adapter,
 				  "%u %s correctable ECC data error%s\n",
@@ -2302,8 +4339,8 @@
 				  cnt, name[idx], cnt > 1 ? "s" : "");
 	}
 	if (v & F_ECC_UE_INT_CAUSE)
-		CH_ALERT(adapter, "%s uncorrectable ECC data error\n",
-			 name[idx]);
+		CH_ALERT(adapter,
+			 "%s uncorrectable ECC data error\n", name[idx]);
 
 	t4_write_reg(adapter, addr, v);
 	if (v & (F_PERR_INT_CAUSE | F_ECC_UE_INT_CAUSE))
@@ -2317,14 +4354,22 @@
 {
 	u32 v, status = t4_read_reg(adapter, A_MA_INT_CAUSE);
 
-	if (status & F_MEM_PERR_INT_CAUSE)
-		CH_ALERT(adapter, "MA parity error, parity status %#x\n",
-			 t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS));
+	if (status & F_MEM_PERR_INT_CAUSE) {
+		CH_ALERT(adapter,
+			  "MA parity error, parity status %#x\n",
+			  t4_read_reg(adapter, A_MA_PARITY_ERROR_STATUS1));
+		if (is_t5(adapter))
+			CH_ALERT(adapter,
+				  "MA parity error, parity status %#x\n",
+				  t4_read_reg(adapter,
+					      A_MA_PARITY_ERROR_STATUS2));
+	}
 	if (status & F_MEM_WRAP_INT_CAUSE) {
 		v = t4_read_reg(adapter, A_MA_INT_WRAP_STATUS);
-		CH_ALERT(adapter, "MA address wrap-around error by client %u to"
-			 " address %#x\n", G_MEM_WRAP_CLIENT_NUM(v),
-			 G_MEM_WRAP_ADDRESS(v) << 4);
+		CH_ALERT(adapter, "MA address wrap-around error by "
+			  "client %u to address %#x\n",
+			  G_MEM_WRAP_CLIENT_NUM(v),
+			  G_MEM_WRAP_ADDRESS(v) << 4);
 	}
 	t4_write_reg(adapter, A_MA_INT_CAUSE, status);
 	t4_fatal_err(adapter);
@@ -2335,7 +4380,7 @@
  */
 static void smb_intr_handler(struct adapter *adap)
 {
-	static struct intr_info smb_intr_info[] = {
+	static const struct intr_info smb_intr_info[] = {
 		{ F_MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
 		{ F_MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
 		{ F_SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
@@ -2351,7 +4396,7 @@
  */
 static void ncsi_intr_handler(struct adapter *adap)
 {
-	static struct intr_info ncsi_intr_info[] = {
+	static const struct intr_info ncsi_intr_info[] = {
 		{ F_CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
 		{ F_MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
 		{ F_TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
@@ -2368,17 +4413,26 @@
  */
 static void xgmac_intr_handler(struct adapter *adap, int port)
 {
-	u32 v = t4_read_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE));
+	u32 v, int_cause_reg;
 
-	v &= F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+	if (is_t4(adap))
+		int_cause_reg = PORT_REG(port, A_XGMAC_PORT_INT_CAUSE);
+	else
+		int_cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
+
+	v = t4_read_reg(adap, int_cause_reg);
+
+	v &= (F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR);
 	if (!v)
 		return;
 
 	if (v & F_TXFIFO_PRTY_ERR)
-		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n", port);
+		CH_ALERT(adap, "XGMAC %d Tx FIFO parity error\n",
+			  port);
 	if (v & F_RXFIFO_PRTY_ERR)
-		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n", port);
-	t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_INT_CAUSE), v);
+		CH_ALERT(adap, "XGMAC %d Rx FIFO parity error\n",
+			  port);
+	t4_write_reg(adap, int_cause_reg, v);
 	t4_fatal_err(adap);
 }
 
@@ -2387,20 +4441,24 @@
  */
 static void pl_intr_handler(struct adapter *adap)
 {
-	static struct intr_info pl_intr_info[] = {
-		{ F_FATALPERR, "T4 fatal parity error", -1, 1 },
+	static const struct intr_info pl_intr_info[] = {
+		{ F_FATALPERR, "Fatal parity error", -1, 1 },
 		{ F_PERRVFID, "PL VFID_MAP parity error", -1, 1 },
 		{ 0 }
 	};
 
-	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE, pl_intr_info))
+	static const struct intr_info t5_pl_intr_info[] = {
+		{ F_FATALPERR, "Fatal parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, A_PL_PL_INT_CAUSE,
+				  is_t4(adap) ?
+				  pl_intr_info : t5_pl_intr_info))
 		t4_fatal_err(adap);
 }
 
 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
-#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC | F_EDC0 | \
-		F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
-		F_CPL_SWITCH | F_SGE | F_ULP_TX)
 
 /**
  *	t4_slow_intr_handler - control path interrupt handler
@@ -2426,18 +4484,20 @@
 		pl_intr_handler(adapter);
 	if (cause & F_SMB)
 		smb_intr_handler(adapter);
-	if (cause & F_XGMAC0)
+	if (cause & F_MAC0)
 		xgmac_intr_handler(adapter, 0);
-	if (cause & F_XGMAC1)
+	if (cause & F_MAC1)
 		xgmac_intr_handler(adapter, 1);
-	if (cause & F_XGMAC_KR0)
+	if (cause & F_MAC2)
 		xgmac_intr_handler(adapter, 2);
-	if (cause & F_XGMAC_KR1)
+	if (cause & F_MAC3)
 		xgmac_intr_handler(adapter, 3);
 	if (cause & F_PCIE)
 		pcie_intr_handler(adapter);
-	if (cause & F_MC)
+	if (cause & F_MC0)
 		mem_intr_handler(adapter, MEM_MC);
+	if (is_t5(adapter) && (cause & F_MC1))
+		mem_intr_handler(adapter, MEM_MC1);
 	if (cause & F_EDC0)
 		mem_intr_handler(adapter, MEM_EDC0);
 	if (cause & F_EDC1)
@@ -2463,7 +4523,7 @@
 
 	/* Clear the interrupts just processed for which we are the master. */
 	t4_write_reg(adapter, A_PL_INT_CAUSE, cause & GLBL_INTR_MASK);
-	(void) t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
+	(void)t4_read_reg(adapter, A_PL_INT_CAUSE); /* flush */
 	return 1;
 }
 
@@ -2482,16 +4542,23 @@
  */
 void t4_intr_enable(struct adapter *adapter)
 {
-	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+	u32 val = 0;
+	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+	u32 pf = (chip_id(adapter) <= CHELSIO_T5
+		  ? G_SOURCEPF(whoami)
+		  : G_T6_SOURCEPF(whoami));
 
+	if (chip_id(adapter) <= CHELSIO_T5)
+		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+	else
+		val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
 	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
 		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
-		     F_ERR_DROPPED_DB | F_ERR_DATA_CPL_ON_HIGH_QID1 |
+		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
 		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
 		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
 		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
-		     F_ERR_EGR_CTXT_PRIO | F_INGRESS_SIZE_ERR |
-		     F_EGRESS_SIZE_ERR);
+		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
 }
@@ -2506,7 +4573,10 @@
  */
 void t4_intr_disable(struct adapter *adapter)
 {
-	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
+	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+	u32 pf = (chip_id(adapter) <= CHELSIO_T5
+		  ? G_SOURCEPF(whoami)
+		  : G_T6_SOURCEPF(whoami));
 
 	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
 	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
@@ -2523,11 +4593,8 @@
 {
 	static const unsigned int cause_reg[] = {
 		A_SGE_INT_CAUSE1, A_SGE_INT_CAUSE2, A_SGE_INT_CAUSE3,
-		A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
-		A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
 		A_PCIE_NONFAT_ERR, A_PCIE_INT_CAUSE,
-		A_MC_INT_CAUSE,
-		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS, A_MA_INT_CAUSE,
+		A_MA_INT_WRAP_STATUS, A_MA_PARITY_ERROR_STATUS1, A_MA_INT_CAUSE,
 		A_EDC_INT_CAUSE, EDC_REG(A_EDC_INT_CAUSE, 1),
 		A_CIM_HOST_INT_CAUSE, A_CIM_HOST_UPACC_INT_CAUSE,
 		MYPF_REG(A_CIM_PF_HOST_INT_CAUSE),
@@ -2546,6 +4613,17 @@
 	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
 		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
 
+	t4_write_reg(adapter, is_t4(adapter) ? A_MC_INT_CAUSE :
+				A_MC_P_INT_CAUSE, 0xffffffff);
+
+	if (is_t4(adapter)) {
+		t4_write_reg(adapter, A_PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+				0xffffffff);
+		t4_write_reg(adapter, A_PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+				0xffffffff);
+	} else
+		t4_write_reg(adapter, A_MA_PARITY_ERROR_STATUS2, 0xffffffff);
+
 	t4_write_reg(adapter, A_PL_INT_CAUSE, GLBL_INTR_MASK);
 	(void) t4_read_reg(adapter, A_PL_INT_CAUSE);          /* flush */
 }
@@ -2593,12 +4671,11 @@
 	struct fw_rss_ind_tbl_cmd cmd;
 
 	memset(&cmd, 0, sizeof(cmd));
-	cmd.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
-			       F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
-			       V_FW_RSS_IND_TBL_CMD_VIID(viid));
-	cmd.retval_len16 = htonl(FW_LEN16(cmd));
+	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
 
-
 	/*
 	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
 	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
@@ -2614,8 +4691,8 @@
 		 * Set up the firmware RSS command header to send the next
 		 * "nq" Ingress Queue IDs to the firmware.
 		 */
-		cmd.niqid = htons(nq);
-		cmd.startidx = htons(start);
+		cmd.niqid = cpu_to_be16(nq);
+		cmd.startidx = cpu_to_be16(start);
 
 		/*
 		 * "nq" more done for the start of the next loop.
@@ -2661,7 +4738,6 @@
 		if (ret)
 			return ret;
 	}
-
 	return 0;
 }
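The three-to-a-word packing described in the comment above can be sketched
with the firmware interface field macros (assuming the usual
V_FW_RSS_IND_TBL_CMD_IQn helpers from t4fw_interface.h):

/* Pack three ingress queue IDs into one big-endian table word. */
static __be32
example_pack_iqids(unsigned int iq0, unsigned int iq1, unsigned int iq2)
{
	return cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(iq0) |
			   V_FW_RSS_IND_TBL_CMD_IQ1(iq1) |
			   V_FW_RSS_IND_TBL_CMD_IQ2(iq2));
}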
 
@@ -2680,15 +4756,16 @@
 	struct fw_rss_glb_config_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_write = htonl(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
-			      F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
-	c.retval_len16 = htonl(FW_LEN16(c));
+	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
-		c.u.manual.mode_pkd = htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+		c.u.manual.mode_pkd =
+			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
 	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
-		c.u.basicvirtual.mode_pkd =
-			htonl(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
-		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+		c.u.basicvirtual.mode_keymode =
+			cpu_to_be32(V_FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+		c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
 	} else
 		return -EINVAL;
 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
@@ -2701,21 +4778,28 @@
  *	@viid: the VI id
  *	@flags: RSS flags
  *	@defq: id of the default RSS queue for the VI.
+ *	@skeyidx: RSS secret key table index for non-global mode
+ *	@skey: RSS vf_scramble key for VI.
  *
  *	Configures VI-specific RSS properties.
  */
 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
-		     unsigned int flags, unsigned int defq)
+		     unsigned int flags, unsigned int defq, unsigned int skeyidx,
+		     unsigned int skey)
 {
 	struct fw_rss_vi_config_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
-			     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
-			     V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	c.u.basicvirtual.defaultq_to_udpen = htonl(flags |
+	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
 					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
+	c.u.basicvirtual.secretkeyidx_pkd = cpu_to_be32(
+					V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(skeyidx));
+	c.u.basicvirtual.secretkeyxor = cpu_to_be32(skey);
+
 	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
 }
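A hypothetical call, for reference (the hash-enable flag shown is one of
the FW_RSS_VI_CONFIG_CMD bits; the actual policy is up to the driver):

/* Hash IPv4 4-tuples, default queue 0, secret-key slot 5, no XOR key. */
ret = t4_config_vi_rss(sc, sc->mbox, viid,
    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN, 0, 5, 0);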
 
@@ -2726,7 +4810,7 @@
 	return t4_wait_op_done_val(adap, A_TP_RSS_LKP_TABLE, F_LKPTBLROWVLD, 1,
 				   5, 0, val);
 }
-	
+
 /**
  *	t4_read_rss - read the contents of the RSS mapping table
  *	@adapter: the adapter
@@ -2750,6 +4834,42 @@
 }
 
 /**
+ *	t4_fw_tp_pio_rw - Access TP PIO through LDST
+ *	@adap: the adapter
+ *	@vals: where the indirect register values are stored/written
+ *	@nregs: how many indirect registers to read/write
+ *	@start_idx: index of first indirect register to read/write
+ *	@rw: Read (1) or Write (0)
+ *
+ *	Access TP PIO registers through LDST
+ */
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+		     unsigned int start_index, unsigned int rw)
+{
+	int ret, i;
+	int cmd = FW_LDST_ADDRSPC_TP_PIO;
+	struct fw_ldst_cmd c;
+
+	for (i = 0; i < nregs; i++) {
+		memset(&c, 0, sizeof(c));
+		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+						F_FW_CMD_REQUEST |
+						(rw ? F_FW_CMD_READ :
+						     F_FW_CMD_WRITE) |
+						V_FW_LDST_CMD_ADDRSPACE(cmd));
+		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+		c.u.addrval.addr = cpu_to_be32(start_index + i);
+		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
+		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+		if (ret == 0) {
+			if (rw)
+				vals[i] = be32_to_cpu(c.u.addrval.val);
+		}
+	}
+}
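The t4_use_ldst() call sites below all follow the same single-register
pattern; a minimal sketch of a one-word read through this helper:

/* Read one TP PIO register via the firmware LDST path. */
static u32
example_ldst_read_one(struct adapter *adap, u32 tp_reg)
{
	u32 val = 0;

	t4_fw_tp_pio_rw(adap, &val, 1, tp_reg, 1);	/* rw = 1: read */
	return val;
}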
+
+/**
  *	t4_read_rss_key - read the global RSS key
  *	@adap: the adapter
  *	@key: 10-entry array holding the 320-bit RSS key
@@ -2758,8 +4878,11 @@
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
-			 A_TP_RSS_SECRET_KEY0);
+	if (t4_use_ldst(adap))
+		t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
+	else
+		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
+				 A_TP_RSS_SECRET_KEY0);
 }
 
 /**
@@ -2772,13 +4895,35 @@
  *	0..15 the corresponding entry in the RSS key table is written,
  *	otherwise the global RSS key is written.
  */
-void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
 {
-	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
-			  A_TP_RSS_SECRET_KEY0);
-	if (idx >= 0 && idx < 16)
-		t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
-			     V_KEYWRADDR(idx) | F_KEYWREN);
+	u8 rss_key_addr_cnt = 16;
+	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
+
+	/*
+	 * T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
+	 * allows access to key addresses 16-63 by using KeyWrAddrX
+	 * as index[5:4](upper 2) into key table
+	 */
+	if ((chip_id(adap) > CHELSIO_T5) &&
+	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
+		rss_key_addr_cnt = 32;
+
+	if (t4_use_ldst(adap))
+		t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
+	else
+		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, key, 10,
+				  A_TP_RSS_SECRET_KEY0);
+
+	if (idx >= 0 && idx < rss_key_addr_cnt) {
+		if (rss_key_addr_cnt > 16)
+			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+				     vrt | V_KEYWRADDRX(idx >> 4) |
+				     V_T6_VFWRADDR(idx) | F_KEYWREN);
+		else
+			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+				     vrt | V_KEYWRADDR(idx) | F_KEYWREN);
+	}
 }
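A worked example of the extended-index encoding: with F_KEYEXTEND set and
KeyMode 3, idx = 20 is latched as V_KEYWRADDRX(20 >> 4) = V_KEYWRADDRX(1)
in the upper two address bits plus V_T6_VFWRADDR(20) in the low bits,
selecting key slot 20.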
 
 /**
@@ -2790,10 +4935,15 @@
  *	Reads the PF RSS Configuration Table at the specified index and returns
  *	the value found there.
  */
-void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, u32 *valp)
+void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
+			   u32 *valp)
 {
-	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			 valp, 1, A_TP_RSS_PF0_CONFIG + index);
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, valp, 1,
+				A_TP_RSS_PF0_CONFIG + index, 1);
+	else
+		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 valp, 1, A_TP_RSS_PF0_CONFIG + index);
 }
 
 /**
@@ -2805,10 +4955,15 @@
  *	Writes the PF RSS Configuration Table at the specified index with the
  *	specified value.
  */
-void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index, u32 val)
+void t4_write_rss_pf_config(struct adapter *adapter, unsigned int index,
+			    u32 val)
 {
-	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			  &val, 1, A_TP_RSS_PF0_CONFIG + index);
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &val, 1,
+				A_TP_RSS_PF0_CONFIG + index, 0);
+	else
+		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				  &val, 1, A_TP_RSS_PF0_CONFIG + index);
 }
 
 /**
@@ -2824,28 +4979,40 @@
 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 			   u32 *vfl, u32 *vfh)
 {
-	u32 vrt;
+	u32 vrt, mask, data;
 
+	if (chip_id(adapter) <= CHELSIO_T5) {
+		mask = V_VFWRADDR(M_VFWRADDR);
+		data = V_VFWRADDR(index);
+	} else {
+		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
+		data = V_T6_VFWRADDR(index);
+	}
 	/*
 	 * Request that the index'th VF Table values be read into VFL/VFH.
 	 */
 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
-	vrt &= ~(F_VFRDRG | V_VFWRADDR(M_VFWRADDR) | F_VFWREN | F_KEYWREN);
-	vrt |= V_VFWRADDR(index) | F_VFRDEN;
+	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
+	vrt |= data | F_VFRDEN;
 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
 
 	/*
 	 * Grab the VFL/VFH values ...
 	 */
-	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			 vfl, 1, A_TP_RSS_VFL_CONFIG);
-	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			 vfh, 1, A_TP_RSS_VFH_CONFIG);
+	if (t4_use_ldst(adapter)) {
+		t4_fw_tp_pio_rw(adapter, vfl, 1, A_TP_RSS_VFL_CONFIG, 1);
+		t4_fw_tp_pio_rw(adapter, vfh, 1, A_TP_RSS_VFH_CONFIG, 1);
+	} else {
+		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 vfl, 1, A_TP_RSS_VFL_CONFIG);
+		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 vfh, 1, A_TP_RSS_VFH_CONFIG);
+	}
 }
 
 /**
  *	t4_write_rss_vf_config - write VF RSS Configuration Table
- *	
+ *
  *	@adapter: the adapter
  *	@index: the entry in the VF RSS table to write
  *	@vfl: the VFL to store
@@ -2857,22 +5024,35 @@
 void t4_write_rss_vf_config(struct adapter *adapter, unsigned int index,
 			    u32 vfl, u32 vfh)
 {
-	u32 vrt;
+	u32 vrt, mask, data;
 
+	if (chip_id(adapter) <= CHELSIO_T5) {
+		mask = V_VFWRADDR(M_VFWRADDR);
+		data = V_VFWRADDR(index);
+	} else {
+		mask = V_T6_VFWRADDR(M_T6_VFWRADDR);
+		data = V_T6_VFWRADDR(index);
+	}
+
 	/*
 	 * Load up VFL/VFH with the values to be written ...
 	 */
-	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			  &vfl, 1, A_TP_RSS_VFL_CONFIG);
-	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			  &vfh, 1, A_TP_RSS_VFH_CONFIG);
+	if (t4_use_ldst(adapter)) {
+		t4_fw_tp_pio_rw(adapter, &vfl, 1, A_TP_RSS_VFL_CONFIG, 0);
+		t4_fw_tp_pio_rw(adapter, &vfh, 1, A_TP_RSS_VFH_CONFIG, 0);
+	} else {
+		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				  &vfl, 1, A_TP_RSS_VFL_CONFIG);
+		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				  &vfh, 1, A_TP_RSS_VFH_CONFIG);
+	}
 
 	/*
 	 * Write the VFL/VFH into the VF Table at index'th location.
 	 */
 	vrt = t4_read_reg(adapter, A_TP_RSS_CONFIG_VRT);
-	vrt &= ~(F_VFRDRG | F_VFRDEN | V_VFWRADDR(M_VFWRADDR) | F_KEYWREN);
-	vrt |= V_VFWRADDR(index) | F_VFWREN;
+	vrt &= ~(F_VFRDRG | F_VFWREN | F_KEYWREN | mask);
+	vrt |= data | F_VFRDEN;
 	t4_write_reg(adapter, A_TP_RSS_CONFIG_VRT, vrt);
 }
 
@@ -2886,8 +5066,11 @@
 {
 	u32 pfmap;
 
-	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			 &pfmap, 1, A_TP_RSS_PF_MAP);
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 1);
+	else
+		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 &pfmap, 1, A_TP_RSS_PF_MAP);
 	return pfmap;
 }
 
@@ -2900,8 +5083,11 @@
  */
 void t4_write_rss_pf_map(struct adapter *adapter, u32 pfmap)
 {
-	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			  &pfmap, 1, A_TP_RSS_PF_MAP);
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &pfmap, 1, A_TP_RSS_PF_MAP, 0);
+	else
+		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				  &pfmap, 1, A_TP_RSS_PF_MAP);
 }
 
 /**
@@ -2914,8 +5100,11 @@
 {
 	u32 pfmask;
 
-	t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			 &pfmask, 1, A_TP_RSS_PF_MSK);
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 1);
+	else
+		t4_read_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 &pfmask, 1, A_TP_RSS_PF_MSK);
 	return pfmask;
 }
 
@@ -2928,37 +5117,14 @@
  */
 void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask)
 {
-	t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
-			  &pfmask, 1, A_TP_RSS_PF_MSK);
+	if (t4_use_ldst(adapter))
+		t4_fw_tp_pio_rw(adapter, &pfmask, 1, A_TP_RSS_PF_MSK, 0);
+	else
+		t4_write_indirect(adapter, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				  &pfmask, 1, A_TP_RSS_PF_MSK);
 }
 
 /**
- *	t4_set_filter_mode - configure the optional components of filter tuples
- *	@adap: the adapter
- *	@mode_map: a bitmap selecting which optional filter components to enable
- *
- *	Sets the filter mode by selecting the optional components to enable
- *	in filter tuples.  Returns 0 on success and a negative error if the
- *	requested mode needs more bits than are available for optional
- *	components.
- */
-int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
-{
-	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
-
-	int i, nbits = 0;
-
-	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
-		if (mode_map & (1 << i))
-			nbits += width[i];
-	if (nbits > FILTER_OPT_LEN)
-		return -EINVAL;
-	t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map, 1,
-			  A_TP_VLAN_PRI_MAP);
-	return 0;
-}
-
-/**
  *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
  *	@adap: the adapter
  *	@v4: holds the TCP/IP counter values
@@ -2979,18 +5145,18 @@
 	if (v4) {
 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
 				 ARRAY_SIZE(val), A_TP_MIB_TCP_OUT_RST);
-		v4->tcpOutRsts = STAT(OUT_RST);
-		v4->tcpInSegs  = STAT64(IN_SEG);
-		v4->tcpOutSegs = STAT64(OUT_SEG);
-		v4->tcpRetransSegs = STAT64(RXT_SEG);
+		v4->tcp_out_rsts = STAT(OUT_RST);
+		v4->tcp_in_segs  = STAT64(IN_SEG);
+		v4->tcp_out_segs = STAT64(OUT_SEG);
+		v4->tcp_retrans_segs = STAT64(RXT_SEG);
 	}
 	if (v6) {
 		t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
 				 ARRAY_SIZE(val), A_TP_MIB_TCP_V6OUT_RST);
-		v6->tcpOutRsts = STAT(OUT_RST);
-		v6->tcpInSegs  = STAT64(IN_SEG);
-		v6->tcpOutSegs = STAT64(OUT_SEG);
-		v6->tcpRetransSegs = STAT64(RXT_SEG);
+		v6->tcp_out_rsts = STAT(OUT_RST);
+		v6->tcp_in_segs  = STAT64(IN_SEG);
+		v6->tcp_out_segs = STAT64(OUT_SEG);
+		v6->tcp_retrans_segs = STAT64(RXT_SEG);
 	}
 #undef STAT64
 #undef STAT
@@ -3006,18 +5172,27 @@
  */
 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 {
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->macInErrs,
-			 12, A_TP_MIB_MAC_IN_ERR_0);
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlCongDrops,
-			 8, A_TP_MIB_TNL_CNG_DROP_0);
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tnlTxDrops,
-			 4, A_TP_MIB_TNL_DROP_0);
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->ofldVlanDrops,
-			 4, A_TP_MIB_OFD_VLN_DROP_0);
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->tcp6InErrs,
-			 4, A_TP_MIB_TCP_V6IN_ERR_0);
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->ofldNoNeigh,
-			 2, A_TP_MIB_OFD_ARP_DROP);
+	int nchan = adap->chip_params->nchan;
+
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->mac_in_errs, nchan, A_TP_MIB_MAC_IN_ERR_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->hdr_in_errs, nchan, A_TP_MIB_HDR_IN_ERR_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->tcp_in_errs, nchan, A_TP_MIB_TCP_IN_ERR_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->tnl_cong_drops, nchan, A_TP_MIB_TNL_CNG_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->ofld_chan_drops, nchan, A_TP_MIB_OFD_CHN_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->tnl_tx_drops, nchan, A_TP_MIB_TNL_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->ofld_vlan_drops, nchan, A_TP_MIB_OFD_VLN_DROP_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			st->tcp6_in_errs, nchan, A_TP_MIB_TCP_V6IN_ERR_0);
+
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA,
+			 &st->ofld_no_neigh, 2, A_TP_MIB_OFD_ARP_DROP);
 }
 
 /**
@@ -3029,8 +5204,10 @@
  */
 void t4_tp_get_proxy_stats(struct adapter *adap, struct tp_proxy_stats *st)
 {
+	int nchan = adap->chip_params->nchan;
+
 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->proxy,
-			 4, A_TP_MIB_TNL_LPBK_0);
+			 nchan, A_TP_MIB_TNL_LPBK_0);
 }
 
 /**
@@ -3042,8 +5219,12 @@
  */
 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
 {
+	int nchan = adap->chip_params->nchan;
+
 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->req,
-			 8, A_TP_MIB_CPL_IN_REQ_0);
+			 nchan, A_TP_MIB_CPL_IN_REQ_0);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, st->rsp,
+			 nchan, A_TP_MIB_CPL_OUT_RSP_0);
 }
 
 /**
@@ -3055,8 +5236,8 @@
  */
 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
 {
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_mod,
-			 2, A_TP_MIB_RQE_DFR_MOD);
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->rqe_dfr_pkt,
+			 2, A_TP_MIB_RQE_DFR_PKT);
 }
 
 /**
@@ -3072,13 +5253,13 @@
 {
 	u32 val[2];
 
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDDP,
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_ddp,
 			 1, A_TP_MIB_FCOE_DDP_0 + idx);
-	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->framesDrop,
+	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, &st->frames_drop,
 			 1, A_TP_MIB_FCOE_DROP_0 + idx);
 	t4_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_DATA, val,
 			 2, A_TP_MIB_FCOE_BYTE_0_HI + 2 * idx);
-	st->octetsDDP = ((u64)val[0] << 32) | val[1];
+	st->octets_ddp = ((u64)val[0] << 32) | val[1];
 }
 
 /**
@@ -3144,24 +5325,6 @@
 }
 
 /**
- *	t4_read_pace_tbl - read the pace table
- *	@adap: the adapter
- *	@pace_vals: holds the returned values
- *
- *	Returns the values of TP's pace table in microseconds.
- */
-void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
-{
-	unsigned int i, v;
-
-	for (i = 0; i < NTX_SCHED; i++) {
-		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
-		v = t4_read_reg(adap, A_TP_PACE_TABLE);
-		pace_vals[i] = dack_ticks_to_usec(adap, v);
-	}
-}
-
-/**
  *	t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
  *	@adap: the adapter
  *	@addr: the indirect TP register address
@@ -3185,7 +5348,7 @@
  *
  *	Initialize the congestion control parameters.
  */
-static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
 {
 	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
 	a[9] = 2;
@@ -3286,7 +5449,7 @@
 
 	if (n > NTX_SCHED)
 	    return -ERANGE;
-    
+
 	/* convert values from us to dack ticks, rounding to closest value */
 	for (i = 0; i < n; i++, pace_vals++) {
 		vals[i] = (1000 * *pace_vals + tick_ns / 2) / tick_ns;
@@ -3373,46 +5536,6 @@
 	return 0;
 }
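The pace conversion above rounds to the nearest tick rather than
truncating: with a hypothetical dack tick of tick_ns = 800, a requested
pace of 10 us yields (1000 * 10 + 400) / 800 = 13 ticks, where plain
integer division would give 12.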
 
-/**
- *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
- *	@adap: the adapter
- *	@sched: the scheduler index
- *	@kbps: the byte rate in Kbps
- *	@ipg: the interpacket delay in tenths of nanoseconds
- *
- *	Return the current configuration of a HW Tx scheduler.
- */
-void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
-		     unsigned int *ipg)
-{
-	unsigned int v, addr, bpt, cpt;
-
-	if (kbps) {
-		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
-		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
-		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
-		if (sched & 1)
-			v >>= 16;
-		bpt = (v >> 8) & 0xff;
-		cpt = v & 0xff;
-		if (!cpt)
-			*kbps = 0;        /* scheduler disabled */
-		else {
-			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
-			*kbps = (v * bpt) / 125;
-		}
-	}
-	if (ipg) {
-		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
-		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
-		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
-		if (sched & 1)
-			v >>= 16;
-		v &= 0xffff;
-		*ipg = (10000 * v) / core_ticks_per_usec(adap);
-	}
-}
-
 /*
  * Calculates a rate in bytes/s given the number of 256-byte units per 4K core
  * clocks.  The formula is
@@ -3446,14 +5569,18 @@
 	v = t4_read_reg(adap, A_TP_TX_TRATE);
 	nic_rate[0] = chan_rate(adap, G_TNLRATE0(v));
 	nic_rate[1] = chan_rate(adap, G_TNLRATE1(v));
-	nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
-	nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
+	if (adap->chip_params->nchan > 2) {
+		nic_rate[2] = chan_rate(adap, G_TNLRATE2(v));
+		nic_rate[3] = chan_rate(adap, G_TNLRATE3(v));
+	}
 
 	v = t4_read_reg(adap, A_TP_TX_ORATE);
 	ofld_rate[0] = chan_rate(adap, G_OFDRATE0(v));
 	ofld_rate[1] = chan_rate(adap, G_OFDRATE1(v));
-	ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
-	ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
+	if (adap->chip_params->nchan > 2) {
+		ofld_rate[2] = chan_rate(adap, G_OFDRATE2(v));
+		ofld_rate[3] = chan_rate(adap, G_OFDRATE3(v));
+	}
 }
 
 /**
@@ -3463,22 +5590,24 @@
  *	@idx: which filter to configure
  *	@enable: whether to enable or disable the filter
  *
- *	Configures one of the tracing filters available in HW.  If @enable is
- *	%0 @tp is not examined and may be %NULL. The user is responsible to
- *	set the single/multiple trace mode by writing to A_MPS_TRC_CFG register
- *	by using "cxgbtool iface reg reg_addr=val" command. See t4_sniffer/
- *	docs/readme.txt for a complete description of how to set up tracing on
- *	T4.
+ *	Configures one of the tracing filters available in HW.  If @tp is %NULL
+ *	it indicates that the filter is already written in the register and it
+ *	just needs to be enabled or disabled.
  */
-int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, int idx,
-			int enable)
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+    int idx, int enable)
 {
 	int i, ofst = idx * 4;
 	u32 data_reg, mask_reg, cfg;
 	u32 multitrc = F_TRCMULTIFILTER;
+	u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
 
-	if (!enable) {
-		t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+	if (idx < 0 || idx >= NTRACE)
+		return -EINVAL;
+
+	if (tp == NULL || !enable) {
+		t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
+		    enable ? en : 0);
 		return 0;
 	}
 
@@ -3486,10 +5615,10 @@
 	 * TODO - After T4 data book is updated, specify the exact
 	 * section below.
 	 *
-	 * See T4 data book - MPS section for a complete description 
-	 * of the below if..else handling of A_MPS_TRC_CFG register 
+	 * See T4 data book - MPS section for a complete description
+	 * of the below if..else handling of A_MPS_TRC_CFG register
 	 * value.
-	 */ 
+	 */
 	cfg = t4_read_reg(adap, A_MPS_TRC_CFG);
 	if (cfg & F_TRCMULTIFILTER) {
 		/*
@@ -3498,11 +5627,10 @@
 		 * minus 2 flits for CPL_TRACE_PKT header.
 		 */
 		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
-			return -EINVAL;		
-	}
-	else {
+			return -EINVAL;
+	} else {
 		/*
-		 * If multiple tracers are disabled, to avoid deadlocks 
+		 * If multiple tracers are disabled, to avoid deadlocks
 		 * maximum packet capture size of 9600 bytes is recommended.
 		 * Also in this mode, only trace0 can be enabled and running.
 		 */
@@ -3511,12 +5639,13 @@
 			return -EINVAL;
 	}
 
-	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > M_TFLENGTH ||
-	    tp->skip_ofst > M_TFOFFSET || tp->min_len > M_TFMINPKTSIZE)
+	if (tp->port > (is_t4(adap) ? 11 : 19) || tp->invert > 1 ||
+	    tp->skip_len > M_TFLENGTH || tp->skip_ofst > M_TFOFFSET ||
+	    tp->min_len > M_TFMINPKTSIZE)
 		return -EINVAL;
 
 	/* stop the tracer we'll be changing */
-	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+	t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
 
 	idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
 	data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
@@ -3530,8 +5659,10 @@
 		     V_TFCAPTUREMAX(tp->snap_len) |
 		     V_TFMINPKTSIZE(tp->min_len));
 	t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
-		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) |
-		     V_TFPORT(tp->port) | F_TFEN | V_TFINVERTMATCH(tp->invert));
+		     V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
+		     (is_t4(adap) ?
+		     V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
+		     V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
 
 	return 0;
 }
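A usage sketch for the validated fields (the values are illustrative, not
a recommended configuration):

/* Capture the first 128 bytes of packets on port 0 via tracer 0. */
static int
example_enable_trace0(struct adapter *adap)
{
	struct trace_params tp;

	memset(&tp, 0, sizeof(tp));
	tp.port = 0;
	tp.snap_len = 128;
	return t4_set_trace_filter(adap, &tp, 0, 1);
}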
@@ -3555,13 +5686,19 @@
 	ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
 	ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
 
-	*enabled = !!(ctla & F_TFEN);
+	if (is_t4(adap)) {
+		*enabled = !!(ctla & F_TFEN);
+		tp->port =  G_TFPORT(ctla);
+		tp->invert = !!(ctla & F_TFINVERTMATCH);
+	} else {
+		*enabled = !!(ctla & F_T5_TFEN);
+		tp->port = G_T5_TFPORT(ctla);
+		tp->invert = !!(ctla & F_T5_TFINVERTMATCH);
+	}
 	tp->snap_len = G_TFCAPTUREMAX(ctlb);
 	tp->min_len = G_TFMINPKTSIZE(ctlb);
 	tp->skip_ofst = G_TFOFFSET(ctla);
 	tp->skip_len = G_TFLENGTH(ctla);
-	tp->invert = !!(ctla & F_TFINVERTMATCH);
-	tp->port = G_TFPORT(ctla);
 
 	ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
 	data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
@@ -3584,11 +5721,19 @@
 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 {
 	int i;
+	u32 data[2];
 
-	for (i = 0; i < PM_NSTATS; i++) {
+	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
 		t4_write_reg(adap, A_PM_TX_STAT_CONFIG, i + 1);
 		cnt[i] = t4_read_reg(adap, A_PM_TX_STAT_COUNT);
-		cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
+		if (is_t4(adap))
+			cycles[i] = t4_read_reg64(adap, A_PM_TX_STAT_LSB);
+		else {
+			t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
+					 A_PM_TX_DBG_DATA, data, 2,
+					 A_PM_TX_DBG_STAT_MSB);
+			cycles[i] = (((u64)data[0] << 32) | data[1]);
+		}
 	}
 }
 
@@ -3603,16 +5748,24 @@
 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 {
 	int i;
+	u32 data[2];
 
-	for (i = 0; i < PM_NSTATS; i++) {
+	for (i = 0; i < adap->chip_params->pm_stats_cnt; i++) {
 		t4_write_reg(adap, A_PM_RX_STAT_CONFIG, i + 1);
 		cnt[i] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
-		cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
+		if (is_t4(adap)) {
+			cycles[i] = t4_read_reg64(adap, A_PM_RX_STAT_LSB);
+		} else {
+			t4_read_indirect(adap, A_PM_RX_DBG_CTRL,
+					 A_PM_RX_DBG_DATA, data, 2,
+					 A_PM_RX_DBG_STAT_MSB);
+			cycles[i] = (((u64)data[0] << 32) | data[1]);
+		}
 	}
 }
 
 /**
- *	get_mps_bg_map - return the buffer groups associated with a port
+ *	t4_get_mps_bg_map - return the buffer groups associated with a port
  *	@adap: the adapter
  *	@idx: the port index
  *
@@ -3620,20 +5773,56 @@
  *	with the given port.  Bit i is set if buffer group i is used by the
  *	port.
  */
-static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+static unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
 {
 	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
 
 	if (n == 0)
 		return idx == 0 ? 0xf : 0;
-	if (n == 1)
+	if (n == 1 && chip_id(adap) <= CHELSIO_T5)
 		return idx < 2 ? (3 << (2 * idx)) : 0;
 	return 1 << idx;
 }
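Concretely: with a single port (n == 0) the port owns all four buffer
groups (0xf); with two ports on T4/T5 (n == 1) port 0 gets groups 0-1
(0x3) and port 1 gets groups 2-3 (0xc); otherwise each port gets exactly
one group (1 << idx).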
 
 /**
+ *      t4_get_port_type_description - return Port Type string description
+ *      @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+	static const char *const port_type_description[] = {
+		"Fiber_XFI",
+		"Fiber_XAUI",
+		"BT_SGMII",
+		"BT_XFI",
+		"BT_XAUI",
+		"KX4",
+		"CX4",
+		"KX",
+		"KR",
+		"SFP",
+		"BP_AP",
+		"BP4_AP",
+		"QSFP_10G",
+		"QSA",
+		"QSFP",
+		"BP40_BA",
+		"KR4_100G",
+		"CR4_QSFP",
+		"CR_QSFP",
+		"CR2_QSFP",
+		"SFP28",
+		"KR_SFP28",
+	};
+
+	if (port_type < ARRAY_SIZE(port_type_description))
+		return port_type_description[port_type];
+	return "UNKNOWN";
+}
+
+/**
  *      t4_get_port_stats_offset - collect port stats relative to a previous
- *                                 snapshot
+ *				   snapshot
  *      @adap: The adapter
  *      @idx: The port
  *      @stats: Current stats to fill
@@ -3663,64 +5852,87 @@
  */
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 {
-	u32 bgmap = get_mps_bg_map(adap, idx);
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
+	u32 stat_ctl;
 
 #define GET_STAT(name) \
-	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))
+	t4_read_reg64(adap, \
+	(is_t4(adap) ? PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
+	T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
 
-	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
-	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
-	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
-	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
-	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
-	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
-	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
-	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
-	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
-	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
-	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
-	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
-	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
-	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
-	p->tx_drop             = GET_STAT(TX_PORT_DROP);
-	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
-	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
-	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
-	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
-	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
-	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
-	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
-	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
+	stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
 
-	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
-	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
-	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
-	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
-	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
-	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
-	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
-	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
-	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
-	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
-	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
-	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
-	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
-	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
-	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
-	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
-	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
-	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
-	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
-	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
-	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
-	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
-	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
-	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
-	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
-	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
-	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
+	p->tx_pause		= GET_STAT(TX_PORT_PAUSE);
+	p->tx_octets		= GET_STAT(TX_PORT_BYTES);
+	p->tx_frames		= GET_STAT(TX_PORT_FRAMES);
+	p->tx_bcast_frames	= GET_STAT(TX_PORT_BCAST);
+	p->tx_mcast_frames	= GET_STAT(TX_PORT_MCAST);
+	p->tx_ucast_frames	= GET_STAT(TX_PORT_UCAST);
+	p->tx_error_frames	= GET_STAT(TX_PORT_ERROR);
+	p->tx_frames_64		= GET_STAT(TX_PORT_64B);
+	p->tx_frames_65_127	= GET_STAT(TX_PORT_65B_127B);
+	p->tx_frames_128_255	= GET_STAT(TX_PORT_128B_255B);
+	p->tx_frames_256_511	= GET_STAT(TX_PORT_256B_511B);
+	p->tx_frames_512_1023	= GET_STAT(TX_PORT_512B_1023B);
+	p->tx_frames_1024_1518	= GET_STAT(TX_PORT_1024B_1518B);
+	p->tx_frames_1519_max	= GET_STAT(TX_PORT_1519B_MAX);
+	p->tx_drop		= GET_STAT(TX_PORT_DROP);
+	p->tx_ppp0		= GET_STAT(TX_PORT_PPP0);
+	p->tx_ppp1		= GET_STAT(TX_PORT_PPP1);
+	p->tx_ppp2		= GET_STAT(TX_PORT_PPP2);
+	p->tx_ppp3		= GET_STAT(TX_PORT_PPP3);
+	p->tx_ppp4		= GET_STAT(TX_PORT_PPP4);
+	p->tx_ppp5		= GET_STAT(TX_PORT_PPP5);
+	p->tx_ppp6		= GET_STAT(TX_PORT_PPP6);
+	p->tx_ppp7		= GET_STAT(TX_PORT_PPP7);
 
+	if (chip_id(adap) >= CHELSIO_T5) {
+		if (stat_ctl & F_COUNTPAUSESTATTX) {
+			p->tx_frames -= p->tx_pause;
+			p->tx_octets -= p->tx_pause * 64;
+		}
+		if (stat_ctl & F_COUNTPAUSEMCTX)
+			p->tx_mcast_frames -= p->tx_pause;
+	}
+
+	p->rx_pause		= GET_STAT(RX_PORT_PAUSE);
+	p->rx_octets		= GET_STAT(RX_PORT_BYTES);
+	p->rx_frames		= GET_STAT(RX_PORT_FRAMES);
+	p->rx_bcast_frames	= GET_STAT(RX_PORT_BCAST);
+	p->rx_mcast_frames	= GET_STAT(RX_PORT_MCAST);
+	p->rx_ucast_frames	= GET_STAT(RX_PORT_UCAST);
+	p->rx_too_long		= GET_STAT(RX_PORT_MTU_ERROR);
+	p->rx_jabber		= GET_STAT(RX_PORT_MTU_CRC_ERROR);
+	p->rx_fcs_err		= GET_STAT(RX_PORT_CRC_ERROR);
+	p->rx_len_err		= GET_STAT(RX_PORT_LEN_ERROR);
+	p->rx_symbol_err	= GET_STAT(RX_PORT_SYM_ERROR);
+	p->rx_runt		= GET_STAT(RX_PORT_LESS_64B);
+	p->rx_frames_64		= GET_STAT(RX_PORT_64B);
+	p->rx_frames_65_127	= GET_STAT(RX_PORT_65B_127B);
+	p->rx_frames_128_255	= GET_STAT(RX_PORT_128B_255B);
+	p->rx_frames_256_511	= GET_STAT(RX_PORT_256B_511B);
+	p->rx_frames_512_1023	= GET_STAT(RX_PORT_512B_1023B);
+	p->rx_frames_1024_1518	= GET_STAT(RX_PORT_1024B_1518B);
+	p->rx_frames_1519_max	= GET_STAT(RX_PORT_1519B_MAX);
+	p->rx_ppp0		= GET_STAT(RX_PORT_PPP0);
+	p->rx_ppp1		= GET_STAT(RX_PORT_PPP1);
+	p->rx_ppp2		= GET_STAT(RX_PORT_PPP2);
+	p->rx_ppp3		= GET_STAT(RX_PORT_PPP3);
+	p->rx_ppp4		= GET_STAT(RX_PORT_PPP4);
+	p->rx_ppp5		= GET_STAT(RX_PORT_PPP5);
+	p->rx_ppp6		= GET_STAT(RX_PORT_PPP6);
+	p->rx_ppp7		= GET_STAT(RX_PORT_PPP7);
+
+	if (chip_id(adap) >= CHELSIO_T5) {
+		if (stat_ctl & F_COUNTPAUSESTATRX) {
+			p->rx_frames -= p->rx_pause;
+			p->rx_octets -= p->rx_pause * 64;
+		}
+		if (stat_ctl & F_COUNTPAUSEMCRX)
+			p->rx_mcast_frames -= p->rx_pause;
+	}
+
 	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
 	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
 	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -3735,33 +5947,6 @@
 }
 
 /**
- *	t4_clr_port_stats - clear port statistics
- *	@adap: the adapter
- *	@idx: the port index
- *
- *	Clear HW statistics for the given port.
- */
-void t4_clr_port_stats(struct adapter *adap, int idx)
-{
-	unsigned int i;
-	u32 bgmap = get_mps_bg_map(adap, idx);
-
-	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
-	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
-		t4_write_reg(adap, PORT_REG(idx, i), 0);
-	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
-	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
-		t4_write_reg(adap, PORT_REG(idx, i), 0);
-	for (i = 0; i < 4; i++)
-		if (bgmap & (1 << i)) {
-			t4_write_reg(adap,
-				A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
-			t4_write_reg(adap,
-				A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
-		}
-}
-
-/**
  *	t4_get_lb_stats - collect loopback port statistics
  *	@adap: the adapter
  *	@idx: the loopback port index
@@ -3771,28 +5956,30 @@
  */
 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
 {
-	u32 bgmap = get_mps_bg_map(adap, idx);
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
 
 #define GET_STAT(name) \
-	t4_read_reg64(adap, PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L))
+	t4_read_reg64(adap, \
+	(is_t4(adap) ? \
+	PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L) : \
+	T5_PORT_REG(idx, A_MPS_PORT_STAT_LB_PORT_##name##_L)))
 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
 
-	p->octets           = GET_STAT(BYTES);
-	p->frames           = GET_STAT(FRAMES);
-	p->bcast_frames     = GET_STAT(BCAST);
-	p->mcast_frames     = GET_STAT(MCAST);
-	p->ucast_frames     = GET_STAT(UCAST);
-	p->error_frames     = GET_STAT(ERROR);
+	p->octets	= GET_STAT(BYTES);
+	p->frames	= GET_STAT(FRAMES);
+	p->bcast_frames	= GET_STAT(BCAST);
+	p->mcast_frames	= GET_STAT(MCAST);
+	p->ucast_frames	= GET_STAT(UCAST);
+	p->error_frames	= GET_STAT(ERROR);
 
-	p->frames_64        = GET_STAT(64B);
-	p->frames_65_127    = GET_STAT(65B_127B);
-	p->frames_128_255   = GET_STAT(128B_255B);
-	p->frames_256_511   = GET_STAT(256B_511B);
-	p->frames_512_1023  = GET_STAT(512B_1023B);
-	p->frames_1024_1518 = GET_STAT(1024B_1518B);
-	p->frames_1519_max  = GET_STAT(1519B_MAX);
-	p->drop             = t4_read_reg(adap, PORT_REG(idx,
-					  A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
+	p->frames_64		= GET_STAT(64B);
+	p->frames_65_127	= GET_STAT(65B_127B);
+	p->frames_128_255	= GET_STAT(128B_255B);
+	p->frames_256_511	= GET_STAT(256B_511B);
+	p->frames_512_1023	= GET_STAT(512B_1023B);
+	p->frames_1024_1518	= GET_STAT(1024B_1518B);
+	p->frames_1519_max	= GET_STAT(1519B_MAX);
+	p->drop			= GET_STAT(DROP_FRAMES);
 
 	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
 	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
@@ -3818,14 +6005,26 @@
 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
 			 const u8 *addr)
 {
+	u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
+
+	if (is_t4(adap)) {
+		mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
+		mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
+		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
+	} else {
+		mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
+		mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
+		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+	}
+
 	if (addr) {
-		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO),
+		t4_write_reg(adap, mag_id_reg_l,
 			     (addr[2] << 24) | (addr[3] << 16) |
 			     (addr[4] << 8) | addr[5]);
-		t4_write_reg(adap, PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI),
+		t4_write_reg(adap, mag_id_reg_h,
 			     (addr[0] << 8) | addr[1]);
 	}
-	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), F_MAGICEN,
+	t4_set_reg_field(adap, port_cfg_reg, F_MAGICEN,
 			 V_MAGICEN(addr != NULL));
 }
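
A sketch of arming and disarming magic-packet wake-up with the routine
above (the MAC address is a placeholder):

	u8 mac[6] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };	/* placeholder */

	t4_wol_magic_enable(adap, port, mac);	/* program MACID regs, set F_MAGICEN */
	t4_wol_magic_enable(adap, port, NULL);	/* addr == NULL clears F_MAGICEN */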
 
@@ -3848,16 +6047,23 @@
 		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
 {
 	int i;
+	u32 port_cfg_reg;
 
+	if (is_t4(adap))
+		port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
+	else
+		port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+
 	if (!enable) {
-		t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2),
-				 F_PATEN, 0);
+		t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
 		return 0;
 	}
 	if (map > 0xff)
 		return -EINVAL;
 
-#define EPIO_REG(name) PORT_REG(port, A_XGMAC_PORT_EPIO_##name)
+#define EPIO_REG(name) \
+	(is_t4(adap) ? PORT_REG(port, A_XGMAC_PORT_EPIO_##name) : \
+	T5_PORT_REG(port, A_MAC_PORT_EPIO_##name))
 
 	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
 	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -3883,82 +6089,58 @@
 	}
 #undef EPIO_REG
 
-	t4_set_reg_field(adap, PORT_REG(port, A_XGMAC_PORT_CFG2), 0, F_PATEN);
+	t4_set_reg_field(adap, port_cfg_reg, 0, F_PATEN);
 	return 0;
 }
 
-/**
- *	t4_mk_filtdelwr - create a delete filter WR
- *	@ftid: the filter ID
- *	@wr: the filter work request to populate
- *	@qid: ingress queue to receive the delete notification
+/*     t4_mk_filtdelwr - create a delete filter WR
+ *     @ftid: the filter ID
+ *     @wr: the filter work request to populate
+ *     @qid: ingress queue to receive the delete notification
  *
- *	Creates a filter work request to delete the supplied filter.  If @qid is
- *	negative the delete notification is suppressed.
+ *     Creates a filter work request to delete the supplied filter.  If @qid is
+ *     negative the delete notification is suppressed.
  */
 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
 {
 	memset(wr, 0, sizeof(*wr));
-	wr->op_pkd = htonl(V_FW_WR_OP(FW_FILTER_WR));
-	wr->len16_pkd = htonl(V_FW_WR_LEN16(sizeof(*wr) / 16));
-	wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
-			      V_FW_FILTER_WR_NOREPLY(qid < 0));
-	wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
+	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+				    V_FW_FILTER_WR_NOREPLY(qid < 0));
+	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
 	if (qid >= 0)
-		wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+		wr->rx_chan_rx_rpl_iq =
+				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
 }
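
Usage sketch for the helper above; note the work request still has to be
queued to the hardware by the caller, which is omitted here:

	struct fw_filter_wr wr;

	t4_mk_filtdelwr(ftid, &wr, qid);	/* reply goes to ingress queue qid */
	t4_mk_filtdelwr(ftid, &wr, -1);		/* qid < 0: no delete notification */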
 
 #define INIT_CMD(var, cmd, rd_wr) do { \
-	(var).op_to_write = htonl(V_FW_CMD_OP(FW_##cmd##_CMD) | \
-				  F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
-	(var).retval_len16 = htonl(FW_LEN16(var)); \
+	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
+					F_FW_CMD_REQUEST | \
+					F_FW_CMD_##rd_wr); \
+	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
 } while (0)
 
-int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val)
+int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
+			  u32 addr, u32 val)
 {
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.addrval.addr = htonl(addr);
-	c.u.addrval.val = htonl(val);
+	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE);
+	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+					F_FW_CMD_REQUEST |
+					F_FW_CMD_WRITE |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.addrval.addr = cpu_to_be32(addr);
+	c.u.addrval.val = cpu_to_be32(val);
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
 /**
- *	t4_i2c_rd - read a byte from an i2c addressable device
- *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *	@port_id: the port id
- *	@dev_addr: the i2c device address
- *	@offset: the byte offset to read from
- *	@valp: where to store the value
- */
-int t4_i2c_rd(struct adapter *adap, unsigned int mbox, unsigned int port_id,
-	       u8 dev_addr, u8 offset, u8 *valp)
-{
-	int ret;
-	struct fw_ldst_cmd c;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-		F_FW_CMD_READ |
-		V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_I2C));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.i2c_deprecated.pid_pkd = V_FW_LDST_CMD_PID(port_id);
-	c.u.i2c_deprecated.base = dev_addr;
-	c.u.i2c_deprecated.boffset = offset;
-
-	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-	if (ret == 0)
-		*valp = c.u.i2c_deprecated.data;
-	return ret;
-}
-
-/**
  *	t4_mdio_rd - read a PHY register through MDIO
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -3973,19 +6155,22 @@
 	       unsigned int mmd, unsigned int reg, unsigned int *valp)
 {
 	int ret;
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-		F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
-				   V_FW_LDST_CMD_MMD(mmd));
-	c.u.mdio.raddr = htons(reg);
+	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
+	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+					F_FW_CMD_REQUEST | F_FW_CMD_READ |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
+					 V_FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = cpu_to_be16(reg);
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret == 0)
-		*valp = ntohs(c.u.mdio.rval);
+		*valp = be16_to_cpu(c.u.mdio.rval);
 	return ret;
 }
 
@@ -4003,116 +6188,212 @@
 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 	       unsigned int mmd, unsigned int reg, unsigned int val)
 {
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-		F_FW_CMD_WRITE | V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.mdio.paddr_mmd = htons(V_FW_LDST_CMD_PADDR(phy_addr) |
-				   V_FW_LDST_CMD_MMD(mmd));
-	c.u.mdio.raddr = htons(reg);
-	c.u.mdio.rval = htons(val);
+	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO);
+	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+					F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = cpu_to_be16(V_FW_LDST_CMD_PADDR(phy_addr) |
+					 V_FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = cpu_to_be16(reg);
+	c.u.mdio.rval = cpu_to_be16(val);
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
 /**
- *	t4_sge_ctxt_flush - flush the SGE context cache
+ *	t4_sge_decode_idma_state - decode the idma state
  *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *
- *	Issues a FW command through the given mailbox to flush the
- *	SGE context cache.
+ *	@state: the state idma is stuck in
  */
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+void t4_sge_decode_idma_state(struct adapter *adapter, int state)
 {
-	int ret;
-	struct fw_ldst_cmd c;
+	static const char * const t4_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"Not used",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATA_FL_PREP",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+		"IDMA_FL_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATAFL_DONE",
+		"IDMA_FL_REQ_HEADERFL_DONE",
+	};
+	static const char * const t5_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_ALMOST_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_SEND_FIFO_TO_IMSG",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_DROP_SEND_INC",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+	};
+	static const char * const t6_decode[] = {
+		"IDMA_IDLE",
+		"IDMA_PUSH_MORE_CPL_FIFO",
+		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
+		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PCIEHDR",
+		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
+		"IDMA_PHYSADDR_SEND_PAYLOAD",
+		"IDMA_FL_REQ_DATA_FL",
+		"IDMA_FL_DROP",
+		"IDMA_FL_DROP_SEND_INC",
+		"IDMA_FL_H_REQ_HEADER_FL",
+		"IDMA_FL_H_SEND_PCIEHDR",
+		"IDMA_FL_H_PUSH_CPL_FIFO",
+		"IDMA_FL_H_SEND_CPL",
+		"IDMA_FL_H_SEND_IP_HDR_FIRST",
+		"IDMA_FL_H_SEND_IP_HDR",
+		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
+		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_H_SEND_IP_HDR_PADDING",
+		"IDMA_FL_D_SEND_PCIEHDR",
+		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
+		"IDMA_FL_D_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_PCIEHDR",
+		"IDMA_FL_PUSH_CPL_FIFO",
+		"IDMA_FL_SEND_CPL",
+		"IDMA_FL_SEND_PAYLOAD_FIRST",
+		"IDMA_FL_SEND_PAYLOAD",
+		"IDMA_FL_REQ_NEXT_DATA_FL",
+		"IDMA_FL_SEND_NEXT_PCIEHDR",
+		"IDMA_FL_SEND_PADDING",
+		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
+	};
+	static const u32 sge_regs[] = {
+		A_SGE_DEBUG_DATA_LOW_INDEX_2,
+		A_SGE_DEBUG_DATA_LOW_INDEX_3,
+		A_SGE_DEBUG_DATA_HIGH_INDEX_10,
+	};
+	const char * const *sge_idma_decode;
+	int sge_idma_decode_nstates;
+	int i;
+	unsigned int chip_version = chip_id(adapter);
 
-	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-			F_FW_CMD_READ |
-			V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.idctxt.msg_ctxtflush = htonl(F_FW_LDST_CMD_CTXTFLUSH);
+	/* Select the right set of decode strings to dump depending on the
+	 * adapter chip type.
+	 */
+	switch (chip_version) {
+	case CHELSIO_T4:
+		sge_idma_decode = (const char * const *)t4_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
+		break;
 
-	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-	return ret;
+	case CHELSIO_T5:
+		sge_idma_decode = (const char * const *)t5_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
+		break;
+
+	case CHELSIO_T6:
+		sge_idma_decode = (const char * const *)t6_decode;
+		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
+		break;
+
+	default:
+		CH_ERR(adapter,	"Unsupported chip version %d\n", chip_version);
+		return;
+	}
+
+	if (state < sge_idma_decode_nstates)
+		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
+	else
+		CH_WARN(adapter, "idma state %d unknown\n", state);
+
+	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
+		CH_WARN(adapter, "SGE register %#x value %#x\n",
+			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
 }
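
A sketch of the intended use; a state value of 0 decodes to "IDMA_IDLE" on
every supported chip, so it is a safe illustrative input:

	/* Logs the symbolic IDMA state for this chip generation and dumps
	 * the three SGE debug registers listed in sge_regs[]. */
	t4_sge_decode_idma_state(adapter, 0);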
 
 /**
- *	t4_sge_ctxt_rd - read an SGE context through FW
- *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *	@cid: the context id
- *	@ctype: the context type
- *	@data: where to store the context data
+ *      t4_sge_ctxt_flush - flush the SGE context cache
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
  *
- *	Issues a FW command through the given mailbox to read an SGE context.
+ *      Issues a FW command through the given mailbox to flush the
+ *      SGE context cache.
  */
-int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
-		   enum ctxt_type ctype, u32 *data)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
 {
 	int ret;
+	u32 ldst_addrspace;
 	struct fw_ldst_cmd c;
 
-	if (ctype == CTXT_EGRESS)
-		ret = FW_LDST_ADDRSPC_SGE_EGRC;
-	else if (ctype == CTXT_INGRESS)
-		ret = FW_LDST_ADDRSPC_SGE_INGC;
-	else if (ctype == CTXT_FLM)
-		ret = FW_LDST_ADDRSPC_SGE_FLMC;
-	else
-		ret = FW_LDST_ADDRSPC_SGE_CONMC;
-
 	memset(&c, 0, sizeof(c));
-	c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST |
-				  F_FW_CMD_READ | V_FW_LDST_CMD_ADDRSPACE(ret));
-	c.cycles_to_len16 = htonl(FW_LEN16(c));
-	c.u.idctxt.physid = htonl(cid);
+	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_SGE_EGRC);
+	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+					F_FW_CMD_REQUEST | F_FW_CMD_READ |
+					ldst_addrspace);
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.idctxt.msg_ctxtflush = cpu_to_be32(F_FW_LDST_CMD_CTXTFLUSH);
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-	if (ret == 0) {
-		data[0] = ntohl(c.u.idctxt.ctxt_data0);
-		data[1] = ntohl(c.u.idctxt.ctxt_data1);
-		data[2] = ntohl(c.u.idctxt.ctxt_data2);
-		data[3] = ntohl(c.u.idctxt.ctxt_data3);
-		data[4] = ntohl(c.u.idctxt.ctxt_data4);
-		data[5] = ntohl(c.u.idctxt.ctxt_data5);
-	}
 	return ret;
 }
 
 /**
- *	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
- *	@adap: the adapter
- *	@cid: the context id
- *	@ctype: the context type
- *	@data: where to store the context data
- *
- *	Reads an SGE context directly, bypassing FW.  This is only for
- *	debugging when FW is unavailable.
- */
-int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
-		      u32 *data)
-{
-	int i, ret;
-
-	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
-	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
-	if (!ret)
-		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
-			*data++ = t4_read_reg(adap, i);
-	return ret;
-}
-
-/**
- *	t4_fw_hello - establish communication with FW
- *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *	@evt_mbox: mailbox to receive async FW events
- *	@master: specifies the caller's willingness to be the device master
+ *      t4_fw_hello - establish communication with FW
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @evt_mbox: mailbox to receive async FW events
+ *      @master: specifies the caller's willingness to be the device master
  *	@state: returns the current device state (if non-NULL)
  *
  *	Issues a command to establish communication with FW.  Returns either
@@ -4130,11 +6411,11 @@
 retry:
 	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, HELLO, WRITE);
-	c.err_to_clearinit = htonl(
+	c.err_to_clearinit = cpu_to_be32(
 		V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
 		V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
-		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
-			M_FW_HELLO_CMD_MBMASTER) |
+		V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ?
+					mbox : M_FW_HELLO_CMD_MBMASTER) |
 		V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
 		V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
 		F_FW_HELLO_CMD_CLEARINIT);
@@ -4155,7 +6436,7 @@
 		return ret;
 	}
 
-	v = ntohl(c.err_to_clearinit);
+	v = be32_to_cpu(c.err_to_clearinit);
 	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
 	if (state) {
 		if (v & F_FW_HELLO_CMD_ERR)
@@ -4267,7 +6548,7 @@
 
 	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, RESET, WRITE);
-	c.val = htonl(reset);
+	c.val = cpu_to_be32(reset);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4300,8 +6581,8 @@
 
 		memset(&c, 0, sizeof(c));
 		INIT_CMD(c, RESET, WRITE);
-		c.val = htonl(F_PIORST | F_PIORSTMODE);
-		c.halt_pkd = htonl(F_FW_RESET_CMD_HALT);
+		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
+		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
 		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 	}
 
@@ -4320,7 +6601,8 @@
 	 */
 	if (ret == 0 || force) {
 		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
-		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, F_PCIE_FW_HALT);
+		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
+				 F_PCIE_FW_HALT);
 	}
 
 	/*
@@ -4418,14 +6700,21 @@
 		  const u8 *fw_data, unsigned int size, int force)
 {
 	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+	unsigned int bootstrap =
+	    be32_to_cpu(fw_hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP;
 	int reset, ret;
 
-	ret = t4_fw_halt(adap, mbox, force);
-	if (ret < 0 && !force)
-		return ret;
+	if (!t4_fw_matches_chip(adap, fw_hdr))
+		return -EINVAL;
 
+	if (!bootstrap) {
+		ret = t4_fw_halt(adap, mbox, force);
+		if (ret < 0 && !force)
+			return ret;
+	}
+
 	ret = t4_load_fw(adap, fw_data, size);
-	if (ret < 0)
+	if (ret < 0 || bootstrap)
 		return ret;
 
 	/*
@@ -4436,7 +6725,7 @@
 	 * the newly loaded firmware will handle this right by checking
 	 * its header flags to see if it advertises the capability.
 	 */
-	reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
 	return t4_fw_restart(adap, mbox, reset);
 }
 
@@ -4458,7 +6747,7 @@
 }
 
 /**
- *	t4_query_params - query FW or device parameters
+ *	t4_query_params_rw - query FW or device parameters
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
  *	@pf: the PF
@@ -4466,13 +6755,14 @@
  *	@nparams: the number of parameters
  *	@params: the parameter names
  *	@val: the parameter values
+ *	@rw: if set, write the values in @val to FW before reading them back
  *
  *	Reads the value of FW or device parameters.  Up to 7 parameters can be
  *	queried at once.
  */
-int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
-		    unsigned int vf, unsigned int nparams, const u32 *params,
-		    u32 *val)
+int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		       unsigned int vf, unsigned int nparams, const u32 *params,
+		       u32 *val, int rw)
 {
 	int i, ret;
 	struct fw_params_cmd c;
@@ -4482,22 +6772,74 @@
 		return -EINVAL;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_READ | V_FW_PARAMS_CMD_PFN(pf) |
-			    V_FW_PARAMS_CMD_VFN(vf));
-	c.retval_len16 = htonl(FW_LEN16(c));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
+				  V_FW_PARAMS_CMD_PFN(pf) |
+				  V_FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 
-	for (i = 0; i < nparams; i++, p += 2)
-		*p = htonl(*params++);
+	for (i = 0; i < nparams; i++) {
+		*p++ = cpu_to_be32(*params++);
+		if (rw)
+			*p = cpu_to_be32(*(val + i));
+		p++;
+	}
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret == 0)
 		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
-			*val++ = ntohl(*p);
+			*val++ = be32_to_cpu(*p);
 	return ret;
 }
 
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int nparams, const u32 *params,
+		    u32 *val)
+{
+	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
+
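+
A hedged example of a single-parameter query through the read-only wrapper;
the mnemonic encoding mirrors the FW_PARAM_DEV() idiom used elsewhere in
the driver, and the mbox/pf fields are assumptions about the caller:

	u32 param, val;
	int ret;

	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
	if (ret == 0)
		CH_WARN(adap, "portvec = %#x\n", val);	/* val is CPU order */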
 /**
+ *      t4_set_params_timeout - sets FW or device parameters
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @pf: the PF
+ *      @vf: the VF
+ *      @nparams: the number of parameters
+ *      @params: the parameter names
+ *      @val: the parameter values
+ *      @timeout: time to wait for the mailbox command to complete
+ *
+ *      Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *      specified at once.
+ */
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+			  unsigned int pf, unsigned int vf,
+			  unsigned int nparams, const u32 *params,
+			  const u32 *val, int timeout)
+{
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				  V_FW_PARAMS_CMD_PFN(pf) |
+				  V_FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+	while (nparams--) {
+		*p++ = cpu_to_be32(*params++);
+		*p++ = cpu_to_be32(*val++);
+	}
+
+	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
+}
+
+/**
  *	t4_set_params - sets FW or device parameters
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -4514,24 +6856,8 @@
 		  unsigned int vf, unsigned int nparams, const u32 *params,
 		  const u32 *val)
 {
-	struct fw_params_cmd c;
-	__be32 *p = &c.param[0].mnem;
-
-	if (nparams > 7)
-		return -EINVAL;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PARAMS_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_WRITE | V_FW_PARAMS_CMD_PFN(pf) |
-			    V_FW_PARAMS_CMD_VFN(vf));
-	c.retval_len16 = htonl(FW_LEN16(c));
-
-	while (nparams--) {
-		*p++ = htonl(*params++);
-		*p++ = htonl(*val++);
-	}
-
-	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+				     FW_CMD_MAX_TIMEOUT);
 }
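
The write-side counterpart is symmetric; up to 7 (name, value) pairs go out
in one command, using the default FW_CMD_MAX_TIMEOUT via the wrapper above.
The encoded parameter name below is a placeholder only:

	u32 param = 0;	/* an encoded FW parameter name; placeholder */
	u32 val = 1;	/* the value to set */

	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);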
 
 /**
@@ -4564,18 +6890,19 @@
 	struct fw_pfvf_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
-			    V_FW_PFVF_CMD_VFN(vf));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	c.niqflint_niq = htonl(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
-			       V_FW_PFVF_CMD_NIQ(rxq));
-	c.type_to_neq = htonl(V_FW_PFVF_CMD_CMASK(cmask) |
-			      V_FW_PFVF_CMD_PMASK(pmask) |
-			      V_FW_PFVF_CMD_NEQ(txq));
-	c.tc_to_nexactf = htonl(V_FW_PFVF_CMD_TC(tc) | V_FW_PFVF_CMD_NVI(vi) |
-				V_FW_PFVF_CMD_NEXACTF(nexact));
-	c.r_caps_to_nethctrl = htonl(V_FW_PFVF_CMD_R_CAPS(rcaps) |
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) | F_FW_CMD_REQUEST |
+				  F_FW_CMD_WRITE | V_FW_PFVF_CMD_PFN(pf) |
+				  V_FW_PFVF_CMD_VFN(vf));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.niqflint_niq = cpu_to_be32(V_FW_PFVF_CMD_NIQFLINT(rxqi) |
+				     V_FW_PFVF_CMD_NIQ(rxq));
+	c.type_to_neq = cpu_to_be32(V_FW_PFVF_CMD_CMASK(cmask) |
+				    V_FW_PFVF_CMD_PMASK(pmask) |
+				    V_FW_PFVF_CMD_NEQ(txq));
+	c.tc_to_nexactf = cpu_to_be32(V_FW_PFVF_CMD_TC(tc) |
+				      V_FW_PFVF_CMD_NVI(vi) |
+				      V_FW_PFVF_CMD_NEXACTF(nexact));
+	c.r_caps_to_nethctrl = cpu_to_be32(V_FW_PFVF_CMD_R_CAPS(rcaps) |
 				     V_FW_PFVF_CMD_WX_CAPS(wxcaps) |
 				     V_FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -4596,6 +6923,7 @@
  *
  *	Allocates a virtual interface for the given physical port.  If @mac is
  *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ *	If @rss_size is %NULL the VI is not assigned any RSS slice by FW.
  *	@mac should be large enough to hold @nmac Ethernet addresses, they are
  *	stored consecutively so the space needed is @nmac * 6 bytes.
  *	Returns a negative error number or the non-negative VI id.
@@ -4602,7 +6930,7 @@
  */
 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
 		     unsigned int port, unsigned int pf, unsigned int vf,
-		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
+		     unsigned int nmac, u8 *mac, u16 *rss_size,
 		     unsigned int portfunc, unsigned int idstype)
 {
 	int ret;
@@ -4609,14 +6937,16 @@
 	struct fw_vi_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
-			    V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
-	c.type_to_viid = htons(V_FW_VI_CMD_TYPE(idstype) |
-			       V_FW_VI_CMD_FUNC(portfunc));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
+				     V_FW_VI_CMD_FUNC(portfunc));
 	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
 	c.nmac = nmac - 1;
+	if (!rss_size)
+		c.norss_rsssize = F_FW_VI_CMD_NORSS;
 
 	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret)
@@ -4636,20 +6966,20 @@
 		}
 	}
 	if (rss_size)
-		*rss_size = G_FW_VI_CMD_RSSSIZE(ntohs(c.norss_rsssize));
-	return G_FW_VI_CMD_VIID(htons(c.type_to_viid));
+		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
+	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
 }
 
 /**
- *	t4_alloc_vi - allocate an [Ethernet Function] virtual interface
- *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *	@port: physical port associated with the VI
- *	@pf: the PF owning the VI
- *	@vf: the VF owning the VI
- *	@nmac: number of MAC addresses needed (1 to 5)
- *	@mac: the MAC addresses of the VI
- *	@rss_size: size of RSS table slice associated with this VI
+ *      t4_alloc_vi - allocate an [Ethernet Function] virtual interface
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @port: physical port associated with the VI
+ *      @pf: the PF owning the VI
+ *      @vf: the VF owning the VI
+ *      @nmac: number of MAC addresses needed (1 to 5)
+ *      @mac: the MAC addresses of the VI
+ *      @rss_size: size of RSS table slice associated with this VI
  *
 *	backwards-compatible and convenience routine to allocate a Virtual
 *	Interface with an Ethernet Port Application Function and Intrusion
@@ -4657,7 +6987,7 @@
  */
 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
 		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
-		unsigned int *rss_size)
+		u16 *rss_size)
 {
 	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
 				FW_VI_FUNC_ETH, 0);
@@ -4664,14 +6994,14 @@
 }
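
A sketch of allocating and later freeing an Ethernet VI with the wrappers
in this hunk; error handling is minimal and the mbox/pf values are
assumptions about the caller:

	u8 mac[6];
	u16 rss_size;
	int viid;

	viid = t4_alloc_vi(adap, adap->mbox, port, adap->pf, 0, 1, mac,
	    &rss_size);
	if (viid < 0)
		return (viid);		/* negative error number */
	/* ... use the VI ... */
	(void)t4_free_vi(adap, adap->mbox, adap->pf, 0, viid);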
 
 /**
- *	t4_free_vi - free a virtual interface
- *	@adap: the adapter
- *	@mbox: mailbox to use for the FW command
- *	@pf: the PF owning the VI
- *	@vf: the VF owning the VI
- *	@viid: virtual interface identifiler
+ * 	t4_free_vi - free a virtual interface
+ * 	@adap: the adapter
+ * 	@mbox: mailbox to use for the FW command
+ * 	@pf: the PF owning the VI
+ * 	@vf: the VF owning the VI
+ * 	@viid: virtual interface identifier
  *
- *	Free a previously allocated virtual interface.
+ * 	Free a previously allocated virtual interface.
  */
 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
 	       unsigned int vf, unsigned int viid)
@@ -4679,13 +7009,13 @@
 	struct fw_vi_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_VI_CMD) |
-			    F_FW_CMD_REQUEST |
-			    F_FW_CMD_EXEC |
-			    V_FW_VI_CMD_PFN(pf) |
-			    V_FW_VI_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(F_FW_VI_CMD_FREE | FW_LEN16(c));
-	c.type_to_viid = htons(V_FW_VI_CMD_VIID(viid));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+				  F_FW_CMD_REQUEST |
+				  F_FW_CMD_EXEC |
+				  V_FW_VI_CMD_PFN(pf) |
+				  V_FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
+	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
 
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 }
@@ -4699,7 +7029,7 @@
  *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
  *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
- *	@vlanex: 1 to enable HVLAN extraction, 0 to disable it, -1 no change
+ *	@vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
  *	@sleep_ok: if true we may sleep while awaiting command completion
  *
  *	Sets Rx properties of a virtual interface.
@@ -4723,14 +7053,16 @@
 		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | F_FW_CMD_REQUEST |
-			     F_FW_CMD_WRITE | V_FW_VI_RXMODE_CMD_VIID(viid));
-	c.retval_len16 = htonl(FW_LEN16(c));
-	c.mtu_to_vlanexen = htonl(V_FW_VI_RXMODE_CMD_MTU(mtu) |
-				  V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
-				  V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
-				  V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
-				  V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
+				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				   V_FW_VI_RXMODE_CMD_VIID(viid));
+	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+	c.mtu_to_vlanexen =
+		cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
+			    V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+			    V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+			    V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+			    V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
@@ -4763,9 +7095,10 @@
 	int offset, ret = 0;
 	struct fw_vi_mac_cmd c;
 	unsigned int nfilters = 0;
+	unsigned int max_naddr = adap->chip_params->mps_tcam_size;
 	unsigned int rem = naddr;
 
-	if (naddr > NUM_MPS_CLS_SRAM_L_INSTANCES)
+	if (naddr > max_naddr)
 		return -EINVAL;
 
 	for (offset = 0; offset < naddr ; /**/) {
@@ -4778,18 +7111,18 @@
 		int i;
 
 		memset(&c, 0, sizeof(c));
-		c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) |
-				     F_FW_CMD_REQUEST |
-				     F_FW_CMD_WRITE |
-				     V_FW_CMD_EXEC(free) |
-				     V_FW_VI_MAC_CMD_VIID(viid));
-		c.freemacs_to_len16 = htonl(V_FW_VI_MAC_CMD_FREEMACS(free) |
-					    V_FW_CMD_LEN16(len16));
+		c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+					   F_FW_CMD_REQUEST |
+					   F_FW_CMD_WRITE |
+					   V_FW_CMD_EXEC(free) |
+					   V_FW_VI_MAC_CMD_VIID(viid));
+		c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(free) |
+						  V_FW_CMD_LEN16(len16));
 
 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
-			p->valid_to_idx = htons(
-				F_FW_VI_MAC_CMD_VALID |
-				V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+			p->valid_to_idx =
+				cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
+					    V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
 			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
 		}
 
@@ -4803,13 +7136,14 @@
 			break;
 
 		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
-			u16 index = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
+			u16 index = G_FW_VI_MAC_CMD_IDX(
+						be16_to_cpu(p->valid_to_idx));
 
 			if (idx)
-				idx[offset+i] = (index >= NUM_MPS_CLS_SRAM_L_INSTANCES
+				idx[offset+i] = (index >=  max_naddr
 						 ? 0xffff
 						 : index);
-			if (index < NUM_MPS_CLS_SRAM_L_INSTANCES)
+			if (index < max_naddr)
 				nfilters++;
 			else if (hash)
 				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -4821,7 +7155,7 @@
 	}
 
 	if (ret == 0 || ret == -FW_ENOMEM)
-		ret = nfilters; 
+		ret = nfilters;
 	return ret;
 }
 
@@ -4853,24 +7187,26 @@
 	int ret, mode;
 	struct fw_vi_mac_cmd c;
 	struct fw_vi_mac_exact *p = c.u.exact;
+	unsigned int max_mac_addr = adap->chip_params->mps_tcam_size;
 
-	if (idx < 0)                             /* new allocation */
+	if (idx < 0)		/* new allocation */
 		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
 	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
-			     F_FW_CMD_WRITE | V_FW_VI_MAC_CMD_VIID(viid));
-	c.freemacs_to_len16 = htonl(V_FW_CMD_LEN16(1));
-	p->valid_to_idx = htons(F_FW_VI_MAC_CMD_VALID |
-				V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
-				V_FW_VI_MAC_CMD_IDX(idx));
+	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				   V_FW_VI_MAC_CMD_VIID(viid));
+	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
+	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
+				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+				      V_FW_VI_MAC_CMD_IDX(idx));
 	memcpy(p->macaddr, addr, sizeof(p->macaddr));
 
-	ret = t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), &c);
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
 	if (ret == 0) {
-		ret = G_FW_VI_MAC_CMD_IDX(ntohs(p->valid_to_idx));
-		if (ret >= NUM_MPS_CLS_SRAM_L_INSTANCES)
+		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
+		if (ret >= max_mac_addr)
 			ret = -ENOMEM;
 	}
 	return ret;
@@ -4891,18 +7227,48 @@
 		     bool ucast, u64 vec, bool sleep_ok)
 {
 	struct fw_vi_mac_cmd c;
+	u32 val;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_MAC_CMD) | F_FW_CMD_REQUEST |
-			     F_FW_CMD_WRITE | V_FW_VI_ENABLE_CMD_VIID(viid));
-	c.freemacs_to_len16 = htonl(F_FW_VI_MAC_CMD_HASHVECEN |
-				    V_FW_VI_MAC_CMD_HASHUNIEN(ucast) |
-				    V_FW_CMD_LEN16(1));
+	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+				   V_FW_VI_ENABLE_CMD_VIID(viid));
+	val = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_HASHVEC) |
+	      V_FW_VI_MAC_CMD_HASHUNIEN(ucast) | V_FW_CMD_LEN16(1);
+	c.freemacs_to_len16 = cpu_to_be32(val);
 	c.u.hash.hashvec = cpu_to_be64(vec);
 	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
 /**
+ *      t4_enable_vi_params - enable/disable a virtual interface
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @viid: the VI id
+ *      @rx_en: 1=enable Rx, 0=disable Rx
+ *      @tx_en: 1=enable Tx, 0=disable Tx
+ *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ *      Enables/disables a virtual interface.  Note that setting DCB Enable
+ *      only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
+				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				   V_FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
+				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
+				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
+				     FW_LEN16(c));
+	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
  *	t4_enable_vi - enable/disable a virtual interface
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
@@ -4910,19 +7276,13 @@
  *	@rx_en: 1=enable Rx, 0=disable Rx
  *	@tx_en: 1=enable Tx, 0=disable Tx
  *
- *	Enables/disables a virtual interface.
+ *	Enables/disables a virtual interface.  Note that setting DCB Enable
+ *	only makes sense when enabling a Virtual Interface ...
  */
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
 		 bool rx_en, bool tx_en)
 {
-	struct fw_vi_enable_cmd c;
-
-	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
-			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
-	c.ien_to_len16 = htonl(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
-			       V_FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
-	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
 }
 
 /**
@@ -4940,41 +7300,44 @@
 	struct fw_vi_enable_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_viid = htonl(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | F_FW_CMD_REQUEST |
-			     F_FW_CMD_EXEC | V_FW_VI_ENABLE_CMD_VIID(viid));
-	c.ien_to_len16 = htonl(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
-	c.blinkdur = htons(nblinks);
+	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
+				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				   V_FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = cpu_to_be32(F_FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+	c.blinkdur = cpu_to_be16(nblinks);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
 /**
- *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ *	t4_iq_stop - stop an ingress queue and its FLs
  *	@adap: the adapter
  *	@mbox: mailbox to use for the FW command
- *	@start: %true to enable the queues, %false to disable them
  *	@pf: the PF owning the queues
  *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
  *	@iqid: ingress queue id
  *	@fl0id: FL0 queue id or 0xffff if no attached FL0
  *	@fl1id: FL1 queue id or 0xffff if no attached FL1
  *
- *	Starts or stops an ingress queue and its associated FLs, if any.
+ *	Stops an ingress queue and its associated FLs, if any.  This causes
+ *	any current or future data/messages destined for these queues to be
+ *	tossed.
  */
-int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
-		     unsigned int pf, unsigned int vf, unsigned int iqid,
-		     unsigned int fl0id, unsigned int fl1id)
+int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
 {
 	struct fw_iq_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
-			    V_FW_IQ_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(V_FW_IQ_CMD_IQSTART(start) |
-				 V_FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
-	c.iqid = htons(iqid);
-	c.fl0id = htons(fl0id);
-	c.fl1id = htons(fl1id);
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+				  V_FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_IQSTOP | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -4998,14 +7361,14 @@
 	struct fw_iq_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
-			    V_FW_IQ_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_FREE | FW_LEN16(c));
-	c.type_to_iqandstindex = htonl(V_FW_IQ_CMD_TYPE(iqtype));
-	c.iqid = htons(iqid);
-	c.fl0id = htons(fl0id);
-	c.fl1id = htons(fl1id);
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
+				  V_FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
+	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
+	c.iqid = cpu_to_be16(iqid);
+	c.fl0id = cpu_to_be16(fl0id);
+	c.fl1id = cpu_to_be16(fl1id);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -5025,11 +7388,12 @@
 	struct fw_eq_eth_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(pf) |
-			    V_FW_EQ_ETH_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
-	c.eqid_pkd = htonl(V_FW_EQ_ETH_CMD_EQID(eqid));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
+				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				  V_FW_EQ_ETH_CMD_PFN(pf) |
+				  V_FW_EQ_ETH_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -5049,11 +7413,12 @@
 	struct fw_eq_ctrl_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(pf) |
-			    V_FW_EQ_CTRL_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
-	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_EQID(eqid));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				  V_FW_EQ_CTRL_CMD_PFN(pf) |
+				  V_FW_EQ_CTRL_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+	c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
@@ -5073,15 +7438,41 @@
 	struct fw_eq_ofld_cmd c;
 
 	memset(&c, 0, sizeof(c));
-	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
-			    F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(pf) |
-			    V_FW_EQ_OFLD_CMD_VFN(vf));
-	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
-	c.eqid_pkd = htonl(V_FW_EQ_OFLD_CMD_EQID(eqid));
+	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_OFLD_CMD) |
+				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+				  V_FW_EQ_OFLD_CMD_PFN(pf) |
+				  V_FW_EQ_OFLD_CMD_VFN(vf));
+	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = cpu_to_be32(V_FW_EQ_OFLD_CMD_EQID(eqid));
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
 /**
+ *	t4_link_down_rc_str - return a string for a Link Down Reason Code
+ *	@link_down_rc: Link Down Reason Code
+ *
+ *	Returns a string representation of the Link Down Reason Code.
+ */
+const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+	static const char *reason[] = {
+		"Link Down",
+		"Remote Fault",
+		"Auto-negotiation Failure",
+		"Reserved3",
+		"Insufficient Airflow",
+		"Unable To Determine Reason",
+		"No RX Signal Detected",
+		"Reserved7",
+	};
+
+	if (link_down_rc >= ARRAY_SIZE(reason))
+		return "Bad Reason Code";
+
+	return reason[link_down_rc];
+}
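
Typical consumption when logging a link transition; the callsite is
hypothetical but uses only fields introduced by this patch:

	if (!lc->link_ok)
		CH_WARN(adap, "port %d link down, reason: %s\n",
		    pi->port_id, t4_link_down_rc_str(lc->link_down_rc));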
+
+/**
  *	t4_handle_fw_rpl - process a FW reply message
  *	@adap: the adapter
  *	@rpl: start of the FW message
@@ -5092,15 +7483,16 @@
 {
 	u8 opcode = *(const u8 *)rpl;
 	const struct fw_port_cmd *p = (const void *)rpl;
-	unsigned int action = G_FW_PORT_CMD_ACTION(ntohl(p->action_to_len16));
+	unsigned int action =
+			G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
 
 	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
 		/* link/module state change message */
 		int speed = 0, fc = 0, i;
-		int chan = G_FW_PORT_CMD_PORTID(ntohl(p->op_to_portid));
+		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
 		struct port_info *pi = NULL;
 		struct link_config *lc;
-		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
 		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
 		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
 
@@ -5109,11 +7501,17 @@
 		if (stat & F_FW_PORT_CMD_TXPAUSE)
 			fc |= PAUSE_TX;
 		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
-			speed = SPEED_100;
+			speed = 100;
 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
-			speed = SPEED_1000;
+			speed = 1000;
 		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
-			speed = SPEED_10000;
+			speed = 10000;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+			speed = 25000;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+			speed = 40000;
+		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+			speed = 100000;
 
 		for_each_port(adap, i) {
 			pi = adap2pinfo(adap, i);
@@ -5122,20 +7520,23 @@
 		}
 		lc = &pi->link_cfg;
 
+		if (mod != pi->mod_type) {
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, i);
+		}
 		if (link_ok != lc->link_ok || speed != lc->speed ||
 		    fc != lc->fc) {                    /* something changed */
+			if (!link_ok && lc->link_ok)
+				lc->link_down_rc = G_FW_PORT_CMD_LINKDNRC(stat);
 			lc->link_ok = link_ok;
 			lc->speed = speed;
 			lc->fc = fc;
+			lc->supported = be16_to_cpu(p->u.info.pcap);
+			lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
 			t4_os_link_changed(adap, i, link_ok);
 		}
-		if (mod != pi->mod_type) {
-			pi->mod_type = mod;
-			t4_os_portmod_changed(adap, i);
-		}
 	} else {
-		CH_WARN_RATELIMIT(adap,
-		    "Unknown firmware reply 0x%x (0x%x)\n", opcode, action);
+		CH_WARN_RATELIMIT(adap, "Unknown firmware reply %d\n", opcode);
 		return -EINVAL;
 	}
 	return 0;
@@ -5149,7 +7550,7 @@
  *	Determines a card's PCI mode and associated parameters, such as speed
  *	and width.
  */
-static void __devinit get_pci_mode(struct adapter *adapter,
+static void get_pci_mode(struct adapter *adapter,
 				   struct pci_params *p)
 {
 	u16 val;
@@ -5166,18 +7567,34 @@
 /**
  *	init_link_config - initialize a link's SW state
  *	@lc: structure holding the link state
- *	@caps: link capabilities
+ *	@pcaps: supported link capabilities
+ *	@acaps: advertised link capabilities
  *
  *	Initializes the SW state maintained for each link, including the link's
  *	capabilities and default speed/flow-control/autonegotiation settings.
  */
-static void __devinit init_link_config(struct link_config *lc,
-				       unsigned int caps)
+static void init_link_config(struct link_config *lc, unsigned int pcaps,
+			     unsigned int acaps)
 {
-	lc->supported = caps;
+	unsigned int fec;
+
+	lc->supported = pcaps;
+	lc->lp_advertising = 0;
 	lc->requested_speed = 0;
 	lc->speed = 0;
 	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	lc->link_ok = 0;
+	lc->link_down_rc = 255;
+
+	fec = 0;
+	if (acaps & FW_PORT_CAP_FEC_RS)
+		fec |= FEC_RS;
+	if (acaps & FW_PORT_CAP_FEC_BASER_RS)
+		fec |= FEC_BASER_RS;
+	if (acaps & FW_PORT_CAP_FEC_RESERVED)
+		fec |= FEC_RESERVED;
+	lc->requested_fec = lc->fec = fec;
+
 	if (lc->supported & FW_PORT_CAP_ANEG) {
 		lc->advertising = lc->supported & ADVERT_MASK;
 		lc->autoneg = AUTONEG_ENABLE;
@@ -5188,23 +7605,22 @@
 	}
 }
 
-static int __devinit wait_dev_ready(struct adapter *adap)
+struct flash_desc {
+	u32 vendor_and_model_id;
+	u32 size_mb;
+};
+
+int t4_get_flash_params(struct adapter *adapter)
 {
-	u32 whoami;
+	/*
+	 * Table of supported non-Numonix flash parts.  Numonix parts are left
+	 * to the preexisting well-tested code.  All flash parts have 64KB
+	 * sectors.
+	 */
+	static struct flash_desc supported_flash[] = {
+		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
+	};
 
-	whoami = t4_read_reg(adap, A_PL_WHOAMI);
-
-	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
-		return 0;
-
-	msleep(500);
-	whoami = t4_read_reg(adap, A_PL_WHOAMI);
-	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
-		? 0 : -EIO);
-}
-
-static int __devinit get_flash_params(struct adapter *adapter)
-{
 	int ret;
 	u32 info = 0;
 
@@ -5211,13 +7627,21 @@
 	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
 	if (!ret)
 		ret = sf1_read(adapter, 3, 0, 1, &info);
-	t4_write_reg(adapter, A_SF_OP, 0);               /* unlock SF */
+	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
 	if (ret < 0)
 		return ret;
 
-	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
+	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
+		if (supported_flash[ret].vendor_and_model_id == info) {
+			adapter->params.sf_size = supported_flash[ret].size_mb;
+			adapter->params.sf_nsec =
+				adapter->params.sf_size / SF_SEC_SIZE;
+			return 0;
+		}
+
+	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
 		return -EINVAL;
-	info >>= 16;                           /* log2 of size */
+	info >>= 16;				/* log2 of size */
 	if (info >= 0x14 && info < 0x18)
 		adapter->params.sf_nsec = 1 << (info - 16);
 	else if (info == 0x18)
@@ -5225,10 +7649,20 @@
 	else
 		return -EINVAL;
 	adapter->params.sf_size = 1 << info;
+
+	/*
+	 * We should ~probably~ reject adapters with FLASHes which are too
+	 * small but we have some legacy FPGAs with small FLASHes that we'd
+	 * still like to use.  So instead we emit a scary message ...
+	 */
+	if (adapter->params.sf_size < FLASH_MIN_SIZE)
+		CH_WARN(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
+			adapter->params.sf_size, FLASH_MIN_SIZE);
+
 	return 0;
 }
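
To make the Numonix branch above concrete, a worked decode with an
illustrative ID byte:

	/*
	 * An ID byte of 0x17 in bits 23:16 means log2(size) == 0x17, so
	 * the part is 1 << 0x17 bytes (8 MB) with 64KB sectors.
	 */
	u32 info = 0x17;			/* log2 of flash size */
	u32 sf_size = 1 << info;		/* 0x800000 bytes: 8 MB */
	u32 sf_nsec = 1 << (info - 16);		/* 128 x 64KB sectors */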
 
-static void __devinit set_pcie_completion_timeout(struct adapter *adapter,
+static void set_pcie_completion_timeout(struct adapter *adapter,
 						  u8 range)
 {
 	u16 val;
@@ -5243,48 +7677,108 @@
 	}
 }
 
+const struct chip_params *t4_get_chip_params(int chipid)
+{
+	static const struct chip_params chip_params[] = {
+		{
+			/* T4 */
+			.nchan = NCHAN,
+			.pm_stats_cnt = PM_NSTATS,
+			.cng_ch_bits_log = 2,
+			.nsched_cls = 15,
+			.cim_num_obq = CIM_NUM_OBQ,
+			.mps_rplc_size = 128,
+			.vfcount = 128,
+			.sge_fl_db = F_DBPRIO,
+			.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
+		},
+		{
+			/* T5 */
+			.nchan = NCHAN,
+			.pm_stats_cnt = PM_NSTATS,
+			.cng_ch_bits_log = 2,
+			.nsched_cls = 16,
+			.cim_num_obq = CIM_NUM_OBQ_T5,
+			.mps_rplc_size = 128,
+			.vfcount = 128,
+			.sge_fl_db = F_DBPRIO | F_DBTYPE,
+			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+		},
+		{
+			/* T6 */
+			.nchan = T6_NCHAN,
+			.pm_stats_cnt = T6_PM_NSTATS,
+			.cng_ch_bits_log = 3,
+			.nsched_cls = 16,
+			.cim_num_obq = CIM_NUM_OBQ_T5,
+			.mps_rplc_size = 256,
+			.vfcount = 256,
+			.sge_fl_db = 0,
+			.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+		},
+	};
+
+	chipid -= CHELSIO_T4;
+	if (chipid < 0 || chipid >= ARRAY_SIZE(chip_params))
+		return NULL;
+
+	return &chip_params[chipid];
+}
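
t4_get_chip_params() indexes its table by chip ID relative to CHELSIO_T4 and
returns NULL for anything out of range, so callers (t4_prep_adapter() below)
must check the result.  A minimal user-space analogue of the bounds-checked
lookup, with invented names and values:

	#include <stdio.h>
	#include <stddef.h>

	#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

	struct params { int nchan; };

	static const struct params *lookup(int id, int base)
	{
		static const struct params tbl[] = { { 4 }, { 4 }, { 2 } };

		id -= base;	/* index relative to the first known id */
		if (id < 0 || (size_t)id >= ARRAY_SIZE(tbl))
			return NULL;	/* unknown id: caller must check */
		return &tbl[id];
	}

	int main(void)
	{
		const struct params *p = lookup(5, 4);	/* invented ids */

		if (p)
			printf("nchan=%d\n", p->nchan);
		else
			printf("unknown chip\n");
		return 0;
	}
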
+
 /**
  *	t4_prep_adapter - prepare SW and HW for operation
  *	@adapter: the adapter
- *	@reset: if true perform a HW reset
+ *	@buf: temporary space of at least VPD_LEN size provided by the caller.
  *
  *	Initialize adapter SW state for the various HW modules, set initial
  *	values for some adapter tunables, take PHYs out of reset, and
  *	initialize the MDIO interface.
  */
-int __devinit t4_prep_adapter(struct adapter *adapter)
+int t4_prep_adapter(struct adapter *adapter, u8 *buf)
 {
 	int ret;
+	uint16_t device_id;
+	uint32_t pl_rev;
 
-	ret = wait_dev_ready(adapter);
-	if (ret < 0)
-		return ret;
-
 	get_pci_mode(adapter, &adapter->params.pci);
 
-	adapter->params.rev = t4_read_reg(adapter, A_PL_REV);
-	/* T4A1 chip is no longer supported */
-	if (adapter->params.rev == 1) {
-		CH_ALERT(adapter, "T4 rev 1 chip is no longer supported\n");
+	pl_rev = t4_read_reg(adapter, A_PL_REV);
+	adapter->params.chipid = G_CHIPID(pl_rev);
+	adapter->params.rev = G_REV(pl_rev);
+	if (adapter->params.chipid == 0) {
+		/* T4 did not have chipid in PL_REV (T5 onwards do) */
+		adapter->params.chipid = CHELSIO_T4;
+
+		/* T4A1 chip is not supported */
+		if (adapter->params.rev == 1) {
+			CH_ALERT(adapter, "T4 rev 1 chip is not supported.\n");
+			return -EINVAL;
+		}
+	}
+
+	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
+	if (adapter->chip_params == NULL)
 		return -EINVAL;
-	}
+
 	adapter->params.pci.vpd_cap_addr =
-		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+	    t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
 
-	ret = get_flash_params(adapter);
+	ret = t4_get_flash_params(adapter);
 	if (ret < 0)
 		return ret;
 
-	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	ret = get_vpd_params(adapter, &adapter->params.vpd, buf);
 	if (ret < 0)
 		return ret;
 
-	if (t4_read_reg(adapter, A_PCIE_REVISION) != 0) {
+	/* Cards with real ASICs have the chipid in the PCIe device id */
+	t4_os_pci_read_cfg2(adapter, PCI_DEVICE_ID, &device_id);
+	if (device_id >> 12 == chip_id(adapter))
+		adapter->params.cim_la_size = CIMLA_SIZE;
+	else {
 		/* FPGA */
+		adapter->params.fpga = 1;
 		adapter->params.cim_la_size = 2 * CIMLA_SIZE;
-	} else {
-		/* ASIC */
-		adapter->params.cim_la_size = CIMLA_SIZE;
 	}
 
 	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
@@ -5301,13 +7795,341 @@
 	return 0;
 }
 
-int __devinit t4_port_init(struct port_info *p, int mbox, int pf, int vf)
+/**
+ *	t4_shutdown_adapter - shut down adapter, host & wire
+ *	@adapter: the adapter
+ *
+ *	Perform an emergency shutdown of the adapter and stop it from
+ *	continuing any further communication on the ports or DMA to the
+ *	host.  This is typically used when the adapter and/or firmware
+ *	have crashed and we want to prevent any further accidental
+ *	communication with the rest of the world.  This will also force
+ *	the port Link Status to go down -- if register writes work --
+ *	which should help our peers figure out that we're down.
+ */
+int t4_shutdown_adapter(struct adapter *adapter)
 {
+	int port;
+
+	t4_intr_disable(adapter);
+	t4_write_reg(adapter, A_DBG_GPIO_EN, 0);
+	for_each_port(adapter, port) {
+		u32 a_port_cfg = PORT_REG(port,
+					  is_t4(adapter)
+					  ? A_XGMAC_PORT_CFG
+					  : A_MAC_PORT_CFG);
+
+		t4_write_reg(adapter, a_port_cfg,
+			     t4_read_reg(adapter, a_port_cfg)
+			     & ~V_SIGNAL_DET(1));
+	}
+	t4_set_reg_field(adapter, A_SGE_CONTROL, F_GLOBALENABLE, 0);
+
+	return 0;
+}
+
+/**
+ *	t4_init_devlog_params - initialize adapter->params.devlog
+ *	@adap: the adapter
+ *	@fw_attach: whether we can talk to the firmware
+ *
+ *	Initialize various fields of the adapter's Firmware Device Log
+ *	Parameters structure.
+ */
+int t4_init_devlog_params(struct adapter *adap, int fw_attach)
+{
+	struct devlog_params *dparams = &adap->params.devlog;
+	u32 pf_dparams;
+	unsigned int devlog_meminfo;
+	struct fw_devlog_cmd devlog_cmd;
+	int ret;
+
+	/* If we're dealing with newer firmware, the Device Log Parameters
+	 * are stored in a designated register which allows us to access the
+	 * Device Log even if we can't talk to the firmware.
+	 */
+	pf_dparams =
+		t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
+	if (pf_dparams) {
+		unsigned int nentries, nentries128;
+
+		dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
+		dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
+
+		nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
+		nentries = (nentries128 + 1) * 128;
+		dparams->size = nentries * sizeof(struct fw_devlog_e);
+
+		return 0;
+	}
+
+	/*
+	 * For any failing returns ...
+	 */
+	memset(dparams, 0, sizeof *dparams);
+
+	/*
+	 * If we can't talk to the firmware, there's really nothing we can do
+	 * at this point.
+	 */
+	if (!fw_attach)
+		return -ENXIO;
+
+	/* Otherwise, ask the firmware for its Device Log Parameters.
+	 */
+	memset(&devlog_cmd, 0, sizeof devlog_cmd);
+	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
+					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
+	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
+	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
+			 &devlog_cmd);
+	if (ret)
+		return ret;
+
+	devlog_meminfo =
+		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
+	dparams->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(devlog_meminfo);
+	dparams->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(devlog_meminfo) << 4;
+	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
+
+	return 0;
+}
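
When the register path above is taken, one 32-bit word carries the memory
type, the start address in 16-byte units, and the entry count in units of
128 entries.  A sketch of the unpacking arithmetic; the field values and the
128-byte entry size are assumptions for the example (the real
G_PCIE_FW_PF_DEVLOG_* macros and sizeof(struct fw_devlog_e) are
authoritative):

	#include <stdio.h>

	#define DEVLOG_E_SIZE 128	/* assumed entry size */

	int main(void)
	{
		unsigned int nentries128 = 7;	/* sample decoded field */
		unsigned int addr16 = 0x1000;	/* sample decoded field */

		unsigned int start = addr16 << 4;	/* 16B units -> bytes */
		unsigned int nentries = (nentries128 + 1) * 128;
		unsigned int size = nentries * DEVLOG_E_SIZE;

		printf("start=%#x nentries=%u size=%u\n", start, nentries, size);
		return 0;
	}
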
+
+/**
+ *	t4_init_sge_params - initialize adap->params.sge
+ *	@adapter: the adapter
+ *
+ *	Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+	u32 r;
+	struct sge_params *sp = &adapter->params.sge;
+	unsigned i, tscale = 1;
+
+	r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
+	sp->counter_val[0] = G_THRESHOLD_0(r);
+	sp->counter_val[1] = G_THRESHOLD_1(r);
+	sp->counter_val[2] = G_THRESHOLD_2(r);
+	sp->counter_val[3] = G_THRESHOLD_3(r);
+
+	if (chip_id(adapter) >= CHELSIO_T6) {
+		r = t4_read_reg(adapter, A_SGE_ITP_CONTROL);
+		tscale = G_TSCALE(r);
+		if (tscale == 0)
+			tscale = 1;
+		else
+			tscale += 2;
+	}
+
+	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_0_AND_1);
+	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(r)) * tscale;
+	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(r)) * tscale;
+	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_2_AND_3);
+	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(r)) * tscale;
+	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(r)) * tscale;
+	r = t4_read_reg(adapter, A_SGE_TIMER_VALUE_4_AND_5);
+	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(r)) * tscale;
+	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(r)) * tscale;
+
+	r = t4_read_reg(adapter, A_SGE_CONM_CTRL);
+	sp->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
+	if (is_t4(adapter))
+		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
+	else if (is_t5(adapter))
+		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;
+	else
+		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(r) * 2 + 1;
+
+	/* egress queues: log2 of # of doorbells per BAR2 page */
+	r = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+	r >>= S_QUEUESPERPAGEPF0 +
+	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
+	sp->eq_s_qpp = r & M_QUEUESPERPAGEPF0;
+
+	/* ingress queues: log2 of # of doorbells per BAR2 page */
+	r = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+	r >>= S_QUEUESPERPAGEPF0 +
+	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf;
+	sp->iq_s_qpp = r & M_QUEUESPERPAGEPF0;
+
+	r = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+	r >>= S_HOSTPAGESIZEPF0 +
+	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adapter->pf;
+	sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
+
+	r = t4_read_reg(adapter, A_SGE_CONTROL);
+	sp->sge_control = r;
+	sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
+	sp->fl_pktshift = G_PKTSHIFT(r);
+	if (chip_id(adapter) <= CHELSIO_T5) {
+		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
+		    X_INGPADBOUNDARY_SHIFT);
+	} else {
+		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) +
+		    X_T6_INGPADBOUNDARY_SHIFT);
+	}
+	if (is_t4(adapter))
+		sp->pack_boundary = sp->pad_boundary;
+	else {
+		r = t4_read_reg(adapter, A_SGE_CONTROL2);
+		if (G_INGPACKBOUNDARY(r) == 0)
+			sp->pack_boundary = 16;
+		else
+			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
+	}
+	for (i = 0; i < SGE_FLBUF_SIZES; i++)
+		sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
+		    A_SGE_FL_BUFFER_SIZE0 + (4 * i));
+
+	return 0;
+}
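
The eq_s_qpp/iq_s_qpp/page_shift decodes above share one pattern: the
register packs a small field per PF, so the code shifts by PF0's position
plus a per-PF stride and then masks.  A toy version with an invented layout
(the real S_/M_QUEUESPERPAGEPF* and S_/M_HOSTPAGESIZEPF* constants define
the actual widths):

	#include <stdio.h>

	#define S_PF0	0	/* assumed shift of PF0's field */
	#define STRIDE	4	/* assumed bits between consecutive PFs */
	#define M_PF	0xf	/* assumed field mask */

	static unsigned int pf_field(unsigned int reg, unsigned int pf)
	{
		return (reg >> (S_PF0 + STRIDE * pf)) & M_PF;
	}

	int main(void)
	{
		unsigned int reg = 0x00432100;	/* sample register image */

		for (unsigned int pf = 0; pf < 8; pf++)
			printf("pf%u: %u\n", pf, pf_field(reg, pf));
		return 0;
	}
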
+
+/*
+ * Read and cache the adapter's compressed filter mode and ingress config.
+ */
+static void read_filter_mode_and_ingress_config(struct adapter *adap)
+{
+	struct tp_params *tpp = &adap->params.tp;
+
+	if (t4_use_ldst(adap)) {
+		t4_fw_tp_pio_rw(adap, &tpp->vlan_pri_map, 1,
+				A_TP_VLAN_PRI_MAP, 1);
+		t4_fw_tp_pio_rw(adap, &tpp->ingress_config, 1,
+				A_TP_INGRESS_CONFIG, 1);
+	} else {
+		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 &tpp->vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
+		t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+				 &tpp->ingress_config, 1, A_TP_INGRESS_CONFIG);
+	}
+
+	/*
+	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+	 * shift positions of several elements of the Compressed Filter Tuple
+	 * for this adapter which we need frequently ...
+	 */
+	tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
+	tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
+	tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+	tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+	tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
+	tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
+	tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
+	tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
+	tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
+	tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+
+	/*
+	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+	 */
+	if ((tpp->ingress_config & F_VNIC) == 0)
+		tpp->vnic_shift = -1;
+}
+
+/**
+ *      t4_init_tp_params - initialize adap->params.tp
+ *      @adap: the adapter
+ *
+ *      Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+	int chan;
+	u32 v;
+	struct tp_params *tpp = &adap->params.tp;
+
+	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
+	tpp->tre = G_TIMERRESOLUTION(v);
+	tpp->dack_re = G_DELAYEDACKRESOLUTION(v);
+
+	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+	for (chan = 0; chan < MAX_NCHAN; chan++)
+		tpp->tx_modq[chan] = chan;
+
+	read_filter_mode_and_ingress_config(adap);
+
+	/*
+	 * Cache a mask of the bits that represent the error vector portion of
+	 * rx_pkt.err_vec.  T6+ can use a compressed error vector to make room
+	 * for information about outer encapsulation (GENEVE/VXLAN/NVGRE).
+	 */
+	tpp->err_vec_mask = htobe16(0xffff);
+	if (chip_id(adap) > CHELSIO_T5) {
+		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
+		if (v & F_CRXPKTENC) {
+			tpp->err_vec_mask =
+			    htobe16(V_T6_COMPR_RXERR_VEC(M_T6_COMPR_RXERR_VEC));
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *      t4_filter_field_shift - calculate filter field shift
+ *      @adap: the adapter
+ *      @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ *      Return the shift position of a filter field within the Compressed
+ *      Filter Tuple.  The filter field is specified via its selection bit
+ *      within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+	unsigned int sel;
+	int field_shift;
+
+	if ((filter_mode & filter_sel) == 0)
+		return -1;
+
+	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+		switch (filter_mode & sel) {
+		case F_FCOE:
+			field_shift += W_FT_FCOE;
+			break;
+		case F_PORT:
+			field_shift += W_FT_PORT;
+			break;
+		case F_VNIC_ID:
+			field_shift += W_FT_VNIC_ID;
+			break;
+		case F_VLAN:
+			field_shift += W_FT_VLAN;
+			break;
+		case F_TOS:
+			field_shift += W_FT_TOS;
+			break;
+		case F_PROTOCOL:
+			field_shift += W_FT_PROTOCOL;
+			break;
+		case F_ETHERTYPE:
+			field_shift += W_FT_ETHERTYPE;
+			break;
+		case F_MACMATCH:
+			field_shift += W_FT_MACMATCH;
+			break;
+		case F_MPSHITTYPE:
+			field_shift += W_FT_MPSHITTYPE;
+			break;
+		case F_FRAGMENTATION:
+			field_shift += W_FT_FRAGMENTATION;
+			break;
+		}
+	}
+	return field_shift;
+}
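
The running sum above means a field's shift equals the total width of every
enabled field below it in the tuple.  A compact user-space rendition using
the first four widths (1, 3, 17, 17, mirroring the W_FT_* values, which
remain authoritative):

	#include <stdio.h>

	static int field_shift(unsigned int mode, unsigned int sel)
	{
		static const int width[] = { 1, 3, 17, 17 };
		int shift = 0;

		if (!(mode & sel))
			return -1;	/* field not in the filter mode */
		for (unsigned int b = 1, i = 0; b < sel; b <<= 1, i++)
			if (mode & b)
				shift += width[i];
		return shift;
	}

	int main(void)
	{
		/* bits: 1=FCOE, 2=PORT, 4=VNIC_ID, 8=VLAN */
		printf("%d\n", field_shift(0x1 | 0x2 | 0x8, 0x8));	/* 4 */
		printf("%d\n", field_shift(0xf, 0x8));			/* 21 */
		return 0;
	}
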
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
+{
 	u8 addr[6];
 	int ret, i, j;
 	struct fw_port_cmd c;
-	unsigned int rss_size;
-	adapter_t *adap = p->adapter;
+	u16 rss_size;
+	struct port_info *p = adap2pinfo(adap, port_id);
+	u32 param, val;
 
 	memset(&c, 0, sizeof(c));
 
@@ -5317,33 +8139,1405 @@
 		} while ((adap->params.portvec & (1 << j)) == 0);
 	}
 
-	c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
-			       F_FW_CMD_REQUEST | F_FW_CMD_READ |
-			       V_FW_PORT_CMD_PORTID(j));
-	c.action_to_len16 = htonl(
-		V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
-		FW_LEN16(c));
-	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
-	if (ret)
-		return ret;
+	if (!(adap->flags & IS_VF) ||
+	    adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
+		c.op_to_portid = htonl(V_FW_CMD_OP(FW_PORT_CMD) |
+				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
+				       V_FW_PORT_CMD_PORTID(j));
+		c.action_to_len16 = htonl(
+			V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+			FW_LEN16(c));
+		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+		if (ret)
+			return ret;
 
+		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+		p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
+			G_FW_PORT_CMD_MDIOADDR(ret) : -1;
+		p->port_type = G_FW_PORT_CMD_PTYPE(ret);
+		p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
+
+		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
+		    		 be16_to_cpu(c.u.info.acap));
+	}
+
 	ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
 	if (ret < 0)
 		return ret;
 
-	p->viid = ret;
+	p->vi[0].viid = ret;
+	if (chip_id(adap) <= CHELSIO_T5)
+		p->vi[0].smt_idx = (ret & 0x7f) << 1;
+	else
+		p->vi[0].smt_idx = (ret & 0x7f);
 	p->tx_chan = j;
+	p->rx_chan_map = t4_get_mps_bg_map(adap, j);
 	p->lport = j;
-	p->rss_size = rss_size;
+	p->vi[0].rss_size = rss_size;
 	t4_os_set_hw_addr(adap, p->port_id, addr);
 
-	ret = ntohl(c.u.info.lstatus_to_modtype);
-	p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
-		G_FW_PORT_CMD_MDIOADDR(ret) : -1;
-	p->port_type = G_FW_PORT_CMD_PTYPE(ret);
-	p->mod_type = G_FW_PORT_CMD_MODTYPE(ret);
+	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
+	    V_FW_PARAMS_PARAM_YZ(p->vi[0].viid);
+	ret = t4_query_params(adap, mbox, pf, vf, 1, &param, &val);
+	if (ret)
+		p->vi[0].rss_base = 0xffff;
+	else {
+		/* MPASS((val >> 16) == rss_size); */
+		p->vi[0].rss_base = val & 0xffff;
+	}
 
-	init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+	return 0;
+}
 
+/**
+ *	t4_read_cimq_cfg - read CIM queue configuration
+ *	@adap: the adapter
+ *	@base: holds the queue base addresses in bytes
+ *	@size: holds the queue sizes in bytes
+ *	@thres: holds the queue full thresholds in bytes
+ *
+ *	Returns the current configuration of the CIM queues, starting with
+ *	the IBQs, then the OBQs.
+ */
+void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+{
+	unsigned int i, v;
+	int cim_num_obq = adap->chip_params->cim_num_obq;
+
+	for (i = 0; i < CIM_NUM_IBQ; i++) {
+		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
+			     V_QUENUMSELECT(i));
+		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+		/* value is in 256-byte units */
+		*base++ = G_CIMQBASE(v) * 256;
+		*size++ = G_CIMQSIZE(v) * 256;
+		*thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
+	}
+	for (i = 0; i < cim_num_obq; i++) {
+		t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
+			     V_QUENUMSELECT(i));
+		v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+		/* value is in 256-byte units */
+		*base++ = G_CIMQBASE(v) * 256;
+		*size++ = G_CIMQSIZE(v) * 256;
+	}
+}
+
+/**
+ *	t4_read_cim_ibq - read the contents of a CIM inbound queue
+ *	@adap: the adapter
+ *	@qid: the queue index
+ *	@data: where to store the queue contents
+ *	@n: capacity of @data in 32-bit words
+ *
+ *	Reads the contents of the selected CIM queue starting at address 0 up
+ *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *	error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+	int i, err, attempts;
+	unsigned int addr;
+	const unsigned int nwords = CIM_IBQ_SIZE * 4;
+
+	if (qid > 5 || (n & 3))
+		return -EINVAL;
+
+	addr = qid * nwords;
+	if (n > nwords)
+		n = nwords;
+
+	/* It might take 3-10ms before the IBQ debug read access is allowed.
+	 * Wait for up to 1 second, polling with a 1 usec delay.
+	 */
+	attempts = 1000000;
+
+	for (i = 0; i < n; i++, addr++) {
+		t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
+			     F_IBQDBGEN);
+		err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+				      attempts, 1);
+		if (err)
+			return err;
+		*data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+	}
+	t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
+	return i;
+}
+
+/**
+ *	t4_read_cim_obq - read the contents of a CIM outbound queue
+ *	@adap: the adapter
+ *	@qid: the queue index
+ *	@data: where to store the queue contents
+ *	@n: capacity of @data in 32-bit words
+ *
+ *	Reads the contents of the selected CIM queue starting at address 0 up
+ *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
+ *	error and the number of 32-bit words actually read on success.
+ */
+int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+{
+	int i, err;
+	unsigned int addr, v, nwords;
+	int cim_num_obq = adap->chip_params->cim_num_obq;
+
+	if ((qid > (cim_num_obq - 1)) || (n & 3))
+		return -EINVAL;
+
+	t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
+		     V_QUENUMSELECT(qid));
+	v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+
+	addr = G_CIMQBASE(v) * 64;    /* multiple of 256 -> multiple of 4 */
+	nwords = G_CIMQSIZE(v) * 64;  /* same */
+	if (n > nwords)
+		n = nwords;
+
+	for (i = 0; i < n; i++, addr++) {
+		t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
+			     F_OBQDBGEN);
+		err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
+				      2, 1);
+		if (err)
+			return err;
+		*data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+	}
+	t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
+	return i;
+}
+
+enum {
+	CIM_QCTL_BASE     = 0,
+	CIM_CTL_BASE      = 0x2000,
+	CIM_PBT_ADDR_BASE = 0x2800,
+	CIM_PBT_LRF_BASE  = 0x3000,
+	CIM_PBT_DATA_BASE = 0x3800
+};
+
+/**
+ *	t4_cim_read - read a block from CIM internal address space
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM address space
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM internal address space.
+ */
+int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
+		unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+		return -EBUSY;
+
+	for ( ; !ret && n--; addr += 4) {
+		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
+		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+				      0, 5, 2);
+		if (!ret)
+			*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
+	}
+	return ret;
+}
+
+/**
+ *	t4_cim_write - write a block into CIM internal address space
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM address space
+ *	@n: number of words to write
+ *	@valp: set of values to write
+ *
+ *	Writes a block of 4-byte words into the CIM internal address space.
+ */
+int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
+		 const unsigned int *valp)
+{
+	int ret = 0;
+
+	if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+		return -EBUSY;
+
+	for ( ; !ret && n--; addr += 4) {
+		t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
+		t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
+		ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+				      0, 5, 2);
+	}
+	return ret;
+}
+
+static int t4_cim_write1(struct adapter *adap, unsigned int addr,
+			 unsigned int val)
+{
+	return t4_cim_write(adap, addr, 1, &val);
+}
+
+/**
+ *	t4_cim_ctl_read - read a block from CIM control region
+ *	@adap: the adapter
+ *	@addr: the start address within the CIM control region
+ *	@n: number of words to read
+ *	@valp: where to store the result
+ *
+ *	Reads a block of 4-byte words from the CIM control region.
+ */
+int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
+		    unsigned int *valp)
+{
+	return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
+}
+
+/**
+ *	t4_cim_read_la - read CIM LA capture buffer
+ *	@adap: the adapter
+ *	@la_buf: where to store the LA data
+ *	@wrptr: the HW write pointer within the capture buffer
+ *
+ *	Reads the contents of the CIM LA buffer with the most recent entry at
+ *	the end	of the returned data and with the entry at @wrptr first.
+ *	We try to leave the LA in the running state we find it in.
+ */
+int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+{
+	int i, ret;
+	unsigned int cfg, val, idx;
+
+	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+	if (ret)
+		return ret;
+
+	if (cfg & F_UPDBGLAEN) {	/* LA is running, freeze it */
+		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+		if (ret)
+			return ret;
+	}
+
+	ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+	if (ret)
+		goto restart;
+
+	idx = G_UPDBGLAWRPTR(val);
+	if (wrptr)
+		*wrptr = idx;
+
+	for (i = 0; i < adap->params.cim_la_size; i++) {
+		ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+				    V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+		if (ret)
+			break;
+		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+		if (ret)
+			break;
+		if (val & F_UPDBGLARDEN) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+		ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+		if (ret)
+			break;
+
+		/* address can't exceed 0xfff (UpDbgLaRdPtr is 12 bits wide) */
+		idx = (idx + 1) & M_UPDBGLARDPTR;
+		/*
+		 * Bits 0-3 of UpDbgLaRdPtr range from 0000 to 1001 and
+		 * identify the 32-bit portion of the full 312-bit data
+		 */
+		if (is_t6(adap))
+			while ((idx & 0xf) > 9)
+				idx = (idx + 1) % M_UPDBGLARDPTR;
+	}
+restart:
+	if (cfg & F_UPDBGLAEN) {
+		int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
+				      cfg & ~F_UPDBGLARDEN);
+		if (!ret)
+			ret = r;
+	}
+	return ret;
+}
+
+/**
+ *	t4_tp_read_la - read TP LA capture buffer
+ *	@adap: the adapter
+ *	@la_buf: where to store the LA data
+ *	@wrptr: the HW write pointer within the capture buffer
+ *
+ *	Reads the contents of the TP LA buffer with the most recent entry at
+ *	the end	of the returned data and with the entry at @wrptr first.
+ *	We leave the LA in the running state we find it in.
+ */
+void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
+{
+	bool last_incomplete;
+	unsigned int i, cfg, val, idx;
+
+	cfg = t4_read_reg(adap, A_TP_DBG_LA_CONFIG) & 0xffff;
+	if (cfg & F_DBGLAENABLE)			/* freeze LA */
+		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
+			     adap->params.tp.la_mask | (cfg ^ F_DBGLAENABLE));
+
+	val = t4_read_reg(adap, A_TP_DBG_LA_CONFIG);
+	idx = G_DBGLAWPTR(val);
+	last_incomplete = G_DBGLAMODE(val) >= 2 && (val & F_DBGLAWHLF) == 0;
+	if (last_incomplete)
+		idx = (idx + 1) & M_DBGLARPTR;
+	if (wrptr)
+		*wrptr = idx;
+
+	val &= 0xffff;
+	val &= ~V_DBGLARPTR(M_DBGLARPTR);
+	val |= adap->params.tp.la_mask;
+
+	for (i = 0; i < TPLA_SIZE; i++) {
+		t4_write_reg(adap, A_TP_DBG_LA_CONFIG, V_DBGLARPTR(idx) | val);
+		la_buf[i] = t4_read_reg64(adap, A_TP_DBG_LA_DATAL);
+		idx = (idx + 1) & M_DBGLARPTR;
+	}
+
+	/* Wipe out last entry if it isn't valid */
+	if (last_incomplete)
+		la_buf[TPLA_SIZE - 1] = ~0ULL;
+
+	if (cfg & F_DBGLAENABLE)		/* restore running state */
+		t4_write_reg(adap, A_TP_DBG_LA_CONFIG,
+			     cfg | adap->params.tp.la_mask);
+}
+
+/*
+ * SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
+ * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
+ * state for more than the Warning Threshold then we'll issue a warning about
+ * a potential hang.  We'll repeat the warning every Warning Repeat seconds
+ * for as long as the SGE Ingress DMA Channel appears to be hung.
+ * If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH 1
+#define SGE_IDMA_WARN_REPEAT 300
+
+/**
+ *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
+ *	@adapter: the adapter
+ *	@idma: the adapter IDMA Monitor state
+ *
+ *	Initialize the state of an SGE Ingress DMA Monitor.
+ */
+void t4_idma_monitor_init(struct adapter *adapter,
+			  struct sge_idma_monitor_state *idma)
+{
+	/* Initialize the state variables for detecting an SGE Ingress DMA
+	 * hang.  The SGE has internal counters which count up on each clock
+	 * tick whenever the SGE finds its Ingress DMA State Engines in the
+	 * same state they were on the previous clock tick.  The clock used is
+	 * the Core Clock so we have a limit on the maximum "time" they can
+	 * record; typically a very small number of seconds.  For instance,
+	 * with a 600MHz Core Clock, we can only count up to a bit more than
+	 * 7s.  So we'll synthesize a larger counter in order to not run the
+	 * risk of having the "timers" overflow and give us the flexibility to
+	 * maintain a Hung SGE State Machine of our own which operates across
+	 * a longer time frame.
+	 */
+	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
+	idma->idma_stalled[0] = idma->idma_stalled[1] = 0;
+}
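
The "bit more than 7s" figure above is just the 32-bit counter ceiling: the
same-state counters peg at 0xffffffff, and at a 600MHz Core Clock that takes
2^32 / 600e6, roughly 7.16 seconds.  A quick standalone check:

	#include <stdio.h>

	int main(void)
	{
		double core_hz = 600e6;		/* clock from the comment */
		double horizon = 4294967296.0 / core_hz;	/* 2^32 ticks */

		printf("counter pegs after %.2f s\n", horizon);	/* 7.16 */
		return 0;
	}
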
+
+/**
+ *	t4_idma_monitor - monitor SGE Ingress DMA state
+ *	@adapter: the adapter
+ *	@idma: the adapter IDMA Monitor state
+ *	@hz: number of ticks/second
+ *	@ticks: number of ticks since the last IDMA Monitor call
+ */
+void t4_idma_monitor(struct adapter *adapter,
+		     struct sge_idma_monitor_state *idma,
+		     int hz, int ticks)
+{
+	int i, idma_same_state_cnt[2];
+
+	 /* Read the SGE Debug Ingress DMA Same State Count registers.  These
+	  * are counters inside the SGE which count up on each clock when the
+	  * SGE finds its Ingress DMA State Engines in the same states they
+	  * were in the previous clock.  The counters will peg out at
+	  * 0xffffffff without wrapping around so once they pass the 1s
+	  * threshold they'll stay above that till the IDMA state changes.
+	  */
+	t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 13);
+	idma_same_state_cnt[0] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_HIGH);
+	idma_same_state_cnt[1] = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
+
+	for (i = 0; i < 2; i++) {
+		u32 debug0, debug11;
+
+		/* If the Ingress DMA Same State Counter ("timer") is less
+		 * than 1s, then we can reset our synthesized Stall Timer and
+		 * continue.  If we have previously emitted warnings about a
+		 * potential stalled Ingress Queue, issue a note indicating
+		 * that the Ingress Queue has resumed forward progress.
+		 */
+		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
+			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH*hz)
+				CH_WARN(adapter, "SGE idma%d, queue %u, "
+					"resumed after %d seconds\n",
+					i, idma->idma_qid[i],
+					idma->idma_stalled[i]/hz);
+			idma->idma_stalled[i] = 0;
+			continue;
+		}
+
+		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+		 * domain.  The first time we get here it'll be because we
+		 * passed the 1s Threshold; each additional time it'll be
+		 * because the RX Timer Callback is being fired on its regular
+		 * schedule.
+		 *
+		 * If the stall is below our Potential Hung Ingress Queue
+		 * Warning Threshold, continue.
+		 */
+		if (idma->idma_stalled[i] == 0) {
+			idma->idma_stalled[i] = hz;
+			idma->idma_warn[i] = 0;
+		} else {
+			idma->idma_stalled[i] += ticks;
+			idma->idma_warn[i] -= ticks;
+		}
+
+		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH*hz)
+			continue;
+
+		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
+		 */
+		if (idma->idma_warn[i] > 0)
+			continue;
+		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT*hz;
+
+		/* Read and save the SGE IDMA State and Queue ID information.
+		 * We do this every time in case it changes across time ...
+		 * can't be too careful ...
+		 */
+		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 0);
+		debug0 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
+		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+		t4_write_reg(adapter, A_SGE_DEBUG_INDEX, 11);
+		debug11 = t4_read_reg(adapter, A_SGE_DEBUG_DATA_LOW);
+		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+		CH_WARN(adapter, "SGE idma%u, queue %u, potentially stuck in "
+			" state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
+			i, idma->idma_qid[i], idma->idma_state[i],
+			idma->idma_stalled[i]/hz,
+			debug0, debug11);
+		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
+	}
+}
+
+/**
+ *	t4_read_pace_tbl - read the pace table
+ *	@adap: the adapter
+ *	@pace_vals: holds the returned values
+ *
+ *	Returns the values of TP's pace table in microseconds.
+ */
+void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
+{
+	unsigned int i, v;
+
+	for (i = 0; i < NTX_SCHED; i++) {
+		t4_write_reg(adap, A_TP_PACE_TABLE, 0xffff0000 + i);
+		v = t4_read_reg(adap, A_TP_PACE_TABLE);
+		pace_vals[i] = dack_ticks_to_usec(adap, v);
+	}
+}
+
+/**
+ *	t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
+ *	@adap: the adapter
+ *	@sched: the scheduler index
+ *	@kbps: the byte rate in Kbps
+ *	@ipg: the interpacket delay in tenths of nanoseconds
+ *
+ *	Return the current configuration of a HW Tx scheduler.
+ */
+void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbps,
+		     unsigned int *ipg)
+{
+	unsigned int v, addr, bpt, cpt;
+
+	if (kbps) {
+		addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
+		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+		if (sched & 1)
+			v >>= 16;
+		bpt = (v >> 8) & 0xff;
+		cpt = v & 0xff;
+		if (!cpt)
+			*kbps = 0;	/* scheduler disabled */
+		else {
+			v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
+			*kbps = (v * bpt) / 125;
+		}
+	}
+	if (ipg) {
+		addr = A_TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR - sched / 2;
+		t4_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
+		v = t4_read_reg(adap, A_TP_TM_PIO_DATA);
+		if (sched & 1)
+			v >>= 16;
+		v &= 0xffff;
+		*ipg = (10000 * v) / core_ticks_per_usec(adap);
+	}
+}
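
The Kbps computation above works in increments: cpt core ticks per increment
and bpt bytes per increment give bytes/s = (cclk_kHz * 1000 / cpt) * bpt,
and dividing bytes/s by 125 converts to kilobits/s.  A sketch with invented
sample values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int cclk_khz = 600000;	/* sample VPD core clock */
		unsigned int cpt = 480, bpt = 100;	/* sample fields */

		unsigned int ticks_per_s = (cclk_khz * 1000) / cpt;
		unsigned int kbps = (ticks_per_s * bpt) / 125;

		printf("%u Kbps\n", kbps);	/* 1000000 = 1 Gbps */
		return 0;
	}
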
+
+/**
+ *	t4_load_cfg - download config file
+ *	@adap: the adapter
+ *	@cfg_data: the cfg text file to write
+ *	@size: text file size
+ *
+ *	Write the supplied config text file to the card's serial flash.
+ */
+int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+	int ret, i, n, cfg_addr;
+	unsigned int addr;
+	unsigned int flash_cfg_start_sec;
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+	cfg_addr = t4_flash_cfg_addr(adap);
+	if (cfg_addr < 0)
+		return cfg_addr;
+
+	addr = cfg_addr;
+	flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+	if (size > FLASH_CFG_MAX_SIZE) {
+		CH_ERR(adap, "cfg file too large, max is %u bytes\n",
+		       FLASH_CFG_MAX_SIZE);
+		return -EFBIG;
+	}
+
+	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
+			 sf_sec_size);
+	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+				     flash_cfg_start_sec + i - 1);
+	/*
+	 * If size == 0 then we're simply erasing the FLASH sectors associated
+	 * with the on-adapter Firmware Configuration File.
+	 */
+	if (ret || size == 0)
+		goto out;
+
+	/* this will write to the flash up to SF_PAGE_SIZE at a time */
+	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
+		if ( (size - i) <  SF_PAGE_SIZE)
+			n = size - i;
+		else
+			n = SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, n, cfg_data, 1);
+		if (ret)
+			goto out;
+
+		addr += SF_PAGE_SIZE;
+		cfg_data += SF_PAGE_SIZE;
+	}
+
+out:
+	if (ret)
+		CH_ERR(adap, "config file %s failed %d\n",
+		       (size == 0 ? "clear" : "download"), ret);
+	return ret;
+}
+
+/**
+ *	t5_fw_init_extern_mem - initialize the external memory
+ *	@adap: the adapter
+ *
+ *	Initializes the external memory on T5.
+ */
+int t5_fw_init_extern_mem(struct adapter *adap)
+{
+	u32 params[1], val[1];
+	int ret;
+
+	if (!is_t5(adap))
+		return 0;
+
+	val[0] = 0xff; /* Initialize all MCs */
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+			V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_MCINIT));
+	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, params, val,
+			FW_CMD_MAX_TIMEOUT);
+
+	return ret;
+}
+
+/* BIOS boot headers */
+typedef struct pci_expansion_rom_header {
+	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
+	u8	reserved[22]; /* Reserved per processor Architecture data */
+	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
+} pci_exp_rom_header_t; /* PCI_EXPANSION_ROM_HEADER */
+
+/* Legacy PCI Expansion ROM Header */
+typedef struct legacy_pci_expansion_rom_header {
+	u8	signature[2]; /* ROM Signature. Should be 0xaa55 */
+	u8	size512; /* Current Image Size in units of 512 bytes */
+	u8	initentry_point[4];
+	u8	cksum; /* Checksum computed on the entire Image */
+	u8	reserved[16]; /* Reserved */
+	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
+} legacy_pci_exp_rom_header_t; /* LEGACY_PCI_EXPANSION_ROM_HEADER */
+
+/* EFI PCI Expansion ROM Header */
+typedef struct efi_pci_expansion_rom_header {
+	u8	signature[2]; /* ROM signature. The value 0xaa55 */
+	u8	initialization_size[2]; /* Units 512. Includes this header */
+	u8	efi_signature[4]; /* Signature from EFI image header. 0x0EF1 */
+	u8	efi_subsystem[2]; /* Subsystem value for EFI image header */
+	u8	efi_machine_type[2]; /* Machine type from EFI image header */
+	u8	compression_type[2]; /* Compression type. */
+		/*
+		 * Compression type definition
+		 * 0x0: uncompressed
+		 * 0x1: Compressed
+		 * 0x2-0xFFFF: Reserved
+		 */
+	u8	reserved[8]; /* Reserved */
+	u8	efi_image_header_offset[2]; /* Offset to EFI Image */
+	u8	pcir_offset[2]; /* Offset to PCI Data Structure */
+} efi_pci_exp_rom_header_t; /* EFI PCI Expansion ROM Header */
+
+/* PCI Data Structure Format */
+typedef struct pcir_data_structure { /* PCI Data Structure */
+	u8	signature[4]; /* Signature. The string "PCIR" */
+	u8	vendor_id[2]; /* Vendor Identification */
+	u8	device_id[2]; /* Device Identification */
+	u8	vital_product[2]; /* Pointer to Vital Product Data */
+	u8	length[2]; /* PCIR Data Structure Length */
+	u8	revision; /* PCIR Data Structure Revision */
+	u8	class_code[3]; /* Class Code */
+	u8	image_length[2]; /* Image Length. Multiple of 512B */
+	u8	code_revision[2]; /* Revision Level of Code/Data */
+	u8	code_type; /* Code Type. */
+		/*
+		 * PCI Expansion ROM Code Types
+		 * 0x00: Intel IA-32, PC-AT compatible. Legacy
+		 * 0x01: Open Firmware standard for PCI. FCODE
+		 * 0x02: Hewlett-Packard PA RISC. HP reserved
+		 * 0x03: EFI Image. EFI
+		 * 0x04-0xFF: Reserved.
+		 */
+	u8	indicator; /* Indicator. Identifies the last image in the ROM */
+	u8	reserved[2]; /* Reserved */
+} pcir_data_t; /* PCI_DATA_STRUCTURE */
+
+/* BOOT constants */
+enum {
+	BOOT_FLASH_BOOT_ADDR = 0x0,/* start address of boot image in flash */
+	BOOT_SIGNATURE = 0xaa55,   /* signature of BIOS boot ROM */
+	BOOT_SIZE_INC = 512,       /* image size measured in 512B chunks */
+	BOOT_MIN_SIZE = sizeof(pci_exp_rom_header_t), /* basic header */
+	BOOT_MAX_SIZE = 1024*BOOT_SIZE_INC, /* 1024 * 512B = 512KB max */
+	VENDOR_ID = 0x1425, /* Vendor ID */
+	PCIR_SIGNATURE = 0x52494350 /* PCIR signature */
+};
+
+/*
+ *	modify_device_id - Modifies the device ID of the Boot BIOS image
+ *	@device_id: the device ID to write.
+ *	@boot_data: the boot image to modify.
+ *
+ *	Write the supplied device ID to the boot BIOS image.
+ */
+static void modify_device_id(int device_id, u8 *boot_data)
+{
+	legacy_pci_exp_rom_header_t *header;
+	pcir_data_t *pcir_header;
+	u32 cur_header = 0;
+
+	/*
+	 * Loop through all chained images and change the device ID's
+	 */
+	while (1) {
+		header = (legacy_pci_exp_rom_header_t *) &boot_data[cur_header];
+		pcir_header = (pcir_data_t *) &boot_data[cur_header +
+			      le16_to_cpu(*(u16*)header->pcir_offset)];
+
+		/*
+		 * Only modify the Device ID if code type is Legacy or EFI.
+		 * 0x00: Okay to modify
+		 * 0x01: FCODE. Do not modify
+		 * 0x03: Okay to modify
+		 * 0x04-0xFF: Do not modify
+		 */
+		if (pcir_header->code_type == 0x00) {
+			u8 csum = 0;
+			int i;
+
+			/*
+			 * Modify Device ID to match current adapter
+			 */
+			*(u16*) pcir_header->device_id = device_id;
+
+			/*
+			 * Set checksum temporarily to 0.
+			 * We will recalculate it later.
+			 */
+			header->cksum = 0x0;
+
+			/*
+			 * Calculate and update checksum
+			 */
+			for (i = 0; i < (header->size512 * 512); i++)
+				csum += (u8)boot_data[cur_header + i];
+
+			/*
+			 * Invert summed value to create the checksum
+			 * Writing new checksum value directly to the boot data
+			 */
+			boot_data[cur_header + 7] = -csum;
+
+		} else if (pcir_header->code_type == 0x03) {
+
+			/*
+			 * Modify Device ID to match current adapter
+			 */
+			*(u16*) pcir_header->device_id = device_id;
+
+		}
+
+
+		/*
+		 * Check indicator element to identify if this is the last
+		 * image in the ROM.
+		 */
+		if (pcir_header->indicator & 0x80)
+			break;
+
+		/*
+		 * Move header pointer up to the next image in the ROM.
+		 */
+		cur_header += header->size512 * 512;
+	}
+}
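
The fix-up above follows the legacy option-ROM rule that all bytes of the
image must sum to zero mod 256: clear the checksum byte (offset 7 in the
header above), sum the image, and store the arithmetic negation.  A
self-contained sketch of the same rule:

	#include <stdio.h>
	#include <string.h>

	static void fix_cksum(unsigned char *img, size_t len)
	{
		unsigned char sum = 0;

		img[7] = 0;			/* clear old checksum */
		for (size_t i = 0; i < len; i++)
			sum += img[i];
		img[7] = (unsigned char)-sum;	/* total now sums to 0 */
	}

	int main(void)
	{
		unsigned char img[512], check = 0;

		memset(img, 0xa5, sizeof(img));
		fix_cksum(img, sizeof(img));
		for (size_t i = 0; i < sizeof(img); i++)
			check += img[i];
		printf("residual sum = %u\n", check);	/* 0 */
		return 0;
	}
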
+
+/*
+ *	t4_load_boot - download boot flash
+ *	@adapter: the adapter
+ *	@boot_data: the boot image to write
+ *	@boot_addr: offset in flash to write boot_data
+ *	@size: image size
+ *
+ *	Write the supplied boot image to the card's serial flash.
+ *	The boot image has the following sections: a 28-byte header and the
+ *	boot image.
+ */
+int t4_load_boot(struct adapter *adap, u8 *boot_data,
+		 unsigned int boot_addr, unsigned int size)
+{
+	pci_exp_rom_header_t *header;
+	int pcir_offset;
+	pcir_data_t *pcir_header;
+	int ret, addr;
+	uint16_t device_id;
+	unsigned int i;
+	unsigned int boot_sector = (boot_addr * 1024);
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+	/*
+	 * Make sure the boot image does not encroach on the firmware region
+	 */
+	if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
+		CH_ERR(adap, "boot image encroaching on firmware region\n");
+		return -EFBIG;
+	}
+
+	/*
+	 * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
+	 * and Boot configuration data sections. These 3 boot sections span
+	 * sectors 0 to 7 in flash and live right before the FW image location.
+	 */
+	i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
+			sf_sec_size);
+	ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
+				     (boot_sector >> 16) + i - 1);
+
+	/*
+	 * If size == 0 then we're simply erasing the FLASH sectors associated
+	 * with the on-adapter option ROM file
+	 */
+	if (ret || (size == 0))
+		goto out;
+
+	/* Get boot header */
+	header = (pci_exp_rom_header_t *)boot_data;
+	pcir_offset = le16_to_cpu(*(u16 *)header->pcir_offset);
+	/* PCIR Data Structure */
+	pcir_header = (pcir_data_t *) &boot_data[pcir_offset];
+
+	/*
+	 * Perform some primitive sanity testing to avoid accidentally
+	 * writing garbage over the boot sectors.  We ought to check for
+	 * more but it's not worth it for now ...
+	 */
+	if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
+		CH_ERR(adap, "boot image too small/large\n");
+		return -EFBIG;
+	}
+
+#ifndef CHELSIO_T4_DIAGS
+	/*
+	 * Check BOOT ROM header signature
+	 */
+	if (le16_to_cpu(*(u16*)header->signature) != BOOT_SIGNATURE) {
+		CH_ERR(adap, "Boot image missing signature\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check PCI header signature
+	 */
+	if (le32_to_cpu(*(u32*)pcir_header->signature) != PCIR_SIGNATURE) {
+		CH_ERR(adap, "PCI header missing signature\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check Vendor ID matches Chelsio ID
+	 */
+	if (le16_to_cpu(*(u16*)pcir_header->vendor_id) != VENDOR_ID) {
+		CH_ERR(adap, "Vendor ID missing signature\n");
+		return -EINVAL;
+	}
+#endif
+
+	/*
+	 * Retrieve adapter's device ID
+	 */
+	t4_os_pci_read_cfg2(adap, PCI_DEVICE_ID, &device_id);
+	/* We want to deal with PF 0, so strip off the PF 4 indicator */
+	device_id = device_id & 0xf0ff;
+
+	/*
+	 * Check PCIE Device ID
+	 */
+	if (le16_to_cpu(*(u16*)pcir_header->device_id) != device_id) {
+		/*
+		 * Change the device ID in the Boot BIOS image to match
+		 * the Device ID of the current adapter.
+		 */
+		modify_device_id(device_id, boot_data);
+	}
+
+	/*
+	 * Skip over the first SF_PAGE_SIZE worth of data and write it after
+	 * we finish copying the rest of the boot image. This will ensure
+	 * that the BIOS boot header will only be written if the boot image
+	 * was written in full.
+	 */
+	addr = boot_sector;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		boot_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data, 0);
+		if (ret)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
+			     (const u8 *)header, 0);
+
+out:
+	if (ret)
+		CH_ERR(adap, "boot image download failed, error %d\n", ret);
+	return ret;
+}
+
+/*
+ *	t4_flash_bootcfg_addr - return the address of the flash OptionROM configuration
+ *	@adapter: the adapter
+ *
+ *	Return the address within the flash where the OptionROM Configuration
+ *	is stored, or an error if the device FLASH is too small to contain
+ *	an OptionROM Configuration.
+ */
+static int t4_flash_bootcfg_addr(struct adapter *adapter)
+{
+	/*
+	 * If the device FLASH isn't large enough to hold an OptionROM
+	 * Configuration File, return an error.
+	 */
+	if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+		return -ENOSPC;
+
+	return FLASH_BOOTCFG_START;
+}
+
+int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+	int ret, i, n, cfg_addr;
+	unsigned int addr;
+	unsigned int flash_cfg_start_sec;
+	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+
+	cfg_addr = t4_flash_bootcfg_addr(adap);
+	if (cfg_addr < 0)
+		return cfg_addr;
+
+	addr = cfg_addr;
+	flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+	if (size > FLASH_BOOTCFG_MAX_SIZE) {
+		CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
+			FLASH_BOOTCFG_MAX_SIZE);
+		return -EFBIG;
+	}
+
+	i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
+			 sf_sec_size);
+	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+					flash_cfg_start_sec + i - 1);
+
+	/*
+	 * If size == 0 then we're simply erasing the FLASH sectors associated
+	 * with the on-adapter OptionROM Configuration File.
+	 */
+	if (ret || size == 0)
+		goto out;
+
+	/* this will write to the flash up to SF_PAGE_SIZE at a time */
+	for (i = 0; i< size; i+= SF_PAGE_SIZE) {
+		if ( (size - i) <  SF_PAGE_SIZE)
+			n = size - i;
+		else
+			n = SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, n, cfg_data, 0);
+		if (ret)
+			goto out;
+
+		addr += SF_PAGE_SIZE;
+		cfg_data += SF_PAGE_SIZE;
+	}
+
+out:
+	if (ret)
+		CH_ERR(adap, "boot config data %s failed %d\n",
+				(size == 0 ? "clear" : "download"), ret);
+	return ret;
+}
+
+/**
+ *	t4_set_filter_mode - configure the optional components of filter tuples
+ *	@adap: the adapter
+ *	@mode_map: a bitmap selecting which optional filter components to enable
+ *
+ *	Sets the filter mode by selecting the optional components to enable
+ *	in filter tuples.  Returns 0 on success and a negative error if the
+ *	requested mode needs more bits than are available for optional
+ *	components.
+ */
+int t4_set_filter_mode(struct adapter *adap, unsigned int mode_map)
+{
+	static u8 width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
+
+	int i, nbits = 0;
+
+	for (i = S_FCOE; i <= S_FRAGMENTATION; i++)
+		if (mode_map & (1 << i))
+			nbits += width[i];
+	if (nbits > FILTER_OPT_LEN)
+		return -EINVAL;
+	if (t4_use_ldst(adap))
+		t4_fw_tp_pio_rw(adap, &mode_map, 1, A_TP_VLAN_PRI_MAP, 0);
+	else
+		t4_write_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, &mode_map,
+				  1, A_TP_VLAN_PRI_MAP);
+	read_filter_mode_and_ingress_config(adap);
+
 	return 0;
 }
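
The width table above lists how many tuple bits each optional component
consumes, in TP_VLAN_PRI_MAP bit order from FCOE through FRAGMENTATION; the
selected widths must sum to at most FILTER_OPT_LEN (36).  A standalone check
of that budget, assuming bit positions 0..9 in the same order:

	#include <stdio.h>

	#define FILTER_OPT_LEN 36	/* from t4_hw.h */

	int main(void)
	{
		static const int width[] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };
		/* PORT | VLAN | ETHERTYPE under the assumed bit order */
		unsigned int mode = (1 << 1) | (1 << 3) | (1 << 6);
		int nbits = 0;

		for (int i = 0; i < 10; i++)
			if (mode & (1 << i))
				nbits += width[i];
		printf("%d bits: %s\n", nbits,
		    nbits > FILTER_OPT_LEN ? "rejected" : "fits");	/* 36 fits */
		return 0;
	}
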
+
+/**
+ *	t4_clr_port_stats - clear port statistics
+ *	@adap: the adapter
+ *	@idx: the port index
+ *
+ *	Clear HW statistics for the given port.
+ */
+void t4_clr_port_stats(struct adapter *adap, int idx)
+{
+	unsigned int i;
+	u32 bgmap = t4_get_mps_bg_map(adap, idx);
+	u32 port_base_addr;
+
+	if (is_t4(adap))
+		port_base_addr = PORT_BASE(idx);
+	else
+		port_base_addr = T5_PORT_BASE(idx);
+
+	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+			i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+		t4_write_reg(adap, port_base_addr + i, 0);
+	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+			i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+		t4_write_reg(adap, port_base_addr + i, 0);
+	for (i = 0; i < 4; i++)
+		if (bgmap & (1 << i)) {
+			t4_write_reg(adap,
+			A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + i * 8, 0);
+			t4_write_reg(adap,
+			A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + i * 8, 0);
+		}
+}
+
+/**
+ *	t4_i2c_rd - read I2C data from adapter
+ *	@adap: the adapter
+ *	@port: Port number if per-port device; <0 if not
+ *	@devid: per-port device ID or absolute device ID
+ *	@offset: byte offset into device I2C space
+ *	@len: byte length of I2C space data
+ *	@buf: buffer in which to return I2C data
+ *
+ *	Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox,
+	      int port, unsigned int devid,
+	      unsigned int offset, unsigned int len,
+	      u8 *buf)
+{
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd ldst;
+	int ret;
+
+	if (port >= 4 ||
+	    devid >= 256 ||
+	    offset >= 256 ||
+	    len > sizeof ldst.u.i2c.data)
+		return -EINVAL;
+
+	memset(&ldst, 0, sizeof ldst);
+	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
+	ldst.op_to_addrspace =
+		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+			    F_FW_CMD_REQUEST |
+			    F_FW_CMD_READ |
+			    ldst_addrspace);
+	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
+	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
+	ldst.u.i2c.did = devid;
+	ldst.u.i2c.boffset = offset;
+	ldst.u.i2c.blen = len;
+	ret = t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
+	if (!ret)
+		memcpy(buf, ldst.u.i2c.data, len);
+	return ret;
+}
+
+/**
+ *	t4_i2c_wr - write I2C data to adapter
+ *	@adap: the adapter
+ *	@port: Port number if per-port device; <0 if not
+ *	@devid: per-port device ID or absolute device ID
+ *	@offset: byte offset into device I2C space
+ *	@len: byte length of I2C space data
+ *	@buf: buffer containing new I2C data
+ *
+ *	Write the I2C data to the indicated device and location.
+ */
+int t4_i2c_wr(struct adapter *adap, unsigned int mbox,
+	      int port, unsigned int devid,
+	      unsigned int offset, unsigned int len,
+	      u8 *buf)
+{
+	u32 ldst_addrspace;
+	struct fw_ldst_cmd ldst;
+
+	if (port >= 4 ||
+	    devid >= 256 ||
+	    offset >= 256 ||
+	    len > sizeof ldst.u.i2c.data)
+		return -EINVAL;
+
+	memset(&ldst, 0, sizeof ldst);
+	ldst_addrspace = V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_I2C);
+	ldst.op_to_addrspace =
+		cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+			    F_FW_CMD_REQUEST |
+			    F_FW_CMD_WRITE |
+			    ldst_addrspace);
+	ldst.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst));
+	ldst.u.i2c.pid = (port < 0 ? 0xff : port);
+	ldst.u.i2c.did = devid;
+	ldst.u.i2c.boffset = offset;
+	ldst.u.i2c.blen = len;
+	memcpy(ldst.u.i2c.data, buf, len);
+	return t4_wr_mbox(adap, mbox, &ldst, sizeof ldst, &ldst);
+}
+
+/**
+ * 	t4_sge_ctxt_rd - read an SGE context through FW
+ * 	@adap: the adapter
+ * 	@mbox: mailbox to use for the FW command
+ * 	@cid: the context id
+ * 	@ctype: the context type
+ * 	@data: where to store the context data
+ *
+ * 	Issues a FW command through the given mailbox to read an SGE context.
+ */
+int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
+		   enum ctxt_type ctype, u32 *data)
+{
+	int ret;
+	struct fw_ldst_cmd c;
+
+	if (ctype == CTXT_EGRESS)
+		ret = FW_LDST_ADDRSPC_SGE_EGRC;
+	else if (ctype == CTXT_INGRESS)
+		ret = FW_LDST_ADDRSPC_SGE_INGC;
+	else if (ctype == CTXT_FLM)
+		ret = FW_LDST_ADDRSPC_SGE_FLMC;
+	else
+		ret = FW_LDST_ADDRSPC_SGE_CONMC;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+					F_FW_CMD_REQUEST | F_FW_CMD_READ |
+					V_FW_LDST_CMD_ADDRSPACE(ret));
+	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+	c.u.idctxt.physid = cpu_to_be32(cid);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0) {
+		data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
+		data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
+		data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
+		data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
+		data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
+		data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+	}
+	return ret;
+}
+
+/**
+ * 	t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
+ * 	@adap: the adapter
+ * 	@cid: the context id
+ * 	@ctype: the context type
+ * 	@data: where to store the context data
+ *
+ * 	Reads an SGE context directly, bypassing FW.  This is only for
+ * 	debugging when FW is unavailable.
+ */
+int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type ctype,
+		      u32 *data)
+{
+	int i, ret;
+
+	t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
+	ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
+	if (!ret)
+		for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
+			*data++ = t4_read_reg(adap, i);
+	return ret;
+}
+
+int t4_sched_config(struct adapter *adapter, int type, int minmaxen,
+    		    int sleep_ok)
+{
+	struct fw_sched_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
+				      F_FW_CMD_REQUEST |
+				      F_FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	cmd.u.config.sc = FW_SCHED_SC_CONFIG;
+	cmd.u.config.type = type;
+	cmd.u.config.minmaxen = minmaxen;
+
+	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+			       NULL, sleep_ok);
+}
+
+int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
+		    int rateunit, int ratemode, int channel, int cl,
+		    int minrate, int maxrate, int weight, int pktsize,
+		    int sleep_ok)
+{
+	struct fw_sched_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
+				      F_FW_CMD_REQUEST |
+				      F_FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+	cmd.u.params.type = type;
+	cmd.u.params.level = level;
+	cmd.u.params.mode = mode;
+	cmd.u.params.ch = channel;
+	cmd.u.params.cl = cl;
+	cmd.u.params.unit = rateunit;
+	cmd.u.params.rate = ratemode;
+	cmd.u.params.min = cpu_to_be32(minrate);
+	cmd.u.params.max = cpu_to_be32(maxrate);
+	cmd.u.params.weight = cpu_to_be16(weight);
+	cmd.u.params.pktsize = cpu_to_be16(pktsize);
+
+	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+			       NULL, sleep_ok);
+}
+
+int t4_sched_params_ch_rl(struct adapter *adapter, int channel, int ratemode,
+    unsigned int maxrate, int sleep_ok)
+{
+	struct fw_sched_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
+				      F_FW_CMD_REQUEST |
+				      F_FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
+	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CH_RL;
+	cmd.u.params.ch = channel;
+	cmd.u.params.rate = ratemode;		/* REL or ABS */
+	cmd.u.params.max = cpu_to_be32(maxrate);	/* % or kbps */
+
+	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+			       NULL, sleep_ok);
+}
+
+int t4_sched_params_cl_wrr(struct adapter *adapter, int channel, int cl,
+    int weight, int sleep_ok)
+{
+	struct fw_sched_cmd cmd;
+
+	if (weight < 0 || weight > 100)
+		return -EINVAL;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
+				      F_FW_CMD_REQUEST |
+				      F_FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
+	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
+	cmd.u.params.ch = channel;
+	cmd.u.params.cl = cl;
+	cmd.u.params.weight = cpu_to_be16(weight);
+
+	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+			       NULL, sleep_ok);
+}
+
+int t4_sched_params_cl_rl_kbps(struct adapter *adapter, int channel, int cl,
+    int mode, unsigned int maxrate, int pktsize, int sleep_ok)
+{
+	struct fw_sched_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_SCHED_CMD) |
+				      F_FW_CMD_REQUEST |
+				      F_FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
+	cmd.u.params.type = FW_SCHED_TYPE_PKTSCHED;
+	cmd.u.params.level = FW_SCHED_PARAMS_LEVEL_CL_RL;
+	cmd.u.params.mode = mode;
+	cmd.u.params.ch = channel;
+	cmd.u.params.cl = cl;
+	cmd.u.params.unit = FW_SCHED_PARAMS_UNIT_BITRATE;
+	cmd.u.params.rate = FW_SCHED_PARAMS_RATE_ABS;
+	cmd.u.params.max = cpu_to_be32(maxrate);
+	cmd.u.params.pktsize = cpu_to_be16(pktsize);
+
+	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
+			       NULL, sleep_ok);
+}
+
+/*
+ *	t4_config_watchdog - configure (enable/disable) a watchdog timer
+ *	@adapter: the adapter
+ * 	@mbox: mailbox to use for the FW command
+ * 	@pf: the PF owning the queue
+ * 	@vf: the VF owning the queue
+ *	@timeout: watchdog timeout in ms
+ *	@action: watchdog timer / action
+ *
+ *	There are separate watchdog timers for each possible watchdog
+ *	action.  Configure one of the watchdog timers by setting a non-zero
+ *	timeout.  Disable a watchdog timer by using a timeout of zero.
+ */
+int t4_config_watchdog(struct adapter *adapter, unsigned int mbox,
+		       unsigned int pf, unsigned int vf,
+		       unsigned int timeout, unsigned int action)
+{
+	struct fw_watchdog_cmd wdog;
+	unsigned int ticks;
+
+	/*
+	 * The watchdog command expects a timeout in units of 10ms so we need
+	 * to convert it here (via rounding) and force a minimum of one 10ms
+	 * "tick" if the timeout is non-zero but the convertion results in 0
+	 * ticks.
+	 */
+	ticks = (timeout + 5)/10;
+	if (timeout && !ticks)
+		ticks = 1;
+
+	memset(&wdog, 0, sizeof wdog);
+	wdog.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_WATCHDOG_CMD) |
+				     F_FW_CMD_REQUEST |
+				     F_FW_CMD_WRITE |
+				     V_FW_PARAMS_CMD_PFN(pf) |
+				     V_FW_PARAMS_CMD_VFN(vf));
+	wdog.retval_len16 = cpu_to_be32(FW_LEN16(wdog));
+	wdog.timeout = cpu_to_be32(ticks);
+	wdog.action = cpu_to_be32(action);
+
+	return t4_wr_mbox(adapter, mbox, &wdog, sizeof wdog, NULL);
+}
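
Concretely, the conversion above rounds milliseconds to the nearest 10ms
tick and then clamps a small non-zero timeout up to one tick so it can't
silently become "disabled".  A quick standalone check:

	#include <stdio.h>

	static unsigned int to_ticks(unsigned int timeout_ms)
	{
		unsigned int ticks = (timeout_ms + 5) / 10;	/* round */

		if (timeout_ms && !ticks)
			ticks = 1;	/* keep non-zero timeouts armed */
		return ticks;
	}

	int main(void)
	{
		printf("%u %u %u %u\n",
		    to_ticks(0),	/* 0: disabled */
		    to_ticks(3),	/* rounds to 0, clamped to 1 */
		    to_ticks(14),	/* 1 */
		    to_ticks(15));	/* 2 */
		return 0;
	}
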
+
+int t4_get_devlog_level(struct adapter *adapter, unsigned int *level)
+{
+	struct fw_devlog_cmd devlog_cmd;
+	int ret;
+
+	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
+					     F_FW_CMD_REQUEST | F_FW_CMD_READ);
+	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
+	ret = t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
+			 sizeof(devlog_cmd), &devlog_cmd);
+	if (ret)
+		return ret;
+
+	*level = devlog_cmd.level;
+	return 0;
+}
+
+int t4_set_devlog_level(struct adapter *adapter, unsigned int level)
+{
+	struct fw_devlog_cmd devlog_cmd;
+
+	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
+	devlog_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
+					     F_FW_CMD_REQUEST |
+					     F_FW_CMD_WRITE);
+	devlog_cmd.level = level;
+	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
+	return t4_wr_mbox(adapter, adapter->mbox, &devlog_cmd,
+			  sizeof(devlog_cmd), &devlog_cmd);
+}
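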

Modified: trunk/sys/dev/cxgbe/common/t4_hw.h
===================================================================
--- trunk/sys/dev/cxgbe/common/t4_hw.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/t4_hw.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2011 Chelsio Communications, Inc.
+ * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/common/t4_hw.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/common/t4_hw.h 308304 2016-11-04 18:45:06Z jhb $
  *
  */
 
@@ -33,27 +34,37 @@
 #include "osdep.h"
 
 enum {
-	NCHAN          = 4,     /* # of HW channels */
-	MAX_MTU        = 9600,  /* max MAC MTU, excluding header + FCS */
-	EEPROMSIZE     = 17408, /* Serial EEPROM physical size */
-	EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
-	EEPROMPFSIZE   = 1024,  /* EEPROM writable area size for PFn, n>0 */
-	RSS_NENTRIES   = 2048,  /* # of entries in RSS mapping table */
-	TCB_SIZE       = 128,   /* TCB size */
-	NMTUS          = 16,    /* size of MTU table */
-	NCCTRL_WIN     = 32,    /* # of congestion control windows */
-	NTX_SCHED      = 8,     /* # of HW Tx scheduling queues */
-	PM_NSTATS      = 5,     /* # of PM stats */
-	MBOX_LEN       = 64,    /* mailbox size in bytes */
-	TRACE_LEN      = 112,   /* length of trace data and mask */
-	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
-	NWOL_PAT       = 8,     /* # of WoL patterns */
-	WOL_PAT_LEN    = 128,   /* length of WoL patterns */
+	NCHAN           = 4,     /* # of HW channels */
+	T6_NCHAN        = 2,
+	MAX_NCHAN       = 4,
+	MAX_MTU         = 9600,  /* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE      = 17408, /* Serial EEPROM physical size */
+	EEPROMVSIZE     = 32768, /* Serial EEPROM virtual address space size */
+	EEPROMPFSIZE    = 1024,  /* EEPROM writable area size for PFn, n>0 */
+	RSS_NENTRIES    = 2048,  /* # of entries in RSS mapping table */
+	TCB_SIZE        = 128,   /* TCB size */
+	NMTUS           = 16,    /* size of MTU table */
+	NCCTRL_WIN      = 32,    /* # of congestion control windows */
+	NTX_SCHED       = 8,     /* # of HW Tx scheduling queues */
+	PM_NSTATS       = 5,     /* # of PM stats */
+	T6_PM_NSTATS    = 7,
+	MAX_PM_NSTATS   = 7,
+	MBOX_LEN        = 64,    /* mailbox size in bytes */
+	NTRACE          = 4,     /* # of tracing filters */
+	TRACE_LEN       = 112,   /* length of trace data and mask */
+	FILTER_OPT_LEN  = 36,    /* filter tuple width of optional components */
+	NWOL_PAT        = 8,     /* # of WoL patterns */
+	WOL_PAT_LEN     = 128,   /* length of WoL patterns */
+	UDBS_SEG_SIZE   = 128,   /* Segment size of BAR2 doorbells */
+	UDBS_SEG_SHIFT  = 7,     /* log2(UDBS_SEG_SIZE) */
+	UDBS_DB_OFFSET  = 8,     /* offset of the 4B doorbell in a segment */
+	UDBS_WR_OFFSET  = 64,    /* offset of the work request in a segment */
 };
 
 enum {
 	CIM_NUM_IBQ    = 6,     /* # of CIM IBQs */
 	CIM_NUM_OBQ    = 6,     /* # of CIM OBQs */
+	CIM_NUM_OBQ_T5 = 8,     /* # of CIM OBQs for T5 adapter */
 	CIMLA_SIZE     = 2048,  /* # of 32-bit words in CIM LA */
 	CIM_PIFLA_SIZE = 64,    /* # of 192-bit words in CIM PIF LA */
 	CIM_MALA_SIZE  = 64,    /* # of 160-bit words in CIM MA LA */
@@ -80,6 +91,8 @@
 	SGE_CTXT_SIZE = 24,       /* size of SGE context */
 	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
 	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+	SGE_MAX_IQ_SIZE = 65520,
+	SGE_FLBUF_SIZES = 16,
 };
 
 struct sge_qstat {                /* data written to SGE queue status entries */
@@ -221,11 +234,19 @@
 	 * Location of firmware image in FLASH.
 	 */
 	FLASH_FW_START_SEC = 8,
-	FLASH_FW_NSECS = 8,
+	FLASH_FW_NSECS = 16,
 	FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
 	FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
-        
+
 	/*
+	 * Location of bootstrap firmware image in FLASH.
+	 */
+	FLASH_FWBOOTSTRAP_START_SEC = 27,
+	FLASH_FWBOOTSTRAP_NSECS = 1,
+	FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+	FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
+	/*
 	 * iSCSI persistent/crash information.
 	 */
 	FLASH_ISCSI_CRASH_START_SEC = 29,
@@ -242,10 +263,7 @@
 	FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
 
 	/*
-	 * Location of Firmware Configuration File in FLASH.  Since the FPGA
-	 * "FLASH" is smaller we need to store the Configuration File in a
-	 * different location -- which will overlap the end of the firmware
-	 * image if firmware ever gets that large ...
+	 * Location of Firmware Configuration File in FLASH.
 	 */
 	FLASH_CFG_START_SEC = 31,
 	FLASH_CFG_NSECS = 1,
@@ -252,8 +270,11 @@
 	FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
 	FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
 
-	FLASH_FPGA_CFG_START_SEC = 15,
-	FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC),
+	/*
+	 * We don't support FLASH devices which can't support the full
+	 * standard set of sections which we need for normal operations.
+	 */
+	FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
 
 	/*
 	 * Sectors 32-63 are reserved for FLASH failover.
@@ -263,4 +284,9 @@
 #undef FLASH_START
 #undef FLASH_MAX_SIZE
 
+#define S_SGE_TIMESTAMP 0
+#define M_SGE_TIMESTAMP 0xfffffffffffffffULL
+#define V_SGE_TIMESTAMP(x) ((__u64)(x) << S_SGE_TIMESTAMP)
+#define G_SGE_TIMESTAMP(x) (((__u64)(x) >> S_SGE_TIMESTAMP) & M_SGE_TIMESTAMP)
+
 #endif /* __T4_HW_H */
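
[Ed. note] The new SGE_TIMESTAMP accessors follow the driver's usual S_/M_/V_/G_ pattern on a 60-bit field; a standalone illustration (constants copied from above, __u64 swapped for uint64_t):

#include <stdio.h>
#include <stdint.h>

#define S_SGE_TIMESTAMP 0
#define M_SGE_TIMESTAMP 0xfffffffffffffffULL
#define G_SGE_TIMESTAMP(x) (((uint64_t)(x) >> S_SGE_TIMESTAMP) & M_SGE_TIMESTAMP)

int
main(void)
{
	uint64_t raw = 0xf123456789abcdefULL;	/* top nibble is not timestamp */

	/* Prints 0x123456789abcdef: only the low 60 bits survive the mask. */
	printf("%#llx\n", (unsigned long long)G_SGE_TIMESTAMP(raw));
	return (0);
}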

Modified: trunk/sys/dev/cxgbe/common/t4_msg.h
===================================================================
--- trunk/sys/dev/cxgbe/common/t4_msg.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/t4_msg.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2011 Chelsio Communications, Inc.
+ * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/common/t4_msg.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/common/t4_msg.h 318775 2017-05-24 05:30:36Z np $
  *
  */
 
@@ -55,8 +56,9 @@
 	CPL_BARRIER           = 0x18,
 	CPL_TID_RELEASE       = 0x1A,
 	CPL_TAG_READ_REQ      = 0x1B,
+	CPL_SRQ_TABLE_REQ     = 0x1C,
 	CPL_TX_PKT_FSO        = 0x1E,
-	CPL_TX_PKT_ISO        = 0x1F,
+	CPL_TX_DATA_ISO       = 0x1F,
 
 	CPL_CLOSE_LISTSRV_RPL = 0x20,
 	CPL_ERROR             = 0x21,
@@ -97,7 +99,7 @@
 	CPL_RX_DATA_DDP       = 0x42,
 	CPL_SMT_READ_RPL      = 0x43,
 	CPL_PASS_ACCEPT_REQ   = 0x44,
-	CPL_RX2TX_PKT         = 0x45,
+	CPL_RX_ISCSI_CMP      = 0x45,
 	CPL_RX_FCOE_DDP       = 0x46,
 	CPL_FCOE_HDR          = 0x47,
 	CPL_T5_TRACE_PKT      = 0x48,
@@ -104,6 +106,8 @@
 	CPL_RX_ISCSI_DDP      = 0x49,
 	CPL_RX_FCOE_DIF       = 0x4A,
 	CPL_RX_DATA_DIF       = 0x4B,
+	CPL_ERR_NOTIFY	      = 0x4D,
+	CPL_RX_TLS_CMP        = 0x4E,
 
 	CPL_RDMA_READ_REQ     = 0x60,
 	CPL_RX_ISCSI_DIF      = 0x60,
@@ -111,7 +115,12 @@
 	CPL_SET_LE_REQ        = 0x80,
 	CPL_PASS_OPEN_REQ6    = 0x81,
 	CPL_ACT_OPEN_REQ6     = 0x83,
+	CPL_TX_TLS_PDU        = 0x88,
+	CPL_TX_TLS_SFO        = 0x89,
 
+	CPL_TX_SEC_PDU        = 0x8A,
+	CPL_TX_TLS_ACK        = 0x8B,
+
 	CPL_RDMA_TERMINATE    = 0xA2,
 	CPL_RDMA_WRITE        = 0xA4,
 	CPL_SGE_EGR_UPDATE    = 0xA5,
@@ -123,9 +132,11 @@
 	CPL_RDMA_ATOMIC_RPL   = 0xAB,
 	CPL_RDMA_IMM_DATA     = 0xAC,
 	CPL_RDMA_IMM_DATA_SE  = 0xAD,
+	CPL_RX_MPS_PKT        = 0xAF,
 
 	CPL_TRACE_PKT         = 0xB0,
 	CPL_RX2TX_DATA        = 0xB1,
+	CPL_TLS_DATA          = 0xB1,
 	CPL_ISCSI_DATA        = 0xB2,
 	CPL_FCOE_DATA         = 0xB3,
 
@@ -132,9 +143,12 @@
 	CPL_FW4_MSG           = 0xC0,
 	CPL_FW4_PLD           = 0xC1,
 	CPL_FW4_ACK           = 0xC3,
+	CPL_SRQ_TABLE_RPL     = 0xCC,
+	CPL_RX_PHYS_DSGL      = 0xD0,
 
 	CPL_FW6_MSG           = 0xE0,
 	CPL_FW6_PLD           = 0xE1,
+	CPL_TX_TNL_LSO        = 0xEC,
 	CPL_TX_PKT_LSO        = 0xED,
 	CPL_TX_PKT_XT         = 0xEE,
 
@@ -144,6 +158,7 @@
 enum CPL_error {
 	CPL_ERR_NONE               = 0,
 	CPL_ERR_TCAM_PARITY        = 1,
+	CPL_ERR_TCAM_MISS          = 2,
 	CPL_ERR_TCAM_FULL          = 3,
 	CPL_ERR_BAD_LENGTH         = 15,
 	CPL_ERR_BAD_ROUTE          = 18,
@@ -163,8 +178,24 @@
 	CPL_ERR_WAIT_ARP_RPL       = 41,
 	CPL_ERR_ABORT_FAILED       = 42,
 	CPL_ERR_IWARP_FLM          = 50,
+	CPL_CONTAINS_READ_RPL      = 60,
+	CPL_CONTAINS_WRITE_RPL     = 61,
 };
 
+/*
+ * Some of the error codes above implicitly indicate that there is no TID
+ * allocated with the result of an ACT_OPEN.  We use this predicate to make
+ * that explicit.
+ */
+static inline int act_open_has_tid(int status)
+{
+	return (status != CPL_ERR_TCAM_PARITY &&
+		status != CPL_ERR_TCAM_MISS &&
+		status != CPL_ERR_TCAM_FULL &&
+		status != CPL_ERR_CONN_EXIST_SYNRECV &&
+		status != CPL_ERR_CONN_EXIST);
+}
+
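[Ed. note] A failed active open may or may not have consumed a hardware TID; the predicate above makes the cleanup decision explicit. A hypothetical handler fragment (release_tid() stands in for whatever release path the caller actually uses):

static void
act_open_failure_cleanup(struct adapter *sc, u_int tid, u_int status)
{
	/* Only statuses that actually allocated a TID need to give it back. */
	if (act_open_has_tid(status))
		release_tid(sc, tid);	/* hypothetical helper */
}
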
 enum {
 	CPL_CONN_POLICY_AUTO = 0,
 	CPL_CONN_POLICY_ASK  = 1,
@@ -178,6 +209,7 @@
 	ULP_MODE_RDMA          = 4,
 	ULP_MODE_TCPDDP        = 5,
 	ULP_MODE_FCOE          = 6,
+	ULP_MODE_TLS           = 8,
 };
 
 enum {
@@ -271,6 +303,7 @@
 
 /* extract the TID from a CPL command */
 #define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+#define GET_OPCODE(cmd) ((cmd)->ot.opcode)
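[Ed. note] GET_TID() byte-swaps and masks, while the new GET_OPCODE() reads the opcode byte directly; a standalone sketch of the packing (the 24-bit TID mask is an assumption here, the driver's M_TID is authoritative):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* opcode_tid in host order: an 8-bit opcode over the TID. */
	uint32_t opcode_tid = (0xA5u << 24) | 0xBEEFu;

	printf("opcode %#x tid %#x\n",
	    opcode_tid >> 24, opcode_tid & 0xffffffu);
	return (0);
}
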
 
 /* partitioning of TID fields that also carry a queue id */
 #define S_TID_TID    0
@@ -335,6 +368,12 @@
 #define M_QNUM 0xFFFF
 #define G_QNUM(x) (((x) >> S_QNUM) & M_QNUM)
 
+#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW)
+# define RSS_HDR struct rss_header rss_hdr;
+#else
+# define RSS_HDR
+#endif
+
 #ifndef CHELSIO_FW
 struct work_request_hdr {
 	__be32 wr_hi;
@@ -356,11 +395,9 @@
 
 # define WR_HDR struct work_request_hdr wr
 # define WR_HDR_SIZE sizeof(struct work_request_hdr)
-# define RSS_HDR
 #else
 # define WR_HDR
 # define WR_HDR_SIZE 0
-# define RSS_HDR struct rss_header rss_hdr;
 #endif
 
 /* option 0 fields */
@@ -478,6 +515,16 @@
 #define V_CONN_POLICY(x) ((x) << S_CONN_POLICY)
 #define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY)
 
+#define S_T5_FILT_INFO    24
+#define M_T5_FILT_INFO    0xffffffffffULL
+#define V_T5_FILT_INFO(x) ((x) << S_T5_FILT_INFO)
+#define G_T5_FILT_INFO(x) (((x) >> S_T5_FILT_INFO) & M_T5_FILT_INFO)
+
+#define S_FILT_INFO    28
+#define M_FILT_INFO    0xfffffffffULL
+#define V_FILT_INFO(x) ((x) << S_FILT_INFO)
+#define G_FILT_INFO(x) (((x) >> S_FILT_INFO) & M_FILT_INFO)
+
 /* option 2 fields */
 #define S_RSS_QUEUE    0
 #define M_RSS_QUEUE    0x3FF
@@ -511,6 +558,10 @@
 #define V_CONG_CNTRL_VALID(x) ((x) << S_CONG_CNTRL_VALID)
 #define F_CONG_CNTRL_VALID    V_CONG_CNTRL_VALID(1U)
 
+#define S_T5_ISS    18
+#define V_T5_ISS(x) ((x) << S_T5_ISS)
+#define F_T5_ISS    V_T5_ISS(1U)
+
 #define S_PACE_VALID    19
 #define V_PACE_VALID(x) ((x) << S_PACE_VALID)
 #define F_PACE_VALID    V_PACE_VALID(1U)
@@ -552,6 +603,10 @@
 #define V_SACK_EN(x) ((x) << S_SACK_EN)
 #define F_SACK_EN    V_SACK_EN(1U)
 
+#define S_T5_OPT_2_VALID    31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID    V_T5_OPT_2_VALID(1U)
+
 struct cpl_pass_open_req {
 	WR_HDR;
 	union opcode_tid ot;
@@ -606,12 +661,31 @@
 #define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS)
 
 /* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */
-#define G_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
-#define G_TCPOPT_SACK(x)       (((x) >> 6) & 1)
-#define G_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
-#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
-#define G_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
+#define S_TCPOPT_WSCALE_OK	5
+#define M_TCPOPT_WSCALE_OK  	0x1
+#define V_TCPOPT_WSCALE_OK(x)	((x) << S_TCPOPT_WSCALE_OK)
+#define G_TCPOPT_WSCALE_OK(x)	(((x) >> S_TCPOPT_WSCALE_OK) & M_TCPOPT_WSCALE_OK)
 
+#define S_TCPOPT_SACK		6
+#define M_TCPOPT_SACK		0x1
+#define V_TCPOPT_SACK(x)	((x) << S_TCPOPT_SACK)
+#define G_TCPOPT_SACK(x)	(((x) >> S_TCPOPT_SACK) & M_TCPOPT_SACK)
+
+#define S_TCPOPT_TSTAMP		7
+#define M_TCPOPT_TSTAMP		0x1
+#define V_TCPOPT_TSTAMP(x)	((x) << S_TCPOPT_TSTAMP)
+#define G_TCPOPT_TSTAMP(x)	(((x) >> S_TCPOPT_TSTAMP) & M_TCPOPT_TSTAMP)
+
+#define S_TCPOPT_SND_WSCALE	8
+#define M_TCPOPT_SND_WSCALE	0xF
+#define V_TCPOPT_SND_WSCALE(x)	((x) << S_TCPOPT_SND_WSCALE)
+#define G_TCPOPT_SND_WSCALE(x)	(((x) >> S_TCPOPT_SND_WSCALE) & M_TCPOPT_SND_WSCALE)
+
+#define S_TCPOPT_MSS	12
+#define M_TCPOPT_MSS	0xF
+#define V_TCPOPT_MSS(x)	((x) << S_TCPOPT_MSS)
+#define G_TCPOPT_MSS(x)	(((x) >> S_TCPOPT_MSS) & M_TCPOPT_MSS)
+
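[Ed. note] The one-off extractors were expanded into full S_/M_/V_/G_ sets. A standalone decode sketch (constants copied from above; the 4-bit MSS field is an index into the NMTUS=16 entry MTU table, not a byte count):

#include <stdio.h>
#include <stdint.h>

#define S_TCPOPT_SND_WSCALE	8
#define M_TCPOPT_SND_WSCALE	0xF
#define G_TCPOPT_SND_WSCALE(x)	(((x) >> S_TCPOPT_SND_WSCALE) & M_TCPOPT_SND_WSCALE)

#define S_TCPOPT_MSS	12
#define M_TCPOPT_MSS	0xF
#define G_TCPOPT_MSS(x)	(((x) >> S_TCPOPT_MSS) & M_TCPOPT_MSS)

int
main(void)
{
	/* MTU index 5, send wscale 7, tstamp and wscale-ok bits set. */
	uint16_t tcp_opt = (5u << 12) | (7u << 8) | (1u << 7) | (1u << 5);

	printf("mtu_idx=%u snd_wscale=%u\n",
	    (unsigned)G_TCPOPT_MSS(tcp_opt),
	    (unsigned)G_TCPOPT_SND_WSCALE(tcp_opt));
	return (0);
}
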
 struct cpl_pass_accept_req {
 	RSS_HDR
 	union opcode_tid ot;
@@ -635,16 +709,29 @@
 #define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN)
 #define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN)
 
+#define S_T6_TCP_HDR_LEN   8
+#define V_T6_TCP_HDR_LEN(x) ((x) << S_T6_TCP_HDR_LEN)
+#define G_T6_TCP_HDR_LEN(x) (((x) >> S_T6_TCP_HDR_LEN) & M_TCP_HDR_LEN)
+
 #define S_IP_HDR_LEN    16
 #define M_IP_HDR_LEN    0x3FF
 #define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN)
 #define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN)
 
+#define S_T6_IP_HDR_LEN    14
+#define V_T6_IP_HDR_LEN(x) ((x) << S_T6_IP_HDR_LEN)
+#define G_T6_IP_HDR_LEN(x) (((x) >> S_T6_IP_HDR_LEN) & M_IP_HDR_LEN)
+
 #define S_ETH_HDR_LEN    26
 #define M_ETH_HDR_LEN    0x3F
 #define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN)
 #define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN)
 
+#define S_T6_ETH_HDR_LEN    24
+#define M_T6_ETH_HDR_LEN    0xFF
+#define V_T6_ETH_HDR_LEN(x) ((x) << S_T6_ETH_HDR_LEN)
+#define G_T6_ETH_HDR_LEN(x) (((x) >> S_T6_ETH_HDR_LEN) & M_T6_ETH_HDR_LEN)
+
 /* cpl_pass_accept_req.l2info fields */
 #define S_SYN_MAC_IDX    0
 #define M_SYN_MAC_IDX    0x1FF
@@ -667,6 +754,18 @@
 	__be64 opt0;
 };
 
+struct cpl_t5_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+	__be32 iss;
+	union {
+		__be32 rsvd; /* T5 */
+		__be32 opt3; /* T6 */
+	} u;
+};
+
 struct cpl_act_open_req {
 	WR_HDR;
 	union opcode_tid ot;
@@ -679,6 +778,10 @@
 	__be32 opt2;
 };
 
+#define S_FILTER_TUPLE	24
+#define M_FILTER_TUPLE	0xFFFFFFFFFF
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
 struct cpl_t5_act_open_req {
 	WR_HDR;
 	union opcode_tid ot;
@@ -687,11 +790,31 @@
 	__be32 local_ip;
 	__be32 peer_ip;
 	__be64 opt0;
-	__be32 rsvd;
+	__be32 iss;
 	__be32 opt2;
 	__be64 params;
 };
 
+struct cpl_t6_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be32 iss;
+	__be32 opt2;
+	__be64 params;
+	__be32 rsvd2;
+	__be32 opt3;
+};
+
+/* cpl_{t5,t6}_act_open_req.params field */
+#define S_AOPEN_FCOEMASK	0
+#define V_AOPEN_FCOEMASK(x)	((x) << S_AOPEN_FCOEMASK)
+#define F_AOPEN_FCOEMASK	V_AOPEN_FCOEMASK(1U)
+
 struct cpl_act_open_req6 {
 	WR_HDR;
 	union opcode_tid ot;
@@ -716,11 +839,28 @@
 	__be64 peer_ip_hi;
 	__be64 peer_ip_lo;
 	__be64 opt0;
-	__be32 rsvd;
+	__be32 iss;
 	__be32 opt2;
 	__be64 params;
 };
 
+struct cpl_t6_act_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be32 iss;
+	__be32 opt2;
+	__be64 params;
+	__be32 rsvd2;
+	__be32 opt3;
+};
+
 struct cpl_act_open_rpl {
 	RSS_HDR
 	union opcode_tid ot;
@@ -863,6 +1003,28 @@
 	__u8  status;
 };
 
+struct cpl_abort_req_rss6 {
+	RSS_HDR
+	union opcode_tid ot;
+	__u32 srqidx_status;
+};
+
+#define S_ABORT_RSS_STATUS    0
+#define M_ABORT_RSS_STATUS    0xff
+#define V_ABORT_RSS_STATUS(x) ((x) << S_ABORT_RSS_STATUS)
+#define G_ABORT_RSS_STATUS(x) (((x) >> S_ABORT_RSS_STATUS) & M_ABORT_RSS_STATUS)
+
+#define S_ABORT_RSS_SRQIDX    8
+#define M_ABORT_RSS_SRQIDX    0xffffff
+#define V_ABORT_RSS_SRQIDX(x) ((x) << S_ABORT_RSS_SRQIDX)
+#define G_ABORT_RSS_SRQIDX(x) (((x) >> S_ABORT_RSS_SRQIDX) & M_ABORT_RSS_SRQIDX)
+
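[Ed. note] On T6 the abort status word also carries an SRQ index; standalone decode sketch (constants copied from above; byte-swapping of the wire field is elided):

#include <stdio.h>
#include <stdint.h>

#define S_ABORT_RSS_STATUS    0
#define M_ABORT_RSS_STATUS    0xff
#define G_ABORT_RSS_STATUS(x) (((x) >> S_ABORT_RSS_STATUS) & M_ABORT_RSS_STATUS)

#define S_ABORT_RSS_SRQIDX    8
#define M_ABORT_RSS_SRQIDX    0xffffff
#define G_ABORT_RSS_SRQIDX(x) (((x) >> S_ABORT_RSS_SRQIDX) & M_ABORT_RSS_SRQIDX)

int
main(void)
{
	uint32_t srqidx_status = (0x123u << 8) | 0x2a;

	printf("srqidx=%#x status=%#x\n",
	    (unsigned)G_ABORT_RSS_SRQIDX(srqidx_status),
	    (unsigned)G_ABORT_RSS_STATUS(srqidx_status));
	return (0);
}
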
+
+/*
+ * cpl_abort_req status command code for T6:
+ * bit[0] specifies whether to send RST (0) to the remote peer or suppress
+ * it (1),
+ * bit[1] indicates the ABORT_REQ was sent after a CLOSE_CON_REQ,
+ * bit[2] specifies whether to disable the mmgr (1) or not (0).
+ */
 struct cpl_abort_req {
 	WR_HDR;
 	union opcode_tid ot;
@@ -879,6 +1041,12 @@
 	__u8  status;
 };
 
+struct cpl_abort_rpl_rss6 {
+	RSS_HDR
+	union opcode_tid ot;
+	__u32 srqidx_status;
+};
+
 struct cpl_abort_rpl {
 	WR_HDR;
 	union opcode_tid ot;
@@ -954,10 +1122,14 @@
 #define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE)
 
 #define S_TX_ULP_MODE    10
-#define M_TX_ULP_MODE    0xF
+#define M_TX_ULP_MODE    0x7
 #define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE)
 #define G_TX_ULP_MODE(x) (((x) >> S_TX_ULP_MODE) & M_TX_ULP_MODE)
 
+#define S_TX_FORCE    13
+#define V_TX_FORCE(x) ((x) << S_TX_FORCE)
+#define F_TX_FORCE    V_TX_FORCE(1U)
+
 #define S_TX_SHOVE    14
 #define V_TX_SHOVE(x) ((x) << S_TX_SHOVE)
 #define F_TX_SHOVE    V_TX_SHOVE(1U)
@@ -982,6 +1154,10 @@
 #define V_TX_TNL(x) ((x) << S_TX_TNL)
 #define F_TX_TNL    V_TX_TNL(1U)
 
+#define S_T6_TX_FORCE    20
+#define V_T6_TX_FORCE(x) ((x) << S_T6_TX_FORCE)
+#define F_T6_TX_FORCE    V_T6_TX_FORCE(1U)
+
 /* additional tx_data_wr.flags fields */
 #define S_TX_CPU_IDX    0
 #define M_TX_CPU_IDX    0x3F
@@ -1053,6 +1229,12 @@
 #define V_TXPKT_OVLAN_IDX(x) ((x) << S_TXPKT_OVLAN_IDX)
 #define G_TXPKT_OVLAN_IDX(x) (((x) >> S_TXPKT_OVLAN_IDX) & M_TXPKT_OVLAN_IDX)
 
+#define S_TXPKT_T5_OVLAN_IDX    12
+#define M_TXPKT_T5_OVLAN_IDX    0x7
+#define V_TXPKT_T5_OVLAN_IDX(x) ((x) << S_TXPKT_T5_OVLAN_IDX)
+#define G_TXPKT_T5_OVLAN_IDX(x) (((x) >> S_TXPKT_T5_OVLAN_IDX) & \
+				M_TXPKT_T5_OVLAN_IDX)
+
 #define S_TXPKT_INTF    16
 #define M_TXPKT_INTF    0xF
 #define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
@@ -1062,10 +1244,18 @@
 #define V_TXPKT_SPECIAL_STAT(x) ((x) << S_TXPKT_SPECIAL_STAT)
 #define F_TXPKT_SPECIAL_STAT    V_TXPKT_SPECIAL_STAT(1U)
 
+#define S_TXPKT_T5_FCS_DIS    21
+#define V_TXPKT_T5_FCS_DIS(x) ((x) << S_TXPKT_T5_FCS_DIS)
+#define F_TXPKT_T5_FCS_DIS    V_TXPKT_T5_FCS_DIS(1U)
+
 #define S_TXPKT_INS_OVLAN    21
 #define V_TXPKT_INS_OVLAN(x) ((x) << S_TXPKT_INS_OVLAN)
 #define F_TXPKT_INS_OVLAN    V_TXPKT_INS_OVLAN(1U)
 
+#define S_TXPKT_T5_INS_OVLAN    15
+#define V_TXPKT_T5_INS_OVLAN(x) ((x) << S_TXPKT_T5_INS_OVLAN)
+#define F_TXPKT_T5_INS_OVLAN    V_TXPKT_T5_INS_OVLAN(1U)
+
 #define S_TXPKT_STAT_DIS    22
 #define V_TXPKT_STAT_DIS(x) ((x) << S_TXPKT_STAT_DIS)
 #define F_TXPKT_STAT_DIS    V_TXPKT_STAT_DIS(1U)
@@ -1104,6 +1294,10 @@
 #define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN)
 #define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN)
 
+#define M_T6_TXPKT_IPHDR_LEN    0xFFF
+#define G_T6_TXPKT_IPHDR_LEN(x) \
+	(((x) >> S_TXPKT_IPHDR_LEN) & M_T6_TXPKT_IPHDR_LEN)
+
 #define S_TXPKT_CSUM_LOC    30
 #define M_TXPKT_CSUM_LOC    0x3FF
 #define V_TXPKT_CSUM_LOC(x) ((__u64)(x) << S_TXPKT_CSUM_LOC)
@@ -1114,6 +1308,12 @@
 #define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN)
 #define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN)
 
+#define S_T6_TXPKT_ETHHDR_LEN    32
+#define M_T6_TXPKT_ETHHDR_LEN    0xFF
+#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN)
+#define G_T6_TXPKT_ETHHDR_LEN(x) \
+	(((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN)
+
 #define S_TXPKT_CSUM_TYPE    40
 #define M_TXPKT_CSUM_TYPE    0xF
 #define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE)
@@ -1208,6 +1408,11 @@
 #define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE)
 #define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE)
 
+#define S_LSO_T5_XFER_SIZE	   0
+#define M_LSO_T5_XFER_SIZE    0xFFFFFFF
+#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE)
+#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE)
+
 /* cpl_tx_pkt_lso_core.mss fields */
 #define S_LSO_MSS    0
 #define M_LSO_MSS    0x3FFF
@@ -1253,39 +1458,84 @@
 };
 
 struct cpl_tx_data_iso {
-	WR_HDR;
-	__be32 iso_ctrl;
-	__u8   rsvd;
+	__be32 op_to_scsi;
+	__u8   reserved1;
 	__u8   ahs_len;
-	__be16 mss;
+	__be16 mpdu;
 	__be32 burst_size;
 	__be32 len;
+	__be32 reserved2_seglen_offset;
+	__be32 datasn_offset;
+	__be32 buffer_offset;
+	__be32 reserved3;
+
 	/* encapsulated CPL_TX_DATA follows here */
 };
 
-/* cpl_tx_data_iso.iso_ctrl fields different from cpl_tx_pkt_lso.lso_ctrl */
-#define S_ISO_CPLHDR_LEN    18
-#define M_ISO_CPLHDR_LEN    0xF
-#define V_ISO_CPLHDR_LEN(x) ((x) << S_ISO_CPLHDR_LEN)
-#define G_ISO_CPLHDR_LEN(x) (((x) >> S_ISO_CPLHDR_LEN) & M_ISO_CPLHDR_LEN)
+/* cpl_tx_data_iso.op_to_scsi fields */
+#define S_CPL_TX_DATA_ISO_OP	24
+#define M_CPL_TX_DATA_ISO_OP	0xff
+#define V_CPL_TX_DATA_ISO_OP(x)	((x) << S_CPL_TX_DATA_ISO_OP)
+#define G_CPL_TX_DATA_ISO_OP(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_OP) & M_CPL_TX_DATA_ISO_OP)
 
-#define S_ISO_HDR_CRC    17
-#define V_ISO_HDR_CRC(x) ((x) << S_ISO_HDR_CRC)
-#define F_ISO_HDR_CRC    V_ISO_HDR_CRC(1U)
+#define S_CPL_TX_DATA_ISO_FIRST		23
+#define M_CPL_TX_DATA_ISO_FIRST		0x1
+#define V_CPL_TX_DATA_ISO_FIRST(x)	((x) << S_CPL_TX_DATA_ISO_FIRST)
+#define G_CPL_TX_DATA_ISO_FIRST(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_FIRST) & M_CPL_TX_DATA_ISO_FIRST)
+#define F_CPL_TX_DATA_ISO_FIRST	V_CPL_TX_DATA_ISO_FIRST(1U)
 
-#define S_ISO_DATA_CRC    16
-#define V_ISO_DATA_CRC(x) ((x) << S_ISO_DATA_CRC)
-#define F_ISO_DATA_CRC    V_ISO_DATA_CRC(1U)
+#define S_CPL_TX_DATA_ISO_LAST		22
+#define M_CPL_TX_DATA_ISO_LAST		0x1
+#define V_CPL_TX_DATA_ISO_LAST(x)	((x) << S_CPL_TX_DATA_ISO_LAST)
+#define G_CPL_TX_DATA_ISO_LAST(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_LAST) & M_CPL_TX_DATA_ISO_LAST)
+#define F_CPL_TX_DATA_ISO_LAST	V_CPL_TX_DATA_ISO_LAST(1U)
 
-#define S_ISO_IMD_DATA_EN    15
-#define V_ISO_IMD_DATA_EN(x) ((x) << S_ISO_IMD_DATA_EN)
-#define F_ISO_IMD_DATA_EN    V_ISO_IMD_DATA_EN(1U)
+#define S_CPL_TX_DATA_ISO_CPLHDRLEN	21
+#define M_CPL_TX_DATA_ISO_CPLHDRLEN	0x1
+#define V_CPL_TX_DATA_ISO_CPLHDRLEN(x)	((x) << S_CPL_TX_DATA_ISO_CPLHDRLEN)
+#define G_CPL_TX_DATA_ISO_CPLHDRLEN(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_CPLHDRLEN) & M_CPL_TX_DATA_ISO_CPLHDRLEN)
+#define F_CPL_TX_DATA_ISO_CPLHDRLEN	V_CPL_TX_DATA_ISO_CPLHDRLEN(1U)
 
-#define S_ISO_PDU_TYPE    13
-#define M_ISO_PDU_TYPE    0x3
-#define V_ISO_PDU_TYPE(x) ((x) << S_ISO_PDU_TYPE)
-#define G_ISO_PDU_TYPE(x) (((x) >> S_ISO_PDU_TYPE) & M_ISO_PDU_TYPE)
+#define S_CPL_TX_DATA_ISO_HDRCRC	20
+#define M_CPL_TX_DATA_ISO_HDRCRC	0x1
+#define V_CPL_TX_DATA_ISO_HDRCRC(x)	((x) << S_CPL_TX_DATA_ISO_HDRCRC)
+#define G_CPL_TX_DATA_ISO_HDRCRC(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_HDRCRC) & M_CPL_TX_DATA_ISO_HDRCRC)
+#define F_CPL_TX_DATA_ISO_HDRCRC	V_CPL_TX_DATA_ISO_HDRCRC(1U)
 
+#define S_CPL_TX_DATA_ISO_PLDCRC	19
+#define M_CPL_TX_DATA_ISO_PLDCRC	0x1
+#define V_CPL_TX_DATA_ISO_PLDCRC(x)	((x) << S_CPL_TX_DATA_ISO_PLDCRC)
+#define G_CPL_TX_DATA_ISO_PLDCRC(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_PLDCRC) & M_CPL_TX_DATA_ISO_PLDCRC)
+#define F_CPL_TX_DATA_ISO_PLDCRC	V_CPL_TX_DATA_ISO_PLDCRC(1U)
+
+#define S_CPL_TX_DATA_ISO_IMMEDIATE	18
+#define M_CPL_TX_DATA_ISO_IMMEDIATE	0x1
+#define V_CPL_TX_DATA_ISO_IMMEDIATE(x)	((x) << S_CPL_TX_DATA_ISO_IMMEDIATE)
+#define G_CPL_TX_DATA_ISO_IMMEDIATE(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_IMMEDIATE) & M_CPL_TX_DATA_ISO_IMMEDIATE)
+#define F_CPL_TX_DATA_ISO_IMMEDIATE	V_CPL_TX_DATA_ISO_IMMEDIATE(1U)
+
+#define S_CPL_TX_DATA_ISO_SCSI		16
+#define M_CPL_TX_DATA_ISO_SCSI		0x3
+#define V_CPL_TX_DATA_ISO_SCSI(x)	((x) << S_CPL_TX_DATA_ISO_SCSI)
+#define G_CPL_TX_DATA_ISO_SCSI(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_SCSI) & M_CPL_TX_DATA_ISO_SCSI)
+
+/* cpl_tx_data_iso.reserved2_seglen_offset fields */
+#define S_CPL_TX_DATA_ISO_SEGLEN_OFFSET		0
+#define M_CPL_TX_DATA_ISO_SEGLEN_OFFSET		0xffffff
+#define V_CPL_TX_DATA_ISO_SEGLEN_OFFSET(x)	\
+    ((x) << S_CPL_TX_DATA_ISO_SEGLEN_OFFSET)
+#define G_CPL_TX_DATA_ISO_SEGLEN_OFFSET(x)	\
+    (((x) >> S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) & \
+     M_CPL_TX_DATA_ISO_SEGLEN_OFFSET)
+
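[Ed. note] The reworked cpl_tx_data_iso folds the opcode and flags into one op_to_scsi word; a standalone compose sketch (the driver would still cpu_to_be32() the result, and the SCSI-type value 2 is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

#define CPL_TX_DATA_ISO			0x1Fu	/* from the opcode enum above */
#define V_CPL_TX_DATA_ISO_OP(x)		((x) << 24)
#define V_CPL_TX_DATA_ISO_FIRST(x)	((x) << 23)
#define V_CPL_TX_DATA_ISO_LAST(x)	((x) << 22)
#define V_CPL_TX_DATA_ISO_HDRCRC(x)	((x) << 20)
#define V_CPL_TX_DATA_ISO_PLDCRC(x)	((x) << 19)
#define V_CPL_TX_DATA_ISO_SCSI(x)	((x) << 16)

int
main(void)
{
	/* One-shot (first and last) ISO burst with both iSCSI CRCs on. */
	uint32_t op_to_scsi =
	    V_CPL_TX_DATA_ISO_OP(CPL_TX_DATA_ISO) |
	    V_CPL_TX_DATA_ISO_FIRST(1u) |
	    V_CPL_TX_DATA_ISO_LAST(1u) |
	    V_CPL_TX_DATA_ISO_HDRCRC(1u) |
	    V_CPL_TX_DATA_ISO_PLDCRC(1u) |
	    V_CPL_TX_DATA_ISO_SCSI(2u);	/* example value */

	printf("op_to_scsi=%#x\n", (unsigned)op_to_scsi);
	return (0);
}
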
 struct cpl_iscsi_hdr {
 	RSS_HDR
 	union opcode_tid ot;
@@ -1357,6 +1607,19 @@
 	__be32 param;
 };
 
+/* cpl_fcoe_hdr.rctl_fctl fields */
+#define S_FCOE_FCHDR_RCTL	24
+#define M_FCOE_FCHDR_RCTL	0xff
+#define V_FCOE_FCHDR_RCTL(x)	((x) << S_FCOE_FCHDR_RCTL)
+#define G_FCOE_FCHDR_RCTL(x)	\
+	(((x) >> S_FCOE_FCHDR_RCTL) & M_FCOE_FCHDR_RCTL)
+
+#define S_FCOE_FCHDR_FCTL	0
+#define M_FCOE_FCHDR_FCTL	0xffffff
+#define V_FCOE_FCHDR_FCTL(x)	((x) << S_FCOE_FCHDR_FCTL)
+#define G_FCOE_FCHDR_FCTL(x)	\
+	(((x) >> S_FCOE_FCHDR_FCTL) & M_FCOE_FCHDR_FCTL)
+
 struct cpl_fcoe_data {
 	RSS_HDR
 	union opcode_tid ot;
@@ -1484,6 +1747,19 @@
 	__u8 rsvd1[4];
 };
 
+struct cpl_rx_iscsi_cmp {
+	RSS_HDR
+	union opcode_tid ot;
+	__be16 pdu_len_ddp;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	__u8 rsvd;
+	__u8 status;
+	__be32 ulp_crc;
+	__be32 ddpvld;
+};
+
 struct cpl_rx_fcoe_dif {
 	RSS_HDR
 	union opcode_tid ot;
@@ -1628,6 +1904,9 @@
 #define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
 #define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
 
+#define M_RX_T6_ETHHDR_LEN    0xFF
+#define G_RX_T6_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_T6_ETHHDR_LEN)
+
 #define S_RX_PKTYPE    5
 #define M_RX_PKTYPE    0x7
 #define V_RX_PKTYPE(x) ((x) << S_RX_PKTYPE)
@@ -1758,6 +2037,65 @@
 #define V_RXERR_PING(x) ((x) << S_RXERR_PING)
 #define F_RXERR_PING    V_RXERR_PING(1U)
 
+/*
+ * In T6, rx_pkt.err_vec indicates either the RxError error vector (16b)
+ * or, if CRxPktEnc is enabled in TP_OUT_CONFIG, the encapsulating header
+ * length (8b), outer encapsulation type (2b), and compressed error
+ * vector (6b).
+ */
+
+
+#define S_T6_COMPR_RXERR_VEC    0
+#define M_T6_COMPR_RXERR_VEC    0x3F
+#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC)
+#define G_T6_COMPR_RXERR_VEC(x) \
+		(((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)
+
+#define S_T6_COMPR_RXERR_MAC    0
+#define V_T6_COMPR_RXERR_MAC(x) ((x) << S_T6_COMPR_RXERR_MAC)
+#define F_T6_COMPR_RXERR_MAC    V_T6_COMPR_RXERR_MAC(1U)
+
+/* Logical OR of RX_ERROR_PKT_LEN, RX_ERROR_TCP_HDR_LEN
+ * RX_ERROR_IP_HDR_LEN, RX_ERROR_ETH_HDR_LEN
+ */
+#define S_T6_COMPR_RXERR_LEN    1
+#define V_T6_COMPR_RXERR_LEN(x) ((x) << S_T6_COMPR_RXERR_LEN)
+#define F_T6_COMPR_RXERR_LEN    V_T6_COMPR_RXERR_LEN(1U)
+
+#define S_T6_COMPR_RXERR_TCP_OPT    2
+#define V_T6_COMPR_RXERR_TCP_OPT(x) ((x) << S_T6_COMPR_RXERR_TCP_OPT)
+#define F_T6_COMPR_RXERR_TCP_OPT    V_T6_COMPR_RXERR_TCP_OPT(1U)
+
+#define S_T6_COMPR_RXERR_IPV6_EXT    3
+#define V_T6_COMPR_RXERR_IPV6_EXT(x) ((x) << S_T6_COMPR_RXERR_IPV6_EXT)
+#define F_T6_COMPR_RXERR_IPV6_EXT    V_T6_COMPR_RXERR_IPV6_EXT(1U)
+
+/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */
+#define S_T6_COMPR_RXERR_SUM   4
+#define V_T6_COMPR_RXERR_SUM(x) ((x) << S_T6_COMPR_RXERR_SUM)
+#define F_T6_COMPR_RXERR_SUM    V_T6_COMPR_RXERR_SUM(1U)
+
+/* Logical OR of RX_ERROR_FPMA, RX_ERROR_PING_DROP,
+ * RX_ERROR_ATTACK, RX_ERROR_FRAG,RX_ERROR_IPVERSION
+ */
+#define S_T6_COMPR_RXERR_MISC   5
+#define V_T6_COMPR_RXERR_MISC(x) ((x) << S_T6_COMPR_RXERR_MISC)
+#define F_T6_COMPR_RXERR_MISC    V_T6_COMPR_RXERR_MISC(1U)
+
+#define S_T6_RX_TNL_TYPE    6
+#define M_T6_RX_TNL_TYPE    0x3
+#define V_T6_RX_TNL_TYPE(x) ((x) << S_T6_RX_TNL_TYPE)
+#define G_T6_RX_TNL_TYPE(x) (((x) >> S_T6_RX_TNL_TYPE) & M_T6_RX_TNL_TYPE)
+
+#define RX_PKT_TNL_TYPE_NVGRE	1
+#define RX_PKT_TNL_TYPE_VXLAN	2
+#define RX_PKT_TNL_TYPE_GENEVE	3
+
+#define S_T6_RX_TNLHDR_LEN    8
+#define M_T6_RX_TNLHDR_LEN    0xFF
+#define V_T6_RX_TNLHDR_LEN(x) ((x) << S_T6_RX_TNLHDR_LEN)
+#define G_T6_RX_TNLHDR_LEN(x) (((x) >> S_T6_RX_TNLHDR_LEN) & M_T6_RX_TNLHDR_LEN)
+
 struct cpl_trace_pkt {
 	RSS_HDR
 	__u8 opcode;
@@ -1906,14 +2244,24 @@
 #define G_L2T_W_INFO(x) (((x) >> S_L2T_W_INFO) & M_L2T_W_INFO)
 
 #define S_L2T_W_PORT    8
-#define M_L2T_W_PORT    0xF
+#define M_L2T_W_PORT    0x3
 #define V_L2T_W_PORT(x) ((x) << S_L2T_W_PORT)
 #define G_L2T_W_PORT(x) (((x) >> S_L2T_W_PORT) & M_L2T_W_PORT)
 
+#define S_L2T_W_LPBK    10
+#define V_L2T_W_LPBK(x) ((x) << S_L2T_W_LPBK)
+#define F_L2T_W_LPBK    V_L2T_W_LPBK(1U)
+
+#define S_L2T_W_ARPMISS         11
+#define V_L2T_W_ARPMISS(x)      ((x) << S_L2T_W_ARPMISS)
+#define F_L2T_W_ARPMISS         V_L2T_W_ARPMISS(1U)
+
 #define S_L2T_W_NOREPLY    15
 #define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
 #define F_L2T_W_NOREPLY    V_L2T_W_NOREPLY(1U)
 
+#define CPL_L2T_VLAN_NONE 0xfff
+
 struct cpl_l2t_write_rpl {
 	RSS_HDR
 	union opcode_tid ot;
@@ -1943,6 +2291,51 @@
 	__u8 dst_mac[6];
 };
 
+struct cpl_srq_table_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[2];
+	__u8 idx;
+	__be64 rsvd_pdid;
+	__be32 qlen_qbase;
+	__be16 cur_msn;
+	__be16 max_msn;
+};
+
+struct cpl_srq_table_rpl {
+	RSS_HDR
+	union opcode_tid ot;
+	__u8 status;
+	__u8 rsvd[2];
+	__u8 idx;
+	__be64 rsvd_pdid;
+	__be32 qlen_qbase;
+	__be16 cur_msn;
+	__be16 max_msn;
+};
+
+/* cpl_srq_table_{req,rpl}.params fields */
+#define S_SRQT_QLEN   28
+#define M_SRQT_QLEN   0xF
+#define V_SRQT_QLEN(x) ((x) << S_SRQT_QLEN)
+#define G_SRQT_QLEN(x) (((x) >> S_SRQT_QLEN) & M_SRQT_QLEN)
+
+#define S_SRQT_QBASE    0
+#define M_SRQT_QBASE   0x3FFFFFF
+#define V_SRQT_QBASE(x) ((x) << S_SRQT_QBASE)
+#define G_SRQT_QBASE(x) (((x) >> S_SRQT_QBASE) & M_SRQT_QBASE)
+
+#define S_SRQT_PDID    0
+#define M_SRQT_PDID   0xFF
+#define V_SRQT_PDID(x) ((x) << S_SRQT_PDID)
+#define G_SRQT_PDID(x) (((x) >> S_SRQT_PDID) & M_SRQT_PDID)
+
+#define S_SRQT_IDX    0
+#define M_SRQT_IDX    0xF
+#define V_SRQT_IDX(x) ((x) << S_SRQT_IDX)
+#define G_SRQT_IDX(x) (((x) >> S_SRQT_IDX) & M_SRQT_IDX)
+
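[Ed. note] qlen_qbase packs a 4-bit queue-length field above a 26-bit queue base; standalone extract sketch (constants copied from above):

#include <stdio.h>
#include <stdint.h>

#define S_SRQT_QLEN	28
#define M_SRQT_QLEN	0xF
#define G_SRQT_QLEN(x)	(((x) >> S_SRQT_QLEN) & M_SRQT_QLEN)

#define S_SRQT_QBASE	0
#define M_SRQT_QBASE	0x3FFFFFF
#define G_SRQT_QBASE(x)	(((x) >> S_SRQT_QBASE) & M_SRQT_QBASE)

int
main(void)
{
	uint32_t qlen_qbase = (7u << 28) | 0x12345u;

	printf("qlen=%u qbase=%#x\n",
	    (unsigned)G_SRQT_QLEN(qlen_qbase),
	    (unsigned)G_SRQT_QBASE(qlen_qbase));
	return (0);
}
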
 struct cpl_smt_write_req {
 	WR_HDR;
 	union opcode_tid ot;
@@ -1953,6 +2346,17 @@
 	__u8   src_mac0[6];
 };
 
+struct cpl_t6_smt_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 params;
+	__be64 tag;
+	__be16 pfvf0;
+	__u8   src_mac0[6];
+	__be32 local_ip;
+	__be32 rsvd;
+};
+
 struct cpl_smt_write_rpl {
 	RSS_HDR
 	union opcode_tid ot;
@@ -1989,6 +2393,9 @@
 #define V_SMTW_IDX(x) ((x) << S_SMTW_IDX)
 #define G_SMTW_IDX(x) (((x) >> S_SMTW_IDX) & M_SMTW_IDX)
 
+#define M_T6_SMTW_IDX    0xFF
+#define G_T6_SMTW_IDX(x) (((x) >> S_SMTW_IDX) & M_T6_SMTW_IDX)
+
 #define S_SMTW_NORPL    31
 #define V_SMTW_NORPL(x) ((x) << S_SMTW_NORPL)
 #define F_SMTW_NORPL    V_SMTW_NORPL(1U)
@@ -2219,6 +2626,11 @@
 };
 
 /* cpl_sge_egr_update.ot fields */
+#define S_AUTOEQU	22
+#define M_AUTOEQU	0x1
+#define V_AUTOEQU(x)	((x) << S_AUTOEQU)
+#define G_AUTOEQU(x)	(((x) >> S_AUTOEQU) & M_AUTOEQU)
+
 #define S_EGR_QID    0
 #define M_EGR_QID    0x1FFFF
 #define V_EGR_QID(x) ((x) << S_EGR_QID)
@@ -2231,6 +2643,9 @@
 	FW_TYPE_CQE = 2,
 	FW_TYPE_OFLD_CONNECTION_WR_RPL = 3,
 	FW_TYPE_RSSCPL = 4,
+	FW_TYPE_WRERR_RPL = 5,
+	FW_TYPE_PI_ERR = 6,
+	FW_TYPE_TLS_KEY = 7,
 };
 
 struct cpl_fw2_pld {
@@ -2306,7 +2721,8 @@
 	FW6_TYPE_CQE		= FW_TYPE_CQE,
 	FW6_TYPE_OFLD_CONNECTION_WR_RPL = FW_TYPE_OFLD_CONNECTION_WR_RPL,
 	FW6_TYPE_RSSCPL		= FW_TYPE_RSSCPL,
-
+	FW6_TYPE_WRERR_RPL	= FW_TYPE_WRERR_RPL,
+	FW6_TYPE_PI_ERR		= FW_TYPE_PI_ERR,
 	NUM_FW6_TYPES
 };
 
@@ -2329,7 +2745,9 @@
 	ULP_TX_SC_NOOP = 0x80,
 	ULP_TX_SC_IMM  = 0x81,
 	ULP_TX_SC_DSGL = 0x82,
-	ULP_TX_SC_ISGL = 0x83
+	ULP_TX_SC_ISGL = 0x83,
+	ULP_TX_SC_PICTRL = 0x84,
+	ULP_TX_SC_MEMRD = 0x86
 };
 
 #define S_ULPTX_CMD    24
@@ -2380,7 +2798,13 @@
 #define S_ULPTX_NSGE    0
 #define M_ULPTX_NSGE    0xFFFF
 #define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
+#define G_ULPTX_NSGE(x) (((x) >> S_ULPTX_NSGE) & M_ULPTX_NSGE)
 
+struct ulptx_sc_memrd {
+	__be32 cmd_to_len;
+	__be32 addr;
+};
+
 struct ulp_mem_io {
 	WR_HDR;
 	__be32 cmd;
@@ -2394,6 +2818,18 @@
 #define V_ULP_MEMIO_ORDER(x) ((x) << S_ULP_MEMIO_ORDER)
 #define F_ULP_MEMIO_ORDER    V_ULP_MEMIO_ORDER(1U)
 
+#define S_T5_ULP_MEMIO_IMM    23
+#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
+#define F_T5_ULP_MEMIO_IMM    V_T5_ULP_MEMIO_IMM(1U)
+
+#define S_T5_ULP_MEMIO_ORDER    22
+#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
+#define F_T5_ULP_MEMIO_ORDER    V_T5_ULP_MEMIO_ORDER(1U)
+
+#define S_T5_ULP_MEMIO_FID	4
+#define M_T5_ULP_MEMIO_FID	0x7ff
+#define V_T5_ULP_MEMIO_FID(x)	((x) << S_T5_ULP_MEMIO_FID)
+
 /* ulp_mem_io.lock_addr fields */
 #define S_ULP_MEMIO_ADDR    0
 #define M_ULP_MEMIO_ADDR    0x7FFFFFF
@@ -2408,6 +2844,14 @@
 #define M_ULP_MEMIO_DATA_LEN    0x1F
 #define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
 
+/* ULP_TXPKT field values */
+enum {
+	ULP_TXPKT_DEST_TP = 0,
+	ULP_TXPKT_DEST_SGE,
+	ULP_TXPKT_DEST_UP,
+	ULP_TXPKT_DEST_DEVNULL,
+};
+
 struct ulp_txpkt {
 	__be32 cmd_dest;
 	__be32 len;
@@ -2414,6 +2858,21 @@
 };
 
 /* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DATAMODIFY       23
+#define M_ULP_TXPKT_DATAMODIFY       0x1
+#define V_ULP_TXPKT_DATAMODIFY(x)    ((x) << S_ULP_TXPKT_DATAMODIFY)
+#define G_ULP_TXPKT_DATAMODIFY(x)    \
+	(((x) >> S_ULP_TXPKT_DATAMODIFY) & M_ULP_TXPKT_DATAMODIFY_)
+#define F_ULP_TXPKT_DATAMODIFY       V_ULP_TXPKT_DATAMODIFY(1U)
+
+#define S_ULP_TXPKT_CHANNELID        22
+#define M_ULP_TXPKT_CHANNELID        0x1
+#define V_ULP_TXPKT_CHANNELID(x)     ((x) << S_ULP_TXPKT_CHANNELID)
+#define G_ULP_TXPKT_CHANNELID(x)     \
+	(((x) >> S_ULP_TXPKT_CHANNELID) & M_ULP_TXPKT_CHANNELID)
+#define F_ULP_TXPKT_CHANNELID        V_ULP_TXPKT_CHANNELID(1U)
+
+/* ulp_txpkt.cmd_dest fields */
 #define S_ULP_TXPKT_DEST    16
 #define M_ULP_TXPKT_DEST    0x3
 #define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
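
[Ed. note] Standalone sketch composing cmd_dest from the new CHANNELID bit and the existing DEST field (destination value from the ULP_TXPKT_DEST_* enum added above):

#include <stdio.h>
#include <stdint.h>

#define V_ULP_TXPKT_CHANNELID(x)	((x) << 22)
#define V_ULP_TXPKT_DEST(x)		((x) << 16)
#define ULP_TXPKT_DEST_TP		0

int
main(void)
{
	/* Route a ULP_TX work unit to TP on channel 1. */
	uint32_t cmd_dest = V_ULP_TXPKT_CHANNELID(1u) |
	    V_ULP_TXPKT_DEST(ULP_TXPKT_DEST_TP);

	printf("cmd_dest=%#x\n", (unsigned)cmd_dest);
	return (0);
}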
@@ -2426,4 +2885,757 @@
 #define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO)
 #define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U)
 
+enum cpl_tx_tnl_lso_type {
+	TX_TNL_TYPE_OPAQUE,
+	TX_TNL_TYPE_NVGRE,
+	TX_TNL_TYPE_VXLAN,
+	TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+	__be32 op_to_IpIdSplitOut;
+	__be16 IpIdOffsetOut;
+	__be16 UdpLenSetOut_to_TnlHdrLen;
+	__be64 r1;
+	__be32 Flow_to_TcpHdrLen;
+	__be16 IpIdOffset;
+	__be16 IpIdSplit_to_Mss;
+	__be32 TCPSeqOffset;
+	__be32 EthLenOffset_Size;
+	/* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define S_CPL_TX_TNL_LSO_OPCODE		24
+#define M_CPL_TX_TNL_LSO_OPCODE		0xff
+#define V_CPL_TX_TNL_LSO_OPCODE(x)	((x) << S_CPL_TX_TNL_LSO_OPCODE)
+#define G_CPL_TX_TNL_LSO_OPCODE(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_OPCODE) & M_CPL_TX_TNL_LSO_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_FIRST		23
+#define M_CPL_TX_TNL_LSO_FIRST		0x1
+#define V_CPL_TX_TNL_LSO_FIRST(x)	((x) << S_CPL_TX_TNL_LSO_FIRST)
+#define G_CPL_TX_TNL_LSO_FIRST(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_FIRST) & M_CPL_TX_TNL_LSO_FIRST)
+#define F_CPL_TX_TNL_LSO_FIRST		V_CPL_TX_TNL_LSO_FIRST(1U)
+
+#define S_CPL_TX_TNL_LSO_LAST		22
+#define M_CPL_TX_TNL_LSO_LAST		0x1
+#define V_CPL_TX_TNL_LSO_LAST(x)	((x) << S_CPL_TX_TNL_LSO_LAST)
+#define G_CPL_TX_TNL_LSO_LAST(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_LAST) & M_CPL_TX_TNL_LSO_LAST)
+#define F_CPL_TX_TNL_LSO_LAST		V_CPL_TX_TNL_LSO_LAST(1U)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLENXOUT	21
+#define M_CPL_TX_TNL_LSO_ETHHDRLENXOUT	0x1
+#define V_CPL_TX_TNL_LSO_ETHHDRLENXOUT(x) \
+    ((x) << S_CPL_TX_TNL_LSO_ETHHDRLENXOUT)
+#define G_CPL_TX_TNL_LSO_ETHHDRLENXOUT(x) \
+    (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLENXOUT) & M_CPL_TX_TNL_LSO_ETHHDRLENXOUT)
+#define F_CPL_TX_TNL_LSO_ETHHDRLENXOUT	V_CPL_TX_TNL_LSO_ETHHDRLENXOUT(1U)
+
+#define S_CPL_TX_TNL_LSO_IPV6OUT	20
+#define M_CPL_TX_TNL_LSO_IPV6OUT	0x1
+#define V_CPL_TX_TNL_LSO_IPV6OUT(x)	((x) << S_CPL_TX_TNL_LSO_IPV6OUT)
+#define G_CPL_TX_TNL_LSO_IPV6OUT(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPV6OUT) & M_CPL_TX_TNL_LSO_IPV6OUT)
+#define F_CPL_TX_TNL_LSO_IPV6OUT	V_CPL_TX_TNL_LSO_IPV6OUT(1U)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLENOUT	16
+#define M_CPL_TX_TNL_LSO_ETHHDRLENOUT	0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLENOUT(x) \
+    ((x) << S_CPL_TX_TNL_LSO_ETHHDRLENOUT)
+#define G_CPL_TX_TNL_LSO_ETHHDRLENOUT(x) \
+    (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLENOUT) & M_CPL_TX_TNL_LSO_ETHHDRLENOUT)
+
+#define S_CPL_TX_TNL_LSO_IPHDRLENOUT	4
+#define M_CPL_TX_TNL_LSO_IPHDRLENOUT	0xfff
+#define V_CPL_TX_TNL_LSO_IPHDRLENOUT(x)	((x) << S_CPL_TX_TNL_LSO_IPHDRLENOUT)
+#define G_CPL_TX_TNL_LSO_IPHDRLENOUT(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPHDRLENOUT) & M_CPL_TX_TNL_LSO_IPHDRLENOUT)
+
+#define S_CPL_TX_TNL_LSO_IPHDRCHKOUT	3
+#define M_CPL_TX_TNL_LSO_IPHDRCHKOUT	0x1
+#define V_CPL_TX_TNL_LSO_IPHDRCHKOUT(x)	((x) << S_CPL_TX_TNL_LSO_IPHDRCHKOUT)
+#define G_CPL_TX_TNL_LSO_IPHDRCHKOUT(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPHDRCHKOUT) & M_CPL_TX_TNL_LSO_IPHDRCHKOUT)
+#define F_CPL_TX_TNL_LSO_IPHDRCHKOUT	V_CPL_TX_TNL_LSO_IPHDRCHKOUT(1U)
+
+#define S_CPL_TX_TNL_LSO_IPLENSETOUT	2
+#define M_CPL_TX_TNL_LSO_IPLENSETOUT	0x1
+#define V_CPL_TX_TNL_LSO_IPLENSETOUT(x)	((x) << S_CPL_TX_TNL_LSO_IPLENSETOUT)
+#define G_CPL_TX_TNL_LSO_IPLENSETOUT(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPLENSETOUT) & M_CPL_TX_TNL_LSO_IPLENSETOUT)
+#define F_CPL_TX_TNL_LSO_IPLENSETOUT	V_CPL_TX_TNL_LSO_IPLENSETOUT(1U)
+
+#define S_CPL_TX_TNL_LSO_IPIDINCOUT	1
+#define M_CPL_TX_TNL_LSO_IPIDINCOUT	0x1
+#define V_CPL_TX_TNL_LSO_IPIDINCOUT(x)	((x) << S_CPL_TX_TNL_LSO_IPIDINCOUT)
+#define G_CPL_TX_TNL_LSO_IPIDINCOUT(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPIDINCOUT) & M_CPL_TX_TNL_LSO_IPIDINCOUT)
+#define F_CPL_TX_TNL_LSO_IPIDINCOUT	V_CPL_TX_TNL_LSO_IPIDINCOUT(1U)
+
+#define S_CPL_TX_TNL_LSO_IPIDSPLITOUT	0
+#define M_CPL_TX_TNL_LSO_IPIDSPLITOUT	0x1
+#define V_CPL_TX_TNL_LSO_IPIDSPLITOUT(x) \
+    ((x) << S_CPL_TX_TNL_LSO_IPIDSPLITOUT)
+#define G_CPL_TX_TNL_LSO_IPIDSPLITOUT(x) \
+    (((x) >> S_CPL_TX_TNL_LSO_IPIDSPLITOUT) & M_CPL_TX_TNL_LSO_IPIDSPLITOUT)
+#define F_CPL_TX_TNL_LSO_IPIDSPLITOUT	V_CPL_TX_TNL_LSO_IPIDSPLITOUT(1U)
+
+#define S_CPL_TX_TNL_LSO_UDPLENSETOUT	15
+#define M_CPL_TX_TNL_LSO_UDPLENSETOUT	0x1
+#define V_CPL_TX_TNL_LSO_UDPLENSETOUT(x) \
+    ((x) << S_CPL_TX_TNL_LSO_UDPLENSETOUT)
+#define G_CPL_TX_TNL_LSO_UDPLENSETOUT(x) \
+    (((x) >> S_CPL_TX_TNL_LSO_UDPLENSETOUT) & M_CPL_TX_TNL_LSO_UDPLENSETOUT)
+#define F_CPL_TX_TNL_LSO_UDPLENSETOUT	V_CPL_TX_TNL_LSO_UDPLENSETOUT(1U)
+
+#define S_CPL_TX_TNL_LSO_UDPCHKCLROUT	14
+#define M_CPL_TX_TNL_LSO_UDPCHKCLROUT	0x1
+#define V_CPL_TX_TNL_LSO_UDPCHKCLROUT(x) \
+    ((x) << S_CPL_TX_TNL_LSO_UDPCHKCLROUT)
+#define G_CPL_TX_TNL_LSO_UDPCHKCLROUT(x) \
+    (((x) >> S_CPL_TX_TNL_LSO_UDPCHKCLROUT) & M_CPL_TX_TNL_LSO_UDPCHKCLROUT)
+#define F_CPL_TX_TNL_LSO_UDPCHKCLROUT	V_CPL_TX_TNL_LSO_UDPCHKCLROUT(1U)
+
+#define S_CPL_TX_TNL_LSO_TNLTYPE	12
+#define M_CPL_TX_TNL_LSO_TNLTYPE	0x3
+#define V_CPL_TX_TNL_LSO_TNLTYPE(x)	((x) << S_CPL_TX_TNL_LSO_TNLTYPE)
+#define G_CPL_TX_TNL_LSO_TNLTYPE(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_TNLTYPE) & M_CPL_TX_TNL_LSO_TNLTYPE)
+
+#define S_CPL_TX_TNL_LSO_TNLHDRLEN	0
+#define M_CPL_TX_TNL_LSO_TNLHDRLEN	0xfff
+#define V_CPL_TX_TNL_LSO_TNLHDRLEN(x)	((x) << S_CPL_TX_TNL_LSO_TNLHDRLEN)
+#define G_CPL_TX_TNL_LSO_TNLHDRLEN(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_TNLHDRLEN) & M_CPL_TX_TNL_LSO_TNLHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_FLOW		21
+#define M_CPL_TX_TNL_LSO_FLOW		0x1
+#define V_CPL_TX_TNL_LSO_FLOW(x)	((x) << S_CPL_TX_TNL_LSO_FLOW)
+#define G_CPL_TX_TNL_LSO_FLOW(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_FLOW) & M_CPL_TX_TNL_LSO_FLOW)
+#define F_CPL_TX_TNL_LSO_FLOW		V_CPL_TX_TNL_LSO_FLOW(1U)
+
+#define S_CPL_TX_TNL_LSO_IPV6		20
+#define M_CPL_TX_TNL_LSO_IPV6		0x1
+#define V_CPL_TX_TNL_LSO_IPV6(x)	((x) << S_CPL_TX_TNL_LSO_IPV6)
+#define G_CPL_TX_TNL_LSO_IPV6(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPV6) & M_CPL_TX_TNL_LSO_IPV6)
+#define F_CPL_TX_TNL_LSO_IPV6		V_CPL_TX_TNL_LSO_IPV6(1U)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN	16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN	0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x)	((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_IPHDRLEN	4
+#define M_CPL_TX_TNL_LSO_IPHDRLEN	0xfff
+#define V_CPL_TX_TNL_LSO_IPHDRLEN(x)	((x) << S_CPL_TX_TNL_LSO_IPHDRLEN)
+#define G_CPL_TX_TNL_LSO_IPHDRLEN(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPHDRLEN) & M_CPL_TX_TNL_LSO_IPHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_TCPHDRLEN	0
+#define M_CPL_TX_TNL_LSO_TCPHDRLEN	0xf
+#define V_CPL_TX_TNL_LSO_TCPHDRLEN(x)	((x) << S_CPL_TX_TNL_LSO_TCPHDRLEN)
+#define G_CPL_TX_TNL_LSO_TCPHDRLEN(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_TCPHDRLEN) & M_CPL_TX_TNL_LSO_TCPHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_IPIDSPLIT	15
+#define M_CPL_TX_TNL_LSO_IPIDSPLIT	0x1
+#define V_CPL_TX_TNL_LSO_IPIDSPLIT(x)	((x) << S_CPL_TX_TNL_LSO_IPIDSPLIT)
+#define G_CPL_TX_TNL_LSO_IPIDSPLIT(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_IPIDSPLIT) & M_CPL_TX_TNL_LSO_IPIDSPLIT)
+#define F_CPL_TX_TNL_LSO_IPIDSPLIT	V_CPL_TX_TNL_LSO_IPIDSPLIT(1U)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLENX	14
+#define M_CPL_TX_TNL_LSO_ETHHDRLENX	0x1
+#define V_CPL_TX_TNL_LSO_ETHHDRLENX(x)	((x) << S_CPL_TX_TNL_LSO_ETHHDRLENX)
+#define G_CPL_TX_TNL_LSO_ETHHDRLENX(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLENX) & M_CPL_TX_TNL_LSO_ETHHDRLENX)
+#define F_CPL_TX_TNL_LSO_ETHHDRLENX	V_CPL_TX_TNL_LSO_ETHHDRLENX(1U)
+
+#define S_CPL_TX_TNL_LSO_MSS		0
+#define M_CPL_TX_TNL_LSO_MSS		0x3fff
+#define V_CPL_TX_TNL_LSO_MSS(x)		((x) << S_CPL_TX_TNL_LSO_MSS)
+#define G_CPL_TX_TNL_LSO_MSS(x)		\
+    (((x) >> S_CPL_TX_TNL_LSO_MSS) & M_CPL_TX_TNL_LSO_MSS)
+
+#define S_CPL_TX_TNL_LSO_ETHLENOFFSET	28
+#define M_CPL_TX_TNL_LSO_ETHLENOFFSET	0xf
+#define V_CPL_TX_TNL_LSO_ETHLENOFFSET(x) \
+    ((x) << S_CPL_TX_TNL_LSO_ETHLENOFFSET)
+#define G_CPL_TX_TNL_LSO_ETHLENOFFSET(x) \
+    (((x) >> S_CPL_TX_TNL_LSO_ETHLENOFFSET) & M_CPL_TX_TNL_LSO_ETHLENOFFSET)
+
+#define S_CPL_TX_TNL_LSO_SIZE		0
+#define M_CPL_TX_TNL_LSO_SIZE		0xfffffff
+#define V_CPL_TX_TNL_LSO_SIZE(x)	((x) << S_CPL_TX_TNL_LSO_SIZE)
+#define G_CPL_TX_TNL_LSO_SIZE(x)	\
+    (((x) >> S_CPL_TX_TNL_LSO_SIZE) & M_CPL_TX_TNL_LSO_SIZE)
+
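[Ed. note] A standalone sketch of the two control words a VXLAN tunnel-LSO request would set (50 = 14 Ethernet + 20 IP + 8 UDP + 8 VXLAN bytes is an example header length):

#include <stdio.h>
#include <stdint.h>

#define CPL_TX_TNL_LSO			0xECu	/* from the opcode enum above */
#define V_CPL_TX_TNL_LSO_OPCODE(x)	((x) << 24)
#define V_CPL_TX_TNL_LSO_FIRST(x)	((x) << 23)
#define V_CPL_TX_TNL_LSO_LAST(x)	((x) << 22)
#define V_CPL_TX_TNL_LSO_TNLTYPE(x)	((x) << 12)
#define V_CPL_TX_TNL_LSO_TNLHDRLEN(x)	((x) << 0)
#define TX_TNL_TYPE_VXLAN		2	/* from enum cpl_tx_tnl_lso_type */

int
main(void)
{
	uint32_t op = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) |
	    V_CPL_TX_TNL_LSO_FIRST(1u) | V_CPL_TX_TNL_LSO_LAST(1u);
	uint16_t tnl = V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN) |
	    V_CPL_TX_TNL_LSO_TNLHDRLEN(50u);

	printf("op=%#x tnlhdr=%#x\n", (unsigned)op, (unsigned)tnl);
	return (0);
}
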
+struct cpl_rx_mps_pkt {
+	__be32 op_to_r1_hi;
+	__be32 r1_lo_length;
+};
+
+#define S_CPL_RX_MPS_PKT_OP     24
+#define M_CPL_RX_MPS_PKT_OP     0xff
+#define V_CPL_RX_MPS_PKT_OP(x)  ((x) << S_CPL_RX_MPS_PKT_OP)
+#define G_CPL_RX_MPS_PKT_OP(x)  \
+	(((x) >> S_CPL_RX_MPS_PKT_OP) & M_CPL_RX_MPS_PKT_OP)
+
+#define S_CPL_RX_MPS_PKT_TYPE           20
+#define M_CPL_RX_MPS_PKT_TYPE           0xf
+#define V_CPL_RX_MPS_PKT_TYPE(x)        ((x) << S_CPL_RX_MPS_PKT_TYPE)
+#define G_CPL_RX_MPS_PKT_TYPE(x)        \
+	(((x) >> S_CPL_RX_MPS_PKT_TYPE) & M_CPL_RX_MPS_PKT_TYPE)
+
+/*
+ * Values for CPL_RX_MPS_PKT_TYPE, a bit-wise orthogonal field.
+ */
+#define X_CPL_RX_MPS_PKT_TYPE_PAUSE	(1 << 0)
+#define X_CPL_RX_MPS_PKT_TYPE_PPP	(1 << 1)
+#define X_CPL_RX_MPS_PKT_TYPE_QFC	(1 << 2)
+#define X_CPL_RX_MPS_PKT_TYPE_PTP	(1 << 3)
+
+struct cpl_tx_tls_sfo {
+	__be32 op_to_seg_len;
+	__be32 pld_len;
+	__be64 rsvd;
+	__be32 seqno_numivs;
+	__be32 ivgen_hdrlen;
+	__be64 scmd1;
+};
+
+/* cpl_tx_tls_sfo macros */
+#define S_CPL_TX_TLS_SFO_OPCODE         24
+#define M_CPL_TX_TLS_SFO_OPCODE         0xff
+#define V_CPL_TX_TLS_SFO_OPCODE(x)      ((x) << S_CPL_TX_TLS_SFO_OPCODE)
+#define G_CPL_TX_TLS_SFO_OPCODE(x)      \
+	(((x) >> S_CPL_TX_TLS_SFO_OPCODE) & M_CPL_TX_TLS_SFO_OPCODE)
+
+#define S_CPL_TX_TLS_SFO_DATA_TYPE      20
+#define M_CPL_TX_TLS_SFO_DATA_TYPE      0xf
+#define V_CPL_TX_TLS_SFO_DATA_TYPE(x)   ((x) << S_CPL_TX_TLS_SFO_DATA_TYPE)
+#define G_CPL_TX_TLS_SFO_DATA_TYPE(x)   \
+	(((x) >> S_CPL_TX_TLS_SFO_DATA_TYPE) & M_CPL_TX_TLS_SFO_DATA_TYPE)
+
+#define S_CPL_TX_TLS_SFO_CPL_LEN        16
+#define M_CPL_TX_TLS_SFO_CPL_LEN        0xf
+#define V_CPL_TX_TLS_SFO_CPL_LEN(x)     ((x) << S_CPL_TX_TLS_SFO_CPL_LEN)
+#define G_CPL_TX_TLS_SFO_CPL_LEN(x)     \
+	(((x) >> S_CPL_TX_TLS_SFO_CPL_LEN) & M_CPL_TX_TLS_SFO_CPL_LEN)
+
+#define S_CPL_TX_TLS_SFO_SEG_LEN        0
+#define M_CPL_TX_TLS_SFO_SEG_LEN        0xffff
+#define V_CPL_TX_TLS_SFO_SEG_LEN(x)     ((x) << S_CPL_TX_TLS_SFO_SEG_LEN)
+#define G_CPL_TX_TLS_SFO_SEG_LEN(x)     \
+	(((x) >> S_CPL_TX_TLS_SFO_SEG_LEN) & M_CPL_TX_TLS_SFO_SEG_LEN)
+
+struct cpl_tls_data {
+	RSS_HDR
+	__be32 op_tid;
+	__be32 length_pkd;
+	__be32 seq;
+	__be32 r1;
+};
+
+#define S_CPL_TLS_DATA_OPCODE           24
+#define M_CPL_TLS_DATA_OPCODE           0xff
+#define V_CPL_TLS_DATA_OPCODE(x)        ((x) << S_CPL_TLS_DATA_OPCODE)
+#define G_CPL_TLS_DATA_OPCODE(x)        \
+	(((x) >> S_CPL_TLS_DATA_OPCODE) & M_CPL_TLS_DATA_OPCODE)
+
+#define S_CPL_TLS_DATA_TID              0
+#define M_CPL_TLS_DATA_TID              0xffffff
+#define V_CPL_TLS_DATA_TID(x)           ((x) << S_CPL_TLS_DATA_TID)
+#define G_CPL_TLS_DATA_TID(x)           \
+	(((x) >> S_CPL_TLS_DATA_TID) & M_CPL_TLS_DATA_TID)
+
+#define S_CPL_TLS_DATA_LENGTH           0
+#define M_CPL_TLS_DATA_LENGTH           0xffff
+#define V_CPL_TLS_DATA_LENGTH(x)        ((x) << S_CPL_TLS_DATA_LENGTH)
+#define G_CPL_TLS_DATA_LENGTH(x)        \
+	(((x) >> S_CPL_TLS_DATA_LENGTH) & M_CPL_TLS_DATA_LENGTH)
+
+struct cpl_rx_tls_cmp {
+	RSS_HDR
+	__be32 op_tid;
+	__be32 pdulength_length;
+	__be32 seq;
+	__be32 ddp_report;
+	__be32 r;
+	__be32 ddp_valid;
+};
+
+#define S_CPL_RX_TLS_CMP_OPCODE         24
+#define M_CPL_RX_TLS_CMP_OPCODE         0xff
+#define V_CPL_RX_TLS_CMP_OPCODE(x)      ((x) << S_CPL_RX_TLS_CMP_OPCODE)
+#define G_CPL_RX_TLS_CMP_OPCODE(x)      \
+	(((x) >> S_CPL_RX_TLS_CMP_OPCODE) & M_CPL_RX_TLS_CMP_OPCODE)
+
+#define S_CPL_RX_TLS_CMP_TID            0
+#define M_CPL_RX_TLS_CMP_TID            0xffffff
+#define V_CPL_RX_TLS_CMP_TID(x)         ((x) << S_CPL_RX_TLS_CMP_TID)
+#define G_CPL_RX_TLS_CMP_TID(x)         \
+	(((x) >> S_CPL_RX_TLS_CMP_TID) & M_CPL_RX_TLS_CMP_TID)
+
+#define S_CPL_RX_TLS_CMP_PDULENGTH      16
+#define M_CPL_RX_TLS_CMP_PDULENGTH      0xffff
+#define V_CPL_RX_TLS_CMP_PDULENGTH(x)   ((x) << S_CPL_RX_TLS_CMP_PDULENGTH)
+#define G_CPL_RX_TLS_CMP_PDULENGTH(x)   \
+	(((x) >> S_CPL_RX_TLS_CMP_PDULENGTH) & M_CPL_RX_TLS_CMP_PDULENGTH)
+
+#define S_CPL_RX_TLS_CMP_LENGTH         0
+#define M_CPL_RX_TLS_CMP_LENGTH         0xffff
+#define V_CPL_RX_TLS_CMP_LENGTH(x)      ((x) << S_CPL_RX_TLS_CMP_LENGTH)
+#define G_CPL_RX_TLS_CMP_LENGTH(x)      \
+	(((x) >> S_CPL_RX_TLS_CMP_LENGTH) & M_CPL_RX_TLS_CMP_LENGTH)
+
+#define S_SCMD_SEQ_NO_CTRL      29
+#define M_SCMD_SEQ_NO_CTRL      0x3
+#define V_SCMD_SEQ_NO_CTRL(x)   ((x) << S_SCMD_SEQ_NO_CTRL)
+#define G_SCMD_SEQ_NO_CTRL(x)   \
+	(((x) >> S_SCMD_SEQ_NO_CTRL) & M_SCMD_SEQ_NO_CTRL)
+
+/* StsFieldPrsnt- Status field at the end of the TLS PDU */
+#define S_SCMD_STATUS_PRESENT   28
+#define M_SCMD_STATUS_PRESENT   0x1
+#define V_SCMD_STATUS_PRESENT(x)    ((x) << S_SCMD_STATUS_PRESENT)
+#define G_SCMD_STATUS_PRESENT(x)    \
+	(((x) >> S_SCMD_STATUS_PRESENT) & M_SCMD_STATUS_PRESENT)
+#define F_SCMD_STATUS_PRESENT   V_SCMD_STATUS_PRESENT(1U)
+
+/* ProtoVersion - Protocol Version 0: 1.2, 1:1.1, 2:DTLS, 3:Generic,
+ * 4-15: Reserved. */
+#define S_SCMD_PROTO_VERSION    24
+#define M_SCMD_PROTO_VERSION    0xf
+#define V_SCMD_PROTO_VERSION(x) ((x) << S_SCMD_PROTO_VERSION)
+#define G_SCMD_PROTO_VERSION(x) \
+	(((x) >> S_SCMD_PROTO_VERSION) & M_SCMD_PROTO_VERSION)
+
+/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */
+#define S_SCMD_ENC_DEC_CTRL     23
+#define M_SCMD_ENC_DEC_CTRL     0x1
+#define V_SCMD_ENC_DEC_CTRL(x)  ((x) << S_SCMD_ENC_DEC_CTRL)
+#define G_SCMD_ENC_DEC_CTRL(x)  \
+	(((x) >> S_SCMD_ENC_DEC_CTRL) & M_SCMD_ENC_DEC_CTRL)
+#define F_SCMD_ENC_DEC_CTRL V_SCMD_ENC_DEC_CTRL(1U)
+
+/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */
+#define S_SCMD_CIPH_AUTH_SEQ_CTRL       22
+#define M_SCMD_CIPH_AUTH_SEQ_CTRL       0x1
+#define V_SCMD_CIPH_AUTH_SEQ_CTRL(x)    \
+	((x) << S_SCMD_CIPH_AUTH_SEQ_CTRL)
+#define G_SCMD_CIPH_AUTH_SEQ_CTRL(x)    \
+	(((x) >> S_SCMD_CIPH_AUTH_SEQ_CTRL) & M_SCMD_CIPH_AUTH_SEQ_CTRL)
+#define F_SCMD_CIPH_AUTH_SEQ_CTRL   V_SCMD_CIPH_AUTH_SEQ_CTRL(1U)
+
+/* CiphMode -  Cipher Mode. 0: NOP, 1:AES-CBC, 2:AES-GCM, 3:AES-CTR,
+ * 4:Generic-AES, 5-15: Reserved. */
+#define S_SCMD_CIPH_MODE    18
+#define M_SCMD_CIPH_MODE    0xf
+#define V_SCMD_CIPH_MODE(x) ((x) << S_SCMD_CIPH_MODE)
+#define G_SCMD_CIPH_MODE(x) \
+	(((x) >> S_SCMD_CIPH_MODE) & M_SCMD_CIPH_MODE)
+
+/* AuthMode - Auth Mode. 0: NOP, 1:SHA1, 2:SHA2-224, 3:SHA2-256
+ * 4-15: Reserved */
+#define S_SCMD_AUTH_MODE    14
+#define M_SCMD_AUTH_MODE    0xf
+#define V_SCMD_AUTH_MODE(x) ((x) << S_SCMD_AUTH_MODE)
+#define G_SCMD_AUTH_MODE(x) \
+	(((x) >> S_SCMD_AUTH_MODE) & M_SCMD_AUTH_MODE)
+
+/* HmacCtrl - HMAC Control. 0:NOP, 1:No truncation, 2:Support HMAC Truncation
+ * per RFC 4366, 3:IPSec 96 bits, 4-7:Reserved
+ */
+#define S_SCMD_HMAC_CTRL    11
+#define M_SCMD_HMAC_CTRL    0x7
+#define V_SCMD_HMAC_CTRL(x) ((x) << S_SCMD_HMAC_CTRL)
+#define G_SCMD_HMAC_CTRL(x) \
+	(((x) >> S_SCMD_HMAC_CTRL) & M_SCMD_HMAC_CTRL)
+
+/* IvSize - IV size in units of 2 bytes */
+#define S_SCMD_IV_SIZE  7
+#define M_SCMD_IV_SIZE  0xf
+#define V_SCMD_IV_SIZE(x)   ((x) << S_SCMD_IV_SIZE)
+#define G_SCMD_IV_SIZE(x)   \
+	(((x) >> S_SCMD_IV_SIZE) & M_SCMD_IV_SIZE)
+
+/* NumIVs - Number of IVs */
+#define S_SCMD_NUM_IVS  0
+#define M_SCMD_NUM_IVS  0x7f
+#define V_SCMD_NUM_IVS(x)   ((x) << S_SCMD_NUM_IVS)
+#define G_SCMD_NUM_IVS(x)   \
+	(((x) >> S_SCMD_NUM_IVS) & M_SCMD_NUM_IVS)
+
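[Ed. note] The SCMD_* fields above compose the seqno_numivs word of the TLS CPLs; a standalone sketch with illustrative (not prescriptive) field choices:

#include <stdio.h>
#include <stdint.h>

#define V_SCMD_SEQ_NO_CTRL(x)	((x) << 29)
#define V_SCMD_PROTO_VERSION(x)	((x) << 24)
#define V_SCMD_ENC_DEC_CTRL(x)	((x) << 23)
#define V_SCMD_CIPH_MODE(x)	((x) << 18)
#define V_SCMD_AUTH_MODE(x)	((x) << 14)
#define V_SCMD_HMAC_CTRL(x)	((x) << 11)
#define V_SCMD_IV_SIZE(x)	((x) << 7)
#define V_SCMD_NUM_IVS(x)	((x) << 0)

int
main(void)
{
	/*
	 * TLS 1.2 (proto 0), encrypt (0), AES-GCM (ciph mode 2), no
	 * separate auth/HMAC (0/0), 8-byte IV (units of 2 bytes -> 4),
	 * one IV per request.
	 */
	uint32_t seqno_numivs =
	    V_SCMD_SEQ_NO_CTRL(0u) | V_SCMD_PROTO_VERSION(0u) |
	    V_SCMD_ENC_DEC_CTRL(0u) | V_SCMD_CIPH_MODE(2u) |
	    V_SCMD_AUTH_MODE(0u) | V_SCMD_HMAC_CTRL(0u) |
	    V_SCMD_IV_SIZE(4u) | V_SCMD_NUM_IVS(1u);

	printf("seqno_numivs=%#x\n", (unsigned)seqno_numivs);
	return (0);
}
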
+/* EnbDbgId - If this is enabled, the upper 20 bits (63:44) of SeqNumber
+ * (below) are used as Cid (connection id for debug status); these
+ * bits are padded to zero when forming the 64-bit
+ * sequence number for TLS
+ */
+#define S_SCMD_ENB_DBGID  31
+#define M_SCMD_ENB_DBGID  0x1
+#define V_SCMD_ENB_DBGID(x)   ((x) << S_SCMD_ENB_DBGID)
+#define G_SCMD_ENB_DBGID(x)   \
+	(((x) >> S_SCMD_ENB_DBGID) & M_SCMD_ENB_DBGID)
+
+/* IV generation in SW. */
+#define S_SCMD_IV_GEN_CTRL      30
+#define M_SCMD_IV_GEN_CTRL      0x1
+#define V_SCMD_IV_GEN_CTRL(x)   ((x) << S_SCMD_IV_GEN_CTRL)
+#define G_SCMD_IV_GEN_CTRL(x)   \
+	(((x) >> S_SCMD_IV_GEN_CTRL) & M_SCMD_IV_GEN_CTRL)
+#define F_SCMD_IV_GEN_CTRL  V_SCMD_IV_GEN_CTRL(1U)
+
+/* More frags */
+#define S_SCMD_MORE_FRAGS   20
+#define M_SCMD_MORE_FRAGS   0x1
+#define V_SCMD_MORE_FRAGS(x)    ((x) << S_SCMD_MORE_FRAGS)
+#define G_SCMD_MORE_FRAGS(x)    (((x) >> S_SCMD_MORE_FRAGS) & M_SCMD_MORE_FRAGS)
+
+/* last frag */
+#define S_SCMD_LAST_FRAG    19
+#define M_SCMD_LAST_FRAG    0x1
+#define V_SCMD_LAST_FRAG(x) ((x) << S_SCMD_LAST_FRAG)
+#define G_SCMD_LAST_FRAG(x) (((x) >> S_SCMD_LAST_FRAG) & M_SCMD_LAST_FRAG)
+
+/* TlsCompPdu */
+#define S_SCMD_TLS_COMPPDU    18
+#define M_SCMD_TLS_COMPPDU    0x1
+#define V_SCMD_TLS_COMPPDU(x) ((x) << S_SCMD_TLS_COMPPDU)
+#define G_SCMD_TLS_COMPPDU(x) (((x) >> S_SCMD_TLS_COMPPDU) & M_SCMD_TLS_COMPPDU)
+
+/* KeyCntxtInline - Key context inline after the scmd  OR PayloadOnly*/
+#define S_SCMD_KEY_CTX_INLINE   17
+#define M_SCMD_KEY_CTX_INLINE   0x1
+#define V_SCMD_KEY_CTX_INLINE(x)    ((x) << S_SCMD_KEY_CTX_INLINE)
+#define G_SCMD_KEY_CTX_INLINE(x)    \
+	(((x) >> S_SCMD_KEY_CTX_INLINE) & M_SCMD_KEY_CTX_INLINE)
+#define F_SCMD_KEY_CTX_INLINE   V_SCMD_KEY_CTX_INLINE(1U)
+
+/* TLSFragEnable - 0: Host-created TLS PDUs, 1: TLS Fragmentation in ASIC */
+#define S_SCMD_TLS_FRAG_ENABLE  16
+#define M_SCMD_TLS_FRAG_ENABLE  0x1
+#define V_SCMD_TLS_FRAG_ENABLE(x)   ((x) << S_SCMD_TLS_FRAG_ENABLE)
+#define G_SCMD_TLS_FRAG_ENABLE(x)   \
+	(((x) >> S_SCMD_TLS_FRAG_ENABLE) & M_SCMD_TLS_FRAG_ENABLE)
+#define F_SCMD_TLS_FRAG_ENABLE  V_SCMD_TLS_FRAG_ENABLE(1U)
+
+/* MacOnly - Only send the MAC and discard the PDU. This is valid for
+ * hash-only modes; in this case TLS_TX will drop the PDU and only
+ * send back the MAC bytes. */
+#define S_SCMD_MAC_ONLY 15
+#define M_SCMD_MAC_ONLY 0x1
+#define V_SCMD_MAC_ONLY(x)  ((x) << S_SCMD_MAC_ONLY)
+#define G_SCMD_MAC_ONLY(x)  \
+	(((x) >> S_SCMD_MAC_ONLY) & M_SCMD_MAC_ONLY)
+#define F_SCMD_MAC_ONLY V_SCMD_MAC_ONLY(1U)
+
+/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols
+ * which have complex AAD and IV formations, e.g. AES-CCM
+ */
+#define S_SCMD_AADIVDROP 14
+#define M_SCMD_AADIVDROP 0x1
+#define V_SCMD_AADIVDROP(x)  ((x) << S_SCMD_AADIVDROP)
+#define G_SCMD_AADIVDROP(x)  \
+	(((x) >> S_SCMD_AADIVDROP) & M_SCMD_AADIVDROP)
+#define F_SCMD_AADIVDROP V_SCMD_AADIVDROP(1U)
+
+/* HdrLength - Length of all headers excluding TLS header
+ * present before start of crypto PDU/payload. */
+#define S_SCMD_HDR_LEN  0
+#define M_SCMD_HDR_LEN  0x3fff
+#define V_SCMD_HDR_LEN(x)   ((x) << S_SCMD_HDR_LEN)
+#define G_SCMD_HDR_LEN(x)   \
+	(((x) >> S_SCMD_HDR_LEN) & M_SCMD_HDR_LEN)
+
+struct cpl_tx_sec_pdu {
+	__be32 op_ivinsrtofst;
+	__be32 pldlen;
+	__be32 aadstart_cipherstop_hi;
+	__be32 cipherstop_lo_authinsert;
+	__be32 seqno_numivs;
+	__be32 ivgen_hdrlen;
+	__be64 scmd1;
+};
+
+#define S_CPL_TX_SEC_PDU_OPCODE     24
+#define M_CPL_TX_SEC_PDU_OPCODE     0xff
+#define V_CPL_TX_SEC_PDU_OPCODE(x)  ((x) << S_CPL_TX_SEC_PDU_OPCODE)
+#define G_CPL_TX_SEC_PDU_OPCODE(x)  \
+	(((x) >> S_CPL_TX_SEC_PDU_OPCODE) & M_CPL_TX_SEC_PDU_OPCODE)
+
+/* RX Channel Id */
+#define S_CPL_TX_SEC_PDU_RXCHID  22
+#define M_CPL_TX_SEC_PDU_RXCHID  0x1
+#define V_CPL_TX_SEC_PDU_RXCHID(x)   ((x) << S_CPL_TX_SEC_PDU_RXCHID)
+#define G_CPL_TX_SEC_PDU_RXCHID(x)   \
+(((x) >> S_CPL_TX_SEC_PDU_RXCHID) & M_CPL_TX_SEC_PDU_RXCHID)
+#define F_CPL_TX_SEC_PDU_RXCHID  V_CPL_TX_SEC_PDU_RXCHID(1U)
+
+/* Ack Follows */
+#define S_CPL_TX_SEC_PDU_ACKFOLLOWS  21
+#define M_CPL_TX_SEC_PDU_ACKFOLLOWS  0x1
+#define V_CPL_TX_SEC_PDU_ACKFOLLOWS(x)   ((x) << S_CPL_TX_SEC_PDU_ACKFOLLOWS)
+#define G_CPL_TX_SEC_PDU_ACKFOLLOWS(x)   \
+(((x) >> S_CPL_TX_SEC_PDU_ACKFOLLOWS) & M_CPL_TX_SEC_PDU_ACKFOLLOWS)
+#define F_CPL_TX_SEC_PDU_ACKFOLLOWS  V_CPL_TX_SEC_PDU_ACKFOLLOWS(1U)
+
+/* Loopback bit in cpl_tx_sec_pdu */
+#define S_CPL_TX_SEC_PDU_ULPTXLPBK  20
+#define M_CPL_TX_SEC_PDU_ULPTXLPBK  0x1
+#define V_CPL_TX_SEC_PDU_ULPTXLPBK(x)   ((x) << S_CPL_TX_SEC_PDU_ULPTXLPBK)
+#define G_CPL_TX_SEC_PDU_ULPTXLPBK(x)   \
+(((x) >> S_CPL_TX_SEC_PDU_ULPTXLPBK) & M_CPL_TX_SEC_PDU_ULPTXLPBK)
+#define F_CPL_TX_SEC_PDU_ULPTXLPBK  V_CPL_TX_SEC_PDU_ULPTXLPBK(1U)
+
+/* Length of cpl header encapsulated */
+#define S_CPL_TX_SEC_PDU_CPLLEN     16
+#define M_CPL_TX_SEC_PDU_CPLLEN     0xf
+#define V_CPL_TX_SEC_PDU_CPLLEN(x)  ((x) << S_CPL_TX_SEC_PDU_CPLLEN)
+#define G_CPL_TX_SEC_PDU_CPLLEN(x)  \
+	(((x) >> S_CPL_TX_SEC_PDU_CPLLEN) & M_CPL_TX_SEC_PDU_CPLLEN)
+
+/* PlaceHolder */
+#define S_CPL_TX_SEC_PDU_PLACEHOLDER    10
+#define M_CPL_TX_SEC_PDU_PLACEHOLDER    0x1
+#define V_CPL_TX_SEC_PDU_PLACEHOLDER(x) ((x) << S_CPL_TX_SEC_PDU_PLACEHOLDER)
+#define G_CPL_TX_SEC_PDU_PLACEHOLDER(x) \
+	(((x) >> S_CPL_TX_SEC_PDU_PLACEHOLDER) & \
+	 M_CPL_TX_SEC_PDU_PLACEHOLDER)
+
+/* IvInsrtOffset: Insertion location for IV */
+#define S_CPL_TX_SEC_PDU_IVINSRTOFST    0
+#define M_CPL_TX_SEC_PDU_IVINSRTOFST    0x3ff
+#define V_CPL_TX_SEC_PDU_IVINSRTOFST(x) ((x) << S_CPL_TX_SEC_PDU_IVINSRTOFST)
+#define G_CPL_TX_SEC_PDU_IVINSRTOFST(x) \
+	(((x) >> S_CPL_TX_SEC_PDU_IVINSRTOFST) & \
+	 M_CPL_TX_SEC_PDU_IVINSRTOFST)
+
+/* AadStartOffset: Offset in bytes for AAD start from
+ * the first byte following
+ * the pkt headers (0-255
+ *  bytes) */
+#define S_CPL_TX_SEC_PDU_AADSTART   24
+#define M_CPL_TX_SEC_PDU_AADSTART   0xff
+#define V_CPL_TX_SEC_PDU_AADSTART(x)    ((x) << S_CPL_TX_SEC_PDU_AADSTART)
+#define G_CPL_TX_SEC_PDU_AADSTART(x)    \
+	(((x) >> S_CPL_TX_SEC_PDU_AADSTART) & \
+	 M_CPL_TX_SEC_PDU_AADSTART)
+
+/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following
+ * the pkt headers (0-511 bytes) */
+#define S_CPL_TX_SEC_PDU_AADSTOP    15
+#define M_CPL_TX_SEC_PDU_AADSTOP    0x1ff
+#define V_CPL_TX_SEC_PDU_AADSTOP(x) ((x) << S_CPL_TX_SEC_PDU_AADSTOP)
+#define G_CPL_TX_SEC_PDU_AADSTOP(x) \
+	(((x) >> S_CPL_TX_SEC_PDU_AADSTOP) & M_CPL_TX_SEC_PDU_AADSTOP)
+
+/* CipherStartOffset: offset in bytes for encryption/decryption start from the
+ * first byte following the pkt headers (0-1023
+ *  bytes) */
+#define S_CPL_TX_SEC_PDU_CIPHERSTART    5
+#define M_CPL_TX_SEC_PDU_CIPHERSTART    0x3ff
+#define V_CPL_TX_SEC_PDU_CIPHERSTART(x) ((x) << S_CPL_TX_SEC_PDU_CIPHERSTART)
+#define G_CPL_TX_SEC_PDU_CIPHERSTART(x) \
+	(((x) >> S_CPL_TX_SEC_PDU_CIPHERSTART) & \
+	 M_CPL_TX_SEC_PDU_CIPHERSTART)
+
+/* CipherStopOffset: offset in bytes for encryption/decryption end
+ * from end of the payload of this command (0-511 bytes) */
+#define S_CPL_TX_SEC_PDU_CIPHERSTOP_HI      0
+#define M_CPL_TX_SEC_PDU_CIPHERSTOP_HI      0x1f
+#define V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(x)   \
+	((x) << S_CPL_TX_SEC_PDU_CIPHERSTOP_HI)
+#define G_CPL_TX_SEC_PDU_CIPHERSTOP_HI(x)   \
+	(((x) >> S_CPL_TX_SEC_PDU_CIPHERSTOP_HI) & \
+	 M_CPL_TX_SEC_PDU_CIPHERSTOP_HI)
+
+#define S_CPL_TX_SEC_PDU_CIPHERSTOP_LO      28
+#define M_CPL_TX_SEC_PDU_CIPHERSTOP_LO      0xf
+#define V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(x)   \
+	((x) << S_CPL_TX_SEC_PDU_CIPHERSTOP_LO)
+#define G_CPL_TX_SEC_PDU_CIPHERSTOP_LO(x)   \
+	(((x) >> S_CPL_TX_SEC_PDU_CIPHERSTOP_LO) & \
+	 M_CPL_TX_SEC_PDU_CIPHERSTOP_LO)
+
+/* AuthStartOffset: offset in bytes for authentication start from
+ * the first byte following the pkt headers (0-1023 bytes) */
+#define S_CPL_TX_SEC_PDU_AUTHSTART  18
+#define M_CPL_TX_SEC_PDU_AUTHSTART  0x3ff
+#define V_CPL_TX_SEC_PDU_AUTHSTART(x)   ((x) << S_CPL_TX_SEC_PDU_AUTHSTART)
+#define G_CPL_TX_SEC_PDU_AUTHSTART(x)   \
+	(((x) >> S_CPL_TX_SEC_PDU_AUTHSTART) & \
+	 M_CPL_TX_SEC_PDU_AUTHSTART)
+
+/* AuthStopOffset: offset in bytes for authentication end from end of the
+ * payload of this command (0-511 bytes) */
+#define S_CPL_TX_SEC_PDU_AUTHSTOP   9
+#define M_CPL_TX_SEC_PDU_AUTHSTOP   0x1ff
+#define V_CPL_TX_SEC_PDU_AUTHSTOP(x)    ((x) << S_CPL_TX_SEC_PDU_AUTHSTOP)
+#define G_CPL_TX_SEC_PDU_AUTHSTOP(x)    \
+	(((x) >> S_CPL_TX_SEC_PDU_AUTHSTOP) & \
+	 M_CPL_TX_SEC_PDU_AUTHSTOP)
+
+/* AuthInsrtOffset: offset in bytes for authentication insertion
+ * from end of the payload of this command (0-511 bytes) */
+#define S_CPL_TX_SEC_PDU_AUTHINSERT 0
+#define M_CPL_TX_SEC_PDU_AUTHINSERT 0x1ff
+#define V_CPL_TX_SEC_PDU_AUTHINSERT(x)  ((x) << S_CPL_TX_SEC_PDU_AUTHINSERT)
+#define G_CPL_TX_SEC_PDU_AUTHINSERT(x)  \
+	(((x) >> S_CPL_TX_SEC_PDU_AUTHINSERT) & \
+	 M_CPL_TX_SEC_PDU_AUTHINSERT)
+
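The S_/M_/V_/G_ macros above follow the driver-wide convention: S_* is the
field's bit offset, M_* its mask, V_*(x) shifts a value into position, and
G_*(x) extracts it; single-bit fields additionally get an F_* constant.  A
minimal sketch of packing and unpacking (the opcode value and variable names
are illustrative, not taken from this patch):

	uint32_t ctrl = 0;

	/* Build the control word field by field. */
	ctrl |= V_CPL_TX_SEC_PDU_OPCODE(0x2d);	/* hypothetical opcode */
	ctrl |= F_CPL_TX_SEC_PDU_RXCHID;	/* one-bit field, use F_* */
	ctrl |= V_CPL_TX_SEC_PDU_CPLLEN(2);
	ctrl |= V_CPL_TX_SEC_PDU_IVINSRTOFST(14);

	/* Read a field back out. */
	uint32_t iv_off = G_CPL_TX_SEC_PDU_IVINSRTOFST(ctrl);	/* == 14 */
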
+struct cpl_rx_phys_dsgl {
+	__be32 op_to_tid;
+	__be32 pcirlxorder_to_noofsgentr;
+	struct rss_header rss_hdr_int;
+};
+
+#define S_CPL_RX_PHYS_DSGL_OPCODE       24
+#define M_CPL_RX_PHYS_DSGL_OPCODE       0xff
+#define V_CPL_RX_PHYS_DSGL_OPCODE(x)    ((x) << S_CPL_RX_PHYS_DSGL_OPCODE)
+#define G_CPL_RX_PHYS_DSGL_OPCODE(x)    \
+	    (((x) >> S_CPL_RX_PHYS_DSGL_OPCODE) & M_CPL_RX_PHYS_DSGL_OPCODE)
+
+#define S_CPL_RX_PHYS_DSGL_ISRDMA       23
+#define M_CPL_RX_PHYS_DSGL_ISRDMA       0x1
+#define V_CPL_RX_PHYS_DSGL_ISRDMA(x)    ((x) << S_CPL_RX_PHYS_DSGL_ISRDMA)
+#define G_CPL_RX_PHYS_DSGL_ISRDMA(x)    \
+	    (((x) >> S_CPL_RX_PHYS_DSGL_ISRDMA) & M_CPL_RX_PHYS_DSGL_ISRDMA)
+#define F_CPL_RX_PHYS_DSGL_ISRDMA       V_CPL_RX_PHYS_DSGL_ISRDMA(1U)
+
+#define S_CPL_RX_PHYS_DSGL_RSVD1        20
+#define M_CPL_RX_PHYS_DSGL_RSVD1        0x7
+#define V_CPL_RX_PHYS_DSGL_RSVD1(x)     ((x) << S_CPL_RX_PHYS_DSGL_RSVD1)
+#define G_CPL_RX_PHYS_DSGL_RSVD1(x)     \
+	    (((x) >> S_CPL_RX_PHYS_DSGL_RSVD1) & M_CPL_RX_PHYS_DSGL_RSVD1)
+
+#define S_CPL_RX_PHYS_DSGL_PCIRLXORDER          31
+#define M_CPL_RX_PHYS_DSGL_PCIRLXORDER          0x1
+#define V_CPL_RX_PHYS_DSGL_PCIRLXORDER(x)       \
+	((x) << S_CPL_RX_PHYS_DSGL_PCIRLXORDER)
+#define G_CPL_RX_PHYS_DSGL_PCIRLXORDER(x)       \
+	(((x) >> S_CPL_RX_PHYS_DSGL_PCIRLXORDER) & \
+	 M_CPL_RX_PHYS_DSGL_PCIRLXORDER)
+#define F_CPL_RX_PHYS_DSGL_PCIRLXORDER  V_CPL_RX_PHYS_DSGL_PCIRLXORDER(1U)
+
+#define S_CPL_RX_PHYS_DSGL_PCINOSNOOP           30
+#define M_CPL_RX_PHYS_DSGL_PCINOSNOOP           0x1
+#define V_CPL_RX_PHYS_DSGL_PCINOSNOOP(x)        \
+	((x) << S_CPL_RX_PHYS_DSGL_PCINOSNOOP)
+#define G_CPL_RX_PHYS_DSGL_PCINOSNOOP(x)        \
+	(((x) >> S_CPL_RX_PHYS_DSGL_PCINOSNOOP) & \
+	 M_CPL_RX_PHYS_DSGL_PCINOSNOOP)
+#define F_CPL_RX_PHYS_DSGL_PCINOSNOOP   V_CPL_RX_PHYS_DSGL_PCINOSNOOP(1U)
+
+#define S_CPL_RX_PHYS_DSGL_PCITPHNTENB          29
+#define M_CPL_RX_PHYS_DSGL_PCITPHNTENB          0x1
+#define V_CPL_RX_PHYS_DSGL_PCITPHNTENB(x)       \
+	((x) << S_CPL_RX_PHYS_DSGL_PCITPHNTENB)
+#define G_CPL_RX_PHYS_DSGL_PCITPHNTENB(x)       \
+	(((x) >> S_CPL_RX_PHYS_DSGL_PCITPHNTENB) & \
+	 M_CPL_RX_PHYS_DSGL_PCITPHNTENB)
+#define F_CPL_RX_PHYS_DSGL_PCITPHNTENB  V_CPL_RX_PHYS_DSGL_PCITPHNTENB(1U)
+
+#define S_CPL_RX_PHYS_DSGL_PCITPHNT     27
+#define M_CPL_RX_PHYS_DSGL_PCITPHNT     0x3
+#define V_CPL_RX_PHYS_DSGL_PCITPHNT(x)  ((x) << S_CPL_RX_PHYS_DSGL_PCITPHNT)
+#define G_CPL_RX_PHYS_DSGL_PCITPHNT(x)  \
+	(((x) >> S_CPL_RX_PHYS_DSGL_PCITPHNT) & \
+	M_CPL_RX_PHYS_DSGL_PCITPHNT)
+
+#define S_CPL_RX_PHYS_DSGL_DCAID        16
+#define M_CPL_RX_PHYS_DSGL_DCAID        0x7ff
+#define V_CPL_RX_PHYS_DSGL_DCAID(x)     ((x) << S_CPL_RX_PHYS_DSGL_DCAID)
+#define G_CPL_RX_PHYS_DSGL_DCAID(x)     \
+	(((x) >> S_CPL_RX_PHYS_DSGL_DCAID) & \
+	 M_CPL_RX_PHYS_DSGL_DCAID)
+
+#define S_CPL_RX_PHYS_DSGL_NOOFSGENTR           0
+#define M_CPL_RX_PHYS_DSGL_NOOFSGENTR           0xffff
+#define V_CPL_RX_PHYS_DSGL_NOOFSGENTR(x)        \
+	((x) << S_CPL_RX_PHYS_DSGL_NOOFSGENTR)
+#define G_CPL_RX_PHYS_DSGL_NOOFSGENTR(x)        \
+	(((x) >> S_CPL_RX_PHYS_DSGL_NOOFSGENTR) & \
+	 M_CPL_RX_PHYS_DSGL_NOOFSGENTR)
+
+/* CPL_TX_TLS_ACK */
+struct cpl_tx_tls_ack {
+	__be32 op_to_Rsvd2;
+	__be32 PldLen;
+	__be64 Rsvd3;
+};
+
+#define S_CPL_TX_TLS_ACK_OPCODE         24
+#define M_CPL_TX_TLS_ACK_OPCODE         0xff
+#define V_CPL_TX_TLS_ACK_OPCODE(x)      ((x) << S_CPL_TX_TLS_ACK_OPCODE)
+#define G_CPL_TX_TLS_ACK_OPCODE(x)      \
+    (((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
+
+#define S_CPL_TX_TLS_ACK_RSVD1          23
+#define M_CPL_TX_TLS_ACK_RSVD1          0x1
+#define V_CPL_TX_TLS_ACK_RSVD1(x)       ((x) << S_CPL_TX_TLS_ACK_RSVD1)
+#define G_CPL_TX_TLS_ACK_RSVD1(x)       \
+    (((x) >> S_CPL_TX_TLS_ACK_RSVD1) & M_CPL_TX_TLS_ACK_RSVD1)
+#define F_CPL_TX_TLS_ACK_RSVD1  V_CPL_TX_TLS_ACK_RSVD1(1U)
+
+#define S_CPL_TX_TLS_ACK_RXCHID         22
+#define M_CPL_TX_TLS_ACK_RXCHID         0x1
+#define V_CPL_TX_TLS_ACK_RXCHID(x)      ((x) << S_CPL_TX_TLS_ACK_RXCHID)
+#define G_CPL_TX_TLS_ACK_RXCHID(x)      \
+    (((x) >> S_CPL_TX_TLS_ACK_RXCHID) & M_CPL_TX_TLS_ACK_RXCHID)
+#define F_CPL_TX_TLS_ACK_RXCHID V_CPL_TX_TLS_ACK_RXCHID(1U)
+
+#define S_CPL_TX_TLS_ACK_FWMSG          21
+#define M_CPL_TX_TLS_ACK_FWMSG          0x1
+#define V_CPL_TX_TLS_ACK_FWMSG(x)       ((x) << S_CPL_TX_TLS_ACK_FWMSG)
+#define G_CPL_TX_TLS_ACK_FWMSG(x)       \
+    (((x) >> S_CPL_TX_TLS_ACK_FWMSG) & M_CPL_TX_TLS_ACK_FWMSG)
+#define F_CPL_TX_TLS_ACK_FWMSG  V_CPL_TX_TLS_ACK_FWMSG(1U)
+
+#define S_CPL_TX_TLS_ACK_ULPTXLPBK      20
+#define M_CPL_TX_TLS_ACK_ULPTXLPBK      0x1
+#define V_CPL_TX_TLS_ACK_ULPTXLPBK(x)   ((x) << S_CPL_TX_TLS_ACK_ULPTXLPBK)
+#define G_CPL_TX_TLS_ACK_ULPTXLPBK(x)   \
+    (((x) >> S_CPL_TX_TLS_ACK_ULPTXLPBK) & M_CPL_TX_TLS_ACK_ULPTXLPBK)
+#define F_CPL_TX_TLS_ACK_ULPTXLPBK      V_CPL_TX_TLS_ACK_ULPTXLPBK(1U)
+
+#define S_CPL_TX_TLS_ACK_CPLLEN         16
+#define M_CPL_TX_TLS_ACK_CPLLEN         0xf
+#define V_CPL_TX_TLS_ACK_CPLLEN(x)      ((x) << S_CPL_TX_TLS_ACK_CPLLEN)
+#define G_CPL_TX_TLS_ACK_CPLLEN(x)      \
+    (((x) >> S_CPL_TX_TLS_ACK_CPLLEN) & M_CPL_TX_TLS_ACK_CPLLEN)
+
+#define S_CPL_TX_TLS_ACK_COMPLONERR     15
+#define M_CPL_TX_TLS_ACK_COMPLONERR     0x1
+#define V_CPL_TX_TLS_ACK_COMPLONERR(x)  ((x) << S_CPL_TX_TLS_ACK_COMPLONERR)
+#define G_CPL_TX_TLS_ACK_COMPLONERR(x)  \
+    (((x) >> S_CPL_TX_TLS_ACK_COMPLONERR) & M_CPL_TX_TLS_ACK_COMPLONERR)
+#define F_CPL_TX_TLS_ACK_COMPLONERR     V_CPL_TX_TLS_ACK_COMPLONERR(1U)
+
+#define S_CPL_TX_TLS_ACK_LCB    14
+#define M_CPL_TX_TLS_ACK_LCB    0x1
+#define V_CPL_TX_TLS_ACK_LCB(x) ((x) << S_CPL_TX_TLS_ACK_LCB)
+#define G_CPL_TX_TLS_ACK_LCB(x) \
+    (((x) >> S_CPL_TX_TLS_ACK_LCB) & M_CPL_TX_TLS_ACK_LCB)
+#define F_CPL_TX_TLS_ACK_LCB    V_CPL_TX_TLS_ACK_LCB(1U)
+
+#define S_CPL_TX_TLS_ACK_PHASH          13
+#define M_CPL_TX_TLS_ACK_PHASH          0x1
+#define V_CPL_TX_TLS_ACK_PHASH(x)       ((x) << S_CPL_TX_TLS_ACK_PHASH)
+#define G_CPL_TX_TLS_ACK_PHASH(x)       \
+    (((x) >> S_CPL_TX_TLS_ACK_PHASH) & M_CPL_TX_TLS_ACK_PHASH)
+#define F_CPL_TX_TLS_ACK_PHASH  V_CPL_TX_TLS_ACK_PHASH(1U)
+
+#define S_CPL_TX_TLS_ACK_RSVD2          0
+#define M_CPL_TX_TLS_ACK_RSVD2          0x1fff
+#define V_CPL_TX_TLS_ACK_RSVD2(x)       ((x) << S_CPL_TX_TLS_ACK_RSVD2)
+#define G_CPL_TX_TLS_ACK_RSVD2(x)       \
+    (((x) >> S_CPL_TX_TLS_ACK_RSVD2) & M_CPL_TX_TLS_ACK_RSVD2)
+
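Message words are declared __be32, so a consumer byte-swaps before applying
the G_/F_ accessors.  A hedged sketch, assuming a received cpl pointer and an
inline helper that are not part of this patch:

	#include <sys/endian.h>

	/* Sketch: check a received cpl_tx_tls_ack for a completion error. */
	static inline int
	tls_ack_err(const struct cpl_tx_tls_ack *cpl)
	{
		uint32_t w = be32toh(cpl->op_to_Rsvd2);

		return ((w & F_CPL_TX_TLS_ACK_COMPLONERR) != 0);
	}
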
 #endif  /* T4_MSG_H */

Modified: trunk/sys/dev/cxgbe/common/t4_regs.h
===================================================================
--- trunk/sys/dev/cxgbe/common/t4_regs.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/t4_regs.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2011 Chelsio Communications, Inc.
+ * Copyright (c) 2013, 2016 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,11 +24,15 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/common/t4_regs.h 218792 2011-02-18 08:00:26Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/common/t4_regs.h 308304 2016-11-04 18:45:06Z jhb $
  *
  */
 
 /* This file is automatically generated --- changes will be lost */
+/* Generation Date : Wed Jan 27 10:57:51 IST 2016 */
+/* Directory name: t4_reg.txt, Changeset:  */
+/* Directory name: t5_reg.txt, Changeset: 6936:7f6342b03d61 */
+/* Directory name: t6_reg.txt, Changeset: 4191:ce3ccd95c109 */
 
 #define MYPF_BASE 0x1b000
 #define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -60,6 +65,21 @@
 #define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
 #define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
 
+#define VF_SGE_BASE 0x0
+#define VF_SGE_REG(reg_addr) (VF_SGE_BASE + (reg_addr))
+
+#define VF_MPS_BASE 0x100
+#define VF_MPS_REG(reg_addr) (VF_MPS_BASE + (reg_addr))
+
+#define VF_PL_BASE 0x200
+#define VF_PL_REG(reg_addr) (VF_PL_BASE + (reg_addr))
+
+#define VF_MBDATA_BASE 0x240
+#define VF_MBDATA_REG(reg_addr) (VF_MBDATA_BASE + (reg_addr))
+
+#define VF_CIM_BASE 0x300
+#define VF_CIM_REG(reg_addr) (VF_CIM_BASE + (reg_addr))
+
 #define MYPORT_BASE 0x1c000
 #define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
 
@@ -79,24 +99,6 @@
 #define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
 #define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
 
-#define VF_SGE_BASE 0x0
-#define VF_SGE_REG(reg_addr) (VF_SGE_BASE + (reg_addr))
-
-#define VF_MPS_BASE 0x100
-#define VF_MPS_REG(reg_addr) (VF_MPS_BASE + (reg_addr))
-
-#define VF_PL_BASE 0x200
-#define VF_PL_REG(reg_addr) (VF_PL_BASE + (reg_addr))
-
-#define VF_MBDATA_BASE 0x240
-#define VF_MBDATA_REG(reg_addr) (VF_MBDATA_BASE + (reg_addr))
-
-#define VF_CIM_BASE 0x300
-#define VF_CIM_REG(reg_addr) (VF_CIM_BASE + (reg_addr))
-
-#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
-#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
-
 #define SGE_QUEUE_BASE_MAP_HIGH(idx) (A_SGE_QUEUE_BASE_MAP_HIGH + (idx) * 8)
 #define NUM_SGE_QUEUE_BASE_MAP_HIGH_INSTANCES 136
 
@@ -265,6 +267,247 @@
 #define CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
 #define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
 
+#define T5_MYPORT_BASE 0x2c000
+#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr))
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr))
+
+#define T5_PORT1_BASE 0x34000
+#define T5_PORT1_REG(reg_addr) (T5_PORT1_BASE + (reg_addr))
+
+#define T5_PORT2_BASE 0x38000
+#define T5_PORT2_REG(reg_addr) (T5_PORT2_BASE + (reg_addr))
+
+#define T5_PORT3_BASE 0x3c000
+#define T5_PORT3_REG(reg_addr) (T5_PORT3_BASE + (reg_addr))
+
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define PCIE_PF_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_PF_INT_INSTANCES 8
+
+#define PCIE_VF_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_VF_INT_INSTANCES 128
+
+#define PCIE_FID_VFID(idx) (A_PCIE_FID_VFID + (idx) * 4)
+#define NUM_PCIE_FID_VFID_INSTANCES 2048
+
+#define PCIE_COOKIE_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_COOKIE_INSTANCES 8
+
+#define PCIE_T5_DMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_DMA_INSTANCES 4
+
+#define PCIE_T5_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_CMD_INSTANCES 3
+
+#define PCIE_T5_HMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_HMA_INSTANCES 1
+
+#define PCIE_PHY_PRESET_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_PHY_PRESET_INSTANCES 11
+
+#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512
+
+#define LE_T5_DB_MASK_IPV4(idx) (A_LE_T5_DB_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_T5_DB_MASK_IPV4_INSTANCES 5
+
+#define LE_T5_DB_ACTIVE_MASK_IPV4(idx) (A_LE_T5_DB_ACTIVE_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_T5_DB_ACTIVE_MASK_IPV4_INSTANCES 5
+
+#define LE_HASH_MASK_GEN_IPV4T5(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4T5_INSTANCES 5
+
+#define LE_HASH_MASK_GEN_IPV6T5(idx) (A_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 12
+
+#define LE_HASH_MASK_CMP_IPV4T5(idx) (A_LE_HASH_MASK_CMP_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_CMP_IPV4T5_INSTANCES 5
+
+#define LE_HASH_MASK_CMP_IPV6T5(idx) (A_LE_HASH_MASK_CMP_IPV6T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_CMP_IPV6T5_INSTANCES 12
+
+#define LE_DB_SECOND_ACTIVE_MASK_IPV4(idx) (A_LE_DB_SECOND_ACTIVE_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_ACTIVE_MASK_IPV4_INSTANCES 5
+
+#define LE_DB_SECOND_GEN_HASH_MASK_IPV4(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_INSTANCES 5
+
+#define LE_DB_SECOND_CMP_HASH_MASK_IPV4(idx) (A_LE_DB_SECOND_CMP_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_CMP_HASH_MASK_IPV4_INSTANCES 5
+
+#define MC_ADR_REG(reg_addr, idx) ((reg_addr) + (idx) * 512)
+#define NUM_MC_ADR_INSTANCES 2
+
+#define MC_DDRPHY_DP18_REG(reg_addr, idx) ((reg_addr) + (idx) * 512)
+#define NUM_MC_DDRPHY_DP18_INSTANCES 5
+
+#define MC_CE_ERR_DATA_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_ERR_DATA_INSTANCES 8
+
+#define MC_CE_COR_DATA_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_COR_DATA_INSTANCES 8
+
+#define MC_UE_ERR_DATA_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_ERR_DATA_INSTANCES 8
+
+#define MC_UE_COR_DATA_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_COR_DATA_INSTANCES 8
+
+#define MC_P_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_P_BIST_STATUS_INSTANCES 18
+
+#define EDC_H_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_EDC_H_BIST_STATUS_INSTANCES 18
+
+#define EDC_H_ECC_ERR_DATA_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_EDC_H_ECC_ERR_DATA_INSTANCES 16
+
+#define SGE_DEBUG1_DBP_THREAD(idx) (A_SGE_DEBUG1_DBP_THREAD + (idx) * 4)
+#define NUM_SGE_DEBUG1_DBP_THREAD_INSTANCES 4
+
+#define SGE_DEBUG0_DBP_THREAD(idx) (A_SGE_DEBUG0_DBP_THREAD + (idx) * 4)
+#define NUM_SGE_DEBUG0_DBP_THREAD_INSTANCES 5
+
+#define SGE_WC_EGRS_BAR2_OFF_PF(idx) (A_SGE_WC_EGRS_BAR2_OFF_PF + (idx) * 4)
+#define NUM_SGE_WC_EGRS_BAR2_OFF_PF_INSTANCES 8
+
+#define SGE_WC_EGRS_BAR2_OFF_VF(idx) (A_SGE_WC_EGRS_BAR2_OFF_VF + (idx) * 4)
+#define NUM_SGE_WC_EGRS_BAR2_OFF_VF_INSTANCES 8
+
+#define PCIE_T6_DMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T6_DMA_INSTANCES 2
+
+#define PCIE_T6_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T6_CMD_INSTANCES 1
+
+#define PCIE_VF_256_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_VF_256_INT_INSTANCES 128
+
+#define MPS_CLS_REQUEST_TRACE_MAC_DA_L(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_DA_L + (idx) * 32)
+#define NUM_MPS_CLS_REQUEST_TRACE_MAC_DA_L_INSTANCES 8
+
+#define MPS_CLS_REQUEST_TRACE_MAC_DA_H(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_DA_H + (idx) * 32)
+#define NUM_MPS_CLS_REQUEST_TRACE_MAC_DA_H_INSTANCES 8
+
+#define MPS_CLS_REQUEST_TRACE_MAC_SA_L(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_SA_L + (idx) * 32)
+#define NUM_MPS_CLS_REQUEST_TRACE_MAC_SA_L_INSTANCES 8
+
+#define MPS_CLS_REQUEST_TRACE_MAC_SA_H(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_SA_H + (idx) * 32)
+#define NUM_MPS_CLS_REQUEST_TRACE_MAC_SA_H_INSTANCES 8
+
+#define MPS_CLS_REQUEST_TRACE_PORT_VLAN(idx) (A_MPS_CLS_REQUEST_TRACE_PORT_VLAN + (idx) * 32)
+#define NUM_MPS_CLS_REQUEST_TRACE_PORT_VLAN_INSTANCES 8
+
+#define MPS_CLS_REQUEST_TRACE_ENCAP(idx) (A_MPS_CLS_REQUEST_TRACE_ENCAP + (idx) * 32)
+#define NUM_MPS_CLS_REQUEST_TRACE_ENCAP_INSTANCES 8
+
+#define MPS_CLS_RESULT_TRACE(idx) (A_MPS_CLS_RESULT_TRACE + (idx) * 4)
+#define NUM_MPS_CLS_RESULT_TRACE_INSTANCES 8
+
+#define MPS_CLS_DIPIPV4_ID_TABLE(idx) (A_MPS_CLS_DIPIPV4_ID_TABLE + (idx) * 8)
+#define NUM_MPS_CLS_DIPIPV4_ID_TABLE_INSTANCES 4
+
+#define MPS_CLS_DIPIPV4_MASK_TABLE(idx) (A_MPS_CLS_DIPIPV4_MASK_TABLE + (idx) * 8)
+#define NUM_MPS_CLS_DIPIPV4_MASK_TABLE_INSTANCES 4
+
+#define MPS_CLS_DIPIPV6ID_0_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_0_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6ID_0_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6ID_1_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_1_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6ID_1_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6ID_2_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_2_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6ID_2_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6ID_3_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_3_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6ID_3_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6MASK_0_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_0_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6MASK_0_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6MASK_1_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_1_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6MASK_1_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6MASK_2_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_2_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6MASK_2_TABLE_INSTANCES 2
+
+#define MPS_CLS_DIPIPV6MASK_3_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_3_TABLE + (idx) * 32)
+#define NUM_MPS_CLS_DIPIPV6MASK_3_TABLE_INSTANCES 2
+
+#define MPS_RX_HASH_LKP_TABLE(idx) (A_MPS_RX_HASH_LKP_TABLE + (idx) * 4)
+#define NUM_MPS_RX_HASH_LKP_TABLE_INSTANCES 4
+
+#define LE_DB_DBG_MATCH_DATA_MASK(idx) (A_LE_DB_DBG_MATCH_DATA_MASK + (idx) * 4)
+#define NUM_LE_DB_DBG_MATCH_DATA_MASK_INSTANCES 8
+
+#define LE_DB_DBG_MATCH_DATA(idx) (A_LE_DB_DBG_MATCH_DATA + (idx) * 4)
+#define NUM_LE_DB_DBG_MATCH_DATA_INSTANCES 8
+
+#define LE_DB_DBGI_REQ_DATA_T6(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_T6_INSTANCES 11
+
+#define LE_DB_DBGI_REQ_MASK_T6(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_MASK_T6_INSTANCES 11
+
+#define LE_DB_DBGI_RSP_DATA_T6(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_RSP_DATA_T6_INSTANCES 11
+
+#define LE_DB_ACTIVE_MASK_IPV6_T6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV6_T6_INSTANCES 8
+
+#define LE_HASH_MASK_GEN_IPV4T6(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4T6_INSTANCES 8
+
+#define T6_LE_HASH_MASK_GEN_IPV6T5(idx) (A_T6_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4)
+#define NUM_T6_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 8
+
+#define LE_DB_PSV_FILTER_MASK_TUP_IPV4(idx) (A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 + (idx) * 4)
+#define NUM_LE_DB_PSV_FILTER_MASK_TUP_IPV4_INSTANCES 3
+
+#define LE_DB_PSV_FILTER_MASK_FLT_IPV4(idx) (A_LE_DB_PSV_FILTER_MASK_FLT_IPV4 + (idx) * 4)
+#define NUM_LE_DB_PSV_FILTER_MASK_FLT_IPV4_INSTANCES 2
+
+#define LE_DB_PSV_FILTER_MASK_TUP_IPV6(idx) (A_LE_DB_PSV_FILTER_MASK_TUP_IPV6 + (idx) * 4)
+#define NUM_LE_DB_PSV_FILTER_MASK_TUP_IPV6_INSTANCES 9
+
+#define LE_DB_PSV_FILTER_MASK_FLT_IPV6(idx) (A_LE_DB_PSV_FILTER_MASK_FLT_IPV6 + (idx) * 4)
+#define NUM_LE_DB_PSV_FILTER_MASK_FLT_IPV6_INSTANCES 2
+
+#define LE_DB_SECOND_GEN_HASH_MASK_IPV4_T6(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_T6_INSTANCES 8
+
+#define MC_DDRPHY_DP18_T6_REG(reg_addr, idx) ((reg_addr) + (idx) * 512)
+#define NUM_MC_DDRPHY_DP18_T6_INSTANCES 9
+
+#define MC_CE_ERR_DATA_T6_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_ERR_DATA_T6_INSTANCES 16
+
+#define MC_UE_ERR_DATA_T6_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_ERR_DATA_T6_INSTANCES 16
+
+#define CIM_CTL_MAILBOX_VF_STATUS_T6(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VF_STATUS_T6_INSTANCES 8
+
+#define CIM_CTL_MAILBOX_VFN_CTL_T6(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T6_INSTANCES 256
+
+#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
+
+#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
+#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
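All of the *_BASE/*_STRIDE/*_REG macros in this block compute addresses the
same way: base plus index times stride.  A small sketch with values from the
definitions above (the register offset 0x100 is illustrative):

	/* T5_PORT_BASE(2) = 0x30000 + 2 * 0x4000 = 0x38000, so port 2's
	 * copy of a register at offset 0x100 resolves to 0x38100. */
	uint32_t addr = T5_PORT_REG(2, 0x100);
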
+
 /* registers for module SGE */
 #define SGE_BASE_ADDR 0x1000
 
@@ -285,6 +528,20 @@
 #define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX)
 
 #define A_SGE_VF_KDOORBELL 0x0
+
+#define S_DBTYPE    13
+#define V_DBTYPE(x) ((x) << S_DBTYPE)
+#define F_DBTYPE    V_DBTYPE(1U)
+
+#define S_PIDX_T5    0
+#define M_PIDX_T5    0x1fffU
+#define V_PIDX_T5(x) ((x) << S_PIDX_T5)
+#define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
+
+#define S_SYNC_T6    14
+#define V_SYNC_T6(x) ((x) << S_SYNC_T6)
+#define F_SYNC_T6    V_SYNC_T6(1U)
+
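A doorbell write composes these fields into a single 32-bit store.  A hedged
sketch, assuming DBTYPE selects the wider T5-format PIDX encoding and using
the driver's register write helper purely for illustration ("adap" and "n"
are stand-ins):

	/* Sketch: tell hardware about n new descriptors via the VF doorbell. */
	uint32_t val = F_DBTYPE | V_PIDX_T5(n);	/* assumed T5 encoding */
	t4_write_reg(adap, VF_SGE_REG(A_SGE_VF_KDOORBELL), val);
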
 #define A_SGE_PF_GTS 0x4
 
 #define S_INGRESSQID    16
@@ -307,6 +564,16 @@
 #define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC)
 
 #define A_SGE_VF_GTS 0x4
+#define A_SGE_PF_KTIMESTAMP_LO 0x8
+#define A_SGE_VF_KTIMESTAMP_LO 0x8
+#define A_SGE_PF_KTIMESTAMP_HI 0xc
+
+#define S_TSTAMPVAL    0
+#define M_TSTAMPVAL    0xfffffffU
+#define V_TSTAMPVAL(x) ((x) << S_TSTAMPVAL)
+#define G_TSTAMPVAL(x) (((x) >> S_TSTAMPVAL) & M_TSTAMPVAL)
+
+#define A_SGE_VF_KTIMESTAMP_HI 0xc
 #define A_SGE_CONTROL 0x1008
 
 #define S_IGRALLCPLTOFL    31
@@ -663,6 +930,18 @@
 #define V_PERR_EGR_CTXT_MIFRSP(x) ((x) << S_PERR_EGR_CTXT_MIFRSP)
 #define F_PERR_EGR_CTXT_MIFRSP    V_PERR_EGR_CTXT_MIFRSP(1U)
 
+#define S_PERR_PC_CHPI_RSP2    31
+#define V_PERR_PC_CHPI_RSP2(x) ((x) << S_PERR_PC_CHPI_RSP2)
+#define F_PERR_PC_CHPI_RSP2    V_PERR_PC_CHPI_RSP2(1U)
+
+#define S_PERR_PC_RSP    23
+#define V_PERR_PC_RSP(x) ((x) << S_PERR_PC_RSP)
+#define F_PERR_PC_RSP    V_PERR_PC_RSP(1U)
+
+#define S_PERR_PC_REQ    22
+#define V_PERR_PC_REQ(x) ((x) << S_PERR_PC_REQ)
+#define F_PERR_PC_REQ    V_PERR_PC_REQ(1U)
+
 #define A_SGE_INT_ENABLE1 0x1028
 #define A_SGE_PERR_ENABLE1 0x102c
 #define A_SGE_INT_CAUSE2 0x1030
@@ -791,6 +1070,42 @@
 #define V_PERR_BASE_SIZE(x) ((x) << S_PERR_BASE_SIZE)
 #define F_PERR_BASE_SIZE    V_PERR_BASE_SIZE(1U)
 
+#define S_PERR_DBP_HINT_FL_FIFO    24
+#define V_PERR_DBP_HINT_FL_FIFO(x) ((x) << S_PERR_DBP_HINT_FL_FIFO)
+#define F_PERR_DBP_HINT_FL_FIFO    V_PERR_DBP_HINT_FL_FIFO(1U)
+
+#define S_PERR_EGR_DBP_TX_COAL    23
+#define V_PERR_EGR_DBP_TX_COAL(x) ((x) << S_PERR_EGR_DBP_TX_COAL)
+#define F_PERR_EGR_DBP_TX_COAL    V_PERR_EGR_DBP_TX_COAL(1U)
+
+#define S_PERR_DBP_FL_FIFO    22
+#define V_PERR_DBP_FL_FIFO(x) ((x) << S_PERR_DBP_FL_FIFO)
+#define F_PERR_DBP_FL_FIFO    V_PERR_DBP_FL_FIFO(1U)
+
+#define S_PERR_PC_DBP2    15
+#define V_PERR_PC_DBP2(x) ((x) << S_PERR_PC_DBP2)
+#define F_PERR_PC_DBP2    V_PERR_PC_DBP2(1U)
+
+#define S_DEQ_LL_PERR    21
+#define V_DEQ_LL_PERR(x) ((x) << S_DEQ_LL_PERR)
+#define F_DEQ_LL_PERR    V_DEQ_LL_PERR(1U)
+
+#define S_ENQ_PERR    20
+#define V_ENQ_PERR(x) ((x) << S_ENQ_PERR)
+#define F_ENQ_PERR    V_ENQ_PERR(1U)
+
+#define S_DEQ_OUT_PERR    19
+#define V_DEQ_OUT_PERR(x) ((x) << S_DEQ_OUT_PERR)
+#define F_DEQ_OUT_PERR    V_DEQ_OUT_PERR(1U)
+
+#define S_BUF_PERR    18
+#define V_BUF_PERR(x) ((x) << S_BUF_PERR)
+#define F_BUF_PERR    V_BUF_PERR(1U)
+
+#define S_PERR_DB_FIFO    3
+#define V_PERR_DB_FIFO(x) ((x) << S_PERR_DB_FIFO)
+#define F_PERR_DB_FIFO    V_PERR_DB_FIFO(1U)
+
 #define A_SGE_INT_ENABLE2 0x1034
 #define A_SGE_PERR_ENABLE2 0x1038
 #define A_SGE_INT_CAUSE3 0x103c
@@ -923,6 +1238,14 @@
 #define V_ERR_INV_CTXT0(x) ((x) << S_ERR_INV_CTXT0)
 #define F_ERR_INV_CTXT0    V_ERR_INV_CTXT0(1U)
 
+#define S_DBP_TBUF_FULL    8
+#define V_DBP_TBUF_FULL(x) ((x) << S_DBP_TBUF_FULL)
+#define F_DBP_TBUF_FULL    V_DBP_TBUF_FULL(1U)
+
+#define S_FATAL_WRE_LEN    7
+#define V_FATAL_WRE_LEN(x) ((x) << S_FATAL_WRE_LEN)
+#define F_FATAL_WRE_LEN    V_FATAL_WRE_LEN(1U)
+
 #define A_SGE_INT_ENABLE3 0x1040
 #define A_SGE_FL_BUFFER_SIZE0 0x1044
 
@@ -931,21 +1254,116 @@
 #define V_SIZE(x) ((x) << S_SIZE)
 #define G_SIZE(x) (((x) >> S_SIZE) & M_SIZE)
 
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE1 0x1048
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE2 0x104c
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE3 0x1050
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE4 0x1054
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE5 0x1058
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE6 0x105c
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE7 0x1060
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE8 0x1064
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE9 0x1068
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE10 0x106c
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE11 0x1070
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE12 0x1074
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE13 0x1078
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE14 0x107c
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
 #define A_SGE_FL_BUFFER_SIZE15 0x1080
+
+#define S_T6_SIZE    4
+#define M_T6_SIZE    0xfffffU
+#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
+
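On T6 the free-list buffer size field sits at bit 4, hence the chip-specific
accessor that repeats after every A_SGE_FL_BUFFER_SIZE* register.  A sketch of
a chip-aware read ("is_t6" and the read helper are illustrative):

	uint32_t v = t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0);
	uint32_t size = is_t6 ? G_T6_SIZE(v) : G_SIZE(v);
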
 #define A_SGE_DBQ_CTXT_BADDR 0x1084
 
 #define S_BASEADDR    3
@@ -995,6 +1413,20 @@
 #define V_NOEDRAM(x) ((x) << S_NOEDRAM)
 #define F_NOEDRAM    V_NOEDRAM(1U)
 
+#define S_CREDITCNTPACKING    2
+#define M_CREDITCNTPACKING    0x3U
+#define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING)
+#define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING)
+
+#define S_NULLPTR    20
+#define M_NULLPTR    0xfU
+#define V_NULLPTR(x) ((x) << S_NULLPTR)
+#define G_NULLPTR(x) (((x) >> S_NULLPTR) & M_NULLPTR)
+
+#define S_NULLPTREN    19
+#define V_NULLPTREN(x) ((x) << S_NULLPTREN)
+#define F_NULLPTREN    V_NULLPTREN(1U)
+
 #define A_SGE_CONM_CTRL 0x1094
 
 #define S_EGRTHRESHOLD    8
@@ -1015,6 +1447,21 @@
 #define V_TP_ENABLE(x) ((x) << S_TP_ENABLE)
 #define F_TP_ENABLE    V_TP_ENABLE(1U)
 
+#define S_EGRTHRESHOLDPACKING    14
+#define M_EGRTHRESHOLDPACKING    0x3fU
+#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING)
+#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & M_EGRTHRESHOLDPACKING)
+
+#define S_T6_EGRTHRESHOLDPACKING    16
+#define M_T6_EGRTHRESHOLDPACKING    0xffU
+#define V_T6_EGRTHRESHOLDPACKING(x) ((x) << S_T6_EGRTHRESHOLDPACKING)
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & M_T6_EGRTHRESHOLDPACKING)
+
+#define S_T6_EGRTHRESHOLD    8
+#define M_T6_EGRTHRESHOLD    0xffU
+#define V_T6_EGRTHRESHOLD(x) ((x) << S_T6_EGRTHRESHOLD)
+#define G_T6_EGRTHRESHOLD(x) (((x) >> S_T6_EGRTHRESHOLD) & M_T6_EGRTHRESHOLD)
+
 #define A_SGE_TIMESTAMP_LO 0x1098
 #define A_SGE_TIMESTAMP_HI 0x109c
 
@@ -1072,6 +1519,39 @@
 #define V_LP_COUNT(x) ((x) << S_LP_COUNT)
 #define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
 
+#define S_BAR2VALID    31
+#define V_BAR2VALID(x) ((x) << S_BAR2VALID)
+#define F_BAR2VALID    V_BAR2VALID(1U)
+
+#define S_BAR2FULL    30
+#define V_BAR2FULL(x) ((x) << S_BAR2FULL)
+#define F_BAR2FULL    V_BAR2FULL(1U)
+
+#define S_LP_INT_THRESH_T5    18
+#define M_LP_INT_THRESH_T5    0xfffU
+#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
+#define G_LP_INT_THRESH_T5(x) (((x) >> S_LP_INT_THRESH_T5) & M_LP_INT_THRESH_T5)
+
+#define S_LP_COUNT_T5    0
+#define M_LP_COUNT_T5    0x3ffffU
+#define V_LP_COUNT_T5(x) ((x) << S_LP_COUNT_T5)
+#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT_T5) & M_LP_COUNT_T5)
+
+#define S_VFIFO_CNT    15
+#define M_VFIFO_CNT    0x1ffffU
+#define V_VFIFO_CNT(x) ((x) << S_VFIFO_CNT)
+#define G_VFIFO_CNT(x) (((x) >> S_VFIFO_CNT) & M_VFIFO_CNT)
+
+#define S_COAL_CTL_FIFO_CNT    8
+#define M_COAL_CTL_FIFO_CNT    0x3fU
+#define V_COAL_CTL_FIFO_CNT(x) ((x) << S_COAL_CTL_FIFO_CNT)
+#define G_COAL_CTL_FIFO_CNT(x) (((x) >> S_COAL_CTL_FIFO_CNT) & M_COAL_CTL_FIFO_CNT)
+
+#define S_MERGE_FIFO_CNT    0
+#define M_MERGE_FIFO_CNT    0x3fU
+#define V_MERGE_FIFO_CNT(x) ((x) << S_MERGE_FIFO_CNT)
+#define G_MERGE_FIFO_CNT(x) (((x) >> S_MERGE_FIFO_CNT) & M_MERGE_FIFO_CNT)
+
 #define A_SGE_DOORBELL_CONTROL 0x10a8
 
 #define S_HINTDEPTHCTL    27
@@ -1141,6 +1621,32 @@
 #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB)
 #define F_DROPPED_DB    V_DROPPED_DB(1U)
 
+#define S_T6_DROP_TIMEOUT    7
+#define M_T6_DROP_TIMEOUT    0x3fU
+#define V_T6_DROP_TIMEOUT(x) ((x) << S_T6_DROP_TIMEOUT)
+#define G_T6_DROP_TIMEOUT(x) (((x) >> S_T6_DROP_TIMEOUT) & M_T6_DROP_TIMEOUT)
+
+#define S_INVONDBSYNC    6
+#define V_INVONDBSYNC(x) ((x) << S_INVONDBSYNC)
+#define F_INVONDBSYNC    V_INVONDBSYNC(1U)
+
+#define S_INVONGTSSYNC    5
+#define V_INVONGTSSYNC(x) ((x) << S_INVONGTSSYNC)
+#define F_INVONGTSSYNC    V_INVONGTSSYNC(1U)
+
+#define S_DB_DBG_EN    4
+#define V_DB_DBG_EN(x) ((x) << S_DB_DBG_EN)
+#define F_DB_DBG_EN    V_DB_DBG_EN(1U)
+
+#define S_GTS_DBG_TIMER_REG    1
+#define M_GTS_DBG_TIMER_REG    0x7U
+#define V_GTS_DBG_TIMER_REG(x) ((x) << S_GTS_DBG_TIMER_REG)
+#define G_GTS_DBG_TIMER_REG(x) (((x) >> S_GTS_DBG_TIMER_REG) & M_GTS_DBG_TIMER_REG)
+
+#define S_GTS_DBG_EN    0
+#define V_GTS_DBG_EN(x) ((x) << S_GTS_DBG_EN)
+#define F_GTS_DBG_EN    V_GTS_DBG_EN(1U)
+
 #define A_SGE_DROPPED_DOORBELL 0x10ac
 #define A_SGE_DOORBELL_THROTTLE_CONTROL 0x10b0
 
@@ -1153,6 +1659,23 @@
 #define V_THROTTLE_ENABLE(x) ((x) << S_THROTTLE_ENABLE)
 #define F_THROTTLE_ENABLE    V_THROTTLE_ENABLE(1U)
 
+#define S_BAR2THROTTLECOUNT    16
+#define M_BAR2THROTTLECOUNT    0xffU
+#define V_BAR2THROTTLECOUNT(x) ((x) << S_BAR2THROTTLECOUNT)
+#define G_BAR2THROTTLECOUNT(x) (((x) >> S_BAR2THROTTLECOUNT) & M_BAR2THROTTLECOUNT)
+
+#define S_CLRCOALESCEDISABLE    15
+#define V_CLRCOALESCEDISABLE(x) ((x) << S_CLRCOALESCEDISABLE)
+#define F_CLRCOALESCEDISABLE    V_CLRCOALESCEDISABLE(1U)
+
+#define S_OPENBAR2GATEONCE    14
+#define V_OPENBAR2GATEONCE(x) ((x) << S_OPENBAR2GATEONCE)
+#define F_OPENBAR2GATEONCE    V_OPENBAR2GATEONCE(1U)
+
+#define S_FORCEOPENBAR2GATE    13
+#define V_FORCEOPENBAR2GATE(x) ((x) << S_FORCEOPENBAR2GATE)
+#define F_FORCEOPENBAR2GATE    V_FORCEOPENBAR2GATE(1U)
+
 #define A_SGE_ITP_CONTROL 0x10b4
 
 #define S_CRITICAL_TIME    10
@@ -1169,6 +1692,11 @@
 #define V_LL_READ_WAIT_DISABLE(x) ((x) << S_LL_READ_WAIT_DISABLE)
 #define F_LL_READ_WAIT_DISABLE    V_LL_READ_WAIT_DISABLE(1U)
 
+#define S_TSCALE    28
+#define M_TSCALE    0xfU
+#define V_TSCALE(x) ((x) << S_TSCALE)
+#define G_TSCALE(x) (((x) >> S_TSCALE) & M_TSCALE)
+
 #define A_SGE_TIMER_VALUE_0_AND_1 0x10b8
 
 #define S_TIMERVALUE0    16
@@ -1235,6 +1763,39 @@
 #define V_MAXRSPCNT1(x) ((x) << S_MAXRSPCNT1)
 #define G_MAXRSPCNT1(x) (((x) >> S_MAXRSPCNT1) & M_MAXRSPCNT1)
 
+#define A_SGE_GK_CONTROL 0x10c4
+
+#define S_EN_FLM_FIFTH    29
+#define V_EN_FLM_FIFTH(x) ((x) << S_EN_FLM_FIFTH)
+#define F_EN_FLM_FIFTH    V_EN_FLM_FIFTH(1U)
+
+#define S_FL_PROG_THRESH    20
+#define M_FL_PROG_THRESH    0x1ffU
+#define V_FL_PROG_THRESH(x) ((x) << S_FL_PROG_THRESH)
+#define G_FL_PROG_THRESH(x) (((x) >> S_FL_PROG_THRESH) & M_FL_PROG_THRESH)
+
+#define S_COAL_ALL_THREAD    19
+#define V_COAL_ALL_THREAD(x) ((x) << S_COAL_ALL_THREAD)
+#define F_COAL_ALL_THREAD    V_COAL_ALL_THREAD(1U)
+
+#define S_EN_PSHB    18
+#define V_EN_PSHB(x) ((x) << S_EN_PSHB)
+#define F_EN_PSHB    V_EN_PSHB(1U)
+
+#define S_EN_DB_FIFTH    17
+#define V_EN_DB_FIFTH(x) ((x) << S_EN_DB_FIFTH)
+#define F_EN_DB_FIFTH    V_EN_DB_FIFTH(1U)
+
+#define S_DB_PROG_THRESH    8
+#define M_DB_PROG_THRESH    0x1ffU
+#define V_DB_PROG_THRESH(x) ((x) << S_DB_PROG_THRESH)
+#define G_DB_PROG_THRESH(x) (((x) >> S_DB_PROG_THRESH) & M_DB_PROG_THRESH)
+
+#define S_100NS_TIMER    0
+#define M_100NS_TIMER    0xffU
+#define V_100NS_TIMER(x) ((x) << S_100NS_TIMER)
+#define G_100NS_TIMER(x) (((x) >> S_100NS_TIMER) & M_100NS_TIMER)
+
 #define A_SGE_PD_RSP_CREDIT23 0x10c8
 
 #define S_RSPCREDITEN2    31
@@ -1265,6 +1826,23 @@
 #define V_MAXRSPCNT3(x) ((x) << S_MAXRSPCNT3)
 #define G_MAXRSPCNT3(x) (((x) >> S_MAXRSPCNT3) & M_MAXRSPCNT3)
 
+#define A_SGE_GK_CONTROL2 0x10c8
+
+#define S_DBQ_TIMER_TICK    16
+#define M_DBQ_TIMER_TICK    0xffffU
+#define V_DBQ_TIMER_TICK(x) ((x) << S_DBQ_TIMER_TICK)
+#define G_DBQ_TIMER_TICK(x) (((x) >> S_DBQ_TIMER_TICK) & M_DBQ_TIMER_TICK)
+
+#define S_FL_MERGE_CNT_THRESH    8
+#define M_FL_MERGE_CNT_THRESH    0xfU
+#define V_FL_MERGE_CNT_THRESH(x) ((x) << S_FL_MERGE_CNT_THRESH)
+#define G_FL_MERGE_CNT_THRESH(x) (((x) >> S_FL_MERGE_CNT_THRESH) & M_FL_MERGE_CNT_THRESH)
+
+#define S_MERGE_CNT_THRESH    0
+#define M_MERGE_CNT_THRESH    0x3fU
+#define V_MERGE_CNT_THRESH(x) ((x) << S_MERGE_CNT_THRESH)
+#define G_MERGE_CNT_THRESH(x) (((x) >> S_MERGE_CNT_THRESH) & M_MERGE_CNT_THRESH)
+
 #define A_SGE_DEBUG_INDEX 0x10cc
 #define A_SGE_DEBUG_DATA_HIGH 0x10d0
 #define A_SGE_DEBUG_DATA_LOW 0x10d4
@@ -1307,6 +1885,114 @@
 #define V_ERR_UNEXPECTED_TIMER(x) ((x) << S_ERR_UNEXPECTED_TIMER)
 #define F_ERR_UNEXPECTED_TIMER    V_ERR_UNEXPECTED_TIMER(1U)
 
+#define S_BAR2_EGRESS_LEN_OR_ADDR_ERR    29
+#define V_BAR2_EGRESS_LEN_OR_ADDR_ERR(x) ((x) << S_BAR2_EGRESS_LEN_OR_ADDR_ERR)
+#define F_BAR2_EGRESS_LEN_OR_ADDR_ERR    V_BAR2_EGRESS_LEN_OR_ADDR_ERR(1U)
+
+#define S_ERR_CPL_EXCEED_MAX_IQE_SIZE1    28
+#define V_ERR_CPL_EXCEED_MAX_IQE_SIZE1(x) ((x) << S_ERR_CPL_EXCEED_MAX_IQE_SIZE1)
+#define F_ERR_CPL_EXCEED_MAX_IQE_SIZE1    V_ERR_CPL_EXCEED_MAX_IQE_SIZE1(1U)
+
+#define S_ERR_CPL_EXCEED_MAX_IQE_SIZE0    27
+#define V_ERR_CPL_EXCEED_MAX_IQE_SIZE0(x) ((x) << S_ERR_CPL_EXCEED_MAX_IQE_SIZE0)
+#define F_ERR_CPL_EXCEED_MAX_IQE_SIZE0    V_ERR_CPL_EXCEED_MAX_IQE_SIZE0(1U)
+
+#define S_ERR_WR_LEN_TOO_LARGE3    26
+#define V_ERR_WR_LEN_TOO_LARGE3(x) ((x) << S_ERR_WR_LEN_TOO_LARGE3)
+#define F_ERR_WR_LEN_TOO_LARGE3    V_ERR_WR_LEN_TOO_LARGE3(1U)
+
+#define S_ERR_WR_LEN_TOO_LARGE2    25
+#define V_ERR_WR_LEN_TOO_LARGE2(x) ((x) << S_ERR_WR_LEN_TOO_LARGE2)
+#define F_ERR_WR_LEN_TOO_LARGE2    V_ERR_WR_LEN_TOO_LARGE2(1U)
+
+#define S_ERR_WR_LEN_TOO_LARGE1    24
+#define V_ERR_WR_LEN_TOO_LARGE1(x) ((x) << S_ERR_WR_LEN_TOO_LARGE1)
+#define F_ERR_WR_LEN_TOO_LARGE1    V_ERR_WR_LEN_TOO_LARGE1(1U)
+
+#define S_ERR_WR_LEN_TOO_LARGE0    23
+#define V_ERR_WR_LEN_TOO_LARGE0(x) ((x) << S_ERR_WR_LEN_TOO_LARGE0)
+#define F_ERR_WR_LEN_TOO_LARGE0    V_ERR_WR_LEN_TOO_LARGE0(1U)
+
+#define S_ERR_LARGE_MINFETCH_WITH_TXCOAL3    22
+#define V_ERR_LARGE_MINFETCH_WITH_TXCOAL3(x) ((x) << S_ERR_LARGE_MINFETCH_WITH_TXCOAL3)
+#define F_ERR_LARGE_MINFETCH_WITH_TXCOAL3    V_ERR_LARGE_MINFETCH_WITH_TXCOAL3(1U)
+
+#define S_ERR_LARGE_MINFETCH_WITH_TXCOAL2    21
+#define V_ERR_LARGE_MINFETCH_WITH_TXCOAL2(x) ((x) << S_ERR_LARGE_MINFETCH_WITH_TXCOAL2)
+#define F_ERR_LARGE_MINFETCH_WITH_TXCOAL2    V_ERR_LARGE_MINFETCH_WITH_TXCOAL2(1U)
+
+#define S_ERR_LARGE_MINFETCH_WITH_TXCOAL1    20
+#define V_ERR_LARGE_MINFETCH_WITH_TXCOAL1(x) ((x) << S_ERR_LARGE_MINFETCH_WITH_TXCOAL1)
+#define F_ERR_LARGE_MINFETCH_WITH_TXCOAL1    V_ERR_LARGE_MINFETCH_WITH_TXCOAL1(1U)
+
+#define S_ERR_LARGE_MINFETCH_WITH_TXCOAL0    19
+#define V_ERR_LARGE_MINFETCH_WITH_TXCOAL0(x) ((x) << S_ERR_LARGE_MINFETCH_WITH_TXCOAL0)
+#define F_ERR_LARGE_MINFETCH_WITH_TXCOAL0    V_ERR_LARGE_MINFETCH_WITH_TXCOAL0(1U)
+
+#define S_COAL_WITH_HP_DISABLE_ERR    18
+#define V_COAL_WITH_HP_DISABLE_ERR(x) ((x) << S_COAL_WITH_HP_DISABLE_ERR)
+#define F_COAL_WITH_HP_DISABLE_ERR    V_COAL_WITH_HP_DISABLE_ERR(1U)
+
+#define S_BAR2_EGRESS_COAL0_ERR    17
+#define V_BAR2_EGRESS_COAL0_ERR(x) ((x) << S_BAR2_EGRESS_COAL0_ERR)
+#define F_BAR2_EGRESS_COAL0_ERR    V_BAR2_EGRESS_COAL0_ERR(1U)
+
+#define S_BAR2_EGRESS_SIZE_ERR    16
+#define V_BAR2_EGRESS_SIZE_ERR(x) ((x) << S_BAR2_EGRESS_SIZE_ERR)
+#define F_BAR2_EGRESS_SIZE_ERR    V_BAR2_EGRESS_SIZE_ERR(1U)
+
+#define S_FLM_PC_RSP_ERR    15
+#define V_FLM_PC_RSP_ERR(x) ((x) << S_FLM_PC_RSP_ERR)
+#define F_FLM_PC_RSP_ERR    V_FLM_PC_RSP_ERR(1U)
+
+#define S_DBFIFO_HP_INT_LOW    14
+#define V_DBFIFO_HP_INT_LOW(x) ((x) << S_DBFIFO_HP_INT_LOW)
+#define F_DBFIFO_HP_INT_LOW    V_DBFIFO_HP_INT_LOW(1U)
+
+#define S_DBFIFO_LP_INT_LOW    13
+#define V_DBFIFO_LP_INT_LOW(x) ((x) << S_DBFIFO_LP_INT_LOW)
+#define F_DBFIFO_LP_INT_LOW    V_DBFIFO_LP_INT_LOW(1U)
+
+#define S_DBFIFO_FL_INT_LOW    12
+#define V_DBFIFO_FL_INT_LOW(x) ((x) << S_DBFIFO_FL_INT_LOW)
+#define F_DBFIFO_FL_INT_LOW    V_DBFIFO_FL_INT_LOW(1U)
+
+#define S_DBFIFO_FL_INT    11
+#define V_DBFIFO_FL_INT(x) ((x) << S_DBFIFO_FL_INT)
+#define F_DBFIFO_FL_INT    V_DBFIFO_FL_INT(1U)
+
+#define S_ERR_RX_CPL_PACKET_SIZE1    10
+#define V_ERR_RX_CPL_PACKET_SIZE1(x) ((x) << S_ERR_RX_CPL_PACKET_SIZE1)
+#define F_ERR_RX_CPL_PACKET_SIZE1    V_ERR_RX_CPL_PACKET_SIZE1(1U)
+
+#define S_ERR_RX_CPL_PACKET_SIZE0    9
+#define V_ERR_RX_CPL_PACKET_SIZE0(x) ((x) << S_ERR_RX_CPL_PACKET_SIZE0)
+#define F_ERR_RX_CPL_PACKET_SIZE0    V_ERR_RX_CPL_PACKET_SIZE0(1U)
+
+#define S_ERR_ISHIFT_UR1    31
+#define V_ERR_ISHIFT_UR1(x) ((x) << S_ERR_ISHIFT_UR1)
+#define F_ERR_ISHIFT_UR1    V_ERR_ISHIFT_UR1(1U)
+
+#define S_ERR_ISHIFT_UR0    30
+#define V_ERR_ISHIFT_UR0(x) ((x) << S_ERR_ISHIFT_UR0)
+#define F_ERR_ISHIFT_UR0    V_ERR_ISHIFT_UR0(1U)
+
+#define S_ERR_TH3_MAX_FETCH    14
+#define V_ERR_TH3_MAX_FETCH(x) ((x) << S_ERR_TH3_MAX_FETCH)
+#define F_ERR_TH3_MAX_FETCH    V_ERR_TH3_MAX_FETCH(1U)
+
+#define S_ERR_TH2_MAX_FETCH    13
+#define V_ERR_TH2_MAX_FETCH(x) ((x) << S_ERR_TH2_MAX_FETCH)
+#define F_ERR_TH2_MAX_FETCH    V_ERR_TH2_MAX_FETCH(1U)
+
+#define S_ERR_TH1_MAX_FETCH    12
+#define V_ERR_TH1_MAX_FETCH(x) ((x) << S_ERR_TH1_MAX_FETCH)
+#define F_ERR_TH1_MAX_FETCH    V_ERR_TH1_MAX_FETCH(1U)
+
+#define S_ERR_TH0_MAX_FETCH    11
+#define V_ERR_TH0_MAX_FETCH(x) ((x) << S_ERR_TH0_MAX_FETCH)
+#define F_ERR_TH0_MAX_FETCH    V_ERR_TH0_MAX_FETCH(1U)
+
 #define A_SGE_INT_ENABLE4 0x10e0
 #define A_SGE_STAT_TOTAL 0x10e4
 #define A_SGE_STAT_MATCH 0x10e8
@@ -1336,6 +2022,16 @@
 #define V_STATSOURCE(x) ((x) << S_STATSOURCE)
 #define G_STATSOURCE(x) (((x) >> S_STATSOURCE) & M_STATSOURCE)
 
+#define S_STATSOURCE_T5    9
+#define M_STATSOURCE_T5    0xfU
+#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
+
+#define S_T6_STATMODE    0
+#define M_T6_STATMODE    0xfU
+#define V_T6_STATMODE(x) ((x) << S_T6_STATMODE)
+#define G_T6_STATMODE(x) (((x) >> S_T6_STATMODE) & M_T6_STATMODE)
+
 #define A_SGE_HINT_CFG 0x10f0
 
 #define S_HINTSALLOWEDNOHDR    6
@@ -1348,6 +2044,11 @@
 #define V_HINTSALLOWEDHDR(x) ((x) << S_HINTSALLOWEDHDR)
 #define G_HINTSALLOWEDHDR(x) (((x) >> S_HINTSALLOWEDHDR) & M_HINTSALLOWEDHDR)
 
+#define S_UPCUTOFFTHRESHLP    12
+#define M_UPCUTOFFTHRESHLP    0x7ffU
+#define V_UPCUTOFFTHRESHLP(x) ((x) << S_UPCUTOFFTHRESHLP)
+#define G_UPCUTOFFTHRESHLP(x) (((x) >> S_UPCUTOFFTHRESHLP) & M_UPCUTOFFTHRESHLP)
+
 #define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
 #define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
 #define A_SGE_PD_WRR_CONFIG 0x10fc
@@ -1372,6 +2073,16 @@
 #define V_ERROR_QID(x) ((x) << S_ERROR_QID)
 #define G_ERROR_QID(x) (((x) >> S_ERROR_QID) & M_ERROR_QID)
 
+#define S_CAUSE_REGISTER    24
+#define M_CAUSE_REGISTER    0x7U
+#define V_CAUSE_REGISTER(x) ((x) << S_CAUSE_REGISTER)
+#define G_CAUSE_REGISTER(x) (((x) >> S_CAUSE_REGISTER) & M_CAUSE_REGISTER)
+
+#define S_CAUSE_BIT    19
+#define M_CAUSE_BIT    0x1fU
+#define V_CAUSE_BIT(x) ((x) << S_CAUSE_BIT)
+#define G_CAUSE_BIT(x) (((x) >> S_CAUSE_BIT) & M_CAUSE_BIT)
+
 #define A_SGE_SHARED_TAG_CHAN_CFG 0x1104
 
 #define S_MINTAG3    24
@@ -1394,6 +2105,7 @@
 #define V_MINTAG0(x) ((x) << S_MINTAG0)
 #define G_MINTAG0(x) (((x) >> S_MINTAG0) & M_MINTAG0)
 
+#define A_SGE_IDMA0_DROP_CNT 0x1104
 #define A_SGE_SHARED_TAG_POOL_CFG 0x1108
 
 #define S_TAGPOOLTOTAL    0
@@ -1401,6 +2113,506 @@
 #define V_TAGPOOLTOTAL(x) ((x) << S_TAGPOOLTOTAL)
 #define G_TAGPOOLTOTAL(x) (((x) >> S_TAGPOOLTOTAL) & M_TAGPOOLTOTAL)
 
+#define A_SGE_IDMA1_DROP_CNT 0x1108
+#define A_SGE_INT_CAUSE5 0x110c
+
+#define S_ERR_T_RXCRC    31
+#define V_ERR_T_RXCRC(x) ((x) << S_ERR_T_RXCRC)
+#define F_ERR_T_RXCRC    V_ERR_T_RXCRC(1U)
+
+#define S_PERR_MC_RSPDATA    30
+#define V_PERR_MC_RSPDATA(x) ((x) << S_PERR_MC_RSPDATA)
+#define F_PERR_MC_RSPDATA    V_PERR_MC_RSPDATA(1U)
+
+#define S_PERR_PC_RSPDATA    29
+#define V_PERR_PC_RSPDATA(x) ((x) << S_PERR_PC_RSPDATA)
+#define F_PERR_PC_RSPDATA    V_PERR_PC_RSPDATA(1U)
+
+#define S_PERR_PD_RDRSPDATA    28
+#define V_PERR_PD_RDRSPDATA(x) ((x) << S_PERR_PD_RDRSPDATA)
+#define F_PERR_PD_RDRSPDATA    V_PERR_PD_RDRSPDATA(1U)
+
+#define S_PERR_U_RXDATA    27
+#define V_PERR_U_RXDATA(x) ((x) << S_PERR_U_RXDATA)
+#define F_PERR_U_RXDATA    V_PERR_U_RXDATA(1U)
+
+#define S_PERR_UD_RXDATA    26
+#define V_PERR_UD_RXDATA(x) ((x) << S_PERR_UD_RXDATA)
+#define F_PERR_UD_RXDATA    V_PERR_UD_RXDATA(1U)
+
+#define S_PERR_UP_DATA    25
+#define V_PERR_UP_DATA(x) ((x) << S_PERR_UP_DATA)
+#define F_PERR_UP_DATA    V_PERR_UP_DATA(1U)
+
+#define S_PERR_CIM2SGE_RXDATA    24
+#define V_PERR_CIM2SGE_RXDATA(x) ((x) << S_PERR_CIM2SGE_RXDATA)
+#define F_PERR_CIM2SGE_RXDATA    V_PERR_CIM2SGE_RXDATA(1U)
+
+#define S_PERR_HINT_DELAY_FIFO1_T5    23
+#define V_PERR_HINT_DELAY_FIFO1_T5(x) ((x) << S_PERR_HINT_DELAY_FIFO1_T5)
+#define F_PERR_HINT_DELAY_FIFO1_T5    V_PERR_HINT_DELAY_FIFO1_T5(1U)
+
+#define S_PERR_HINT_DELAY_FIFO0_T5    22
+#define V_PERR_HINT_DELAY_FIFO0_T5(x) ((x) << S_PERR_HINT_DELAY_FIFO0_T5)
+#define F_PERR_HINT_DELAY_FIFO0_T5    V_PERR_HINT_DELAY_FIFO0_T5(1U)
+
+#define S_PERR_IMSG_PD_FIFO_T5    21
+#define V_PERR_IMSG_PD_FIFO_T5(x) ((x) << S_PERR_IMSG_PD_FIFO_T5)
+#define F_PERR_IMSG_PD_FIFO_T5    V_PERR_IMSG_PD_FIFO_T5(1U)
+
+#define S_PERR_ULPTX_FIFO1_T5    20
+#define V_PERR_ULPTX_FIFO1_T5(x) ((x) << S_PERR_ULPTX_FIFO1_T5)
+#define F_PERR_ULPTX_FIFO1_T5    V_PERR_ULPTX_FIFO1_T5(1U)
+
+#define S_PERR_ULPTX_FIFO0_T5    19
+#define V_PERR_ULPTX_FIFO0_T5(x) ((x) << S_PERR_ULPTX_FIFO0_T5)
+#define F_PERR_ULPTX_FIFO0_T5    V_PERR_ULPTX_FIFO0_T5(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO1_T5    18
+#define V_PERR_IDMA2IMSG_FIFO1_T5(x) ((x) << S_PERR_IDMA2IMSG_FIFO1_T5)
+#define F_PERR_IDMA2IMSG_FIFO1_T5    V_PERR_IDMA2IMSG_FIFO1_T5(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO0_T5    17
+#define V_PERR_IDMA2IMSG_FIFO0_T5(x) ((x) << S_PERR_IDMA2IMSG_FIFO0_T5)
+#define F_PERR_IDMA2IMSG_FIFO0_T5    V_PERR_IDMA2IMSG_FIFO0_T5(1U)
+
+#define S_PERR_POINTER_DATA_FIFO0    16
+#define V_PERR_POINTER_DATA_FIFO0(x) ((x) << S_PERR_POINTER_DATA_FIFO0)
+#define F_PERR_POINTER_DATA_FIFO0    V_PERR_POINTER_DATA_FIFO0(1U)
+
+#define S_PERR_POINTER_DATA_FIFO1    15
+#define V_PERR_POINTER_DATA_FIFO1(x) ((x) << S_PERR_POINTER_DATA_FIFO1)
+#define F_PERR_POINTER_DATA_FIFO1    V_PERR_POINTER_DATA_FIFO1(1U)
+
+#define S_PERR_POINTER_HDR_FIFO0    14
+#define V_PERR_POINTER_HDR_FIFO0(x) ((x) << S_PERR_POINTER_HDR_FIFO0)
+#define F_PERR_POINTER_HDR_FIFO0    V_PERR_POINTER_HDR_FIFO0(1U)
+
+#define S_PERR_POINTER_HDR_FIFO1    13
+#define V_PERR_POINTER_HDR_FIFO1(x) ((x) << S_PERR_POINTER_HDR_FIFO1)
+#define F_PERR_POINTER_HDR_FIFO1    V_PERR_POINTER_HDR_FIFO1(1U)
+
+#define S_PERR_PAYLOAD_FIFO0    12
+#define V_PERR_PAYLOAD_FIFO0(x) ((x) << S_PERR_PAYLOAD_FIFO0)
+#define F_PERR_PAYLOAD_FIFO0    V_PERR_PAYLOAD_FIFO0(1U)
+
+#define S_PERR_PAYLOAD_FIFO1    11
+#define V_PERR_PAYLOAD_FIFO1(x) ((x) << S_PERR_PAYLOAD_FIFO1)
+#define F_PERR_PAYLOAD_FIFO1    V_PERR_PAYLOAD_FIFO1(1U)
+
+#define S_PERR_EDMA_INPUT_FIFO3    10
+#define V_PERR_EDMA_INPUT_FIFO3(x) ((x) << S_PERR_EDMA_INPUT_FIFO3)
+#define F_PERR_EDMA_INPUT_FIFO3    V_PERR_EDMA_INPUT_FIFO3(1U)
+
+#define S_PERR_EDMA_INPUT_FIFO2    9
+#define V_PERR_EDMA_INPUT_FIFO2(x) ((x) << S_PERR_EDMA_INPUT_FIFO2)
+#define F_PERR_EDMA_INPUT_FIFO2    V_PERR_EDMA_INPUT_FIFO2(1U)
+
+#define S_PERR_EDMA_INPUT_FIFO1    8
+#define V_PERR_EDMA_INPUT_FIFO1(x) ((x) << S_PERR_EDMA_INPUT_FIFO1)
+#define F_PERR_EDMA_INPUT_FIFO1    V_PERR_EDMA_INPUT_FIFO1(1U)
+
+#define S_PERR_EDMA_INPUT_FIFO0    7
+#define V_PERR_EDMA_INPUT_FIFO0(x) ((x) << S_PERR_EDMA_INPUT_FIFO0)
+#define F_PERR_EDMA_INPUT_FIFO0    V_PERR_EDMA_INPUT_FIFO0(1U)
+
+#define S_PERR_MGT_BAR2_FIFO    6
+#define V_PERR_MGT_BAR2_FIFO(x) ((x) << S_PERR_MGT_BAR2_FIFO)
+#define F_PERR_MGT_BAR2_FIFO    V_PERR_MGT_BAR2_FIFO(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO1_T5    5
+#define V_PERR_HEADERSPLIT_FIFO1_T5(x) ((x) << S_PERR_HEADERSPLIT_FIFO1_T5)
+#define F_PERR_HEADERSPLIT_FIFO1_T5    V_PERR_HEADERSPLIT_FIFO1_T5(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO0_T5    4
+#define V_PERR_HEADERSPLIT_FIFO0_T5(x) ((x) << S_PERR_HEADERSPLIT_FIFO0_T5)
+#define F_PERR_HEADERSPLIT_FIFO0_T5    V_PERR_HEADERSPLIT_FIFO0_T5(1U)
+
+#define S_PERR_CIM_FIFO1    3
+#define V_PERR_CIM_FIFO1(x) ((x) << S_PERR_CIM_FIFO1)
+#define F_PERR_CIM_FIFO1    V_PERR_CIM_FIFO1(1U)
+
+#define S_PERR_CIM_FIFO0    2
+#define V_PERR_CIM_FIFO0(x) ((x) << S_PERR_CIM_FIFO0)
+#define F_PERR_CIM_FIFO0    V_PERR_CIM_FIFO0(1U)
+
+#define S_PERR_IDMA_SWITCH_OUTPUT_FIFO1    1
+#define V_PERR_IDMA_SWITCH_OUTPUT_FIFO1(x) ((x) << S_PERR_IDMA_SWITCH_OUTPUT_FIFO1)
+#define F_PERR_IDMA_SWITCH_OUTPUT_FIFO1    V_PERR_IDMA_SWITCH_OUTPUT_FIFO1(1U)
+
+#define S_PERR_IDMA_SWITCH_OUTPUT_FIFO0    0
+#define V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(x) ((x) << S_PERR_IDMA_SWITCH_OUTPUT_FIFO0)
+#define F_PERR_IDMA_SWITCH_OUTPUT_FIFO0    V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(1U)
+
+#define A_SGE_INT_ENABLE5 0x1110
+#define A_SGE_PERR_ENABLE5 0x1114
+#define A_SGE_DBFIFO_STATUS2 0x1118
+
+#define S_FL_INT_THRESH    24
+#define M_FL_INT_THRESH    0xfU
+#define V_FL_INT_THRESH(x) ((x) << S_FL_INT_THRESH)
+#define G_FL_INT_THRESH(x) (((x) >> S_FL_INT_THRESH) & M_FL_INT_THRESH)
+
+#define S_FL_COUNT    14
+#define M_FL_COUNT    0x3ffU
+#define V_FL_COUNT(x) ((x) << S_FL_COUNT)
+#define G_FL_COUNT(x) (((x) >> S_FL_COUNT) & M_FL_COUNT)
+
+#define S_HP_INT_THRESH_T5    10
+#define M_HP_INT_THRESH_T5    0xfU
+#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
+#define G_HP_INT_THRESH_T5(x) (((x) >> S_HP_INT_THRESH_T5) & M_HP_INT_THRESH_T5)
+
+#define S_HP_COUNT_T5    0
+#define M_HP_COUNT_T5    0x3ffU
+#define V_HP_COUNT_T5(x) ((x) << S_HP_COUNT_T5)
+#define G_HP_COUNT_T5(x) (((x) >> S_HP_COUNT_T5) & M_HP_COUNT_T5)
+
+#define A_SGE_FETCH_BURST_MAX_0_AND_1 0x111c
+
+#define S_FETCHBURSTMAX0    16
+#define M_FETCHBURSTMAX0    0x3ffU
+#define V_FETCHBURSTMAX0(x) ((x) << S_FETCHBURSTMAX0)
+#define G_FETCHBURSTMAX0(x) (((x) >> S_FETCHBURSTMAX0) & M_FETCHBURSTMAX0)
+
+#define S_FETCHBURSTMAX1    0
+#define M_FETCHBURSTMAX1    0x3ffU
+#define V_FETCHBURSTMAX1(x) ((x) << S_FETCHBURSTMAX1)
+#define G_FETCHBURSTMAX1(x) (((x) >> S_FETCHBURSTMAX1) & M_FETCHBURSTMAX1)
+
+#define A_SGE_FETCH_BURST_MAX_2_AND_3 0x1120
+
+#define S_FETCHBURSTMAX2    16
+#define M_FETCHBURSTMAX2    0x3ffU
+#define V_FETCHBURSTMAX2(x) ((x) << S_FETCHBURSTMAX2)
+#define G_FETCHBURSTMAX2(x) (((x) >> S_FETCHBURSTMAX2) & M_FETCHBURSTMAX2)
+
+#define S_FETCHBURSTMAX3    0
+#define M_FETCHBURSTMAX3    0x3ffU
+#define V_FETCHBURSTMAX3(x) ((x) << S_FETCHBURSTMAX3)
+#define G_FETCHBURSTMAX3(x) (((x) >> S_FETCHBURSTMAX3) & M_FETCHBURSTMAX3)
+
+#define A_SGE_CONTROL2 0x1124
+
+#define S_UPFLCUTOFFDIS    21
+#define V_UPFLCUTOFFDIS(x) ((x) << S_UPFLCUTOFFDIS)
+#define F_UPFLCUTOFFDIS    V_UPFLCUTOFFDIS(1U)
+
+#define S_RXCPLSIZEAUTOCORRECT    20
+#define V_RXCPLSIZEAUTOCORRECT(x) ((x) << S_RXCPLSIZEAUTOCORRECT)
+#define F_RXCPLSIZEAUTOCORRECT    V_RXCPLSIZEAUTOCORRECT(1U)
+
+#define S_IDMAARBROUNDROBIN    19
+#define V_IDMAARBROUNDROBIN(x) ((x) << S_IDMAARBROUNDROBIN)
+#define F_IDMAARBROUNDROBIN    V_IDMAARBROUNDROBIN(1U)
+
+#define S_INGPACKBOUNDARY    16
+#define M_INGPACKBOUNDARY    0x7U
+#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY)
+#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY)
+
+#define S_CGEN_EGRESS_CONTEXT    15
+#define V_CGEN_EGRESS_CONTEXT(x) ((x) << S_CGEN_EGRESS_CONTEXT)
+#define F_CGEN_EGRESS_CONTEXT    V_CGEN_EGRESS_CONTEXT(1U)
+
+#define S_CGEN_INGRESS_CONTEXT    14
+#define V_CGEN_INGRESS_CONTEXT(x) ((x) << S_CGEN_INGRESS_CONTEXT)
+#define F_CGEN_INGRESS_CONTEXT    V_CGEN_INGRESS_CONTEXT(1U)
+
+#define S_CGEN_IDMA    13
+#define V_CGEN_IDMA(x) ((x) << S_CGEN_IDMA)
+#define F_CGEN_IDMA    V_CGEN_IDMA(1U)
+
+#define S_CGEN_DBP    12
+#define V_CGEN_DBP(x) ((x) << S_CGEN_DBP)
+#define F_CGEN_DBP    V_CGEN_DBP(1U)
+
+#define S_CGEN_EDMA    11
+#define V_CGEN_EDMA(x) ((x) << S_CGEN_EDMA)
+#define F_CGEN_EDMA    V_CGEN_EDMA(1U)
+
+#define S_VFIFO_ENABLE    10
+#define V_VFIFO_ENABLE(x) ((x) << S_VFIFO_ENABLE)
+#define F_VFIFO_ENABLE    V_VFIFO_ENABLE(1U)
+
+#define S_FLM_RESCHEDULE_MODE    9
+#define V_FLM_RESCHEDULE_MODE(x) ((x) << S_FLM_RESCHEDULE_MODE)
+#define F_FLM_RESCHEDULE_MODE    V_FLM_RESCHEDULE_MODE(1U)
+
+#define S_HINTDEPTHCTLFL    4
+#define M_HINTDEPTHCTLFL    0x1fU
+#define V_HINTDEPTHCTLFL(x) ((x) << S_HINTDEPTHCTLFL)
+#define G_HINTDEPTHCTLFL(x) (((x) >> S_HINTDEPTHCTLFL) & M_HINTDEPTHCTLFL)
+
+#define S_FORCE_ORDERING    3
+#define V_FORCE_ORDERING(x) ((x) << S_FORCE_ORDERING)
+#define F_FORCE_ORDERING    V_FORCE_ORDERING(1U)
+
+#define S_TX_COALESCE_SIZE    2
+#define V_TX_COALESCE_SIZE(x) ((x) << S_TX_COALESCE_SIZE)
+#define F_TX_COALESCE_SIZE    V_TX_COALESCE_SIZE(1U)
+
+#define S_COAL_STRICT_CIM_PRI    1
+#define V_COAL_STRICT_CIM_PRI(x) ((x) << S_COAL_STRICT_CIM_PRI)
+#define F_COAL_STRICT_CIM_PRI    V_COAL_STRICT_CIM_PRI(1U)
+
+#define S_TX_COALESCE_PRI    0
+#define V_TX_COALESCE_PRI(x) ((x) << S_TX_COALESCE_PRI)
+#define F_TX_COALESCE_PRI    V_TX_COALESCE_PRI(1U)
+
+#define A_SGE_DEEP_SLEEP 0x1128
+
+#define S_IDMA1_SLEEP_STATUS    11
+#define V_IDMA1_SLEEP_STATUS(x) ((x) << S_IDMA1_SLEEP_STATUS)
+#define F_IDMA1_SLEEP_STATUS    V_IDMA1_SLEEP_STATUS(1U)
+
+#define S_IDMA0_SLEEP_STATUS    10
+#define V_IDMA0_SLEEP_STATUS(x) ((x) << S_IDMA0_SLEEP_STATUS)
+#define F_IDMA0_SLEEP_STATUS    V_IDMA0_SLEEP_STATUS(1U)
+
+#define S_IDMA1_SLEEP_REQ    9
+#define V_IDMA1_SLEEP_REQ(x) ((x) << S_IDMA1_SLEEP_REQ)
+#define F_IDMA1_SLEEP_REQ    V_IDMA1_SLEEP_REQ(1U)
+
+#define S_IDMA0_SLEEP_REQ    8
+#define V_IDMA0_SLEEP_REQ(x) ((x) << S_IDMA0_SLEEP_REQ)
+#define F_IDMA0_SLEEP_REQ    V_IDMA0_SLEEP_REQ(1U)
+
+#define S_EDMA3_SLEEP_STATUS    7
+#define V_EDMA3_SLEEP_STATUS(x) ((x) << S_EDMA3_SLEEP_STATUS)
+#define F_EDMA3_SLEEP_STATUS    V_EDMA3_SLEEP_STATUS(1U)
+
+#define S_EDMA2_SLEEP_STATUS    6
+#define V_EDMA2_SLEEP_STATUS(x) ((x) << S_EDMA2_SLEEP_STATUS)
+#define F_EDMA2_SLEEP_STATUS    V_EDMA2_SLEEP_STATUS(1U)
+
+#define S_EDMA1_SLEEP_STATUS    5
+#define V_EDMA1_SLEEP_STATUS(x) ((x) << S_EDMA1_SLEEP_STATUS)
+#define F_EDMA1_SLEEP_STATUS    V_EDMA1_SLEEP_STATUS(1U)
+
+#define S_EDMA0_SLEEP_STATUS    4
+#define V_EDMA0_SLEEP_STATUS(x) ((x) << S_EDMA0_SLEEP_STATUS)
+#define F_EDMA0_SLEEP_STATUS    V_EDMA0_SLEEP_STATUS(1U)
+
+#define S_EDMA3_SLEEP_REQ    3
+#define V_EDMA3_SLEEP_REQ(x) ((x) << S_EDMA3_SLEEP_REQ)
+#define F_EDMA3_SLEEP_REQ    V_EDMA3_SLEEP_REQ(1U)
+
+#define S_EDMA2_SLEEP_REQ    2
+#define V_EDMA2_SLEEP_REQ(x) ((x) << S_EDMA2_SLEEP_REQ)
+#define F_EDMA2_SLEEP_REQ    V_EDMA2_SLEEP_REQ(1U)
+
+#define S_EDMA1_SLEEP_REQ    1
+#define V_EDMA1_SLEEP_REQ(x) ((x) << S_EDMA1_SLEEP_REQ)
+#define F_EDMA1_SLEEP_REQ    V_EDMA1_SLEEP_REQ(1U)
+
+#define S_EDMA0_SLEEP_REQ    0
+#define V_EDMA0_SLEEP_REQ(x) ((x) << S_EDMA0_SLEEP_REQ)
+#define F_EDMA0_SLEEP_REQ    V_EDMA0_SLEEP_REQ(1U)
+
+#define A_SGE_INT_CAUSE6 0x1128
+
+#define S_ERR_DB_SYNC    21
+#define V_ERR_DB_SYNC(x) ((x) << S_ERR_DB_SYNC)
+#define F_ERR_DB_SYNC    V_ERR_DB_SYNC(1U)
+
+#define S_ERR_GTS_SYNC    20
+#define V_ERR_GTS_SYNC(x) ((x) << S_ERR_GTS_SYNC)
+#define F_ERR_GTS_SYNC    V_ERR_GTS_SYNC(1U)
+
+#define S_FATAL_LARGE_COAL    19
+#define V_FATAL_LARGE_COAL(x) ((x) << S_FATAL_LARGE_COAL)
+#define F_FATAL_LARGE_COAL    V_FATAL_LARGE_COAL(1U)
+
+#define S_PL_BAR2_FRM_ERR    18
+#define V_PL_BAR2_FRM_ERR(x) ((x) << S_PL_BAR2_FRM_ERR)
+#define F_PL_BAR2_FRM_ERR    V_PL_BAR2_FRM_ERR(1U)
+
+#define S_SILENT_DROP_TX_COAL    17
+#define V_SILENT_DROP_TX_COAL(x) ((x) << S_SILENT_DROP_TX_COAL)
+#define F_SILENT_DROP_TX_COAL    V_SILENT_DROP_TX_COAL(1U)
+
+#define S_ERR_INV_CTXT4    16
+#define V_ERR_INV_CTXT4(x) ((x) << S_ERR_INV_CTXT4)
+#define F_ERR_INV_CTXT4    V_ERR_INV_CTXT4(1U)
+
+#define S_ERR_BAD_DB_PIDX4    15
+#define V_ERR_BAD_DB_PIDX4(x) ((x) << S_ERR_BAD_DB_PIDX4)
+#define F_ERR_BAD_DB_PIDX4    V_ERR_BAD_DB_PIDX4(1U)
+
+#define S_ERR_BAD_UPFL_INC_CREDIT4    14
+#define V_ERR_BAD_UPFL_INC_CREDIT4(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT4)
+#define F_ERR_BAD_UPFL_INC_CREDIT4    V_ERR_BAD_UPFL_INC_CREDIT4(1U)
+
+#define S_FATAL_TAG_MISMATCH    13
+#define V_FATAL_TAG_MISMATCH(x) ((x) << S_FATAL_TAG_MISMATCH)
+#define F_FATAL_TAG_MISMATCH    V_FATAL_TAG_MISMATCH(1U)
+
+#define S_FATAL_ENQ_CTL_RDY    12
+#define V_FATAL_ENQ_CTL_RDY(x) ((x) << S_FATAL_ENQ_CTL_RDY)
+#define F_FATAL_ENQ_CTL_RDY    V_FATAL_ENQ_CTL_RDY(1U)
+
+#define S_ERR_PC_RSP_LEN3    11
+#define V_ERR_PC_RSP_LEN3(x) ((x) << S_ERR_PC_RSP_LEN3)
+#define F_ERR_PC_RSP_LEN3    V_ERR_PC_RSP_LEN3(1U)
+
+#define S_ERR_PC_RSP_LEN2    10
+#define V_ERR_PC_RSP_LEN2(x) ((x) << S_ERR_PC_RSP_LEN2)
+#define F_ERR_PC_RSP_LEN2    V_ERR_PC_RSP_LEN2(1U)
+
+#define S_ERR_PC_RSP_LEN1    9
+#define V_ERR_PC_RSP_LEN1(x) ((x) << S_ERR_PC_RSP_LEN1)
+#define F_ERR_PC_RSP_LEN1    V_ERR_PC_RSP_LEN1(1U)
+
+#define S_ERR_PC_RSP_LEN0    8
+#define V_ERR_PC_RSP_LEN0(x) ((x) << S_ERR_PC_RSP_LEN0)
+#define F_ERR_PC_RSP_LEN0    V_ERR_PC_RSP_LEN0(1U)
+
+#define S_FATAL_ENQ2LL_VLD    7
+#define V_FATAL_ENQ2LL_VLD(x) ((x) << S_FATAL_ENQ2LL_VLD)
+#define F_FATAL_ENQ2LL_VLD    V_FATAL_ENQ2LL_VLD(1U)
+
+#define S_FATAL_LL_EMPTY    6
+#define V_FATAL_LL_EMPTY(x) ((x) << S_FATAL_LL_EMPTY)
+#define F_FATAL_LL_EMPTY    V_FATAL_LL_EMPTY(1U)
+
+#define S_FATAL_OFF_WDENQ    5
+#define V_FATAL_OFF_WDENQ(x) ((x) << S_FATAL_OFF_WDENQ)
+#define F_FATAL_OFF_WDENQ    V_FATAL_OFF_WDENQ(1U)
+
+#define S_FATAL_DEQ_DRDY    3
+#define M_FATAL_DEQ_DRDY    0x3U
+#define V_FATAL_DEQ_DRDY(x) ((x) << S_FATAL_DEQ_DRDY)
+#define G_FATAL_DEQ_DRDY(x) (((x) >> S_FATAL_DEQ_DRDY) & M_FATAL_DEQ_DRDY)
+
+#define S_FATAL_OUTP_DRDY    1
+#define M_FATAL_OUTP_DRDY    0x3U
+#define V_FATAL_OUTP_DRDY(x) ((x) << S_FATAL_OUTP_DRDY)
+#define G_FATAL_OUTP_DRDY(x) (((x) >> S_FATAL_OUTP_DRDY) & M_FATAL_OUTP_DRDY)
+
+#define S_FATAL_DEQ    0
+#define V_FATAL_DEQ(x) ((x) << S_FATAL_DEQ)
+#define F_FATAL_DEQ    V_FATAL_DEQ(1U)
+
+#define A_SGE_DOORBELL_THROTTLE_THRESHOLD 0x112c
+
+#define S_THROTTLE_THRESHOLD_FL    16
+#define M_THROTTLE_THRESHOLD_FL    0xfU
+#define V_THROTTLE_THRESHOLD_FL(x) ((x) << S_THROTTLE_THRESHOLD_FL)
+#define G_THROTTLE_THRESHOLD_FL(x) (((x) >> S_THROTTLE_THRESHOLD_FL) & M_THROTTLE_THRESHOLD_FL)
+
+#define S_THROTTLE_THRESHOLD_HP    12
+#define M_THROTTLE_THRESHOLD_HP    0xfU
+#define V_THROTTLE_THRESHOLD_HP(x) ((x) << S_THROTTLE_THRESHOLD_HP)
+#define G_THROTTLE_THRESHOLD_HP(x) (((x) >> S_THROTTLE_THRESHOLD_HP) & M_THROTTLE_THRESHOLD_HP)
+
+#define S_THROTTLE_THRESHOLD_LP    0
+#define M_THROTTLE_THRESHOLD_LP    0xfffU
+#define V_THROTTLE_THRESHOLD_LP(x) ((x) << S_THROTTLE_THRESHOLD_LP)
+#define G_THROTTLE_THRESHOLD_LP(x) (((x) >> S_THROTTLE_THRESHOLD_LP) & M_THROTTLE_THRESHOLD_LP)
+
+#define A_SGE_INT_ENABLE6 0x112c
+#define A_SGE_DBP_FETCH_THRESHOLD 0x1130
+
+#define S_DBP_FETCH_THRESHOLD_FL    21
+#define M_DBP_FETCH_THRESHOLD_FL    0xfU
+#define V_DBP_FETCH_THRESHOLD_FL(x) ((x) << S_DBP_FETCH_THRESHOLD_FL)
+#define G_DBP_FETCH_THRESHOLD_FL(x) (((x) >> S_DBP_FETCH_THRESHOLD_FL) & M_DBP_FETCH_THRESHOLD_FL)
+
+#define S_DBP_FETCH_THRESHOLD_HP    17
+#define M_DBP_FETCH_THRESHOLD_HP    0xfU
+#define V_DBP_FETCH_THRESHOLD_HP(x) ((x) << S_DBP_FETCH_THRESHOLD_HP)
+#define G_DBP_FETCH_THRESHOLD_HP(x) (((x) >> S_DBP_FETCH_THRESHOLD_HP) & M_DBP_FETCH_THRESHOLD_HP)
+
+#define S_DBP_FETCH_THRESHOLD_LP    5
+#define M_DBP_FETCH_THRESHOLD_LP    0xfffU
+#define V_DBP_FETCH_THRESHOLD_LP(x) ((x) << S_DBP_FETCH_THRESHOLD_LP)
+#define G_DBP_FETCH_THRESHOLD_LP(x) (((x) >> S_DBP_FETCH_THRESHOLD_LP) & M_DBP_FETCH_THRESHOLD_LP)
+
+#define S_DBP_FETCH_THRESHOLD_MODE    4
+#define V_DBP_FETCH_THRESHOLD_MODE(x) ((x) << S_DBP_FETCH_THRESHOLD_MODE)
+#define F_DBP_FETCH_THRESHOLD_MODE    V_DBP_FETCH_THRESHOLD_MODE(1U)
+
+#define S_DBP_FETCH_THRESHOLD_EN3    3
+#define V_DBP_FETCH_THRESHOLD_EN3(x) ((x) << S_DBP_FETCH_THRESHOLD_EN3)
+#define F_DBP_FETCH_THRESHOLD_EN3    V_DBP_FETCH_THRESHOLD_EN3(1U)
+
+#define S_DBP_FETCH_THRESHOLD_EN2    2
+#define V_DBP_FETCH_THRESHOLD_EN2(x) ((x) << S_DBP_FETCH_THRESHOLD_EN2)
+#define F_DBP_FETCH_THRESHOLD_EN2    V_DBP_FETCH_THRESHOLD_EN2(1U)
+
+#define S_DBP_FETCH_THRESHOLD_EN1    1
+#define V_DBP_FETCH_THRESHOLD_EN1(x) ((x) << S_DBP_FETCH_THRESHOLD_EN1)
+#define F_DBP_FETCH_THRESHOLD_EN1    V_DBP_FETCH_THRESHOLD_EN1(1U)
+
+#define S_DBP_FETCH_THRESHOLD_EN0    0
+#define V_DBP_FETCH_THRESHOLD_EN0(x) ((x) << S_DBP_FETCH_THRESHOLD_EN0)
+#define F_DBP_FETCH_THRESHOLD_EN0    V_DBP_FETCH_THRESHOLD_EN0(1U)
+
+#define A_SGE_DBP_FETCH_THRESHOLD_QUEUE 0x1134
+
+#define S_DBP_FETCH_THRESHOLD_IQ1    16
+#define M_DBP_FETCH_THRESHOLD_IQ1    0xffffU
+#define V_DBP_FETCH_THRESHOLD_IQ1(x) ((x) << S_DBP_FETCH_THRESHOLD_IQ1)
+#define G_DBP_FETCH_THRESHOLD_IQ1(x) (((x) >> S_DBP_FETCH_THRESHOLD_IQ1) & M_DBP_FETCH_THRESHOLD_IQ1)
+
+#define S_DBP_FETCH_THRESHOLD_IQ0    0
+#define M_DBP_FETCH_THRESHOLD_IQ0    0xffffU
+#define V_DBP_FETCH_THRESHOLD_IQ0(x) ((x) << S_DBP_FETCH_THRESHOLD_IQ0)
+#define G_DBP_FETCH_THRESHOLD_IQ0(x) (((x) >> S_DBP_FETCH_THRESHOLD_IQ0) & M_DBP_FETCH_THRESHOLD_IQ0)
+
+#define A_SGE_DBVFIFO_BADDR 0x1138
+#define A_SGE_DBVFIFO_SIZE 0x113c
+
+#define S_DBVFIFO_SIZE    6
+#define M_DBVFIFO_SIZE    0xfffU
+#define V_DBVFIFO_SIZE(x) ((x) << S_DBVFIFO_SIZE)
+#define G_DBVFIFO_SIZE(x) (((x) >> S_DBVFIFO_SIZE) & M_DBVFIFO_SIZE)
+
+#define S_T6_DBVFIFO_SIZE    0
+#define M_T6_DBVFIFO_SIZE    0x1fffU
+#define V_T6_DBVFIFO_SIZE(x) ((x) << S_T6_DBVFIFO_SIZE)
+#define G_T6_DBVFIFO_SIZE(x) (((x) >> S_T6_DBVFIFO_SIZE) & M_T6_DBVFIFO_SIZE)
+
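Note the T6_-prefixed redefinition in the DBVFIFO_SIZE group just above: on T6
the field occupies bits 12:0 of the same register instead of bits 17:6 as on
earlier chips, so any decode has to be chip-dependent. A minimal sketch, with
the is_t6 flag standing in for whatever chip-revision predicate the driver
actually uses:

    /* Pick the right DBVFIFO_SIZE decode for A_SGE_DBVFIFO_SIZE per chip. */
    static uint32_t
    dbvfifo_size(uint32_t raw, int is_t6)
    {
            return (is_t6 ? G_T6_DBVFIFO_SIZE(raw) : G_DBVFIFO_SIZE(raw));
    }
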
+#define A_SGE_DBFIFO_STATUS3 0x1140
+
+#define S_LP_PTRS_EQUAL    21
+#define V_LP_PTRS_EQUAL(x) ((x) << S_LP_PTRS_EQUAL)
+#define F_LP_PTRS_EQUAL    V_LP_PTRS_EQUAL(1U)
+
+#define S_LP_SNAPHOT    20
+#define V_LP_SNAPHOT(x) ((x) << S_LP_SNAPHOT)
+#define F_LP_SNAPHOT    V_LP_SNAPHOT(1U)
+
+#define S_FL_INT_THRESH_LOW    16
+#define M_FL_INT_THRESH_LOW    0xfU
+#define V_FL_INT_THRESH_LOW(x) ((x) << S_FL_INT_THRESH_LOW)
+#define G_FL_INT_THRESH_LOW(x) (((x) >> S_FL_INT_THRESH_LOW) & M_FL_INT_THRESH_LOW)
+
+#define S_HP_INT_THRESH_LOW    12
+#define M_HP_INT_THRESH_LOW    0xfU
+#define V_HP_INT_THRESH_LOW(x) ((x) << S_HP_INT_THRESH_LOW)
+#define G_HP_INT_THRESH_LOW(x) (((x) >> S_HP_INT_THRESH_LOW) & M_HP_INT_THRESH_LOW)
+
+#define S_LP_INT_THRESH_LOW    0
+#define M_LP_INT_THRESH_LOW    0xfffU
+#define V_LP_INT_THRESH_LOW(x) ((x) << S_LP_INT_THRESH_LOW)
+#define G_LP_INT_THRESH_LOW(x) (((x) >> S_LP_INT_THRESH_LOW) & M_LP_INT_THRESH_LOW)
+
+#define A_SGE_CHANGESET 0x1144
+#define A_SGE_PC_RSP_ERROR 0x1148
+#define A_SGE_TBUF_CONTROL 0x114c
+
+#define S_DBPTBUFRSV1    9
+#define M_DBPTBUFRSV1    0x1ffU
+#define V_DBPTBUFRSV1(x) ((x) << S_DBPTBUFRSV1)
+#define G_DBPTBUFRSV1(x) (((x) >> S_DBPTBUFRSV1) & M_DBPTBUFRSV1)
+
+#define S_DBPTBUFRSV0    0
+#define M_DBPTBUFRSV0    0x1ffU
+#define V_DBPTBUFRSV0(x) ((x) << S_DBPTBUFRSV0)
+#define G_DBPTBUFRSV0(x) (((x) >> S_DBPTBUFRSV0) & M_DBPTBUFRSV0)
+
 #define A_SGE_PC0_REQ_BIST_CMD 0x1180
 #define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
 #define A_SGE_PC1_REQ_BIST_CMD 0x1190
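The S_/M_/V_/F_/G_ families in these hunks all follow the same bit-field
convention: S_<FIELD> is the field's bit offset within the register, M_<FIELD>
is its right-justified mask, V_<FIELD>(x) shifts a value into place, G_<FIELD>(v)
extracts the field from a raw register word, and F_<FIELD> is the single-bit
case, V_<FIELD>(1U). A minimal usage sketch, assuming simplified
read_reg()/write_reg() accessors in place of the driver's real
t4_read_reg()/t4_write_reg(), which also take an adapter argument:

    #include <stdint.h>

    extern uint32_t read_reg(uint32_t addr);        /* assumed accessor */
    extern void write_reg(uint32_t addr, uint32_t val);

    static void
    throttle_example(void)
    {
            uint32_t v, lp;

            v = read_reg(A_SGE_DOORBELL_THROTTLE_THRESHOLD);

            /* Extract: G_<FIELD>(v) == ((v >> S_<FIELD>) & M_<FIELD>). */
            lp = G_THROTTLE_THRESHOLD_LP(v);

            /* Update: clear the field, then OR in the new shifted value. */
            v &= ~V_THROTTLE_THRESHOLD_LP(M_THROTTLE_THRESHOLD_LP);
            v |= V_THROTTLE_THRESHOLD_LP(lp / 2);
            write_reg(A_SGE_DOORBELL_THROTTLE_THRESHOLD, v);

            /* Single-bit test; INT_CAUSE bits are typically write-1-to-clear. */
            if (read_reg(A_SGE_INT_CAUSE6) & F_ERR_DB_SYNC)
                    write_reg(A_SGE_INT_CAUSE6, F_ERR_DB_SYNC);
    }
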
@@ -1446,6 +2658,1203 @@
 #define A_SGE_CTXT_MASK5 0x1234
 #define A_SGE_CTXT_MASK6 0x1238
 #define A_SGE_CTXT_MASK7 0x123c
+#define A_SGE_QBASE_MAP0 0x1240
+
+#define S_EGRESS0_SIZE    24
+#define M_EGRESS0_SIZE    0x1fU
+#define V_EGRESS0_SIZE(x) ((x) << S_EGRESS0_SIZE)
+#define G_EGRESS0_SIZE(x) (((x) >> S_EGRESS0_SIZE) & M_EGRESS0_SIZE)
+
+#define S_EGRESS1_SIZE    16
+#define M_EGRESS1_SIZE    0x1fU
+#define V_EGRESS1_SIZE(x) ((x) << S_EGRESS1_SIZE)
+#define G_EGRESS1_SIZE(x) (((x) >> S_EGRESS1_SIZE) & M_EGRESS1_SIZE)
+
+#define S_INGRESS0_SIZE    8
+#define M_INGRESS0_SIZE    0x1fU
+#define V_INGRESS0_SIZE(x) ((x) << S_INGRESS0_SIZE)
+#define G_INGRESS0_SIZE(x) (((x) >> S_INGRESS0_SIZE) & M_INGRESS0_SIZE)
+
+#define A_SGE_QBASE_MAP1 0x1244
+
+#define S_EGRESS0_BASE    0
+#define M_EGRESS0_BASE    0x1ffffU
+#define V_EGRESS0_BASE(x) ((x) << S_EGRESS0_BASE)
+#define G_EGRESS0_BASE(x) (((x) >> S_EGRESS0_BASE) & M_EGRESS0_BASE)
+
+#define A_SGE_QBASE_MAP2 0x1248
+
+#define S_EGRESS1_BASE    0
+#define M_EGRESS1_BASE    0x1ffffU
+#define V_EGRESS1_BASE(x) ((x) << S_EGRESS1_BASE)
+#define G_EGRESS1_BASE(x) (((x) >> S_EGRESS1_BASE) & M_EGRESS1_BASE)
+
+#define A_SGE_QBASE_MAP3 0x124c
+
+#define S_INGRESS1_BASE_256VF    16
+#define M_INGRESS1_BASE_256VF    0xffffU
+#define V_INGRESS1_BASE_256VF(x) ((x) << S_INGRESS1_BASE_256VF)
+#define G_INGRESS1_BASE_256VF(x) (((x) >> S_INGRESS1_BASE_256VF) & M_INGRESS1_BASE_256VF)
+
+#define S_INGRESS0_BASE    0
+#define M_INGRESS0_BASE    0xffffU
+#define V_INGRESS0_BASE(x) ((x) << S_INGRESS0_BASE)
+#define G_INGRESS0_BASE(x) (((x) >> S_INGRESS0_BASE) & M_INGRESS0_BASE)
+
+#define A_SGE_QBASE_INDEX 0x1250
+
+#define S_QIDX    0
+#define M_QIDX    0x1ffU
+#define V_QIDX(x) ((x) << S_QIDX)
+#define G_QIDX(x) (((x) >> S_QIDX) & M_QIDX)
+
+#define A_SGE_CONM_CTRL2 0x1254
+
+#define S_FLMTHRESHPACK    8
+#define M_FLMTHRESHPACK    0x7fU
+#define V_FLMTHRESHPACK(x) ((x) << S_FLMTHRESHPACK)
+#define G_FLMTHRESHPACK(x) (((x) >> S_FLMTHRESHPACK) & M_FLMTHRESHPACK)
+
+#define S_FLMTHRESH    0
+#define M_FLMTHRESH    0x7fU
+#define V_FLMTHRESH(x) ((x) << S_FLMTHRESH)
+#define G_FLMTHRESH(x) (((x) >> S_FLMTHRESH) & M_FLMTHRESH)
+
+#define A_SGE_DEBUG_CONM 0x1258
+
+#define S_MPS_CH_CNG    16
+#define M_MPS_CH_CNG    0xffffU
+#define V_MPS_CH_CNG(x) ((x) << S_MPS_CH_CNG)
+#define G_MPS_CH_CNG(x) (((x) >> S_MPS_CH_CNG) & M_MPS_CH_CNG)
+
+#define S_TP_CH_CNG    14
+#define M_TP_CH_CNG    0x3U
+#define V_TP_CH_CNG(x) ((x) << S_TP_CH_CNG)
+#define G_TP_CH_CNG(x) (((x) >> S_TP_CH_CNG) & M_TP_CH_CNG)
+
+#define S_ST_CONG    12
+#define M_ST_CONG    0x3U
+#define V_ST_CONG(x) ((x) << S_ST_CONG)
+#define G_ST_CONG(x) (((x) >> S_ST_CONG) & M_ST_CONG)
+
+#define S_LAST_XOFF    10
+#define V_LAST_XOFF(x) ((x) << S_LAST_XOFF)
+#define F_LAST_XOFF    V_LAST_XOFF(1U)
+
+#define S_LAST_QID    0
+#define M_LAST_QID    0x3ffU
+#define V_LAST_QID(x) ((x) << S_LAST_QID)
+#define G_LAST_QID(x) (((x) >> S_LAST_QID) & M_LAST_QID)
+
+#define A_SGE_DBG_QUEUE_STAT0_CTRL 0x125c
+
+#define S_IMSG_GTS_SEL    18
+#define V_IMSG_GTS_SEL(x) ((x) << S_IMSG_GTS_SEL)
+#define F_IMSG_GTS_SEL    V_IMSG_GTS_SEL(1U)
+
+#define S_MGT_SEL    17
+#define V_MGT_SEL(x) ((x) << S_MGT_SEL)
+#define F_MGT_SEL    V_MGT_SEL(1U)
+
+#define S_DB_GTS_QID    0
+#define M_DB_GTS_QID    0x1ffffU
+#define V_DB_GTS_QID(x) ((x) << S_DB_GTS_QID)
+#define G_DB_GTS_QID(x) (((x) >> S_DB_GTS_QID) & M_DB_GTS_QID)
+
+#define A_SGE_DBG_QUEUE_STAT1_CTRL 0x1260
+#define A_SGE_DBG_QUEUE_STAT0 0x1264
+#define A_SGE_DBG_QUEUE_STAT1 0x1268
+#define A_SGE_DBG_BAR2_PKT_CNT 0x126c
+#define A_SGE_DBG_DB_PKT_CNT 0x1270
+#define A_SGE_DBG_GTS_PKT_CNT 0x1274
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_0 0x1280
+
+#define S_CIM_WM    24
+#define M_CIM_WM    0x3U
+#define V_CIM_WM(x) ((x) << S_CIM_WM)
+#define G_CIM_WM(x) (((x) >> S_CIM_WM) & M_CIM_WM)
+
+#define S_DEBUG_UP_SOP_CNT    20
+#define M_DEBUG_UP_SOP_CNT    0xfU
+#define V_DEBUG_UP_SOP_CNT(x) ((x) << S_DEBUG_UP_SOP_CNT)
+#define G_DEBUG_UP_SOP_CNT(x) (((x) >> S_DEBUG_UP_SOP_CNT) & M_DEBUG_UP_SOP_CNT)
+
+#define S_DEBUG_UP_EOP_CNT    16
+#define M_DEBUG_UP_EOP_CNT    0xfU
+#define V_DEBUG_UP_EOP_CNT(x) ((x) << S_DEBUG_UP_EOP_CNT)
+#define G_DEBUG_UP_EOP_CNT(x) (((x) >> S_DEBUG_UP_EOP_CNT) & M_DEBUG_UP_EOP_CNT)
+
+#define S_DEBUG_CIM_SOP1_CNT    12
+#define M_DEBUG_CIM_SOP1_CNT    0xfU
+#define V_DEBUG_CIM_SOP1_CNT(x) ((x) << S_DEBUG_CIM_SOP1_CNT)
+#define G_DEBUG_CIM_SOP1_CNT(x) (((x) >> S_DEBUG_CIM_SOP1_CNT) & M_DEBUG_CIM_SOP1_CNT)
+
+#define S_DEBUG_CIM_EOP1_CNT    8
+#define M_DEBUG_CIM_EOP1_CNT    0xfU
+#define V_DEBUG_CIM_EOP1_CNT(x) ((x) << S_DEBUG_CIM_EOP1_CNT)
+#define G_DEBUG_CIM_EOP1_CNT(x) (((x) >> S_DEBUG_CIM_EOP1_CNT) & M_DEBUG_CIM_EOP1_CNT)
+
+#define S_DEBUG_CIM_SOP0_CNT    4
+#define M_DEBUG_CIM_SOP0_CNT    0xfU
+#define V_DEBUG_CIM_SOP0_CNT(x) ((x) << S_DEBUG_CIM_SOP0_CNT)
+#define G_DEBUG_CIM_SOP0_CNT(x) (((x) >> S_DEBUG_CIM_SOP0_CNT) & M_DEBUG_CIM_SOP0_CNT)
+
+#define S_DEBUG_CIM_EOP0_CNT    0
+#define M_DEBUG_CIM_EOP0_CNT    0xfU
+#define V_DEBUG_CIM_EOP0_CNT(x) ((x) << S_DEBUG_CIM_EOP0_CNT)
+#define G_DEBUG_CIM_EOP0_CNT(x) (((x) >> S_DEBUG_CIM_EOP0_CNT) & M_DEBUG_CIM_EOP0_CNT)
+
+#define S_DEBUG_BAR2_SOP_CNT    28
+#define M_DEBUG_BAR2_SOP_CNT    0xfU
+#define V_DEBUG_BAR2_SOP_CNT(x) ((x) << S_DEBUG_BAR2_SOP_CNT)
+#define G_DEBUG_BAR2_SOP_CNT(x) (((x) >> S_DEBUG_BAR2_SOP_CNT) & M_DEBUG_BAR2_SOP_CNT)
+
+#define S_DEBUG_BAR2_EOP_CNT    24
+#define M_DEBUG_BAR2_EOP_CNT    0xfU
+#define V_DEBUG_BAR2_EOP_CNT(x) ((x) << S_DEBUG_BAR2_EOP_CNT)
+#define G_DEBUG_BAR2_EOP_CNT(x) (((x) >> S_DEBUG_BAR2_EOP_CNT) & M_DEBUG_BAR2_EOP_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_1 0x1284
+
+#define S_DEBUG_T_RX_SOP1_CNT    28
+#define M_DEBUG_T_RX_SOP1_CNT    0xfU
+#define V_DEBUG_T_RX_SOP1_CNT(x) ((x) << S_DEBUG_T_RX_SOP1_CNT)
+#define G_DEBUG_T_RX_SOP1_CNT(x) (((x) >> S_DEBUG_T_RX_SOP1_CNT) & M_DEBUG_T_RX_SOP1_CNT)
+
+#define S_DEBUG_T_RX_EOP1_CNT    24
+#define M_DEBUG_T_RX_EOP1_CNT    0xfU
+#define V_DEBUG_T_RX_EOP1_CNT(x) ((x) << S_DEBUG_T_RX_EOP1_CNT)
+#define G_DEBUG_T_RX_EOP1_CNT(x) (((x) >> S_DEBUG_T_RX_EOP1_CNT) & M_DEBUG_T_RX_EOP1_CNT)
+
+#define S_DEBUG_T_RX_SOP0_CNT    20
+#define M_DEBUG_T_RX_SOP0_CNT    0xfU
+#define V_DEBUG_T_RX_SOP0_CNT(x) ((x) << S_DEBUG_T_RX_SOP0_CNT)
+#define G_DEBUG_T_RX_SOP0_CNT(x) (((x) >> S_DEBUG_T_RX_SOP0_CNT) & M_DEBUG_T_RX_SOP0_CNT)
+
+#define S_DEBUG_T_RX_EOP0_CNT    16
+#define M_DEBUG_T_RX_EOP0_CNT    0xfU
+#define V_DEBUG_T_RX_EOP0_CNT(x) ((x) << S_DEBUG_T_RX_EOP0_CNT)
+#define G_DEBUG_T_RX_EOP0_CNT(x) (((x) >> S_DEBUG_T_RX_EOP0_CNT) & M_DEBUG_T_RX_EOP0_CNT)
+
+#define S_DEBUG_U_RX_SOP1_CNT    12
+#define M_DEBUG_U_RX_SOP1_CNT    0xfU
+#define V_DEBUG_U_RX_SOP1_CNT(x) ((x) << S_DEBUG_U_RX_SOP1_CNT)
+#define G_DEBUG_U_RX_SOP1_CNT(x) (((x) >> S_DEBUG_U_RX_SOP1_CNT) & M_DEBUG_U_RX_SOP1_CNT)
+
+#define S_DEBUG_U_RX_EOP1_CNT    8
+#define M_DEBUG_U_RX_EOP1_CNT    0xfU
+#define V_DEBUG_U_RX_EOP1_CNT(x) ((x) << S_DEBUG_U_RX_EOP1_CNT)
+#define G_DEBUG_U_RX_EOP1_CNT(x) (((x) >> S_DEBUG_U_RX_EOP1_CNT) & M_DEBUG_U_RX_EOP1_CNT)
+
+#define S_DEBUG_U_RX_SOP0_CNT    4
+#define M_DEBUG_U_RX_SOP0_CNT    0xfU
+#define V_DEBUG_U_RX_SOP0_CNT(x) ((x) << S_DEBUG_U_RX_SOP0_CNT)
+#define G_DEBUG_U_RX_SOP0_CNT(x) (((x) >> S_DEBUG_U_RX_SOP0_CNT) & M_DEBUG_U_RX_SOP0_CNT)
+
+#define S_DEBUG_U_RX_EOP0_CNT    0
+#define M_DEBUG_U_RX_EOP0_CNT    0xfU
+#define V_DEBUG_U_RX_EOP0_CNT(x) ((x) << S_DEBUG_U_RX_EOP0_CNT)
+#define G_DEBUG_U_RX_EOP0_CNT(x) (((x) >> S_DEBUG_U_RX_EOP0_CNT) & M_DEBUG_U_RX_EOP0_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_2 0x1288
+
+#define S_DEBUG_UD_RX_SOP3_CNT    28
+#define M_DEBUG_UD_RX_SOP3_CNT    0xfU
+#define V_DEBUG_UD_RX_SOP3_CNT(x) ((x) << S_DEBUG_UD_RX_SOP3_CNT)
+#define G_DEBUG_UD_RX_SOP3_CNT(x) (((x) >> S_DEBUG_UD_RX_SOP3_CNT) & M_DEBUG_UD_RX_SOP3_CNT)
+
+#define S_DEBUG_UD_RX_EOP3_CNT    24
+#define M_DEBUG_UD_RX_EOP3_CNT    0xfU
+#define V_DEBUG_UD_RX_EOP3_CNT(x) ((x) << S_DEBUG_UD_RX_EOP3_CNT)
+#define G_DEBUG_UD_RX_EOP3_CNT(x) (((x) >> S_DEBUG_UD_RX_EOP3_CNT) & M_DEBUG_UD_RX_EOP3_CNT)
+
+#define S_DEBUG_UD_RX_SOP2_CNT    20
+#define M_DEBUG_UD_RX_SOP2_CNT    0xfU
+#define V_DEBUG_UD_RX_SOP2_CNT(x) ((x) << S_DEBUG_UD_RX_SOP2_CNT)
+#define G_DEBUG_UD_RX_SOP2_CNT(x) (((x) >> S_DEBUG_UD_RX_SOP2_CNT) & M_DEBUG_UD_RX_SOP2_CNT)
+
+#define S_DEBUG_UD_RX_EOP2_CNT    16
+#define M_DEBUG_UD_RX_EOP2_CNT    0xfU
+#define V_DEBUG_UD_RX_EOP2_CNT(x) ((x) << S_DEBUG_UD_RX_EOP2_CNT)
+#define G_DEBUG_UD_RX_EOP2_CNT(x) (((x) >> S_DEBUG_UD_RX_EOP2_CNT) & M_DEBUG_UD_RX_EOP2_CNT)
+
+#define S_DEBUG_UD_RX_SOP1_CNT    12
+#define M_DEBUG_UD_RX_SOP1_CNT    0xfU
+#define V_DEBUG_UD_RX_SOP1_CNT(x) ((x) << S_DEBUG_UD_RX_SOP1_CNT)
+#define G_DEBUG_UD_RX_SOP1_CNT(x) (((x) >> S_DEBUG_UD_RX_SOP1_CNT) & M_DEBUG_UD_RX_SOP1_CNT)
+
+#define S_DEBUG_UD_RX_EOP1_CNT    8
+#define M_DEBUG_UD_RX_EOP1_CNT    0xfU
+#define V_DEBUG_UD_RX_EOP1_CNT(x) ((x) << S_DEBUG_UD_RX_EOP1_CNT)
+#define G_DEBUG_UD_RX_EOP1_CNT(x) (((x) >> S_DEBUG_UD_RX_EOP1_CNT) & M_DEBUG_UD_RX_EOP1_CNT)
+
+#define S_DEBUG_UD_RX_SOP0_CNT    4
+#define M_DEBUG_UD_RX_SOP0_CNT    0xfU
+#define V_DEBUG_UD_RX_SOP0_CNT(x) ((x) << S_DEBUG_UD_RX_SOP0_CNT)
+#define G_DEBUG_UD_RX_SOP0_CNT(x) (((x) >> S_DEBUG_UD_RX_SOP0_CNT) & M_DEBUG_UD_RX_SOP0_CNT)
+
+#define S_DEBUG_UD_RX_EOP0_CNT    0
+#define M_DEBUG_UD_RX_EOP0_CNT    0xfU
+#define V_DEBUG_UD_RX_EOP0_CNT(x) ((x) << S_DEBUG_UD_RX_EOP0_CNT)
+#define G_DEBUG_UD_RX_EOP0_CNT(x) (((x) >> S_DEBUG_UD_RX_EOP0_CNT) & M_DEBUG_UD_RX_EOP0_CNT)
+
+#define S_DBG_TBUF_USED1    9
+#define M_DBG_TBUF_USED1    0x1ffU
+#define V_DBG_TBUF_USED1(x) ((x) << S_DBG_TBUF_USED1)
+#define G_DBG_TBUF_USED1(x) (((x) >> S_DBG_TBUF_USED1) & M_DBG_TBUF_USED1)
+
+#define S_DBG_TBUF_USED0    0
+#define M_DBG_TBUF_USED0    0x1ffU
+#define V_DBG_TBUF_USED0(x) ((x) << S_DBG_TBUF_USED0)
+#define G_DBG_TBUF_USED0(x) (((x) >> S_DBG_TBUF_USED0) & M_DBG_TBUF_USED0)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_3 0x128c
+
+#define S_DEBUG_U_TX_SOP3_CNT    28
+#define M_DEBUG_U_TX_SOP3_CNT    0xfU
+#define V_DEBUG_U_TX_SOP3_CNT(x) ((x) << S_DEBUG_U_TX_SOP3_CNT)
+#define G_DEBUG_U_TX_SOP3_CNT(x) (((x) >> S_DEBUG_U_TX_SOP3_CNT) & M_DEBUG_U_TX_SOP3_CNT)
+
+#define S_DEBUG_U_TX_EOP3_CNT    24
+#define M_DEBUG_U_TX_EOP3_CNT    0xfU
+#define V_DEBUG_U_TX_EOP3_CNT(x) ((x) << S_DEBUG_U_TX_EOP3_CNT)
+#define G_DEBUG_U_TX_EOP3_CNT(x) (((x) >> S_DEBUG_U_TX_EOP3_CNT) & M_DEBUG_U_TX_EOP3_CNT)
+
+#define S_DEBUG_U_TX_SOP2_CNT    20
+#define M_DEBUG_U_TX_SOP2_CNT    0xfU
+#define V_DEBUG_U_TX_SOP2_CNT(x) ((x) << S_DEBUG_U_TX_SOP2_CNT)
+#define G_DEBUG_U_TX_SOP2_CNT(x) (((x) >> S_DEBUG_U_TX_SOP2_CNT) & M_DEBUG_U_TX_SOP2_CNT)
+
+#define S_DEBUG_U_TX_EOP2_CNT    16
+#define M_DEBUG_U_TX_EOP2_CNT    0xfU
+#define V_DEBUG_U_TX_EOP2_CNT(x) ((x) << S_DEBUG_U_TX_EOP2_CNT)
+#define G_DEBUG_U_TX_EOP2_CNT(x) (((x) >> S_DEBUG_U_TX_EOP2_CNT) & M_DEBUG_U_TX_EOP2_CNT)
+
+#define S_DEBUG_U_TX_SOP1_CNT    12
+#define M_DEBUG_U_TX_SOP1_CNT    0xfU
+#define V_DEBUG_U_TX_SOP1_CNT(x) ((x) << S_DEBUG_U_TX_SOP1_CNT)
+#define G_DEBUG_U_TX_SOP1_CNT(x) (((x) >> S_DEBUG_U_TX_SOP1_CNT) & M_DEBUG_U_TX_SOP1_CNT)
+
+#define S_DEBUG_U_TX_EOP1_CNT    8
+#define M_DEBUG_U_TX_EOP1_CNT    0xfU
+#define V_DEBUG_U_TX_EOP1_CNT(x) ((x) << S_DEBUG_U_TX_EOP1_CNT)
+#define G_DEBUG_U_TX_EOP1_CNT(x) (((x) >> S_DEBUG_U_TX_EOP1_CNT) & M_DEBUG_U_TX_EOP1_CNT)
+
+#define S_DEBUG_U_TX_SOP0_CNT    4
+#define M_DEBUG_U_TX_SOP0_CNT    0xfU
+#define V_DEBUG_U_TX_SOP0_CNT(x) ((x) << S_DEBUG_U_TX_SOP0_CNT)
+#define G_DEBUG_U_TX_SOP0_CNT(x) (((x) >> S_DEBUG_U_TX_SOP0_CNT) & M_DEBUG_U_TX_SOP0_CNT)
+
+#define S_DEBUG_U_TX_EOP0_CNT    0
+#define M_DEBUG_U_TX_EOP0_CNT    0xfU
+#define V_DEBUG_U_TX_EOP0_CNT(x) ((x) << S_DEBUG_U_TX_EOP0_CNT)
+#define G_DEBUG_U_TX_EOP0_CNT(x) (((x) >> S_DEBUG_U_TX_EOP0_CNT) & M_DEBUG_U_TX_EOP0_CNT)
+
+#define A_SGE_DEBUG1_DBP_THREAD 0x128c
+
+#define S_WR_DEQ_CNT    12
+#define M_WR_DEQ_CNT    0xfU
+#define V_WR_DEQ_CNT(x) ((x) << S_WR_DEQ_CNT)
+#define G_WR_DEQ_CNT(x) (((x) >> S_WR_DEQ_CNT) & M_WR_DEQ_CNT)
+
+#define S_WR_ENQ_CNT    8
+#define M_WR_ENQ_CNT    0xfU
+#define V_WR_ENQ_CNT(x) ((x) << S_WR_ENQ_CNT)
+#define G_WR_ENQ_CNT(x) (((x) >> S_WR_ENQ_CNT) & M_WR_ENQ_CNT)
+
+#define S_FL_DEQ_CNT    4
+#define M_FL_DEQ_CNT    0xfU
+#define V_FL_DEQ_CNT(x) ((x) << S_FL_DEQ_CNT)
+#define G_FL_DEQ_CNT(x) (((x) >> S_FL_DEQ_CNT) & M_FL_DEQ_CNT)
+
+#define S_FL_ENQ_CNT    0
+#define M_FL_ENQ_CNT    0xfU
+#define V_FL_ENQ_CNT(x) ((x) << S_FL_ENQ_CNT)
+#define G_FL_ENQ_CNT(x) (((x) >> S_FL_ENQ_CNT) & M_FL_ENQ_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_4 0x1290
+
+#define S_DEBUG_PC_RSP_SOP1_CNT    28
+#define M_DEBUG_PC_RSP_SOP1_CNT    0xfU
+#define V_DEBUG_PC_RSP_SOP1_CNT(x) ((x) << S_DEBUG_PC_RSP_SOP1_CNT)
+#define G_DEBUG_PC_RSP_SOP1_CNT(x) (((x) >> S_DEBUG_PC_RSP_SOP1_CNT) & M_DEBUG_PC_RSP_SOP1_CNT)
+
+#define S_DEBUG_PC_RSP_EOP1_CNT    24
+#define M_DEBUG_PC_RSP_EOP1_CNT    0xfU
+#define V_DEBUG_PC_RSP_EOP1_CNT(x) ((x) << S_DEBUG_PC_RSP_EOP1_CNT)
+#define G_DEBUG_PC_RSP_EOP1_CNT(x) (((x) >> S_DEBUG_PC_RSP_EOP1_CNT) & M_DEBUG_PC_RSP_EOP1_CNT)
+
+#define S_DEBUG_PC_RSP_SOP0_CNT    20
+#define M_DEBUG_PC_RSP_SOP0_CNT    0xfU
+#define V_DEBUG_PC_RSP_SOP0_CNT(x) ((x) << S_DEBUG_PC_RSP_SOP0_CNT)
+#define G_DEBUG_PC_RSP_SOP0_CNT(x) (((x) >> S_DEBUG_PC_RSP_SOP0_CNT) & M_DEBUG_PC_RSP_SOP0_CNT)
+
+#define S_DEBUG_PC_RSP_EOP0_CNT    16
+#define M_DEBUG_PC_RSP_EOP0_CNT    0xfU
+#define V_DEBUG_PC_RSP_EOP0_CNT(x) ((x) << S_DEBUG_PC_RSP_EOP0_CNT)
+#define G_DEBUG_PC_RSP_EOP0_CNT(x) (((x) >> S_DEBUG_PC_RSP_EOP0_CNT) & M_DEBUG_PC_RSP_EOP0_CNT)
+
+#define S_DEBUG_PC_REQ_SOP1_CNT    12
+#define M_DEBUG_PC_REQ_SOP1_CNT    0xfU
+#define V_DEBUG_PC_REQ_SOP1_CNT(x) ((x) << S_DEBUG_PC_REQ_SOP1_CNT)
+#define G_DEBUG_PC_REQ_SOP1_CNT(x) (((x) >> S_DEBUG_PC_REQ_SOP1_CNT) & M_DEBUG_PC_REQ_SOP1_CNT)
+
+#define S_DEBUG_PC_REQ_EOP1_CNT    8
+#define M_DEBUG_PC_REQ_EOP1_CNT    0xfU
+#define V_DEBUG_PC_REQ_EOP1_CNT(x) ((x) << S_DEBUG_PC_REQ_EOP1_CNT)
+#define G_DEBUG_PC_REQ_EOP1_CNT(x) (((x) >> S_DEBUG_PC_REQ_EOP1_CNT) & M_DEBUG_PC_REQ_EOP1_CNT)
+
+#define S_DEBUG_PC_REQ_SOP0_CNT    4
+#define M_DEBUG_PC_REQ_SOP0_CNT    0xfU
+#define V_DEBUG_PC_REQ_SOP0_CNT(x) ((x) << S_DEBUG_PC_REQ_SOP0_CNT)
+#define G_DEBUG_PC_REQ_SOP0_CNT(x) (((x) >> S_DEBUG_PC_REQ_SOP0_CNT) & M_DEBUG_PC_REQ_SOP0_CNT)
+
+#define S_DEBUG_PC_REQ_EOP0_CNT    0
+#define M_DEBUG_PC_REQ_EOP0_CNT    0xfU
+#define V_DEBUG_PC_REQ_EOP0_CNT(x) ((x) << S_DEBUG_PC_REQ_EOP0_CNT)
+#define G_DEBUG_PC_REQ_EOP0_CNT(x) (((x) >> S_DEBUG_PC_REQ_EOP0_CNT) & M_DEBUG_PC_REQ_EOP0_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_5 0x1294
+
+#define S_DEBUG_PD_RDREQ_SOP3_CNT    28
+#define M_DEBUG_PD_RDREQ_SOP3_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_SOP3_CNT(x) ((x) << S_DEBUG_PD_RDREQ_SOP3_CNT)
+#define G_DEBUG_PD_RDREQ_SOP3_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_SOP3_CNT) & M_DEBUG_PD_RDREQ_SOP3_CNT)
+
+#define S_DEBUG_PD_RDREQ_EOP3_CNT    24
+#define M_DEBUG_PD_RDREQ_EOP3_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_EOP3_CNT(x) ((x) << S_DEBUG_PD_RDREQ_EOP3_CNT)
+#define G_DEBUG_PD_RDREQ_EOP3_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_EOP3_CNT) & M_DEBUG_PD_RDREQ_EOP3_CNT)
+
+#define S_DEBUG_PD_RDREQ_SOP2_CNT    20
+#define M_DEBUG_PD_RDREQ_SOP2_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_SOP2_CNT(x) ((x) << S_DEBUG_PD_RDREQ_SOP2_CNT)
+#define G_DEBUG_PD_RDREQ_SOP2_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_SOP2_CNT) & M_DEBUG_PD_RDREQ_SOP2_CNT)
+
+#define S_DEBUG_PD_RDREQ_EOP2_CNT    16
+#define M_DEBUG_PD_RDREQ_EOP2_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_EOP2_CNT(x) ((x) << S_DEBUG_PD_RDREQ_EOP2_CNT)
+#define G_DEBUG_PD_RDREQ_EOP2_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_EOP2_CNT) & M_DEBUG_PD_RDREQ_EOP2_CNT)
+
+#define S_DEBUG_PD_RDREQ_SOP1_CNT    12
+#define M_DEBUG_PD_RDREQ_SOP1_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_SOP1_CNT(x) ((x) << S_DEBUG_PD_RDREQ_SOP1_CNT)
+#define G_DEBUG_PD_RDREQ_SOP1_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_SOP1_CNT) & M_DEBUG_PD_RDREQ_SOP1_CNT)
+
+#define S_DEBUG_PD_RDREQ_EOP1_CNT    8
+#define M_DEBUG_PD_RDREQ_EOP1_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_EOP1_CNT(x) ((x) << S_DEBUG_PD_RDREQ_EOP1_CNT)
+#define G_DEBUG_PD_RDREQ_EOP1_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_EOP1_CNT) & M_DEBUG_PD_RDREQ_EOP1_CNT)
+
+#define S_DEBUG_PD_RDREQ_SOP0_CNT    4
+#define M_DEBUG_PD_RDREQ_SOP0_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_SOP0_CNT(x) ((x) << S_DEBUG_PD_RDREQ_SOP0_CNT)
+#define G_DEBUG_PD_RDREQ_SOP0_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_SOP0_CNT) & M_DEBUG_PD_RDREQ_SOP0_CNT)
+
+#define S_DEBUG_PD_RDREQ_EOP0_CNT    0
+#define M_DEBUG_PD_RDREQ_EOP0_CNT    0xfU
+#define V_DEBUG_PD_RDREQ_EOP0_CNT(x) ((x) << S_DEBUG_PD_RDREQ_EOP0_CNT)
+#define G_DEBUG_PD_RDREQ_EOP0_CNT(x) (((x) >> S_DEBUG_PD_RDREQ_EOP0_CNT) & M_DEBUG_PD_RDREQ_EOP0_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_6 0x1298
+
+#define S_DEBUG_PD_RDRSP_SOP3_CNT    28
+#define M_DEBUG_PD_RDRSP_SOP3_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_SOP3_CNT(x) ((x) << S_DEBUG_PD_RDRSP_SOP3_CNT)
+#define G_DEBUG_PD_RDRSP_SOP3_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_SOP3_CNT) & M_DEBUG_PD_RDRSP_SOP3_CNT)
+
+#define S_DEBUG_PD_RDRSP_EOP3_CNT    24
+#define M_DEBUG_PD_RDRSP_EOP3_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_EOP3_CNT(x) ((x) << S_DEBUG_PD_RDRSP_EOP3_CNT)
+#define G_DEBUG_PD_RDRSP_EOP3_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_EOP3_CNT) & M_DEBUG_PD_RDRSP_EOP3_CNT)
+
+#define S_DEBUG_PD_RDRSP_SOP2_CNT    20
+#define M_DEBUG_PD_RDRSP_SOP2_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_SOP2_CNT(x) ((x) << S_DEBUG_PD_RDRSP_SOP2_CNT)
+#define G_DEBUG_PD_RDRSP_SOP2_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_SOP2_CNT) & M_DEBUG_PD_RDRSP_SOP2_CNT)
+
+#define S_DEBUG_PD_RDRSP_EOP2_CNT    16
+#define M_DEBUG_PD_RDRSP_EOP2_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_EOP2_CNT(x) ((x) << S_DEBUG_PD_RDRSP_EOP2_CNT)
+#define G_DEBUG_PD_RDRSP_EOP2_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_EOP2_CNT) & M_DEBUG_PD_RDRSP_EOP2_CNT)
+
+#define S_DEBUG_PD_RDRSP_SOP1_CNT    12
+#define M_DEBUG_PD_RDRSP_SOP1_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_SOP1_CNT(x) ((x) << S_DEBUG_PD_RDRSP_SOP1_CNT)
+#define G_DEBUG_PD_RDRSP_SOP1_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_SOP1_CNT) & M_DEBUG_PD_RDRSP_SOP1_CNT)
+
+#define S_DEBUG_PD_RDRSP_EOP1_CNT    8
+#define M_DEBUG_PD_RDRSP_EOP1_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_EOP1_CNT(x) ((x) << S_DEBUG_PD_RDRSP_EOP1_CNT)
+#define G_DEBUG_PD_RDRSP_EOP1_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_EOP1_CNT) & M_DEBUG_PD_RDRSP_EOP1_CNT)
+
+#define S_DEBUG_PD_RDRSP_SOP0_CNT    4
+#define M_DEBUG_PD_RDRSP_SOP0_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_SOP0_CNT(x) ((x) << S_DEBUG_PD_RDRSP_SOP0_CNT)
+#define G_DEBUG_PD_RDRSP_SOP0_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_SOP0_CNT) & M_DEBUG_PD_RDRSP_SOP0_CNT)
+
+#define S_DEBUG_PD_RDRSP_EOP0_CNT    0
+#define M_DEBUG_PD_RDRSP_EOP0_CNT    0xfU
+#define V_DEBUG_PD_RDRSP_EOP0_CNT(x) ((x) << S_DEBUG_PD_RDRSP_EOP0_CNT)
+#define G_DEBUG_PD_RDRSP_EOP0_CNT(x) (((x) >> S_DEBUG_PD_RDRSP_EOP0_CNT) & M_DEBUG_PD_RDRSP_EOP0_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_7 0x129c
+
+#define S_DEBUG_PD_WRREQ_SOP3_CNT    28
+#define M_DEBUG_PD_WRREQ_SOP3_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_SOP3_CNT(x) ((x) << S_DEBUG_PD_WRREQ_SOP3_CNT)
+#define G_DEBUG_PD_WRREQ_SOP3_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_SOP3_CNT) & M_DEBUG_PD_WRREQ_SOP3_CNT)
+
+#define S_DEBUG_PD_WRREQ_EOP3_CNT    24
+#define M_DEBUG_PD_WRREQ_EOP3_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_EOP3_CNT(x) ((x) << S_DEBUG_PD_WRREQ_EOP3_CNT)
+#define G_DEBUG_PD_WRREQ_EOP3_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_EOP3_CNT) & M_DEBUG_PD_WRREQ_EOP3_CNT)
+
+#define S_DEBUG_PD_WRREQ_SOP2_CNT    20
+#define M_DEBUG_PD_WRREQ_SOP2_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_SOP2_CNT(x) ((x) << S_DEBUG_PD_WRREQ_SOP2_CNT)
+#define G_DEBUG_PD_WRREQ_SOP2_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_SOP2_CNT) & M_DEBUG_PD_WRREQ_SOP2_CNT)
+
+#define S_DEBUG_PD_WRREQ_EOP2_CNT    16
+#define M_DEBUG_PD_WRREQ_EOP2_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_EOP2_CNT(x) ((x) << S_DEBUG_PD_WRREQ_EOP2_CNT)
+#define G_DEBUG_PD_WRREQ_EOP2_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_EOP2_CNT) & M_DEBUG_PD_WRREQ_EOP2_CNT)
+
+#define S_DEBUG_PD_WRREQ_SOP1_CNT    12
+#define M_DEBUG_PD_WRREQ_SOP1_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_SOP1_CNT(x) ((x) << S_DEBUG_PD_WRREQ_SOP1_CNT)
+#define G_DEBUG_PD_WRREQ_SOP1_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_SOP1_CNT) & M_DEBUG_PD_WRREQ_SOP1_CNT)
+
+#define S_DEBUG_PD_WRREQ_EOP1_CNT    8
+#define M_DEBUG_PD_WRREQ_EOP1_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_EOP1_CNT(x) ((x) << S_DEBUG_PD_WRREQ_EOP1_CNT)
+#define G_DEBUG_PD_WRREQ_EOP1_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_EOP1_CNT) & M_DEBUG_PD_WRREQ_EOP1_CNT)
+
+#define S_DEBUG_PD_WRREQ_SOP0_CNT    4
+#define M_DEBUG_PD_WRREQ_SOP0_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_SOP0_CNT(x) ((x) << S_DEBUG_PD_WRREQ_SOP0_CNT)
+#define G_DEBUG_PD_WRREQ_SOP0_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_SOP0_CNT) & M_DEBUG_PD_WRREQ_SOP0_CNT)
+
+#define S_DEBUG_PD_WRREQ_EOP0_CNT    0
+#define M_DEBUG_PD_WRREQ_EOP0_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_EOP0_CNT(x) ((x) << S_DEBUG_PD_WRREQ_EOP0_CNT)
+#define G_DEBUG_PD_WRREQ_EOP0_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_EOP0_CNT) & M_DEBUG_PD_WRREQ_EOP0_CNT)
+
+#define S_DEBUG_PC_RSP_SOP_CNT    28
+#define M_DEBUG_PC_RSP_SOP_CNT    0xfU
+#define V_DEBUG_PC_RSP_SOP_CNT(x) ((x) << S_DEBUG_PC_RSP_SOP_CNT)
+#define G_DEBUG_PC_RSP_SOP_CNT(x) (((x) >> S_DEBUG_PC_RSP_SOP_CNT) & M_DEBUG_PC_RSP_SOP_CNT)
+
+#define S_DEBUG_PC_RSP_EOP_CNT    24
+#define M_DEBUG_PC_RSP_EOP_CNT    0xfU
+#define V_DEBUG_PC_RSP_EOP_CNT(x) ((x) << S_DEBUG_PC_RSP_EOP_CNT)
+#define G_DEBUG_PC_RSP_EOP_CNT(x) (((x) >> S_DEBUG_PC_RSP_EOP_CNT) & M_DEBUG_PC_RSP_EOP_CNT)
+
+#define S_DEBUG_PC_REQ_SOP_CNT    20
+#define M_DEBUG_PC_REQ_SOP_CNT    0xfU
+#define V_DEBUG_PC_REQ_SOP_CNT(x) ((x) << S_DEBUG_PC_REQ_SOP_CNT)
+#define G_DEBUG_PC_REQ_SOP_CNT(x) (((x) >> S_DEBUG_PC_REQ_SOP_CNT) & M_DEBUG_PC_REQ_SOP_CNT)
+
+#define S_DEBUG_PC_REQ_EOP_CNT    16
+#define M_DEBUG_PC_REQ_EOP_CNT    0xfU
+#define V_DEBUG_PC_REQ_EOP_CNT(x) ((x) << S_DEBUG_PC_REQ_EOP_CNT)
+#define G_DEBUG_PC_REQ_EOP_CNT(x) (((x) >> S_DEBUG_PC_REQ_EOP_CNT) & M_DEBUG_PC_REQ_EOP_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_8 0x12a0
+
+#define S_GLOBALENABLE_OFF    29
+#define V_GLOBALENABLE_OFF(x) ((x) << S_GLOBALENABLE_OFF)
+#define F_GLOBALENABLE_OFF    V_GLOBALENABLE_OFF(1U)
+
+#define S_DEBUG_CIM2SGE_RXAFULL_D    27
+#define M_DEBUG_CIM2SGE_RXAFULL_D    0x3U
+#define V_DEBUG_CIM2SGE_RXAFULL_D(x) ((x) << S_DEBUG_CIM2SGE_RXAFULL_D)
+#define G_DEBUG_CIM2SGE_RXAFULL_D(x) (((x) >> S_DEBUG_CIM2SGE_RXAFULL_D) & M_DEBUG_CIM2SGE_RXAFULL_D)
+
+#define S_DEBUG_CPLSW_CIM_TXAFULL_D    25
+#define M_DEBUG_CPLSW_CIM_TXAFULL_D    0x3U
+#define V_DEBUG_CPLSW_CIM_TXAFULL_D(x) ((x) << S_DEBUG_CPLSW_CIM_TXAFULL_D)
+#define G_DEBUG_CPLSW_CIM_TXAFULL_D(x) (((x) >> S_DEBUG_CPLSW_CIM_TXAFULL_D) & M_DEBUG_CPLSW_CIM_TXAFULL_D)
+
+#define S_DEBUG_UP_FULL    24
+#define V_DEBUG_UP_FULL(x) ((x) << S_DEBUG_UP_FULL)
+#define F_DEBUG_UP_FULL    V_DEBUG_UP_FULL(1U)
+
+#define S_DEBUG_M_RD_REQ_OUTSTANDING_PC    23
+#define V_DEBUG_M_RD_REQ_OUTSTANDING_PC(x) ((x) << S_DEBUG_M_RD_REQ_OUTSTANDING_PC)
+#define F_DEBUG_M_RD_REQ_OUTSTANDING_PC    V_DEBUG_M_RD_REQ_OUTSTANDING_PC(1U)
+
+#define S_DEBUG_M_RD_REQ_OUTSTANDING_VFIFO    22
+#define V_DEBUG_M_RD_REQ_OUTSTANDING_VFIFO(x) ((x) << S_DEBUG_M_RD_REQ_OUTSTANDING_VFIFO)
+#define F_DEBUG_M_RD_REQ_OUTSTANDING_VFIFO    V_DEBUG_M_RD_REQ_OUTSTANDING_VFIFO(1U)
+
+#define S_DEBUG_M_RD_REQ_OUTSTANDING_IMSG    21
+#define V_DEBUG_M_RD_REQ_OUTSTANDING_IMSG(x) ((x) << S_DEBUG_M_RD_REQ_OUTSTANDING_IMSG)
+#define F_DEBUG_M_RD_REQ_OUTSTANDING_IMSG    V_DEBUG_M_RD_REQ_OUTSTANDING_IMSG(1U)
+
+#define S_DEBUG_M_RD_REQ_OUTSTANDING_CMARB    20
+#define V_DEBUG_M_RD_REQ_OUTSTANDING_CMARB(x) ((x) << S_DEBUG_M_RD_REQ_OUTSTANDING_CMARB)
+#define F_DEBUG_M_RD_REQ_OUTSTANDING_CMARB    V_DEBUG_M_RD_REQ_OUTSTANDING_CMARB(1U)
+
+#define S_DEBUG_M_RD_REQ_OUTSTANDING_FLM    19
+#define V_DEBUG_M_RD_REQ_OUTSTANDING_FLM(x) ((x) << S_DEBUG_M_RD_REQ_OUTSTANDING_FLM)
+#define F_DEBUG_M_RD_REQ_OUTSTANDING_FLM    V_DEBUG_M_RD_REQ_OUTSTANDING_FLM(1U)
+
+#define S_DEBUG_M_REQVLD    18
+#define V_DEBUG_M_REQVLD(x) ((x) << S_DEBUG_M_REQVLD)
+#define F_DEBUG_M_REQVLD    V_DEBUG_M_REQVLD(1U)
+
+#define S_DEBUG_M_REQRDY    17
+#define V_DEBUG_M_REQRDY(x) ((x) << S_DEBUG_M_REQRDY)
+#define F_DEBUG_M_REQRDY    V_DEBUG_M_REQRDY(1U)
+
+#define S_DEBUG_M_RSPVLD    16
+#define V_DEBUG_M_RSPVLD(x) ((x) << S_DEBUG_M_RSPVLD)
+#define F_DEBUG_M_RSPVLD    V_DEBUG_M_RSPVLD(1U)
+
+#define S_DEBUG_PD_WRREQ_INT3_CNT    12
+#define M_DEBUG_PD_WRREQ_INT3_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_INT3_CNT(x) ((x) << S_DEBUG_PD_WRREQ_INT3_CNT)
+#define G_DEBUG_PD_WRREQ_INT3_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_INT3_CNT) & M_DEBUG_PD_WRREQ_INT3_CNT)
+
+#define S_DEBUG_PD_WRREQ_INT2_CNT    8
+#define M_DEBUG_PD_WRREQ_INT2_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_INT2_CNT(x) ((x) << S_DEBUG_PD_WRREQ_INT2_CNT)
+#define G_DEBUG_PD_WRREQ_INT2_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_INT2_CNT) & M_DEBUG_PD_WRREQ_INT2_CNT)
+
+#define S_DEBUG_PD_WRREQ_INT1_CNT    4
+#define M_DEBUG_PD_WRREQ_INT1_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_INT1_CNT(x) ((x) << S_DEBUG_PD_WRREQ_INT1_CNT)
+#define G_DEBUG_PD_WRREQ_INT1_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_INT1_CNT) & M_DEBUG_PD_WRREQ_INT1_CNT)
+
+#define S_DEBUG_PD_WRREQ_INT0_CNT    0
+#define M_DEBUG_PD_WRREQ_INT0_CNT    0xfU
+#define V_DEBUG_PD_WRREQ_INT0_CNT(x) ((x) << S_DEBUG_PD_WRREQ_INT0_CNT)
+#define G_DEBUG_PD_WRREQ_INT0_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_INT0_CNT) & M_DEBUG_PD_WRREQ_INT0_CNT)
+
+#define S_DEBUG_PL_BAR2_REQVLD    31
+#define V_DEBUG_PL_BAR2_REQVLD(x) ((x) << S_DEBUG_PL_BAR2_REQVLD)
+#define F_DEBUG_PL_BAR2_REQVLD    V_DEBUG_PL_BAR2_REQVLD(1U)
+
+#define S_DEBUG_PL_BAR2_REQFULL    30
+#define V_DEBUG_PL_BAR2_REQFULL(x) ((x) << S_DEBUG_PL_BAR2_REQFULL)
+#define F_DEBUG_PL_BAR2_REQFULL    V_DEBUG_PL_BAR2_REQFULL(1U)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_9 0x12a4
+
+#define S_DEBUG_CPLSW_TP_RX_SOP1_CNT    28
+#define M_DEBUG_CPLSW_TP_RX_SOP1_CNT    0xfU
+#define V_DEBUG_CPLSW_TP_RX_SOP1_CNT(x) ((x) << S_DEBUG_CPLSW_TP_RX_SOP1_CNT)
+#define G_DEBUG_CPLSW_TP_RX_SOP1_CNT(x) (((x) >> S_DEBUG_CPLSW_TP_RX_SOP1_CNT) & M_DEBUG_CPLSW_TP_RX_SOP1_CNT)
+
+#define S_DEBUG_CPLSW_TP_RX_EOP1_CNT    24
+#define M_DEBUG_CPLSW_TP_RX_EOP1_CNT    0xfU
+#define V_DEBUG_CPLSW_TP_RX_EOP1_CNT(x) ((x) << S_DEBUG_CPLSW_TP_RX_EOP1_CNT)
+#define G_DEBUG_CPLSW_TP_RX_EOP1_CNT(x) (((x) >> S_DEBUG_CPLSW_TP_RX_EOP1_CNT) & M_DEBUG_CPLSW_TP_RX_EOP1_CNT)
+
+#define S_DEBUG_CPLSW_TP_RX_SOP0_CNT    20
+#define M_DEBUG_CPLSW_TP_RX_SOP0_CNT    0xfU
+#define V_DEBUG_CPLSW_TP_RX_SOP0_CNT(x) ((x) << S_DEBUG_CPLSW_TP_RX_SOP0_CNT)
+#define G_DEBUG_CPLSW_TP_RX_SOP0_CNT(x) (((x) >> S_DEBUG_CPLSW_TP_RX_SOP0_CNT) & M_DEBUG_CPLSW_TP_RX_SOP0_CNT)
+
+#define S_DEBUG_CPLSW_TP_RX_EOP0_CNT    16
+#define M_DEBUG_CPLSW_TP_RX_EOP0_CNT    0xfU
+#define V_DEBUG_CPLSW_TP_RX_EOP0_CNT(x) ((x) << S_DEBUG_CPLSW_TP_RX_EOP0_CNT)
+#define G_DEBUG_CPLSW_TP_RX_EOP0_CNT(x) (((x) >> S_DEBUG_CPLSW_TP_RX_EOP0_CNT) & M_DEBUG_CPLSW_TP_RX_EOP0_CNT)
+
+#define S_DEBUG_CPLSW_CIM_SOP1_CNT    12
+#define M_DEBUG_CPLSW_CIM_SOP1_CNT    0xfU
+#define V_DEBUG_CPLSW_CIM_SOP1_CNT(x) ((x) << S_DEBUG_CPLSW_CIM_SOP1_CNT)
+#define G_DEBUG_CPLSW_CIM_SOP1_CNT(x) (((x) >> S_DEBUG_CPLSW_CIM_SOP1_CNT) & M_DEBUG_CPLSW_CIM_SOP1_CNT)
+
+#define S_DEBUG_CPLSW_CIM_EOP1_CNT    8
+#define M_DEBUG_CPLSW_CIM_EOP1_CNT    0xfU
+#define V_DEBUG_CPLSW_CIM_EOP1_CNT(x) ((x) << S_DEBUG_CPLSW_CIM_EOP1_CNT)
+#define G_DEBUG_CPLSW_CIM_EOP1_CNT(x) (((x) >> S_DEBUG_CPLSW_CIM_EOP1_CNT) & M_DEBUG_CPLSW_CIM_EOP1_CNT)
+
+#define S_DEBUG_CPLSW_CIM_SOP0_CNT    4
+#define M_DEBUG_CPLSW_CIM_SOP0_CNT    0xfU
+#define V_DEBUG_CPLSW_CIM_SOP0_CNT(x) ((x) << S_DEBUG_CPLSW_CIM_SOP0_CNT)
+#define G_DEBUG_CPLSW_CIM_SOP0_CNT(x) (((x) >> S_DEBUG_CPLSW_CIM_SOP0_CNT) & M_DEBUG_CPLSW_CIM_SOP0_CNT)
+
+#define S_DEBUG_CPLSW_CIM_EOP0_CNT    0
+#define M_DEBUG_CPLSW_CIM_EOP0_CNT    0xfU
+#define V_DEBUG_CPLSW_CIM_EOP0_CNT(x) ((x) << S_DEBUG_CPLSW_CIM_EOP0_CNT)
+#define G_DEBUG_CPLSW_CIM_EOP0_CNT(x) (((x) >> S_DEBUG_CPLSW_CIM_EOP0_CNT) & M_DEBUG_CPLSW_CIM_EOP0_CNT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
+
+#define S_DEBUG_T_RXAFULL_D    30
+#define M_DEBUG_T_RXAFULL_D    0x3U
+#define V_DEBUG_T_RXAFULL_D(x) ((x) << S_DEBUG_T_RXAFULL_D)
+#define G_DEBUG_T_RXAFULL_D(x) (((x) >> S_DEBUG_T_RXAFULL_D) & M_DEBUG_T_RXAFULL_D)
+
+#define S_DEBUG_PD_RDRSPAFULL_D    26
+#define M_DEBUG_PD_RDRSPAFULL_D    0xfU
+#define V_DEBUG_PD_RDRSPAFULL_D(x) ((x) << S_DEBUG_PD_RDRSPAFULL_D)
+#define G_DEBUG_PD_RDRSPAFULL_D(x) (((x) >> S_DEBUG_PD_RDRSPAFULL_D) & M_DEBUG_PD_RDRSPAFULL_D)
+
+#define S_DEBUG_PD_RDREQAFULL_D    22
+#define M_DEBUG_PD_RDREQAFULL_D    0xfU
+#define V_DEBUG_PD_RDREQAFULL_D(x) ((x) << S_DEBUG_PD_RDREQAFULL_D)
+#define G_DEBUG_PD_RDREQAFULL_D(x) (((x) >> S_DEBUG_PD_RDREQAFULL_D) & M_DEBUG_PD_RDREQAFULL_D)
+
+#define S_DEBUG_PD_WRREQAFULL_D    18
+#define M_DEBUG_PD_WRREQAFULL_D    0xfU
+#define V_DEBUG_PD_WRREQAFULL_D(x) ((x) << S_DEBUG_PD_WRREQAFULL_D)
+#define G_DEBUG_PD_WRREQAFULL_D(x) (((x) >> S_DEBUG_PD_WRREQAFULL_D) & M_DEBUG_PD_WRREQAFULL_D)
+
+#define S_DEBUG_PC_RSPAFULL_D    15
+#define M_DEBUG_PC_RSPAFULL_D    0x7U
+#define V_DEBUG_PC_RSPAFULL_D(x) ((x) << S_DEBUG_PC_RSPAFULL_D)
+#define G_DEBUG_PC_RSPAFULL_D(x) (((x) >> S_DEBUG_PC_RSPAFULL_D) & M_DEBUG_PC_RSPAFULL_D)
+
+#define S_DEBUG_PC_REQAFULL_D    12
+#define M_DEBUG_PC_REQAFULL_D    0x7U
+#define V_DEBUG_PC_REQAFULL_D(x) ((x) << S_DEBUG_PC_REQAFULL_D)
+#define G_DEBUG_PC_REQAFULL_D(x) (((x) >> S_DEBUG_PC_REQAFULL_D) & M_DEBUG_PC_REQAFULL_D)
+
+#define S_DEBUG_U_TXAFULL_D    8
+#define M_DEBUG_U_TXAFULL_D    0xfU
+#define V_DEBUG_U_TXAFULL_D(x) ((x) << S_DEBUG_U_TXAFULL_D)
+#define G_DEBUG_U_TXAFULL_D(x) (((x) >> S_DEBUG_U_TXAFULL_D) & M_DEBUG_U_TXAFULL_D)
+
+#define S_DEBUG_UD_RXAFULL_D    4
+#define M_DEBUG_UD_RXAFULL_D    0xfU
+#define V_DEBUG_UD_RXAFULL_D(x) ((x) << S_DEBUG_UD_RXAFULL_D)
+#define G_DEBUG_UD_RXAFULL_D(x) (((x) >> S_DEBUG_UD_RXAFULL_D) & M_DEBUG_UD_RXAFULL_D)
+
+#define S_DEBUG_U_RXAFULL_D    2
+#define M_DEBUG_U_RXAFULL_D    0x3U
+#define V_DEBUG_U_RXAFULL_D(x) ((x) << S_DEBUG_U_RXAFULL_D)
+#define G_DEBUG_U_RXAFULL_D(x) (((x) >> S_DEBUG_U_RXAFULL_D) & M_DEBUG_U_RXAFULL_D)
+
+#define S_DEBUG_CIM_AFULL_D    0
+#define M_DEBUG_CIM_AFULL_D    0x3U
+#define V_DEBUG_CIM_AFULL_D(x) ((x) << S_DEBUG_CIM_AFULL_D)
+#define G_DEBUG_CIM_AFULL_D(x) (((x) >> S_DEBUG_CIM_AFULL_D) & M_DEBUG_CIM_AFULL_D)
+
+#define S_DEBUG_IDMA1_S_CPL_FLIT_REMAINING    28
+#define M_DEBUG_IDMA1_S_CPL_FLIT_REMAINING    0xfU
+#define V_DEBUG_IDMA1_S_CPL_FLIT_REMAINING(x) ((x) << S_DEBUG_IDMA1_S_CPL_FLIT_REMAINING)
+#define G_DEBUG_IDMA1_S_CPL_FLIT_REMAINING(x) (((x) >> S_DEBUG_IDMA1_S_CPL_FLIT_REMAINING) & M_DEBUG_IDMA1_S_CPL_FLIT_REMAINING)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY    27
+#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY)
+#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY    V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY(1U)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS    26
+#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS)
+#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS    V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS(1U)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL    25
+#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL)
+#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL    V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL(1U)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_FULL    24
+#define V_DEBUG_IDMA1_IDMA2IMSG_FULL(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_FULL)
+#define F_DEBUG_IDMA1_IDMA2IMSG_FULL    V_DEBUG_IDMA1_IDMA2IMSG_FULL(1U)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_EOP    23
+#define V_DEBUG_IDMA1_IDMA2IMSG_EOP(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_EOP)
+#define F_DEBUG_IDMA1_IDMA2IMSG_EOP    V_DEBUG_IDMA1_IDMA2IMSG_EOP(1U)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY    22
+#define V_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY)
+#define F_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY    V_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY(1U)
+
+#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY    21
+#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY)
+#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY    V_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY(1U)
+
+#define S_DEBUG_IDMA0_S_CPL_FLIT_REMAINING    17
+#define M_DEBUG_IDMA0_S_CPL_FLIT_REMAINING    0xfU
+#define V_DEBUG_IDMA0_S_CPL_FLIT_REMAINING(x) ((x) << S_DEBUG_IDMA0_S_CPL_FLIT_REMAINING)
+#define G_DEBUG_IDMA0_S_CPL_FLIT_REMAINING(x) (((x) >> S_DEBUG_IDMA0_S_CPL_FLIT_REMAINING) & M_DEBUG_IDMA0_S_CPL_FLIT_REMAINING)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY    16
+#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY)
+#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY    V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY(1U)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS    15
+#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS)
+#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS    V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS(1U)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL    14
+#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL)
+#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL    V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL(1U)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_FULL    13
+#define V_DEBUG_IDMA0_IDMA2IMSG_FULL(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_FULL)
+#define F_DEBUG_IDMA0_IDMA2IMSG_FULL    V_DEBUG_IDMA0_IDMA2IMSG_FULL(1U)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_EOP    12
+#define V_DEBUG_IDMA0_IDMA2IMSG_EOP(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_EOP)
+#define F_DEBUG_IDMA0_IDMA2IMSG_EOP    V_DEBUG_IDMA0_IDMA2IMSG_EOP(1U)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY    11
+#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY)
+#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY    V_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY(1U)
+
+#define S_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY    10
+#define V_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY)
+#define F_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY    V_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY(1U)
+
+#define S_T6_DEBUG_T_RXAFULL_D    8
+#define M_T6_DEBUG_T_RXAFULL_D    0x3U
+#define V_T6_DEBUG_T_RXAFULL_D(x) ((x) << S_T6_DEBUG_T_RXAFULL_D)
+#define G_T6_DEBUG_T_RXAFULL_D(x) (((x) >> S_T6_DEBUG_T_RXAFULL_D) & M_T6_DEBUG_T_RXAFULL_D)
+
+#define S_T6_DEBUG_PD_WRREQAFULL_D    6
+#define M_T6_DEBUG_PD_WRREQAFULL_D    0x3U
+#define V_T6_DEBUG_PD_WRREQAFULL_D(x) ((x) << S_T6_DEBUG_PD_WRREQAFULL_D)
+#define G_T6_DEBUG_PD_WRREQAFULL_D(x) (((x) >> S_T6_DEBUG_PD_WRREQAFULL_D) & M_T6_DEBUG_PD_WRREQAFULL_D)
+
+#define S_T6_DEBUG_PC_RSPAFULL_D    5
+#define V_T6_DEBUG_PC_RSPAFULL_D(x) ((x) << S_T6_DEBUG_PC_RSPAFULL_D)
+#define F_T6_DEBUG_PC_RSPAFULL_D    V_T6_DEBUG_PC_RSPAFULL_D(1U)
+
+#define S_T6_DEBUG_PC_REQAFULL_D    4
+#define V_T6_DEBUG_PC_REQAFULL_D(x) ((x) << S_T6_DEBUG_PC_REQAFULL_D)
+#define F_T6_DEBUG_PC_REQAFULL_D    V_T6_DEBUG_PC_REQAFULL_D(1U)
+
+#define S_T6_DEBUG_CIM_AFULL_D    0
+#define V_T6_DEBUG_CIM_AFULL_D(x) ((x) << S_T6_DEBUG_CIM_AFULL_D)
+#define F_T6_DEBUG_CIM_AFULL_D    V_T6_DEBUG_CIM_AFULL_D(1U)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_11 0x12ac
+
+#define S_DEBUG_FLM_IDMA1_CACHE_DATA_ACTIVE    24
+#define V_DEBUG_FLM_IDMA1_CACHE_DATA_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA1_CACHE_DATA_ACTIVE)
+#define F_DEBUG_FLM_IDMA1_CACHE_DATA_ACTIVE    V_DEBUG_FLM_IDMA1_CACHE_DATA_ACTIVE(1U)
+
+#define S_DEBUG_FLM_IDMA1_CACHE_HDR_ACTIVE    23
+#define V_DEBUG_FLM_IDMA1_CACHE_HDR_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA1_CACHE_HDR_ACTIVE)
+#define F_DEBUG_FLM_IDMA1_CACHE_HDR_ACTIVE    V_DEBUG_FLM_IDMA1_CACHE_HDR_ACTIVE(1U)
+
+#define S_DEBUG_FLM_IDMA1_CTXT_DATA_ACTIVE    22
+#define V_DEBUG_FLM_IDMA1_CTXT_DATA_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA1_CTXT_DATA_ACTIVE)
+#define F_DEBUG_FLM_IDMA1_CTXT_DATA_ACTIVE    V_DEBUG_FLM_IDMA1_CTXT_DATA_ACTIVE(1U)
+
+#define S_DEBUG_FLM_IDMA1_CTXT_HDR_ACTIVE    21
+#define V_DEBUG_FLM_IDMA1_CTXT_HDR_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA1_CTXT_HDR_ACTIVE)
+#define F_DEBUG_FLM_IDMA1_CTXT_HDR_ACTIVE    V_DEBUG_FLM_IDMA1_CTXT_HDR_ACTIVE(1U)
+
+#define S_DEBUG_ST_FLM_IDMA1_CACHE    19
+#define M_DEBUG_ST_FLM_IDMA1_CACHE    0x3U
+#define V_DEBUG_ST_FLM_IDMA1_CACHE(x) ((x) << S_DEBUG_ST_FLM_IDMA1_CACHE)
+#define G_DEBUG_ST_FLM_IDMA1_CACHE(x) (((x) >> S_DEBUG_ST_FLM_IDMA1_CACHE) & M_DEBUG_ST_FLM_IDMA1_CACHE)
+
+#define S_DEBUG_ST_FLM_IDMA1_CTXT    16
+#define M_DEBUG_ST_FLM_IDMA1_CTXT    0x7U
+#define V_DEBUG_ST_FLM_IDMA1_CTXT(x) ((x) << S_DEBUG_ST_FLM_IDMA1_CTXT)
+#define G_DEBUG_ST_FLM_IDMA1_CTXT(x) (((x) >> S_DEBUG_ST_FLM_IDMA1_CTXT) & M_DEBUG_ST_FLM_IDMA1_CTXT)
+
+#define S_DEBUG_FLM_IDMA0_CACHE_DATA_ACTIVE    8
+#define V_DEBUG_FLM_IDMA0_CACHE_DATA_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA0_CACHE_DATA_ACTIVE)
+#define F_DEBUG_FLM_IDMA0_CACHE_DATA_ACTIVE    V_DEBUG_FLM_IDMA0_CACHE_DATA_ACTIVE(1U)
+
+#define S_DEBUG_FLM_IDMA0_CACHE_HDR_ACTIVE    7
+#define V_DEBUG_FLM_IDMA0_CACHE_HDR_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA0_CACHE_HDR_ACTIVE)
+#define F_DEBUG_FLM_IDMA0_CACHE_HDR_ACTIVE    V_DEBUG_FLM_IDMA0_CACHE_HDR_ACTIVE(1U)
+
+#define S_DEBUG_FLM_IDMA0_CTXT_DATA_ACTIVE    6
+#define V_DEBUG_FLM_IDMA0_CTXT_DATA_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA0_CTXT_DATA_ACTIVE)
+#define F_DEBUG_FLM_IDMA0_CTXT_DATA_ACTIVE    V_DEBUG_FLM_IDMA0_CTXT_DATA_ACTIVE(1U)
+
+#define S_DEBUG_FLM_IDMA0_CTXT_HDR_ACTIVE    5
+#define V_DEBUG_FLM_IDMA0_CTXT_HDR_ACTIVE(x) ((x) << S_DEBUG_FLM_IDMA0_CTXT_HDR_ACTIVE)
+#define F_DEBUG_FLM_IDMA0_CTXT_HDR_ACTIVE    V_DEBUG_FLM_IDMA0_CTXT_HDR_ACTIVE(1U)
+
+#define S_DEBUG_ST_FLM_IDMA0_CACHE    3
+#define M_DEBUG_ST_FLM_IDMA0_CACHE    0x3U
+#define V_DEBUG_ST_FLM_IDMA0_CACHE(x) ((x) << S_DEBUG_ST_FLM_IDMA0_CACHE)
+#define G_DEBUG_ST_FLM_IDMA0_CACHE(x) (((x) >> S_DEBUG_ST_FLM_IDMA0_CACHE) & M_DEBUG_ST_FLM_IDMA0_CACHE)
+
+#define S_DEBUG_ST_FLM_IDMA0_CTXT    0
+#define M_DEBUG_ST_FLM_IDMA0_CTXT    0x7U
+#define V_DEBUG_ST_FLM_IDMA0_CTXT(x) ((x) << S_DEBUG_ST_FLM_IDMA0_CTXT)
+#define G_DEBUG_ST_FLM_IDMA0_CTXT(x) (((x) >> S_DEBUG_ST_FLM_IDMA0_CTXT) & M_DEBUG_ST_FLM_IDMA0_CTXT)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_12 0x12b0
+
+#define S_DEBUG_CPLSW_SOP1_CNT    28
+#define M_DEBUG_CPLSW_SOP1_CNT    0xfU
+#define V_DEBUG_CPLSW_SOP1_CNT(x) ((x) << S_DEBUG_CPLSW_SOP1_CNT)
+#define G_DEBUG_CPLSW_SOP1_CNT(x) (((x) >> S_DEBUG_CPLSW_SOP1_CNT) & M_DEBUG_CPLSW_SOP1_CNT)
+
+#define S_DEBUG_CPLSW_EOP1_CNT    24
+#define M_DEBUG_CPLSW_EOP1_CNT    0xfU
+#define V_DEBUG_CPLSW_EOP1_CNT(x) ((x) << S_DEBUG_CPLSW_EOP1_CNT)
+#define G_DEBUG_CPLSW_EOP1_CNT(x) (((x) >> S_DEBUG_CPLSW_EOP1_CNT) & M_DEBUG_CPLSW_EOP1_CNT)
+
+#define S_DEBUG_CPLSW_SOP0_CNT    20
+#define M_DEBUG_CPLSW_SOP0_CNT    0xfU
+#define V_DEBUG_CPLSW_SOP0_CNT(x) ((x) << S_DEBUG_CPLSW_SOP0_CNT)
+#define G_DEBUG_CPLSW_SOP0_CNT(x) (((x) >> S_DEBUG_CPLSW_SOP0_CNT) & M_DEBUG_CPLSW_SOP0_CNT)
+
+#define S_DEBUG_CPLSW_EOP0_CNT    16
+#define M_DEBUG_CPLSW_EOP0_CNT    0xfU
+#define V_DEBUG_CPLSW_EOP0_CNT(x) ((x) << S_DEBUG_CPLSW_EOP0_CNT)
+#define G_DEBUG_CPLSW_EOP0_CNT(x) (((x) >> S_DEBUG_CPLSW_EOP0_CNT) & M_DEBUG_CPLSW_EOP0_CNT)
+
+#define S_DEBUG_PC_RSP_SOP2_CNT    12
+#define M_DEBUG_PC_RSP_SOP2_CNT    0xfU
+#define V_DEBUG_PC_RSP_SOP2_CNT(x) ((x) << S_DEBUG_PC_RSP_SOP2_CNT)
+#define G_DEBUG_PC_RSP_SOP2_CNT(x) (((x) >> S_DEBUG_PC_RSP_SOP2_CNT) & M_DEBUG_PC_RSP_SOP2_CNT)
+
+#define S_DEBUG_PC_RSP_EOP2_CNT    8
+#define M_DEBUG_PC_RSP_EOP2_CNT    0xfU
+#define V_DEBUG_PC_RSP_EOP2_CNT(x) ((x) << S_DEBUG_PC_RSP_EOP2_CNT)
+#define G_DEBUG_PC_RSP_EOP2_CNT(x) (((x) >> S_DEBUG_PC_RSP_EOP2_CNT) & M_DEBUG_PC_RSP_EOP2_CNT)
+
+#define S_DEBUG_PC_REQ_SOP2_CNT    4
+#define M_DEBUG_PC_REQ_SOP2_CNT    0xfU
+#define V_DEBUG_PC_REQ_SOP2_CNT(x) ((x) << S_DEBUG_PC_REQ_SOP2_CNT)
+#define G_DEBUG_PC_REQ_SOP2_CNT(x) (((x) >> S_DEBUG_PC_REQ_SOP2_CNT) & M_DEBUG_PC_REQ_SOP2_CNT)
+
+#define S_DEBUG_PC_REQ_EOP2_CNT    0
+#define M_DEBUG_PC_REQ_EOP2_CNT    0xfU
+#define V_DEBUG_PC_REQ_EOP2_CNT(x) ((x) << S_DEBUG_PC_REQ_EOP2_CNT)
+#define G_DEBUG_PC_REQ_EOP2_CNT(x) (((x) >> S_DEBUG_PC_REQ_EOP2_CNT) & M_DEBUG_PC_REQ_EOP2_CNT)
+
+#define S_DEBUG_IDMA1_ISHIFT_TX_SIZE    8
+#define M_DEBUG_IDMA1_ISHIFT_TX_SIZE    0x7fU
+#define V_DEBUG_IDMA1_ISHIFT_TX_SIZE(x) ((x) << S_DEBUG_IDMA1_ISHIFT_TX_SIZE)
+#define G_DEBUG_IDMA1_ISHIFT_TX_SIZE(x) (((x) >> S_DEBUG_IDMA1_ISHIFT_TX_SIZE) & M_DEBUG_IDMA1_ISHIFT_TX_SIZE)
+
+#define S_DEBUG_IDMA0_ISHIFT_TX_SIZE    0
+#define M_DEBUG_IDMA0_ISHIFT_TX_SIZE    0x7fU
+#define V_DEBUG_IDMA0_ISHIFT_TX_SIZE(x) ((x) << S_DEBUG_IDMA0_ISHIFT_TX_SIZE)
+#define G_DEBUG_IDMA0_ISHIFT_TX_SIZE(x) (((x) >> S_DEBUG_IDMA0_ISHIFT_TX_SIZE) & M_DEBUG_IDMA0_ISHIFT_TX_SIZE)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_13 0x12b4
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_14 0x12b8
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_15 0x12bc
+#define A_SGE_DEBUG_DATA_LOW_INDEX_0 0x12c0
+
+#define S_DEBUG_ST_IDMA1_FLM_REQ    29
+#define M_DEBUG_ST_IDMA1_FLM_REQ    0x7U
+#define V_DEBUG_ST_IDMA1_FLM_REQ(x) ((x) << S_DEBUG_ST_IDMA1_FLM_REQ)
+#define G_DEBUG_ST_IDMA1_FLM_REQ(x) (((x) >> S_DEBUG_ST_IDMA1_FLM_REQ) & M_DEBUG_ST_IDMA1_FLM_REQ)
+
+#define S_DEBUG_ST_IDMA0_FLM_REQ    26
+#define M_DEBUG_ST_IDMA0_FLM_REQ    0x7U
+#define V_DEBUG_ST_IDMA0_FLM_REQ(x) ((x) << S_DEBUG_ST_IDMA0_FLM_REQ)
+#define G_DEBUG_ST_IDMA0_FLM_REQ(x) (((x) >> S_DEBUG_ST_IDMA0_FLM_REQ) & M_DEBUG_ST_IDMA0_FLM_REQ)
+
+#define S_DEBUG_ST_IMSG_CTXT    23
+#define M_DEBUG_ST_IMSG_CTXT    0x7U
+#define V_DEBUG_ST_IMSG_CTXT(x) ((x) << S_DEBUG_ST_IMSG_CTXT)
+#define G_DEBUG_ST_IMSG_CTXT(x) (((x) >> S_DEBUG_ST_IMSG_CTXT) & M_DEBUG_ST_IMSG_CTXT)
+
+#define S_DEBUG_ST_IMSG    18
+#define M_DEBUG_ST_IMSG    0x1fU
+#define V_DEBUG_ST_IMSG(x) ((x) << S_DEBUG_ST_IMSG)
+#define G_DEBUG_ST_IMSG(x) (((x) >> S_DEBUG_ST_IMSG) & M_DEBUG_ST_IMSG)
+
+#define S_DEBUG_ST_IDMA1_IALN    16
+#define M_DEBUG_ST_IDMA1_IALN    0x3U
+#define V_DEBUG_ST_IDMA1_IALN(x) ((x) << S_DEBUG_ST_IDMA1_IALN)
+#define G_DEBUG_ST_IDMA1_IALN(x) (((x) >> S_DEBUG_ST_IDMA1_IALN) & M_DEBUG_ST_IDMA1_IALN)
+
+#define S_DEBUG_ST_IDMA1_IDMA_SM    9
+#define M_DEBUG_ST_IDMA1_IDMA_SM    0x3fU
+#define V_DEBUG_ST_IDMA1_IDMA_SM(x) ((x) << S_DEBUG_ST_IDMA1_IDMA_SM)
+#define G_DEBUG_ST_IDMA1_IDMA_SM(x) (((x) >> S_DEBUG_ST_IDMA1_IDMA_SM) & M_DEBUG_ST_IDMA1_IDMA_SM)
+
+#define S_DEBUG_ST_IDMA0_IALN    7
+#define M_DEBUG_ST_IDMA0_IALN    0x3U
+#define V_DEBUG_ST_IDMA0_IALN(x) ((x) << S_DEBUG_ST_IDMA0_IALN)
+#define G_DEBUG_ST_IDMA0_IALN(x) (((x) >> S_DEBUG_ST_IDMA0_IALN) & M_DEBUG_ST_IDMA0_IALN)
+
+#define S_DEBUG_ST_IDMA0_IDMA_SM    0
+#define M_DEBUG_ST_IDMA0_IDMA_SM    0x3fU
+#define V_DEBUG_ST_IDMA0_IDMA_SM(x) ((x) << S_DEBUG_ST_IDMA0_IDMA_SM)
+#define G_DEBUG_ST_IDMA0_IDMA_SM(x) (((x) >> S_DEBUG_ST_IDMA0_IDMA_SM) & M_DEBUG_ST_IDMA0_IDMA_SM)
+
+#define S_DEBUG_ST_IDMA1_IDMA2IMSG    15
+#define V_DEBUG_ST_IDMA1_IDMA2IMSG(x) ((x) << S_DEBUG_ST_IDMA1_IDMA2IMSG)
+#define F_DEBUG_ST_IDMA1_IDMA2IMSG    V_DEBUG_ST_IDMA1_IDMA2IMSG(1U)
+
+#define S_DEBUG_ST_IDMA0_IDMA2IMSG    6
+#define V_DEBUG_ST_IDMA0_IDMA2IMSG(x) ((x) << S_DEBUG_ST_IDMA0_IDMA2IMSG)
+#define F_DEBUG_ST_IDMA0_IDMA2IMSG    V_DEBUG_ST_IDMA0_IDMA2IMSG(1U)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_1 0x12c4
+
+#define S_DEBUG_ITP_EMPTY    12
+#define M_DEBUG_ITP_EMPTY    0x3fU
+#define V_DEBUG_ITP_EMPTY(x) ((x) << S_DEBUG_ITP_EMPTY)
+#define G_DEBUG_ITP_EMPTY(x) (((x) >> S_DEBUG_ITP_EMPTY) & M_DEBUG_ITP_EMPTY)
+
+#define S_DEBUG_ITP_EXPIRED    6
+#define M_DEBUG_ITP_EXPIRED    0x3fU
+#define V_DEBUG_ITP_EXPIRED(x) ((x) << S_DEBUG_ITP_EXPIRED)
+#define G_DEBUG_ITP_EXPIRED(x) (((x) >> S_DEBUG_ITP_EXPIRED) & M_DEBUG_ITP_EXPIRED)
+
+#define S_DEBUG_ITP_PAUSE    5
+#define V_DEBUG_ITP_PAUSE(x) ((x) << S_DEBUG_ITP_PAUSE)
+#define F_DEBUG_ITP_PAUSE    V_DEBUG_ITP_PAUSE(1U)
+
+#define S_DEBUG_ITP_DEL_DONE    4
+#define V_DEBUG_ITP_DEL_DONE(x) ((x) << S_DEBUG_ITP_DEL_DONE)
+#define F_DEBUG_ITP_DEL_DONE    V_DEBUG_ITP_DEL_DONE(1U)
+
+#define S_DEBUG_ITP_ADD_DONE    3
+#define V_DEBUG_ITP_ADD_DONE(x) ((x) << S_DEBUG_ITP_ADD_DONE)
+#define F_DEBUG_ITP_ADD_DONE    V_DEBUG_ITP_ADD_DONE(1U)
+
+#define S_DEBUG_ITP_EVR_STATE    0
+#define M_DEBUG_ITP_EVR_STATE    0x7U
+#define V_DEBUG_ITP_EVR_STATE(x) ((x) << S_DEBUG_ITP_EVR_STATE)
+#define G_DEBUG_ITP_EVR_STATE(x) (((x) >> S_DEBUG_ITP_EVR_STATE) & M_DEBUG_ITP_EVR_STATE)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+
+#define S_DEBUG_ST_DBP_THREAD2_CIMFL    25
+#define M_DEBUG_ST_DBP_THREAD2_CIMFL    0x1fU
+#define V_DEBUG_ST_DBP_THREAD2_CIMFL(x) ((x) << S_DEBUG_ST_DBP_THREAD2_CIMFL)
+#define G_DEBUG_ST_DBP_THREAD2_CIMFL(x) (((x) >> S_DEBUG_ST_DBP_THREAD2_CIMFL) & M_DEBUG_ST_DBP_THREAD2_CIMFL)
+
+#define S_DEBUG_ST_DBP_THREAD2_MAIN    20
+#define M_DEBUG_ST_DBP_THREAD2_MAIN    0x1fU
+#define V_DEBUG_ST_DBP_THREAD2_MAIN(x) ((x) << S_DEBUG_ST_DBP_THREAD2_MAIN)
+#define G_DEBUG_ST_DBP_THREAD2_MAIN(x) (((x) >> S_DEBUG_ST_DBP_THREAD2_MAIN) & M_DEBUG_ST_DBP_THREAD2_MAIN)
+
+#define S_DEBUG_ST_DBP_THREAD1_CIMFL    15
+#define M_DEBUG_ST_DBP_THREAD1_CIMFL    0x1fU
+#define V_DEBUG_ST_DBP_THREAD1_CIMFL(x) ((x) << S_DEBUG_ST_DBP_THREAD1_CIMFL)
+#define G_DEBUG_ST_DBP_THREAD1_CIMFL(x) (((x) >> S_DEBUG_ST_DBP_THREAD1_CIMFL) & M_DEBUG_ST_DBP_THREAD1_CIMFL)
+
+#define S_DEBUG_ST_DBP_THREAD1_MAIN    10
+#define M_DEBUG_ST_DBP_THREAD1_MAIN    0x1fU
+#define V_DEBUG_ST_DBP_THREAD1_MAIN(x) ((x) << S_DEBUG_ST_DBP_THREAD1_MAIN)
+#define G_DEBUG_ST_DBP_THREAD1_MAIN(x) (((x) >> S_DEBUG_ST_DBP_THREAD1_MAIN) & M_DEBUG_ST_DBP_THREAD1_MAIN)
+
+#define S_DEBUG_ST_DBP_THREAD0_CIMFL    5
+#define M_DEBUG_ST_DBP_THREAD0_CIMFL    0x1fU
+#define V_DEBUG_ST_DBP_THREAD0_CIMFL(x) ((x) << S_DEBUG_ST_DBP_THREAD0_CIMFL)
+#define G_DEBUG_ST_DBP_THREAD0_CIMFL(x) (((x) >> S_DEBUG_ST_DBP_THREAD0_CIMFL) & M_DEBUG_ST_DBP_THREAD0_CIMFL)
+
+#define S_DEBUG_ST_DBP_THREAD0_MAIN    0
+#define M_DEBUG_ST_DBP_THREAD0_MAIN    0x1fU
+#define V_DEBUG_ST_DBP_THREAD0_MAIN(x) ((x) << S_DEBUG_ST_DBP_THREAD0_MAIN)
+#define G_DEBUG_ST_DBP_THREAD0_MAIN(x) (((x) >> S_DEBUG_ST_DBP_THREAD0_MAIN) & M_DEBUG_ST_DBP_THREAD0_MAIN)
+
+#define S_T6_DEBUG_ST_DBP_UPCP_MAIN    14
+#define M_T6_DEBUG_ST_DBP_UPCP_MAIN    0x7U
+#define V_T6_DEBUG_ST_DBP_UPCP_MAIN(x) ((x) << S_T6_DEBUG_ST_DBP_UPCP_MAIN)
+#define G_T6_DEBUG_ST_DBP_UPCP_MAIN(x) (((x) >> S_T6_DEBUG_ST_DBP_UPCP_MAIN) & M_T6_DEBUG_ST_DBP_UPCP_MAIN)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+
+#define S_DEBUG_ST_DBP_UPCP_MAIN    14
+#define M_DEBUG_ST_DBP_UPCP_MAIN    0x1fU
+#define V_DEBUG_ST_DBP_UPCP_MAIN(x) ((x) << S_DEBUG_ST_DBP_UPCP_MAIN)
+#define G_DEBUG_ST_DBP_UPCP_MAIN(x) (((x) >> S_DEBUG_ST_DBP_UPCP_MAIN) & M_DEBUG_ST_DBP_UPCP_MAIN)
+
+#define S_DEBUG_ST_DBP_DBFIFO_MAIN    13
+#define V_DEBUG_ST_DBP_DBFIFO_MAIN(x) ((x) << S_DEBUG_ST_DBP_DBFIFO_MAIN)
+#define F_DEBUG_ST_DBP_DBFIFO_MAIN    V_DEBUG_ST_DBP_DBFIFO_MAIN(1U)
+
+#define S_DEBUG_ST_DBP_CTXT    10
+#define M_DEBUG_ST_DBP_CTXT    0x7U
+#define V_DEBUG_ST_DBP_CTXT(x) ((x) << S_DEBUG_ST_DBP_CTXT)
+#define G_DEBUG_ST_DBP_CTXT(x) (((x) >> S_DEBUG_ST_DBP_CTXT) & M_DEBUG_ST_DBP_CTXT)
+
+#define S_DEBUG_ST_DBP_THREAD3_CIMFL    5
+#define M_DEBUG_ST_DBP_THREAD3_CIMFL    0x1fU
+#define V_DEBUG_ST_DBP_THREAD3_CIMFL(x) ((x) << S_DEBUG_ST_DBP_THREAD3_CIMFL)
+#define G_DEBUG_ST_DBP_THREAD3_CIMFL(x) (((x) >> S_DEBUG_ST_DBP_THREAD3_CIMFL) & M_DEBUG_ST_DBP_THREAD3_CIMFL)
+
+#define S_DEBUG_ST_DBP_THREAD3_MAIN    0
+#define M_DEBUG_ST_DBP_THREAD3_MAIN    0x1fU
+#define V_DEBUG_ST_DBP_THREAD3_MAIN(x) ((x) << S_DEBUG_ST_DBP_THREAD3_MAIN)
+#define G_DEBUG_ST_DBP_THREAD3_MAIN(x) (((x) >> S_DEBUG_ST_DBP_THREAD3_MAIN) & M_DEBUG_ST_DBP_THREAD3_MAIN)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_4 0x12d0
+
+#define S_DEBUG_ST_EDMA3_ALIGN_SUB    29
+#define M_DEBUG_ST_EDMA3_ALIGN_SUB    0x7U
+#define V_DEBUG_ST_EDMA3_ALIGN_SUB(x) ((x) << S_DEBUG_ST_EDMA3_ALIGN_SUB)
+#define G_DEBUG_ST_EDMA3_ALIGN_SUB(x) (((x) >> S_DEBUG_ST_EDMA3_ALIGN_SUB) & M_DEBUG_ST_EDMA3_ALIGN_SUB)
+
+#define S_DEBUG_ST_EDMA3_ALIGN    27
+#define M_DEBUG_ST_EDMA3_ALIGN    0x3U
+#define V_DEBUG_ST_EDMA3_ALIGN(x) ((x) << S_DEBUG_ST_EDMA3_ALIGN)
+#define G_DEBUG_ST_EDMA3_ALIGN(x) (((x) >> S_DEBUG_ST_EDMA3_ALIGN) & M_DEBUG_ST_EDMA3_ALIGN)
+
+#define S_DEBUG_ST_EDMA3_REQ    24
+#define M_DEBUG_ST_EDMA3_REQ    0x7U
+#define V_DEBUG_ST_EDMA3_REQ(x) ((x) << S_DEBUG_ST_EDMA3_REQ)
+#define G_DEBUG_ST_EDMA3_REQ(x) (((x) >> S_DEBUG_ST_EDMA3_REQ) & M_DEBUG_ST_EDMA3_REQ)
+
+#define S_DEBUG_ST_EDMA2_ALIGN_SUB    21
+#define M_DEBUG_ST_EDMA2_ALIGN_SUB    0x7U
+#define V_DEBUG_ST_EDMA2_ALIGN_SUB(x) ((x) << S_DEBUG_ST_EDMA2_ALIGN_SUB)
+#define G_DEBUG_ST_EDMA2_ALIGN_SUB(x) (((x) >> S_DEBUG_ST_EDMA2_ALIGN_SUB) & M_DEBUG_ST_EDMA2_ALIGN_SUB)
+
+#define S_DEBUG_ST_EDMA2_ALIGN    19
+#define M_DEBUG_ST_EDMA2_ALIGN    0x3U
+#define V_DEBUG_ST_EDMA2_ALIGN(x) ((x) << S_DEBUG_ST_EDMA2_ALIGN)
+#define G_DEBUG_ST_EDMA2_ALIGN(x) (((x) >> S_DEBUG_ST_EDMA2_ALIGN) & M_DEBUG_ST_EDMA2_ALIGN)
+
+#define S_DEBUG_ST_EDMA2_REQ    16
+#define M_DEBUG_ST_EDMA2_REQ    0x7U
+#define V_DEBUG_ST_EDMA2_REQ(x) ((x) << S_DEBUG_ST_EDMA2_REQ)
+#define G_DEBUG_ST_EDMA2_REQ(x) (((x) >> S_DEBUG_ST_EDMA2_REQ) & M_DEBUG_ST_EDMA2_REQ)
+
+#define S_DEBUG_ST_EDMA1_ALIGN_SUB    13
+#define M_DEBUG_ST_EDMA1_ALIGN_SUB    0x7U
+#define V_DEBUG_ST_EDMA1_ALIGN_SUB(x) ((x) << S_DEBUG_ST_EDMA1_ALIGN_SUB)
+#define G_DEBUG_ST_EDMA1_ALIGN_SUB(x) (((x) >> S_DEBUG_ST_EDMA1_ALIGN_SUB) & M_DEBUG_ST_EDMA1_ALIGN_SUB)
+
+#define S_DEBUG_ST_EDMA1_ALIGN    11
+#define M_DEBUG_ST_EDMA1_ALIGN    0x3U
+#define V_DEBUG_ST_EDMA1_ALIGN(x) ((x) << S_DEBUG_ST_EDMA1_ALIGN)
+#define G_DEBUG_ST_EDMA1_ALIGN(x) (((x) >> S_DEBUG_ST_EDMA1_ALIGN) & M_DEBUG_ST_EDMA1_ALIGN)
+
+#define S_DEBUG_ST_EDMA1_REQ    8
+#define M_DEBUG_ST_EDMA1_REQ    0x7U
+#define V_DEBUG_ST_EDMA1_REQ(x) ((x) << S_DEBUG_ST_EDMA1_REQ)
+#define G_DEBUG_ST_EDMA1_REQ(x) (((x) >> S_DEBUG_ST_EDMA1_REQ) & M_DEBUG_ST_EDMA1_REQ)
+
+#define S_DEBUG_ST_EDMA0_ALIGN_SUB    5
+#define M_DEBUG_ST_EDMA0_ALIGN_SUB    0x7U
+#define V_DEBUG_ST_EDMA0_ALIGN_SUB(x) ((x) << S_DEBUG_ST_EDMA0_ALIGN_SUB)
+#define G_DEBUG_ST_EDMA0_ALIGN_SUB(x) (((x) >> S_DEBUG_ST_EDMA0_ALIGN_SUB) & M_DEBUG_ST_EDMA0_ALIGN_SUB)
+
+#define S_DEBUG_ST_EDMA0_ALIGN    3
+#define M_DEBUG_ST_EDMA0_ALIGN    0x3U
+#define V_DEBUG_ST_EDMA0_ALIGN(x) ((x) << S_DEBUG_ST_EDMA0_ALIGN)
+#define G_DEBUG_ST_EDMA0_ALIGN(x) (((x) >> S_DEBUG_ST_EDMA0_ALIGN) & M_DEBUG_ST_EDMA0_ALIGN)
+
+#define S_DEBUG_ST_EDMA0_REQ    0
+#define M_DEBUG_ST_EDMA0_REQ    0x7U
+#define V_DEBUG_ST_EDMA0_REQ(x) ((x) << S_DEBUG_ST_EDMA0_REQ)
+#define G_DEBUG_ST_EDMA0_REQ(x) (((x) >> S_DEBUG_ST_EDMA0_REQ) & M_DEBUG_ST_EDMA0_REQ)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_5 0x12d4
+
+#define S_DEBUG_ST_FLM_DBPTR    30
+#define M_DEBUG_ST_FLM_DBPTR    0x3U
+#define V_DEBUG_ST_FLM_DBPTR(x) ((x) << S_DEBUG_ST_FLM_DBPTR)
+#define G_DEBUG_ST_FLM_DBPTR(x) (((x) >> S_DEBUG_ST_FLM_DBPTR) & M_DEBUG_ST_FLM_DBPTR)
+
+#define S_DEBUG_FLM_CACHE_LOCKED_COUNT    23
+#define M_DEBUG_FLM_CACHE_LOCKED_COUNT    0x7fU
+#define V_DEBUG_FLM_CACHE_LOCKED_COUNT(x) ((x) << S_DEBUG_FLM_CACHE_LOCKED_COUNT)
+#define G_DEBUG_FLM_CACHE_LOCKED_COUNT(x) (((x) >> S_DEBUG_FLM_CACHE_LOCKED_COUNT) & M_DEBUG_FLM_CACHE_LOCKED_COUNT)
+
+#define S_DEBUG_FLM_CACHE_AGENT    20
+#define M_DEBUG_FLM_CACHE_AGENT    0x7U
+#define V_DEBUG_FLM_CACHE_AGENT(x) ((x) << S_DEBUG_FLM_CACHE_AGENT)
+#define G_DEBUG_FLM_CACHE_AGENT(x) (((x) >> S_DEBUG_FLM_CACHE_AGENT) & M_DEBUG_FLM_CACHE_AGENT)
+
+#define S_DEBUG_ST_FLM_CACHE    16
+#define M_DEBUG_ST_FLM_CACHE    0xfU
+#define V_DEBUG_ST_FLM_CACHE(x) ((x) << S_DEBUG_ST_FLM_CACHE)
+#define G_DEBUG_ST_FLM_CACHE(x) (((x) >> S_DEBUG_ST_FLM_CACHE) & M_DEBUG_ST_FLM_CACHE)
+
+#define S_DEBUG_FLM_DBPTR_CIDX_STALL    12
+#define V_DEBUG_FLM_DBPTR_CIDX_STALL(x) ((x) << S_DEBUG_FLM_DBPTR_CIDX_STALL)
+#define F_DEBUG_FLM_DBPTR_CIDX_STALL    V_DEBUG_FLM_DBPTR_CIDX_STALL(1U)
+
+#define S_DEBUG_FLM_DBPTR_QID    0
+#define M_DEBUG_FLM_DBPTR_QID    0xfffU
+#define V_DEBUG_FLM_DBPTR_QID(x) ((x) << S_DEBUG_FLM_DBPTR_QID)
+#define G_DEBUG_FLM_DBPTR_QID(x) (((x) >> S_DEBUG_FLM_DBPTR_QID) & M_DEBUG_FLM_DBPTR_QID)
+
+#define A_SGE_DEBUG0_DBP_THREAD 0x12d4
+
+#define S_THREAD_ST_MAIN    25
+#define M_THREAD_ST_MAIN    0x3fU
+#define V_THREAD_ST_MAIN(x) ((x) << S_THREAD_ST_MAIN)
+#define G_THREAD_ST_MAIN(x) (((x) >> S_THREAD_ST_MAIN) & M_THREAD_ST_MAIN)
+
+#define S_THREAD_ST_CIMFL    21
+#define M_THREAD_ST_CIMFL    0xfU
+#define V_THREAD_ST_CIMFL(x) ((x) << S_THREAD_ST_CIMFL)
+#define G_THREAD_ST_CIMFL(x) (((x) >> S_THREAD_ST_CIMFL) & M_THREAD_ST_CIMFL)
+
+#define S_THREAD_CMDOP    17
+#define M_THREAD_CMDOP    0xfU
+#define V_THREAD_CMDOP(x) ((x) << S_THREAD_CMDOP)
+#define G_THREAD_CMDOP(x) (((x) >> S_THREAD_CMDOP) & M_THREAD_CMDOP)
+
+#define S_THREAD_QID    0
+#define M_THREAD_QID    0x1ffffU
+#define V_THREAD_QID(x) ((x) << S_THREAD_QID)
+#define G_THREAD_QID(x) (((x) >> S_THREAD_QID) & M_THREAD_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_6 0x12d8
+
+#define S_DEBUG_DBP_THREAD0_QID    0
+#define M_DEBUG_DBP_THREAD0_QID    0x1ffffU
+#define V_DEBUG_DBP_THREAD0_QID(x) ((x) << S_DEBUG_DBP_THREAD0_QID)
+#define G_DEBUG_DBP_THREAD0_QID(x) (((x) >> S_DEBUG_DBP_THREAD0_QID) & M_DEBUG_DBP_THREAD0_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_7 0x12dc
+
+#define S_DEBUG_DBP_THREAD1_QID    0
+#define M_DEBUG_DBP_THREAD1_QID    0x1ffffU
+#define V_DEBUG_DBP_THREAD1_QID(x) ((x) << S_DEBUG_DBP_THREAD1_QID)
+#define G_DEBUG_DBP_THREAD1_QID(x) (((x) >> S_DEBUG_DBP_THREAD1_QID) & M_DEBUG_DBP_THREAD1_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_8 0x12e0
+
+#define S_DEBUG_DBP_THREAD2_QID    0
+#define M_DEBUG_DBP_THREAD2_QID    0x1ffffU
+#define V_DEBUG_DBP_THREAD2_QID(x) ((x) << S_DEBUG_DBP_THREAD2_QID)
+#define G_DEBUG_DBP_THREAD2_QID(x) (((x) >> S_DEBUG_DBP_THREAD2_QID) & M_DEBUG_DBP_THREAD2_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_9 0x12e4
+
+#define S_DEBUG_DBP_THREAD3_QID    0
+#define M_DEBUG_DBP_THREAD3_QID    0x1ffffU
+#define V_DEBUG_DBP_THREAD3_QID(x) ((x) << S_DEBUG_DBP_THREAD3_QID)
+#define G_DEBUG_DBP_THREAD3_QID(x) (((x) >> S_DEBUG_DBP_THREAD3_QID) & M_DEBUG_DBP_THREAD3_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_10 0x12e8
+
+#define S_DEBUG_IMSG_CPL    16
+#define M_DEBUG_IMSG_CPL    0xffU
+#define V_DEBUG_IMSG_CPL(x) ((x) << S_DEBUG_IMSG_CPL)
+#define G_DEBUG_IMSG_CPL(x) (((x) >> S_DEBUG_IMSG_CPL) & M_DEBUG_IMSG_CPL)
+
+#define S_DEBUG_IMSG_QID    0
+#define M_DEBUG_IMSG_QID    0xffffU
+#define V_DEBUG_IMSG_QID(x) ((x) << S_DEBUG_IMSG_QID)
+#define G_DEBUG_IMSG_QID(x) (((x) >> S_DEBUG_IMSG_QID) & M_DEBUG_IMSG_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_11 0x12ec
+
+#define S_DEBUG_IDMA1_QID    16
+#define M_DEBUG_IDMA1_QID    0xffffU
+#define V_DEBUG_IDMA1_QID(x) ((x) << S_DEBUG_IDMA1_QID)
+#define G_DEBUG_IDMA1_QID(x) (((x) >> S_DEBUG_IDMA1_QID) & M_DEBUG_IDMA1_QID)
+
+#define S_DEBUG_IDMA0_QID    0
+#define M_DEBUG_IDMA0_QID    0xffffU
+#define V_DEBUG_IDMA0_QID(x) ((x) << S_DEBUG_IDMA0_QID)
+#define G_DEBUG_IDMA0_QID(x) (((x) >> S_DEBUG_IDMA0_QID) & M_DEBUG_IDMA0_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_12 0x12f0
+
+#define S_DEBUG_IDMA1_FLM_REQ_QID    16
+#define M_DEBUG_IDMA1_FLM_REQ_QID    0xffffU
+#define V_DEBUG_IDMA1_FLM_REQ_QID(x) ((x) << S_DEBUG_IDMA1_FLM_REQ_QID)
+#define G_DEBUG_IDMA1_FLM_REQ_QID(x) (((x) >> S_DEBUG_IDMA1_FLM_REQ_QID) & M_DEBUG_IDMA1_FLM_REQ_QID)
+
+#define S_DEBUG_IDMA0_FLM_REQ_QID    0
+#define M_DEBUG_IDMA0_FLM_REQ_QID    0xffffU
+#define V_DEBUG_IDMA0_FLM_REQ_QID(x) ((x) << S_DEBUG_IDMA0_FLM_REQ_QID)
+#define G_DEBUG_IDMA0_FLM_REQ_QID(x) (((x) >> S_DEBUG_IDMA0_FLM_REQ_QID) & M_DEBUG_IDMA0_FLM_REQ_QID)
+
+#define A_SGE_DEBUG_DATA_LOW_INDEX_13 0x12f4
+#define A_SGE_DEBUG_DATA_LOW_INDEX_14 0x12f8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_15 0x12fc
 #define A_SGE_QUEUE_BASE_MAP_HIGH 0x1300
 
 #define S_EGRESS_LOG2SIZE    27
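
The definitions above follow the header's S_/M_/V_/G_ convention: S_* is a
field's bit offset, M_* its right-justified mask, V_* shifts a value into
place, and G_* extracts it.  A minimal illustrative sketch of decoding one of
the new SGE debug words, assuming adapter.h's struct adapter and the driver's
t4_read_reg() helper are in scope:

static void
flm_dbptr_sketch(struct adapter *sc)
{
	uint32_t v = t4_read_reg(sc, A_SGE_DEBUG_DATA_LOW_INDEX_5);

	/* G_* pulls each field back out of the raw 32-bit word. */
	printf("FLM state %u, doorbell qid %u, cidx stall %d\n",
	    G_DEBUG_ST_FLM_DBPTR(v), G_DEBUG_FLM_DBPTR_QID(v),
	    (v & F_DEBUG_FLM_DBPTR_CIDX_STALL) != 0);
}
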
@@ -1468,6 +3877,52 @@
 #define V_INGRESS1_LOG2SIZE(x) ((x) << S_INGRESS1_LOG2SIZE)
 #define G_INGRESS1_LOG2SIZE(x) (((x) >> S_INGRESS1_LOG2SIZE) & M_INGRESS1_LOG2SIZE)
 
+#define S_EGRESS_SIZE    27
+#define M_EGRESS_SIZE    0x1fU
+#define V_EGRESS_SIZE(x) ((x) << S_EGRESS_SIZE)
+#define G_EGRESS_SIZE(x) (((x) >> S_EGRESS_SIZE) & M_EGRESS_SIZE)
+
+#define S_INGRESS2_SIZE    5
+#define M_INGRESS2_SIZE    0x1fU
+#define V_INGRESS2_SIZE(x) ((x) << S_INGRESS2_SIZE)
+#define G_INGRESS2_SIZE(x) (((x) >> S_INGRESS2_SIZE) & M_INGRESS2_SIZE)
+
+#define S_INGRESS1_SIZE    0
+#define M_INGRESS1_SIZE    0x1fU
+#define V_INGRESS1_SIZE(x) ((x) << S_INGRESS1_SIZE)
+#define G_INGRESS1_SIZE(x) (((x) >> S_INGRESS1_SIZE) & M_INGRESS1_SIZE)
+
+#define A_SGE_WC_EGRS_BAR2_OFF_PF 0x1300
+
+#define S_PFIQSPERPAGE    28
+#define M_PFIQSPERPAGE    0xfU
+#define V_PFIQSPERPAGE(x) ((x) << S_PFIQSPERPAGE)
+#define G_PFIQSPERPAGE(x) (((x) >> S_PFIQSPERPAGE) & M_PFIQSPERPAGE)
+
+#define S_PFEQSPERPAGE    24
+#define M_PFEQSPERPAGE    0xfU
+#define V_PFEQSPERPAGE(x) ((x) << S_PFEQSPERPAGE)
+#define G_PFEQSPERPAGE(x) (((x) >> S_PFEQSPERPAGE) & M_PFEQSPERPAGE)
+
+#define S_PFWCQSPERPAGE    20
+#define M_PFWCQSPERPAGE    0xfU
+#define V_PFWCQSPERPAGE(x) ((x) << S_PFWCQSPERPAGE)
+#define G_PFWCQSPERPAGE(x) (((x) >> S_PFWCQSPERPAGE) & M_PFWCQSPERPAGE)
+
+#define S_PFWCOFFEN    19
+#define V_PFWCOFFEN(x) ((x) << S_PFWCOFFEN)
+#define F_PFWCOFFEN    V_PFWCOFFEN(1U)
+
+#define S_PFMAXWCSIZE    17
+#define M_PFMAXWCSIZE    0x3U
+#define V_PFMAXWCSIZE(x) ((x) << S_PFMAXWCSIZE)
+#define G_PFMAXWCSIZE(x) (((x) >> S_PFMAXWCSIZE) & M_PFMAXWCSIZE)
+
+#define S_PFWCOFFSET    0
+#define M_PFWCOFFSET    0x1ffffU
+#define V_PFWCOFFSET(x) ((x) << S_PFWCOFFSET)
+#define G_PFWCOFFSET(x) (((x) >> S_PFWCOFFSET) & M_PFWCOFFSET)
+
 #define A_SGE_QUEUE_BASE_MAP_LOW 0x1304
 
 #define S_INGRESS2_BASE    16
@@ -1480,6 +3935,37 @@
 #define V_INGRESS1_BASE(x) ((x) << S_INGRESS1_BASE)
 #define G_INGRESS1_BASE(x) (((x) >> S_INGRESS1_BASE) & M_INGRESS1_BASE)
 
+#define A_SGE_WC_EGRS_BAR2_OFF_VF 0x1320
+
+#define S_VFIQSPERPAGE    28
+#define M_VFIQSPERPAGE    0xfU
+#define V_VFIQSPERPAGE(x) ((x) << S_VFIQSPERPAGE)
+#define G_VFIQSPERPAGE(x) (((x) >> S_VFIQSPERPAGE) & M_VFIQSPERPAGE)
+
+#define S_VFEQSPERPAGE    24
+#define M_VFEQSPERPAGE    0xfU
+#define V_VFEQSPERPAGE(x) ((x) << S_VFEQSPERPAGE)
+#define G_VFEQSPERPAGE(x) (((x) >> S_VFEQSPERPAGE) & M_VFEQSPERPAGE)
+
+#define S_VFWCQSPERPAGE    20
+#define M_VFWCQSPERPAGE    0xfU
+#define V_VFWCQSPERPAGE(x) ((x) << S_VFWCQSPERPAGE)
+#define G_VFWCQSPERPAGE(x) (((x) >> S_VFWCQSPERPAGE) & M_VFWCQSPERPAGE)
+
+#define S_VFWCOFFEN    19
+#define V_VFWCOFFEN(x) ((x) << S_VFWCOFFEN)
+#define F_VFWCOFFEN    V_VFWCOFFEN(1U)
+
+#define S_VFMAXWCSIZE    17
+#define M_VFMAXWCSIZE    0x3U
+#define V_VFMAXWCSIZE(x) ((x) << S_VFMAXWCSIZE)
+#define G_VFMAXWCSIZE(x) (((x) >> S_VFMAXWCSIZE) & M_VFMAXWCSIZE)
+
+#define S_VFWCOFFSET    0
+#define M_VFWCOFFSET    0x1ffffU
+#define V_VFWCOFFSET(x) ((x) << S_VFWCOFFSET)
+#define G_VFWCOFFSET(x) (((x) >> S_VFWCOFFSET) & M_VFWCOFFSET)
+
 #define A_SGE_LA_RDPTR_0 0x1800
 #define A_SGE_LA_RDDATA_0 0x1804
 #define A_SGE_LA_WRPTR_0 0x1808
@@ -1721,6 +4207,94 @@
 #define V_MSIADDRLPERR(x) ((x) << S_MSIADDRLPERR)
 #define F_MSIADDRLPERR    V_MSIADDRLPERR(1U)
 
+#define S_IPGRPPERR    31
+#define V_IPGRPPERR(x) ((x) << S_IPGRPPERR)
+#define F_IPGRPPERR    V_IPGRPPERR(1U)
+
+#define S_READRSPERR    29
+#define V_READRSPERR(x) ((x) << S_READRSPERR)
+#define F_READRSPERR    V_READRSPERR(1U)
+
+#define S_TRGT1GRPPERR    28
+#define V_TRGT1GRPPERR(x) ((x) << S_TRGT1GRPPERR)
+#define F_TRGT1GRPPERR    V_TRGT1GRPPERR(1U)
+
+#define S_IPSOTPERR    27
+#define V_IPSOTPERR(x) ((x) << S_IPSOTPERR)
+#define F_IPSOTPERR    V_IPSOTPERR(1U)
+
+#define S_IPRETRYPERR    26
+#define V_IPRETRYPERR(x) ((x) << S_IPRETRYPERR)
+#define F_IPRETRYPERR    V_IPRETRYPERR(1U)
+
+#define S_IPRXDATAGRPPERR    25
+#define V_IPRXDATAGRPPERR(x) ((x) << S_IPRXDATAGRPPERR)
+#define F_IPRXDATAGRPPERR    V_IPRXDATAGRPPERR(1U)
+
+#define S_IPRXHDRGRPPERR    24
+#define V_IPRXHDRGRPPERR(x) ((x) << S_IPRXHDRGRPPERR)
+#define F_IPRXHDRGRPPERR    V_IPRXHDRGRPPERR(1U)
+
+#define S_PIOTAGQPERR    23
+#define V_PIOTAGQPERR(x) ((x) << S_PIOTAGQPERR)
+#define F_PIOTAGQPERR    V_PIOTAGQPERR(1U)
+
+#define S_MAGRPPERR    22
+#define V_MAGRPPERR(x) ((x) << S_MAGRPPERR)
+#define F_MAGRPPERR    V_MAGRPPERR(1U)
+
+#define S_VFIDPERR    21
+#define V_VFIDPERR(x) ((x) << S_VFIDPERR)
+#define F_VFIDPERR    V_VFIDPERR(1U)
+
+#define S_HREQRDPERR    17
+#define V_HREQRDPERR(x) ((x) << S_HREQRDPERR)
+#define F_HREQRDPERR    V_HREQRDPERR(1U)
+
+#define S_HREQWRPERR    16
+#define V_HREQWRPERR(x) ((x) << S_HREQWRPERR)
+#define F_HREQWRPERR    V_HREQWRPERR(1U)
+
+#define S_DREQRDPERR    14
+#define V_DREQRDPERR(x) ((x) << S_DREQRDPERR)
+#define F_DREQRDPERR    V_DREQRDPERR(1U)
+
+#define S_DREQWRPERR    13
+#define V_DREQWRPERR(x) ((x) << S_DREQWRPERR)
+#define F_DREQWRPERR    V_DREQWRPERR(1U)
+
+#define S_CREQRDPERR    11
+#define V_CREQRDPERR(x) ((x) << S_CREQRDPERR)
+#define F_CREQRDPERR    V_CREQRDPERR(1U)
+
+#define S_MSTTAGQPERR    10
+#define V_MSTTAGQPERR(x) ((x) << S_MSTTAGQPERR)
+#define F_MSTTAGQPERR    V_MSTTAGQPERR(1U)
+
+#define S_TGTTAGQPERR    9
+#define V_TGTTAGQPERR(x) ((x) << S_TGTTAGQPERR)
+#define F_TGTTAGQPERR    V_TGTTAGQPERR(1U)
+
+#define S_PIOREQGRPPERR    8
+#define V_PIOREQGRPPERR(x) ((x) << S_PIOREQGRPPERR)
+#define F_PIOREQGRPPERR    V_PIOREQGRPPERR(1U)
+
+#define S_PIOCPLGRPPERR    7
+#define V_PIOCPLGRPPERR(x) ((x) << S_PIOCPLGRPPERR)
+#define F_PIOCPLGRPPERR    V_PIOCPLGRPPERR(1U)
+
+#define S_MSIXSTIPERR    2
+#define V_MSIXSTIPERR(x) ((x) << S_MSIXSTIPERR)
+#define F_MSIXSTIPERR    V_MSIXSTIPERR(1U)
+
+#define S_MSTTIMEOUTPERR    1
+#define V_MSTTIMEOUTPERR(x) ((x) << S_MSTTIMEOUTPERR)
+#define F_MSTTIMEOUTPERR    V_MSTTIMEOUTPERR(1U)
+
+#define S_MSTGRPPERR    0
+#define V_MSTGRPPERR(x) ((x) << S_MSTGRPPERR)
+#define F_MSTGRPPERR    V_MSTGRPPERR(1U)
+
 #define A_PCIE_INT_CAUSE 0x3004
 #define A_PCIE_PERR_ENABLE 0x3008
 #define A_PCIE_PERR_INJECT 0x300c
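
Single-bit fields additionally get an F_* flag macro (the V_* form applied to
1U), so enable masks can be composed directly from the flags.  An illustrative
sketch, assuming these new parity bits belong to the A_PCIE_PERR_ENABLE group
as their placement suggests, and using the common code's t4_set_reg_field()
helper:

static void
pcie_perr_sketch(struct adapter *sc)
{
	const uint32_t mask = F_IPGRPPERR | F_READRSPERR | F_VFIDPERR;

	/* Set only these bits, leaving the rest of the register alone. */
	t4_set_reg_field(sc, A_PCIE_PERR_ENABLE, mask, mask);
}
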
@@ -1729,6 +4303,11 @@
 #define V_IDE(x) ((x) << S_IDE)
 #define F_IDE    V_IDE(1U)
 
+#define S_MEMSEL_PCIE    1
+#define M_MEMSEL_PCIE    0x1fU
+#define V_MEMSEL_PCIE(x) ((x) << S_MEMSEL_PCIE)
+#define G_MEMSEL_PCIE(x) (((x) >> S_MEMSEL_PCIE) & M_MEMSEL_PCIE)
+
 #define A_PCIE_NONFAT_ERR 0x3010
 
 #define S_RDRSPERR    9
@@ -1771,6 +4350,98 @@
 #define V_CFGSNP(x) ((x) << S_CFGSNP)
 #define F_CFGSNP    V_CFGSNP(1U)
 
+#define S_MAREQTIMEOUT    29
+#define V_MAREQTIMEOUT(x) ((x) << S_MAREQTIMEOUT)
+#define F_MAREQTIMEOUT    V_MAREQTIMEOUT(1U)
+
+#define S_TRGT1BARTYPEERR    28
+#define V_TRGT1BARTYPEERR(x) ((x) << S_TRGT1BARTYPEERR)
+#define F_TRGT1BARTYPEERR    V_TRGT1BARTYPEERR(1U)
+
+#define S_MAEXTRARSPERR    27
+#define V_MAEXTRARSPERR(x) ((x) << S_MAEXTRARSPERR)
+#define F_MAEXTRARSPERR    V_MAEXTRARSPERR(1U)
+
+#define S_MARSPTIMEOUT    26
+#define V_MARSPTIMEOUT(x) ((x) << S_MARSPTIMEOUT)
+#define F_MARSPTIMEOUT    V_MARSPTIMEOUT(1U)
+
+#define S_INTVFALLMSIDISERR    25
+#define V_INTVFALLMSIDISERR(x) ((x) << S_INTVFALLMSIDISERR)
+#define F_INTVFALLMSIDISERR    V_INTVFALLMSIDISERR(1U)
+
+#define S_INTVFRANGEERR    24
+#define V_INTVFRANGEERR(x) ((x) << S_INTVFRANGEERR)
+#define F_INTVFRANGEERR    V_INTVFRANGEERR(1U)
+
+#define S_INTPLIRSPERR    23
+#define V_INTPLIRSPERR(x) ((x) << S_INTPLIRSPERR)
+#define F_INTPLIRSPERR    V_INTPLIRSPERR(1U)
+
+#define S_MEMREQRDTAGERR    22
+#define V_MEMREQRDTAGERR(x) ((x) << S_MEMREQRDTAGERR)
+#define F_MEMREQRDTAGERR    V_MEMREQRDTAGERR(1U)
+
+#define S_CFGINITDONEERR    21
+#define V_CFGINITDONEERR(x) ((x) << S_CFGINITDONEERR)
+#define F_CFGINITDONEERR    V_CFGINITDONEERR(1U)
+
+#define S_BAR2TIMEOUT    20
+#define V_BAR2TIMEOUT(x) ((x) << S_BAR2TIMEOUT)
+#define F_BAR2TIMEOUT    V_BAR2TIMEOUT(1U)
+
+#define S_VPDTIMEOUT    19
+#define V_VPDTIMEOUT(x) ((x) << S_VPDTIMEOUT)
+#define F_VPDTIMEOUT    V_VPDTIMEOUT(1U)
+
+#define S_MEMRSPRDTAGERR    18
+#define V_MEMRSPRDTAGERR(x) ((x) << S_MEMRSPRDTAGERR)
+#define F_MEMRSPRDTAGERR    V_MEMRSPRDTAGERR(1U)
+
+#define S_MEMRSPWRTAGERR    17
+#define V_MEMRSPWRTAGERR(x) ((x) << S_MEMRSPWRTAGERR)
+#define F_MEMRSPWRTAGERR    V_MEMRSPWRTAGERR(1U)
+
+#define S_PIORSPRDTAGERR    16
+#define V_PIORSPRDTAGERR(x) ((x) << S_PIORSPRDTAGERR)
+#define F_PIORSPRDTAGERR    V_PIORSPRDTAGERR(1U)
+
+#define S_PIORSPWRTAGERR    15
+#define V_PIORSPWRTAGERR(x) ((x) << S_PIORSPWRTAGERR)
+#define F_PIORSPWRTAGERR    V_PIORSPWRTAGERR(1U)
+
+#define S_DBITIMEOUT    14
+#define V_DBITIMEOUT(x) ((x) << S_DBITIMEOUT)
+#define F_DBITIMEOUT    V_DBITIMEOUT(1U)
+
+#define S_PIOUNALINDWR    13
+#define V_PIOUNALINDWR(x) ((x) << S_PIOUNALINDWR)
+#define F_PIOUNALINDWR    V_PIOUNALINDWR(1U)
+
+#define S_BAR2RDERR    12
+#define V_BAR2RDERR(x) ((x) << S_BAR2RDERR)
+#define F_BAR2RDERR    V_BAR2RDERR(1U)
+
+#define S_MAWREOPERR    11
+#define V_MAWREOPERR(x) ((x) << S_MAWREOPERR)
+#define F_MAWREOPERR    V_MAWREOPERR(1U)
+
+#define S_MARDEOPERR    10
+#define V_MARDEOPERR(x) ((x) << S_MARDEOPERR)
+#define F_MARDEOPERR    V_MARDEOPERR(1U)
+
+#define S_BAR2REQ    2
+#define V_BAR2REQ(x) ((x) << S_BAR2REQ)
+#define F_BAR2REQ    V_BAR2REQ(1U)
+
+#define S_MARSPUE    30
+#define V_MARSPUE(x) ((x) << S_MARSPUE)
+#define F_MARSPUE    V_MARSPUE(1U)
+
+#define S_KDBEOPERR    7
+#define V_KDBEOPERR(x) ((x) << S_KDBEOPERR)
+#define F_KDBEOPERR    V_KDBEOPERR(1U)
+
 #define A_PCIE_CFG 0x3014
 
 #define S_CFGDMAXPYLDSZRX    26
@@ -1861,6 +4532,61 @@
 #define V_LINKDNRSTEN(x) ((x) << S_LINKDNRSTEN)
 #define F_LINKDNRSTEN    V_LINKDNRSTEN(1U)
 
+#define S_T5_PIOSTOPEN    31
+#define V_T5_PIOSTOPEN(x) ((x) << S_T5_PIOSTOPEN)
+#define F_T5_PIOSTOPEN    V_T5_PIOSTOPEN(1U)
+
+#define S_DIAGCTRLBUS    28
+#define M_DIAGCTRLBUS    0x7U
+#define V_DIAGCTRLBUS(x) ((x) << S_DIAGCTRLBUS)
+#define G_DIAGCTRLBUS(x) (((x) >> S_DIAGCTRLBUS) & M_DIAGCTRLBUS)
+
+#define S_IPPERREN    27
+#define V_IPPERREN(x) ((x) << S_IPPERREN)
+#define F_IPPERREN    V_IPPERREN(1U)
+
+#define S_CFGDEXTTAGEN    26
+#define V_CFGDEXTTAGEN(x) ((x) << S_CFGDEXTTAGEN)
+#define F_CFGDEXTTAGEN    V_CFGDEXTTAGEN(1U)
+
+#define S_CFGDMAXPYLDSZ    23
+#define M_CFGDMAXPYLDSZ    0x7U
+#define V_CFGDMAXPYLDSZ(x) ((x) << S_CFGDMAXPYLDSZ)
+#define G_CFGDMAXPYLDSZ(x) (((x) >> S_CFGDMAXPYLDSZ) & M_CFGDMAXPYLDSZ)
+
+#define S_DCAEN    17
+#define V_DCAEN(x) ((x) << S_DCAEN)
+#define F_DCAEN    V_DCAEN(1U)
+
+#define S_T5CMDREQPRIORITY    16
+#define V_T5CMDREQPRIORITY(x) ((x) << S_T5CMDREQPRIORITY)
+#define F_T5CMDREQPRIORITY    V_T5CMDREQPRIORITY(1U)
+
+#define S_T5VPDREQPROTECT    14
+#define M_T5VPDREQPROTECT    0x3U
+#define V_T5VPDREQPROTECT(x) ((x) << S_T5VPDREQPROTECT)
+#define G_T5VPDREQPROTECT(x) (((x) >> S_T5VPDREQPROTECT) & M_T5VPDREQPROTECT)
+
+#define S_DROPPEDRDRSPDATA    12
+#define V_DROPPEDRDRSPDATA(x) ((x) << S_DROPPEDRDRSPDATA)
+#define F_DROPPEDRDRSPDATA    V_DROPPEDRDRSPDATA(1U)
+
+#define S_AI_INTX_REASSERTEN    11
+#define V_AI_INTX_REASSERTEN(x) ((x) << S_AI_INTX_REASSERTEN)
+#define F_AI_INTX_REASSERTEN    V_AI_INTX_REASSERTEN(1U)
+
+#define S_AUTOTXNDISABLE    10
+#define V_AUTOTXNDISABLE(x) ((x) << S_AUTOTXNDISABLE)
+#define F_AUTOTXNDISABLE    V_AUTOTXNDISABLE(1U)
+
+#define S_LINKREQRSTPCIECRSTMODE    3
+#define V_LINKREQRSTPCIECRSTMODE(x) ((x) << S_LINKREQRSTPCIECRSTMODE)
+#define F_LINKREQRSTPCIECRSTMODE    V_LINKREQRSTPCIECRSTMODE(1U)
+
+#define S_T6_PIOSTOPEN    31
+#define V_T6_PIOSTOPEN(x) ((x) << S_T6_PIOSTOPEN)
+#define F_T6_PIOSTOPEN    V_T6_PIOSTOPEN(1U)
+
 #define A_PCIE_DMA_CTRL 0x3018
 
 #define S_LITTLEENDIAN    7
@@ -1867,6 +4593,32 @@
 #define V_LITTLEENDIAN(x) ((x) << S_LITTLEENDIAN)
 #define F_LITTLEENDIAN    V_LITTLEENDIAN(1U)
 
+#define A_PCIE_CFG2 0x3018
+
+#define S_VPDTIMER    16
+#define M_VPDTIMER    0xffffU
+#define V_VPDTIMER(x) ((x) << S_VPDTIMER)
+#define G_VPDTIMER(x) (((x) >> S_VPDTIMER) & M_VPDTIMER)
+
+#define S_BAR2TIMER    4
+#define M_BAR2TIMER    0xfffU
+#define V_BAR2TIMER(x) ((x) << S_BAR2TIMER)
+#define G_BAR2TIMER(x) (((x) >> S_BAR2TIMER) & M_BAR2TIMER)
+
+#define S_MSTREQRDRRASIMPLE    3
+#define V_MSTREQRDRRASIMPLE(x) ((x) << S_MSTREQRDRRASIMPLE)
+#define F_MSTREQRDRRASIMPLE    V_MSTREQRDRRASIMPLE(1U)
+
+#define S_TOTMAXTAG    0
+#define M_TOTMAXTAG    0x3U
+#define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG)
+#define G_TOTMAXTAG(x) (((x) >> S_TOTMAXTAG) & M_TOTMAXTAG)
+
+#define S_T6_TOTMAXTAG    0
+#define M_T6_TOTMAXTAG    0x7U
+#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
+#define G_T6_TOTMAXTAG(x) (((x) >> S_T6_TOTMAXTAG) & M_T6_TOTMAXTAG)
+
 #define A_PCIE_DMA_CFG 0x301c
 
 #define S_MAXPYLDSIZE    28
@@ -1894,6 +4646,29 @@
 #define V_MAXTAG(x) ((x) << S_MAXTAG)
 #define G_MAXTAG(x) (((x) >> S_MAXTAG) & M_MAXTAG)
 
+#define A_PCIE_CFG3 0x301c
+
+#define S_AUTOPIOCOOKIEMATCH    6
+#define V_AUTOPIOCOOKIEMATCH(x) ((x) << S_AUTOPIOCOOKIEMATCH)
+#define F_AUTOPIOCOOKIEMATCH    V_AUTOPIOCOOKIEMATCH(1U)
+
+#define S_FLRPNDCPLMODE    4
+#define M_FLRPNDCPLMODE    0x3U
+#define V_FLRPNDCPLMODE(x) ((x) << S_FLRPNDCPLMODE)
+#define G_FLRPNDCPLMODE(x) (((x) >> S_FLRPNDCPLMODE) & M_FLRPNDCPLMODE)
+
+#define S_HMADCASTFIRSTONLY    2
+#define V_HMADCASTFIRSTONLY(x) ((x) << S_HMADCASTFIRSTONLY)
+#define F_HMADCASTFIRSTONLY    V_HMADCASTFIRSTONLY(1U)
+
+#define S_CMDDCASTFIRSTONLY    1
+#define V_CMDDCASTFIRSTONLY(x) ((x) << S_CMDDCASTFIRSTONLY)
+#define F_CMDDCASTFIRSTONLY    V_CMDDCASTFIRSTONLY(1U)
+
+#define S_DMADCASTFIRSTONLY    0
+#define V_DMADCASTFIRSTONLY(x) ((x) << S_DMADCASTFIRSTONLY)
+#define F_DMADCASTFIRSTONLY    V_DMADCASTFIRSTONLY(1U)
+
 #define A_PCIE_DMA_STAT 0x3020
 
 #define S_STATEREQ    28
@@ -1920,6 +4695,60 @@
 #define V_DMA_REQCNT(x) ((x) << S_DMA_REQCNT)
 #define G_DMA_REQCNT(x) (((x) >> S_DMA_REQCNT) & M_DMA_REQCNT)
 
+#define A_PCIE_CFG4 0x3020
+
+#define S_L1CLKREMOVALEN    17
+#define V_L1CLKREMOVALEN(x) ((x) << S_L1CLKREMOVALEN)
+#define F_L1CLKREMOVALEN    V_L1CLKREMOVALEN(1U)
+
+#define S_READYENTERL23    16
+#define V_READYENTERL23(x) ((x) << S_READYENTERL23)
+#define F_READYENTERL23    V_READYENTERL23(1U)
+
+#define S_EXITL1    12
+#define V_EXITL1(x) ((x) << S_EXITL1)
+#define F_EXITL1    V_EXITL1(1U)
+
+#define S_ENTERL1    8
+#define V_ENTERL1(x) ((x) << S_ENTERL1)
+#define F_ENTERL1    V_ENTERL1(1U)
+
+#define S_GENPME    0
+#define M_GENPME    0xffU
+#define V_GENPME(x) ((x) << S_GENPME)
+#define G_GENPME(x) (((x) >> S_GENPME) & M_GENPME)
+
+#define A_PCIE_CFG5 0x3024
+
+#define S_ENABLESKPPARITYFIX    2
+#define V_ENABLESKPPARITYFIX(x) ((x) << S_ENABLESKPPARITYFIX)
+#define F_ENABLESKPPARITYFIX    V_ENABLESKPPARITYFIX(1U)
+
+#define S_ENABLEL2ENTRYINL1    1
+#define V_ENABLEL2ENTRYINL1(x) ((x) << S_ENABLEL2ENTRYINL1)
+#define F_ENABLEL2ENTRYINL1    V_ENABLEL2ENTRYINL1(1U)
+
+#define S_HOLDCPLENTERINGL1    0
+#define V_HOLDCPLENTERINGL1(x) ((x) << S_HOLDCPLENTERINGL1)
+#define F_HOLDCPLENTERINGL1    V_HOLDCPLENTERINGL1(1U)
+
+#define A_PCIE_CFG6 0x3028
+
+#define S_PERSTTIMERCOUNT    12
+#define M_PERSTTIMERCOUNT    0x3fffU
+#define V_PERSTTIMERCOUNT(x) ((x) << S_PERSTTIMERCOUNT)
+#define G_PERSTTIMERCOUNT(x) (((x) >> S_PERSTTIMERCOUNT) & M_PERSTTIMERCOUNT)
+
+#define S_PERSTTIMEOUT    8
+#define V_PERSTTIMEOUT(x) ((x) << S_PERSTTIMEOUT)
+#define F_PERSTTIMEOUT    V_PERSTTIMEOUT(1U)
+
+#define S_PERSTTIMER    0
+#define M_PERSTTIMER    0xfU
+#define V_PERSTTIMER(x) ((x) << S_PERSTTIMER)
+#define G_PERSTTIMER(x) (((x) >> S_PERSTTIMER) & M_PERSTTIMER)
+
+#define A_PCIE_CFG7 0x302c
 #define A_PCIE_CMD_CTRL 0x303c
 #define A_PCIE_CMD_CFG 0x3040
 
@@ -2034,6 +4863,55 @@
 #define V_REGISTER(x) ((x) << S_REGISTER)
 #define G_REGISTER(x) (((x) >> S_REGISTER) & M_REGISTER)
 
+#define S_CS2    28
+#define V_CS2(x) ((x) << S_CS2)
+#define F_CS2    V_CS2(1U)
+
+#define S_WRBE    24
+#define M_WRBE    0xfU
+#define V_WRBE(x) ((x) << S_WRBE)
+#define G_WRBE(x) (((x) >> S_WRBE) & M_WRBE)
+
+#define S_CFG_SPACE_VFVLD    23
+#define V_CFG_SPACE_VFVLD(x) ((x) << S_CFG_SPACE_VFVLD)
+#define F_CFG_SPACE_VFVLD    V_CFG_SPACE_VFVLD(1U)
+
+#define S_CFG_SPACE_RVF    16
+#define M_CFG_SPACE_RVF    0x7fU
+#define V_CFG_SPACE_RVF(x) ((x) << S_CFG_SPACE_RVF)
+#define G_CFG_SPACE_RVF(x) (((x) >> S_CFG_SPACE_RVF) & M_CFG_SPACE_RVF)
+
+#define S_CFG_SPACE_PF    12
+#define M_CFG_SPACE_PF    0x7U
+#define V_CFG_SPACE_PF(x) ((x) << S_CFG_SPACE_PF)
+#define G_CFG_SPACE_PF(x) (((x) >> S_CFG_SPACE_PF) & M_CFG_SPACE_PF)
+
+#define S_T6_ENABLE    31
+#define V_T6_ENABLE(x) ((x) << S_T6_ENABLE)
+#define F_T6_ENABLE    V_T6_ENABLE(1U)
+
+#define S_T6_AI    30
+#define V_T6_AI(x) ((x) << S_T6_AI)
+#define F_T6_AI    V_T6_AI(1U)
+
+#define S_T6_CS2    29
+#define V_T6_CS2(x) ((x) << S_T6_CS2)
+#define F_T6_CS2    V_T6_CS2(1U)
+
+#define S_T6_WRBE    25
+#define M_T6_WRBE    0xfU
+#define V_T6_WRBE(x) ((x) << S_T6_WRBE)
+#define G_T6_WRBE(x) (((x) >> S_T6_WRBE) & M_T6_WRBE)
+
+#define S_T6_CFG_SPACE_VFVLD    24
+#define V_T6_CFG_SPACE_VFVLD(x) ((x) << S_T6_CFG_SPACE_VFVLD)
+#define F_T6_CFG_SPACE_VFVLD    V_T6_CFG_SPACE_VFVLD(1U)
+
+#define S_T6_CFG_SPACE_RVF    16
+#define M_T6_CFG_SPACE_RVF    0xffU
+#define V_T6_CFG_SPACE_RVF(x) ((x) << S_T6_CFG_SPACE_RVF)
+#define G_T6_CFG_SPACE_RVF(x) (((x) >> S_T6_CFG_SPACE_RVF) & M_T6_CFG_SPACE_RVF)
+
 #define A_PCIE_CFG_SPACE_DATA 0x3064
 #define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068
 
@@ -2053,6 +4931,12 @@
 #define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW)
 
 #define A_PCIE_MEM_ACCESS_OFFSET 0x306c
+
+#define S_MEMOFST    7
+#define M_MEMOFST    0x1ffffffU
+#define V_MEMOFST(x) ((x) << S_MEMOFST)
+#define G_MEMOFST(x) (((x) >> S_MEMOFST) & M_MEMOFST)
+
 #define A_PCIE_MAILBOX_BASE_WIN 0x30a8
 
 #define S_MBOXPCIEOFST    6
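
The new MEMOFST field occupies bits 31:7 of A_PCIE_MEM_ACCESS_OFFSET, so the
window offset is 128-byte aligned.  A hedged sketch of programming it (any
per-window register stride omitted), assuming t4_write_reg():

static void
mem_window_sketch(struct adapter *sc, uint32_t addr)
{
	/* Drop the low 7 bits; the offset must be 128-byte aligned. */
	t4_write_reg(sc, A_PCIE_MEM_ACCESS_OFFSET,
	    V_MEMOFST(addr >> S_MEMOFST));
}
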
@@ -2106,6 +4990,16 @@
 #define V_MA_MAXTAG(x) ((x) << S_MA_MAXTAG)
 #define G_MA_MAXTAG(x) (((x) >> S_MA_MAXTAG) & M_MA_MAXTAG)
 
+#define S_T5_MA_MAXREQCNT    16
+#define M_T5_MA_MAXREQCNT    0x7fU
+#define V_T5_MA_MAXREQCNT(x) ((x) << S_T5_MA_MAXREQCNT)
+#define G_T5_MA_MAXREQCNT(x) (((x) >> S_T5_MA_MAXREQCNT) & M_T5_MA_MAXREQCNT)
+
+#define S_MA_MAXREQSIZE    8
+#define M_MA_MAXREQSIZE    0x7U
+#define V_MA_MAXREQSIZE(x) ((x) << S_MA_MAXREQSIZE)
+#define G_MA_MAXREQSIZE(x) (((x) >> S_MA_MAXREQSIZE) & M_MA_MAXREQSIZE)
+
 #define A_PCIE_MA_SYNC 0x30b4
 #define A_PCIE_FW 0x30b8
 #define A_PCIE_FW_PF 0x30bc
@@ -2124,7 +5018,16 @@
 #define V_PIOPAUSE(x) ((x) << S_PIOPAUSE)
 #define F_PIOPAUSE    V_PIOPAUSE(1U)
 
+#define S_MSTPAUSEDONE    30
+#define V_MSTPAUSEDONE(x) ((x) << S_MSTPAUSEDONE)
+#define F_MSTPAUSEDONE    V_MSTPAUSEDONE(1U)
+
+#define S_MSTPAUSE    1
+#define V_MSTPAUSE(x) ((x) << S_MSTPAUSE)
+#define F_MSTPAUSE    V_MSTPAUSE(1U)
+
 #define A_PCIE_SYS_CFG_READY 0x30e0
+#define A_PCIE_MA_STAT 0x30e0
 #define A_PCIE_STATIC_CFG1 0x30e4
 
 #define S_LINKDOWN_RESET_EN    26
@@ -2180,6 +5083,22 @@
 #define V_PCIE_MAX_RDSIZE(x) ((x) << S_PCIE_MAX_RDSIZE)
 #define G_PCIE_MAX_RDSIZE(x) (((x) >> S_PCIE_MAX_RDSIZE) & M_PCIE_MAX_RDSIZE)
 
+#define S_AUXPOWER_DETECTED    27
+#define V_AUXPOWER_DETECTED(x) ((x) << S_AUXPOWER_DETECTED)
+#define F_AUXPOWER_DETECTED    V_AUXPOWER_DETECTED(1U)
+
+#define A_PCIE_STATIC_CFG2 0x30e8
+
+#define S_PL_CONTROL    16
+#define M_PL_CONTROL    0xffffU
+#define V_PL_CONTROL(x) ((x) << S_PL_CONTROL)
+#define G_PL_CONTROL(x) (((x) >> S_PL_CONTROL) & M_PL_CONTROL)
+
+#define S_STATIC_SPARE3    0
+#define M_STATIC_SPARE3    0x3fffU
+#define V_STATIC_SPARE3(x) ((x) << S_STATIC_SPARE3)
+#define G_STATIC_SPARE3(x) (((x) >> S_STATIC_SPARE3) & M_STATIC_SPARE3)
+
 #define A_PCIE_DBG_INDIR_REQ 0x30ec
 
 #define S_DBGENABLE    31
@@ -2254,6 +5173,98 @@
 #define V_PFNUM(x) ((x) << S_PFNUM)
 #define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
 
+#define A_PCIE_PF_INT_CFG 0x3140
+#define A_PCIE_PF_INT_CFG2 0x3144
+#define A_PCIE_VF_INT_CFG 0x3180
+#define A_PCIE_VF_INT_CFG2 0x3184
+#define A_PCIE_PF_MSI_EN 0x35a8
+
+#define S_PFMSIEN_7_0    0
+#define M_PFMSIEN_7_0    0xffU
+#define V_PFMSIEN_7_0(x) ((x) << S_PFMSIEN_7_0)
+#define G_PFMSIEN_7_0(x) (((x) >> S_PFMSIEN_7_0) & M_PFMSIEN_7_0)
+
+#define A_PCIE_VF_MSI_EN_0 0x35ac
+#define A_PCIE_VF_MSI_EN_1 0x35b0
+#define A_PCIE_VF_MSI_EN_2 0x35b4
+#define A_PCIE_VF_MSI_EN_3 0x35b8
+#define A_PCIE_PF_MSIX_EN 0x35bc
+
+#define S_PFMSIXEN_7_0    0
+#define M_PFMSIXEN_7_0    0xffU
+#define V_PFMSIXEN_7_0(x) ((x) << S_PFMSIXEN_7_0)
+#define G_PFMSIXEN_7_0(x) (((x) >> S_PFMSIXEN_7_0) & M_PFMSIXEN_7_0)
+
+#define A_PCIE_VF_MSIX_EN_0 0x35c0
+#define A_PCIE_VF_MSIX_EN_1 0x35c4
+#define A_PCIE_VF_MSIX_EN_2 0x35c8
+#define A_PCIE_VF_MSIX_EN_3 0x35cc
+#define A_PCIE_FID_VFID_SEL 0x35ec
+
+#define S_FID_VFID_SEL_SELECT    0
+#define M_FID_VFID_SEL_SELECT    0x3U
+#define V_FID_VFID_SEL_SELECT(x) ((x) << S_FID_VFID_SEL_SELECT)
+#define G_FID_VFID_SEL_SELECT(x) (((x) >> S_FID_VFID_SEL_SELECT) & M_FID_VFID_SEL_SELECT)
+
+#define A_PCIE_FID_VFID 0x3600
+
+#define S_FID_VFID_SELECT    30
+#define M_FID_VFID_SELECT    0x3U
+#define V_FID_VFID_SELECT(x) ((x) << S_FID_VFID_SELECT)
+#define G_FID_VFID_SELECT(x) (((x) >> S_FID_VFID_SELECT) & M_FID_VFID_SELECT)
+
+#define S_IDO    24
+#define V_IDO(x) ((x) << S_IDO)
+#define F_IDO    V_IDO(1U)
+
+#define S_FID_VFID_VFID    16
+#define M_FID_VFID_VFID    0xffU
+#define V_FID_VFID_VFID(x) ((x) << S_FID_VFID_VFID)
+#define G_FID_VFID_VFID(x) (((x) >> S_FID_VFID_VFID) & M_FID_VFID_VFID)
+
+#define S_FID_VFID_TC    11
+#define M_FID_VFID_TC    0x7U
+#define V_FID_VFID_TC(x) ((x) << S_FID_VFID_TC)
+#define G_FID_VFID_TC(x) (((x) >> S_FID_VFID_TC) & M_FID_VFID_TC)
+
+#define S_FID_VFID_VFVLD    10
+#define V_FID_VFID_VFVLD(x) ((x) << S_FID_VFID_VFVLD)
+#define F_FID_VFID_VFVLD    V_FID_VFID_VFVLD(1U)
+
+#define S_FID_VFID_PF    7
+#define M_FID_VFID_PF    0x7U
+#define V_FID_VFID_PF(x) ((x) << S_FID_VFID_PF)
+#define G_FID_VFID_PF(x) (((x) >> S_FID_VFID_PF) & M_FID_VFID_PF)
+
+#define S_FID_VFID_RVF    0
+#define M_FID_VFID_RVF    0x7fU
+#define V_FID_VFID_RVF(x) ((x) << S_FID_VFID_RVF)
+#define G_FID_VFID_RVF(x) (((x) >> S_FID_VFID_RVF) & M_FID_VFID_RVF)
+
+#define S_T6_FID_VFID_VFID    15
+#define M_T6_FID_VFID_VFID    0x1ffU
+#define V_T6_FID_VFID_VFID(x) ((x) << S_T6_FID_VFID_VFID)
+#define G_T6_FID_VFID_VFID(x) (((x) >> S_T6_FID_VFID_VFID) & M_T6_FID_VFID_VFID)
+
+#define S_T6_FID_VFID_TC    12
+#define M_T6_FID_VFID_TC    0x7U
+#define V_T6_FID_VFID_TC(x) ((x) << S_T6_FID_VFID_TC)
+#define G_T6_FID_VFID_TC(x) (((x) >> S_T6_FID_VFID_TC) & M_T6_FID_VFID_TC)
+
+#define S_T6_FID_VFID_VFVLD    11
+#define V_T6_FID_VFID_VFVLD(x) ((x) << S_T6_FID_VFID_VFVLD)
+#define F_T6_FID_VFID_VFVLD    V_T6_FID_VFID_VFVLD(1U)
+
+#define S_T6_FID_VFID_PF    8
+#define M_T6_FID_VFID_PF    0x7U
+#define V_T6_FID_VFID_PF(x) ((x) << S_T6_FID_VFID_PF)
+#define G_T6_FID_VFID_PF(x) (((x) >> S_T6_FID_VFID_PF) & M_T6_FID_VFID_PF)
+
+#define S_T6_FID_VFID_RVF    0
+#define M_T6_FID_VFID_RVF    0xffU
+#define V_T6_FID_VFID_RVF(x) ((x) << S_T6_FID_VFID_RVF)
+#define G_T6_FID_VFID_RVF(x) (((x) >> S_T6_FID_VFID_RVF) & M_T6_FID_VFID_RVF)
+
 #define A_PCIE_FID 0x3900
 
 #define S_PAD    11
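
The FID_VFID pair reads like an indirect window: A_PCIE_FID_VFID_SEL
presumably picks which table entry A_PCIE_FID_VFID then presents.  A sketch of
that select-then-read pattern, under exactly that assumption:

static unsigned int
fid_vfid_sketch(struct adapter *sc, unsigned int sel)
{
	uint32_t v;

	/* Assumed semantics: select first, then read the decoded entry. */
	t4_write_reg(sc, A_PCIE_FID_VFID_SEL, V_FID_VFID_SEL_SELECT(sel));
	v = t4_read_reg(sc, A_PCIE_FID_VFID);
	return (G_FID_VFID_VFID(v));
}
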
@@ -2270,6 +5281,746 @@
 #define V_FUNC(x) ((x) << S_FUNC)
 #define G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
 
+#define A_PCIE_COOKIE_STAT 0x5600
+
+#define S_COOKIEB    16
+#define M_COOKIEB    0x3ffU
+#define V_COOKIEB(x) ((x) << S_COOKIEB)
+#define G_COOKIEB(x) (((x) >> S_COOKIEB) & M_COOKIEB)
+
+#define S_COOKIEA    0
+#define M_COOKIEA    0x3ffU
+#define V_COOKIEA(x) ((x) << S_COOKIEA)
+#define G_COOKIEA(x) (((x) >> S_COOKIEA) & M_COOKIEA)
+
+#define A_PCIE_FLR_PIO 0x5620
+
+#define S_RCVDBAR2COOKIE    24
+#define M_RCVDBAR2COOKIE    0xffU
+#define V_RCVDBAR2COOKIE(x) ((x) << S_RCVDBAR2COOKIE)
+#define G_RCVDBAR2COOKIE(x) (((x) >> S_RCVDBAR2COOKIE) & M_RCVDBAR2COOKIE)
+
+#define S_RCVDMARSPCOOKIE    16
+#define M_RCVDMARSPCOOKIE    0xffU
+#define V_RCVDMARSPCOOKIE(x) ((x) << S_RCVDMARSPCOOKIE)
+#define G_RCVDMARSPCOOKIE(x) (((x) >> S_RCVDMARSPCOOKIE) & M_RCVDMARSPCOOKIE)
+
+#define S_RCVDPIORSPCOOKIE    8
+#define M_RCVDPIORSPCOOKIE    0xffU
+#define V_RCVDPIORSPCOOKIE(x) ((x) << S_RCVDPIORSPCOOKIE)
+#define G_RCVDPIORSPCOOKIE(x) (((x) >> S_RCVDPIORSPCOOKIE) & M_RCVDPIORSPCOOKIE)
+
+#define S_EXPDCOOKIE    0
+#define M_EXPDCOOKIE    0xffU
+#define V_EXPDCOOKIE(x) ((x) << S_EXPDCOOKIE)
+#define G_EXPDCOOKIE(x) (((x) >> S_EXPDCOOKIE) & M_EXPDCOOKIE)
+
+#define A_PCIE_FLR_PIO2 0x5624
+
+#define S_RCVDMAREQCOOKIE    16
+#define M_RCVDMAREQCOOKIE    0xffU
+#define V_RCVDMAREQCOOKIE(x) ((x) << S_RCVDMAREQCOOKIE)
+#define G_RCVDMAREQCOOKIE(x) (((x) >> S_RCVDMAREQCOOKIE) & M_RCVDMAREQCOOKIE)
+
+#define S_RCVDPIOREQCOOKIE    8
+#define M_RCVDPIOREQCOOKIE    0xffU
+#define V_RCVDPIOREQCOOKIE(x) ((x) << S_RCVDPIOREQCOOKIE)
+#define G_RCVDPIOREQCOOKIE(x) (((x) >> S_RCVDPIOREQCOOKIE) & M_RCVDPIOREQCOOKIE)
+
+#define S_RCVDVDMRXCOOKIE    24
+#define M_RCVDVDMRXCOOKIE    0xffU
+#define V_RCVDVDMRXCOOKIE(x) ((x) << S_RCVDVDMRXCOOKIE)
+#define G_RCVDVDMRXCOOKIE(x) (((x) >> S_RCVDVDMRXCOOKIE) & M_RCVDVDMRXCOOKIE)
+
+#define S_RCVDVDMTXCOOKIE    16
+#define M_RCVDVDMTXCOOKIE    0xffU
+#define V_RCVDVDMTXCOOKIE(x) ((x) << S_RCVDVDMTXCOOKIE)
+#define G_RCVDVDMTXCOOKIE(x) (((x) >> S_RCVDVDMTXCOOKIE) & M_RCVDVDMTXCOOKIE)
+
+#define S_T6_RCVDMAREQCOOKIE    8
+#define M_T6_RCVDMAREQCOOKIE    0xffU
+#define V_T6_RCVDMAREQCOOKIE(x) ((x) << S_T6_RCVDMAREQCOOKIE)
+#define G_T6_RCVDMAREQCOOKIE(x) (((x) >> S_T6_RCVDMAREQCOOKIE) & M_T6_RCVDMAREQCOOKIE)
+
+#define S_T6_RCVDPIOREQCOOKIE    0
+#define M_T6_RCVDPIOREQCOOKIE    0xffU
+#define V_T6_RCVDPIOREQCOOKIE(x) ((x) << S_T6_RCVDPIOREQCOOKIE)
+#define G_T6_RCVDPIOREQCOOKIE(x) (((x) >> S_T6_RCVDPIOREQCOOKIE) & M_T6_RCVDPIOREQCOOKIE)
+
+#define A_PCIE_VC0_CDTS0 0x56cc
+
+#define S_CPLD0    20
+#define M_CPLD0    0xfffU
+#define V_CPLD0(x) ((x) << S_CPLD0)
+#define G_CPLD0(x) (((x) >> S_CPLD0) & M_CPLD0)
+
+#define S_PH0    12
+#define M_PH0    0xffU
+#define V_PH0(x) ((x) << S_PH0)
+#define G_PH0(x) (((x) >> S_PH0) & M_PH0)
+
+#define S_PD0    0
+#define M_PD0    0xfffU
+#define V_PD0(x) ((x) << S_PD0)
+#define G_PD0(x) (((x) >> S_PD0) & M_PD0)
+
+#define A_PCIE_VC0_CDTS1 0x56d0
+
+#define S_CPLH0    20
+#define M_CPLH0    0xffU
+#define V_CPLH0(x) ((x) << S_CPLH0)
+#define G_CPLH0(x) (((x) >> S_CPLH0) & M_CPLH0)
+
+#define S_NPH0    12
+#define M_NPH0    0xffU
+#define V_NPH0(x) ((x) << S_NPH0)
+#define G_NPH0(x) (((x) >> S_NPH0) & M_NPH0)
+
+#define S_NPD0    0
+#define M_NPD0    0xfffU
+#define V_NPD0(x) ((x) << S_NPD0)
+#define G_NPD0(x) (((x) >> S_NPD0) & M_NPD0)
+
+#define A_PCIE_VC1_CDTS0 0x56d4
+
+#define S_CPLD1    20
+#define M_CPLD1    0xfffU
+#define V_CPLD1(x) ((x) << S_CPLD1)
+#define G_CPLD1(x) (((x) >> S_CPLD1) & M_CPLD1)
+
+#define S_PH1    12
+#define M_PH1    0xffU
+#define V_PH1(x) ((x) << S_PH1)
+#define G_PH1(x) (((x) >> S_PH1) & M_PH1)
+
+#define S_PD1    0
+#define M_PD1    0xfffU
+#define V_PD1(x) ((x) << S_PD1)
+#define G_PD1(x) (((x) >> S_PD1) & M_PD1)
+
+#define A_PCIE_VC1_CDTS1 0x56d8
+
+#define S_CPLH1    20
+#define M_CPLH1    0xffU
+#define V_CPLH1(x) ((x) << S_CPLH1)
+#define G_CPLH1(x) (((x) >> S_CPLH1) & M_CPLH1)
+
+#define S_NPH1    12
+#define M_NPH1    0xffU
+#define V_NPH1(x) ((x) << S_NPH1)
+#define G_NPH1(x) (((x) >> S_NPH1) & M_NPH1)
+
+#define S_NPD1    0
+#define M_NPD1    0xfffU
+#define V_NPD1(x) ((x) << S_NPD1)
+#define G_NPD1(x) (((x) >> S_NPD1) & M_NPD1)
+
+#define A_PCIE_FLR_PF_STATUS 0x56dc
+#define A_PCIE_FLR_VF0_STATUS 0x56e0
+#define A_PCIE_FLR_VF1_STATUS 0x56e4
+#define A_PCIE_FLR_VF2_STATUS 0x56e8
+#define A_PCIE_FLR_VF3_STATUS 0x56ec
+#define A_PCIE_STAT 0x56f4
+
+#define S_PM_STATUS    24
+#define M_PM_STATUS    0xffU
+#define V_PM_STATUS(x) ((x) << S_PM_STATUS)
+#define G_PM_STATUS(x) (((x) >> S_PM_STATUS) & M_PM_STATUS)
+
+#define S_PM_CURRENTSTATE    20
+#define M_PM_CURRENTSTATE    0x7U
+#define V_PM_CURRENTSTATE(x) ((x) << S_PM_CURRENTSTATE)
+#define G_PM_CURRENTSTATE(x) (((x) >> S_PM_CURRENTSTATE) & M_PM_CURRENTSTATE)
+
+#define S_LTSSMENABLE    12
+#define V_LTSSMENABLE(x) ((x) << S_LTSSMENABLE)
+#define F_LTSSMENABLE    V_LTSSMENABLE(1U)
+
+#define S_STATECFGINITF    4
+#define M_STATECFGINITF    0x7fU
+#define V_STATECFGINITF(x) ((x) << S_STATECFGINITF)
+#define G_STATECFGINITF(x) (((x) >> S_STATECFGINITF) & M_STATECFGINITF)
+
+#define S_STATECFGINIT    0
+#define M_STATECFGINIT    0xfU
+#define V_STATECFGINIT(x) ((x) << S_STATECFGINIT)
+#define G_STATECFGINIT(x) (((x) >> S_STATECFGINIT) & M_STATECFGINIT)
+
+#define S_LTSSMENABLE_PCIE    12
+#define V_LTSSMENABLE_PCIE(x) ((x) << S_LTSSMENABLE_PCIE)
+#define F_LTSSMENABLE_PCIE    V_LTSSMENABLE_PCIE(1U)
+
+#define S_STATECFGINITF_PCIE    4
+#define M_STATECFGINITF_PCIE    0xffU
+#define V_STATECFGINITF_PCIE(x) ((x) << S_STATECFGINITF_PCIE)
+#define G_STATECFGINITF_PCIE(x) (((x) >> S_STATECFGINITF_PCIE) & M_STATECFGINITF_PCIE)
+
+#define S_STATECFGINIT_PCIE    0
+#define M_STATECFGINIT_PCIE    0xfU
+#define V_STATECFGINIT_PCIE(x) ((x) << S_STATECFGINIT_PCIE)
+#define G_STATECFGINIT_PCIE(x) (((x) >> S_STATECFGINIT_PCIE) & M_STATECFGINIT_PCIE)
+
+#define A_PCIE_CRS 0x56f8
+
+#define S_CRS_ENABLE    0
+#define V_CRS_ENABLE(x) ((x) << S_CRS_ENABLE)
+#define F_CRS_ENABLE    V_CRS_ENABLE(1U)
+
+#define A_PCIE_LTSSM 0x56fc
+
+#define S_LTSSM_ENABLE    0
+#define V_LTSSM_ENABLE(x) ((x) << S_LTSSM_ENABLE)
+#define F_LTSSM_ENABLE    V_LTSSM_ENABLE(1U)
+
+#define S_LTSSM_STALL_DISABLE    1
+#define V_LTSSM_STALL_DISABLE(x) ((x) << S_LTSSM_STALL_DISABLE)
+#define F_LTSSM_STALL_DISABLE    V_LTSSM_STALL_DISABLE(1U)
+
+#define A_PCIE_CORE_ACK_LATENCY_TIMER_REPLAY_TIMER 0x5700
+
+#define S_REPLAY_TIME_LIMIT    16
+#define M_REPLAY_TIME_LIMIT    0xffffU
+#define V_REPLAY_TIME_LIMIT(x) ((x) << S_REPLAY_TIME_LIMIT)
+#define G_REPLAY_TIME_LIMIT(x) (((x) >> S_REPLAY_TIME_LIMIT) & M_REPLAY_TIME_LIMIT)
+
+#define S_ACK_LATENCY_TIMER_LIMIT    0
+#define M_ACK_LATENCY_TIMER_LIMIT    0xffffU
+#define V_ACK_LATENCY_TIMER_LIMIT(x) ((x) << S_ACK_LATENCY_TIMER_LIMIT)
+#define G_ACK_LATENCY_TIMER_LIMIT(x) (((x) >> S_ACK_LATENCY_TIMER_LIMIT) & M_ACK_LATENCY_TIMER_LIMIT)
+
+#define A_PCIE_CORE_VENDOR_SPECIFIC_DLLP 0x5704
+#define A_PCIE_CORE_PORT_FORCE_LINK 0x5708
+
+#define S_LOW_POWER_ENTRANCE_COUNT    24
+#define M_LOW_POWER_ENTRANCE_COUNT    0xffU
+#define V_LOW_POWER_ENTRANCE_COUNT(x) ((x) << S_LOW_POWER_ENTRANCE_COUNT)
+#define G_LOW_POWER_ENTRANCE_COUNT(x) (((x) >> S_LOW_POWER_ENTRANCE_COUNT) & M_LOW_POWER_ENTRANCE_COUNT)
+
+#define S_LINK_STATE    16
+#define M_LINK_STATE    0x3fU
+#define V_LINK_STATE(x) ((x) << S_LINK_STATE)
+#define G_LINK_STATE(x) (((x) >> S_LINK_STATE) & M_LINK_STATE)
+
+#define S_FORCE_LINK    15
+#define V_FORCE_LINK(x) ((x) << S_FORCE_LINK)
+#define F_FORCE_LINK    V_FORCE_LINK(1U)
+
+#define S_LINK_NUMBER    0
+#define M_LINK_NUMBER    0xffU
+#define V_LINK_NUMBER(x) ((x) << S_LINK_NUMBER)
+#define G_LINK_NUMBER(x) (((x) >> S_LINK_NUMBER) & M_LINK_NUMBER)
+
+#define A_PCIE_CORE_ACK_FREQUENCY_L0L1_ASPM_CONTROL 0x570c
+
+#define S_ENTER_ASPM_L1_WO_L0S    30
+#define V_ENTER_ASPM_L1_WO_L0S(x) ((x) << S_ENTER_ASPM_L1_WO_L0S)
+#define F_ENTER_ASPM_L1_WO_L0S    V_ENTER_ASPM_L1_WO_L0S(1U)
+
+#define S_L1_ENTRANCE_LATENCY    27
+#define M_L1_ENTRANCE_LATENCY    0x7U
+#define V_L1_ENTRANCE_LATENCY(x) ((x) << S_L1_ENTRANCE_LATENCY)
+#define G_L1_ENTRANCE_LATENCY(x) (((x) >> S_L1_ENTRANCE_LATENCY) & M_L1_ENTRANCE_LATENCY)
+
+#define S_L0S_ENTRANCE_LATENCY    24
+#define M_L0S_ENTRANCE_LATENCY    0x7U
+#define V_L0S_ENTRANCE_LATENCY(x) ((x) << S_L0S_ENTRANCE_LATENCY)
+#define G_L0S_ENTRANCE_LATENCY(x) (((x) >> S_L0S_ENTRANCE_LATENCY) & M_L0S_ENTRANCE_LATENCY)
+
+#define S_COMMON_CLOCK_N_FTS    16
+#define M_COMMON_CLOCK_N_FTS    0xffU
+#define V_COMMON_CLOCK_N_FTS(x) ((x) << S_COMMON_CLOCK_N_FTS)
+#define G_COMMON_CLOCK_N_FTS(x) (((x) >> S_COMMON_CLOCK_N_FTS) & M_COMMON_CLOCK_N_FTS)
+
+#define S_N_FTS    8
+#define M_N_FTS    0xffU
+#define V_N_FTS(x) ((x) << S_N_FTS)
+#define G_N_FTS(x) (((x) >> S_N_FTS) & M_N_FTS)
+
+#define S_ACK_FREQUENCY    0
+#define M_ACK_FREQUENCY    0xffU
+#define V_ACK_FREQUENCY(x) ((x) << S_ACK_FREQUENCY)
+#define G_ACK_FREQUENCY(x) (((x) >> S_ACK_FREQUENCY) & M_ACK_FREQUENCY)
+
+#define A_PCIE_CORE_PORT_LINK_CONTROL 0x5710
+
+#define S_CROSSLINK_ACTIVE    23
+#define V_CROSSLINK_ACTIVE(x) ((x) << S_CROSSLINK_ACTIVE)
+#define F_CROSSLINK_ACTIVE    V_CROSSLINK_ACTIVE(1U)
+
+#define S_CROSSLINK_ENABLE    22
+#define V_CROSSLINK_ENABLE(x) ((x) << S_CROSSLINK_ENABLE)
+#define F_CROSSLINK_ENABLE    V_CROSSLINK_ENABLE(1U)
+
+#define S_LINK_MODE_ENABLE    16
+#define M_LINK_MODE_ENABLE    0x3fU
+#define V_LINK_MODE_ENABLE(x) ((x) << S_LINK_MODE_ENABLE)
+#define G_LINK_MODE_ENABLE(x) (((x) >> S_LINK_MODE_ENABLE) & M_LINK_MODE_ENABLE)
+
+#define S_FAST_LINK_MODE    7
+#define V_FAST_LINK_MODE(x) ((x) << S_FAST_LINK_MODE)
+#define F_FAST_LINK_MODE    V_FAST_LINK_MODE(1U)
+
+#define S_DLL_LINK_ENABLE    5
+#define V_DLL_LINK_ENABLE(x) ((x) << S_DLL_LINK_ENABLE)
+#define F_DLL_LINK_ENABLE    V_DLL_LINK_ENABLE(1U)
+
+#define S_RESET_ASSERT    3
+#define V_RESET_ASSERT(x) ((x) << S_RESET_ASSERT)
+#define F_RESET_ASSERT    V_RESET_ASSERT(1U)
+
+#define S_LOOPBACK_ENABLE    2
+#define V_LOOPBACK_ENABLE(x) ((x) << S_LOOPBACK_ENABLE)
+#define F_LOOPBACK_ENABLE    V_LOOPBACK_ENABLE(1U)
+
+#define S_SCRAMBLE_DISABLE    1
+#define V_SCRAMBLE_DISABLE(x) ((x) << S_SCRAMBLE_DISABLE)
+#define F_SCRAMBLE_DISABLE    V_SCRAMBLE_DISABLE(1U)
+
+#define S_VENDOR_SPECIFIC_DLLP_REQUEST    0
+#define V_VENDOR_SPECIFIC_DLLP_REQUEST(x) ((x) << S_VENDOR_SPECIFIC_DLLP_REQUEST)
+#define F_VENDOR_SPECIFIC_DLLP_REQUEST    V_VENDOR_SPECIFIC_DLLP_REQUEST(1U)
+
+#define A_PCIE_CORE_LANE_SKEW 0x5714
+
+#define S_DISABLE_DESKEW    31
+#define V_DISABLE_DESKEW(x) ((x) << S_DISABLE_DESKEW)
+#define F_DISABLE_DESKEW    V_DISABLE_DESKEW(1U)
+
+#define S_ACK_NAK_DISABLE    25
+#define V_ACK_NAK_DISABLE(x) ((x) << S_ACK_NAK_DISABLE)
+#define F_ACK_NAK_DISABLE    V_ACK_NAK_DISABLE(1U)
+
+#define S_FLOW_CONTROL_DISABLE    24
+#define V_FLOW_CONTROL_DISABLE(x) ((x) << S_FLOW_CONTROL_DISABLE)
+#define F_FLOW_CONTROL_DISABLE    V_FLOW_CONTROL_DISABLE(1U)
+
+#define S_INSERT_TXSKEW    0
+#define M_INSERT_TXSKEW    0xffffffU
+#define V_INSERT_TXSKEW(x) ((x) << S_INSERT_TXSKEW)
+#define G_INSERT_TXSKEW(x) (((x) >> S_INSERT_TXSKEW) & M_INSERT_TXSKEW)
+
+#define A_PCIE_CORE_SYMBOL_NUMBER 0x5718
+
+#define S_FLOW_CONTROL_TIMER_MODIFIER    24
+#define M_FLOW_CONTROL_TIMER_MODIFIER    0x1fU
+#define V_FLOW_CONTROL_TIMER_MODIFIER(x) ((x) << S_FLOW_CONTROL_TIMER_MODIFIER)
+#define G_FLOW_CONTROL_TIMER_MODIFIER(x) (((x) >> S_FLOW_CONTROL_TIMER_MODIFIER) & M_FLOW_CONTROL_TIMER_MODIFIER)
+
+#define S_ACK_NAK_TIMER_MODIFIER    19
+#define M_ACK_NAK_TIMER_MODIFIER    0x1fU
+#define V_ACK_NAK_TIMER_MODIFIER(x) ((x) << S_ACK_NAK_TIMER_MODIFIER)
+#define G_ACK_NAK_TIMER_MODIFIER(x) (((x) >> S_ACK_NAK_TIMER_MODIFIER) & M_ACK_NAK_TIMER_MODIFIER)
+
+#define S_REPLAY_TIMER_MODIFIER    14
+#define M_REPLAY_TIMER_MODIFIER    0x1fU
+#define V_REPLAY_TIMER_MODIFIER(x) ((x) << S_REPLAY_TIMER_MODIFIER)
+#define G_REPLAY_TIMER_MODIFIER(x) (((x) >> S_REPLAY_TIMER_MODIFIER) & M_REPLAY_TIMER_MODIFIER)
+
+#define S_MAXFUNC    0
+#define M_MAXFUNC    0x7U
+#define V_MAXFUNC(x) ((x) << S_MAXFUNC)
+#define G_MAXFUNC(x) (((x) >> S_MAXFUNC) & M_MAXFUNC)
+
+#define A_PCIE_CORE_SYMBOL_TIMER_FILTER_MASK1 0x571c
+
+#define S_MASK_RADM_FILTER    16
+#define M_MASK_RADM_FILTER    0xffffU
+#define V_MASK_RADM_FILTER(x) ((x) << S_MASK_RADM_FILTER)
+#define G_MASK_RADM_FILTER(x) (((x) >> S_MASK_RADM_FILTER) & M_MASK_RADM_FILTER)
+
+#define S_DISABLE_FC_WATCHDOG    15
+#define V_DISABLE_FC_WATCHDOG(x) ((x) << S_DISABLE_FC_WATCHDOG)
+#define F_DISABLE_FC_WATCHDOG    V_DISABLE_FC_WATCHDOG(1U)
+
+#define S_SKP_INTERVAL    0
+#define M_SKP_INTERVAL    0x7ffU
+#define V_SKP_INTERVAL(x) ((x) << S_SKP_INTERVAL)
+#define G_SKP_INTERVAL(x) (((x) >> S_SKP_INTERVAL) & M_SKP_INTERVAL)
+
+#define A_PCIE_CORE_FILTER_MASK2 0x5720
+#define A_PCIE_CORE_DEBUG_0 0x5728
+#define A_PCIE_CORE_DEBUG_1 0x572c
+#define A_PCIE_CORE_TRANSMIT_POSTED_FC_CREDIT_STATUS 0x5730
+
+#define S_TXPH_FC    12
+#define M_TXPH_FC    0xffU
+#define V_TXPH_FC(x) ((x) << S_TXPH_FC)
+#define G_TXPH_FC(x) (((x) >> S_TXPH_FC) & M_TXPH_FC)
+
+#define S_TXPD_FC    0
+#define M_TXPD_FC    0xfffU
+#define V_TXPD_FC(x) ((x) << S_TXPD_FC)
+#define G_TXPD_FC(x) (((x) >> S_TXPD_FC) & M_TXPD_FC)
+
+#define A_PCIE_CORE_TRANSMIT_NONPOSTED_FC_CREDIT_STATUS 0x5734
+
+#define S_TXNPH_FC    12
+#define M_TXNPH_FC    0xffU
+#define V_TXNPH_FC(x) ((x) << S_TXNPH_FC)
+#define G_TXNPH_FC(x) (((x) >> S_TXNPH_FC) & M_TXNPH_FC)
+
+#define S_TXNPD_FC    0
+#define M_TXNPD_FC    0xfffU
+#define V_TXNPD_FC(x) ((x) << S_TXNPD_FC)
+#define G_TXNPD_FC(x) (((x) >> S_TXNPD_FC) & M_TXNPD_FC)
+
+#define A_PCIE_CORE_TRANSMIT_COMPLETION_FC_CREDIT_STATUS 0x5738
+
+#define S_TXCPLH_FC    12
+#define M_TXCPLH_FC    0xffU
+#define V_TXCPLH_FC(x) ((x) << S_TXCPLH_FC)
+#define G_TXCPLH_FC(x) (((x) >> S_TXCPLH_FC) & M_TXCPLH_FC)
+
+#define S_TXCPLD_FC    0
+#define M_TXCPLD_FC    0xfffU
+#define V_TXCPLD_FC(x) ((x) << S_TXCPLD_FC)
+#define G_TXCPLD_FC(x) (((x) >> S_TXCPLD_FC) & M_TXCPLD_FC)
+
+#define A_PCIE_CORE_QUEUE_STATUS 0x573c
+
+#define S_RXQUEUE_NOT_EMPTY    2
+#define V_RXQUEUE_NOT_EMPTY(x) ((x) << S_RXQUEUE_NOT_EMPTY)
+#define F_RXQUEUE_NOT_EMPTY    V_RXQUEUE_NOT_EMPTY(1U)
+
+#define S_TXRETRYBUF_NOT_EMPTY    1
+#define V_TXRETRYBUF_NOT_EMPTY(x) ((x) << S_TXRETRYBUF_NOT_EMPTY)
+#define F_TXRETRYBUF_NOT_EMPTY    V_TXRETRYBUF_NOT_EMPTY(1U)
+
+#define S_RXTLP_FC_NOT_RETURNED    0
+#define V_RXTLP_FC_NOT_RETURNED(x) ((x) << S_RXTLP_FC_NOT_RETURNED)
+#define F_RXTLP_FC_NOT_RETURNED    V_RXTLP_FC_NOT_RETURNED(1U)
+
+#define A_PCIE_CORE_VC_TRANSMIT_ARBITRATION_1 0x5740
+
+#define S_VC3_WRR    24
+#define M_VC3_WRR    0xffU
+#define V_VC3_WRR(x) ((x) << S_VC3_WRR)
+#define G_VC3_WRR(x) (((x) >> S_VC3_WRR) & M_VC3_WRR)
+
+#define S_VC2_WRR    16
+#define M_VC2_WRR    0xffU
+#define V_VC2_WRR(x) ((x) << S_VC2_WRR)
+#define G_VC2_WRR(x) (((x) >> S_VC2_WRR) & M_VC2_WRR)
+
+#define S_VC1_WRR    8
+#define M_VC1_WRR    0xffU
+#define V_VC1_WRR(x) ((x) << S_VC1_WRR)
+#define G_VC1_WRR(x) (((x) >> S_VC1_WRR) & M_VC1_WRR)
+
+#define S_VC0_WRR    0
+#define M_VC0_WRR    0xffU
+#define V_VC0_WRR(x) ((x) << S_VC0_WRR)
+#define G_VC0_WRR(x) (((x) >> S_VC0_WRR) & M_VC0_WRR)
+
+#define A_PCIE_CORE_VC_TRANSMIT_ARBITRATION_2 0x5744
+
+#define S_VC7_WRR    24
+#define M_VC7_WRR    0xffU
+#define V_VC7_WRR(x) ((x) << S_VC7_WRR)
+#define G_VC7_WRR(x) (((x) >> S_VC7_WRR) & M_VC7_WRR)
+
+#define S_VC6_WRR    16
+#define M_VC6_WRR    0xffU
+#define V_VC6_WRR(x) ((x) << S_VC6_WRR)
+#define G_VC6_WRR(x) (((x) >> S_VC6_WRR) & M_VC6_WRR)
+
+#define S_VC5_WRR    8
+#define M_VC5_WRR    0xffU
+#define V_VC5_WRR(x) ((x) << S_VC5_WRR)
+#define G_VC5_WRR(x) (((x) >> S_VC5_WRR) & M_VC5_WRR)
+
+#define S_VC4_WRR    0
+#define M_VC4_WRR    0xffU
+#define V_VC4_WRR(x) ((x) << S_VC4_WRR)
+#define G_VC4_WRR(x) (((x) >> S_VC4_WRR) & M_VC4_WRR)
+
+#define A_PCIE_CORE_VC0_POSTED_RECEIVE_QUEUE_CONTROL 0x5748
+
+#define S_VC0_RX_ORDERING    31
+#define V_VC0_RX_ORDERING(x) ((x) << S_VC0_RX_ORDERING)
+#define F_VC0_RX_ORDERING    V_VC0_RX_ORDERING(1U)
+
+#define S_VC0_TLP_ORDERING    30
+#define V_VC0_TLP_ORDERING(x) ((x) << S_VC0_TLP_ORDERING)
+#define F_VC0_TLP_ORDERING    V_VC0_TLP_ORDERING(1U)
+
+#define S_VC0_PTLP_QUEUE_MODE    21
+#define M_VC0_PTLP_QUEUE_MODE    0x7U
+#define V_VC0_PTLP_QUEUE_MODE(x) ((x) << S_VC0_PTLP_QUEUE_MODE)
+#define G_VC0_PTLP_QUEUE_MODE(x) (((x) >> S_VC0_PTLP_QUEUE_MODE) & M_VC0_PTLP_QUEUE_MODE)
+
+#define S_VC0_PH_CREDITS    12
+#define M_VC0_PH_CREDITS    0xffU
+#define V_VC0_PH_CREDITS(x) ((x) << S_VC0_PH_CREDITS)
+#define G_VC0_PH_CREDITS(x) (((x) >> S_VC0_PH_CREDITS) & M_VC0_PH_CREDITS)
+
+#define S_VC0_PD_CREDITS    0
+#define M_VC0_PD_CREDITS    0xfffU
+#define V_VC0_PD_CREDITS(x) ((x) << S_VC0_PD_CREDITS)
+#define G_VC0_PD_CREDITS(x) (((x) >> S_VC0_PD_CREDITS) & M_VC0_PD_CREDITS)
+
+#define A_PCIE_CORE_VC0_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x574c
+
+#define S_VC0_NPTLP_QUEUE_MODE    21
+#define M_VC0_NPTLP_QUEUE_MODE    0x7U
+#define V_VC0_NPTLP_QUEUE_MODE(x) ((x) << S_VC0_NPTLP_QUEUE_MODE)
+#define G_VC0_NPTLP_QUEUE_MODE(x) (((x) >> S_VC0_NPTLP_QUEUE_MODE) & M_VC0_NPTLP_QUEUE_MODE)
+
+#define S_VC0_NPH_CREDITS    12
+#define M_VC0_NPH_CREDITS    0xffU
+#define V_VC0_NPH_CREDITS(x) ((x) << S_VC0_NPH_CREDITS)
+#define G_VC0_NPH_CREDITS(x) (((x) >> S_VC0_NPH_CREDITS) & M_VC0_NPH_CREDITS)
+
+#define S_VC0_NPD_CREDITS    0
+#define M_VC0_NPD_CREDITS    0xfffU
+#define V_VC0_NPD_CREDITS(x) ((x) << S_VC0_NPD_CREDITS)
+#define G_VC0_NPD_CREDITS(x) (((x) >> S_VC0_NPD_CREDITS) & M_VC0_NPD_CREDITS)
+
+#define A_PCIE_CORE_VC0_COMPLETION_RECEIVE_QUEUE_CONTROL 0x5750
+
+#define S_VC0_CPLTLP_QUEUE_MODE    21
+#define M_VC0_CPLTLP_QUEUE_MODE    0x7U
+#define V_VC0_CPLTLP_QUEUE_MODE(x) ((x) << S_VC0_CPLTLP_QUEUE_MODE)
+#define G_VC0_CPLTLP_QUEUE_MODE(x) (((x) >> S_VC0_CPLTLP_QUEUE_MODE) & M_VC0_CPLTLP_QUEUE_MODE)
+
+#define S_VC0_CPLH_CREDITS    12
+#define M_VC0_CPLH_CREDITS    0xffU
+#define V_VC0_CPLH_CREDITS(x) ((x) << S_VC0_CPLH_CREDITS)
+#define G_VC0_CPLH_CREDITS(x) (((x) >> S_VC0_CPLH_CREDITS) & M_VC0_CPLH_CREDITS)
+
+#define S_VC0_CPLD_CREDITS    0
+#define M_VC0_CPLD_CREDITS    0xfffU
+#define V_VC0_CPLD_CREDITS(x) ((x) << S_VC0_CPLD_CREDITS)
+#define G_VC0_CPLD_CREDITS(x) (((x) >> S_VC0_CPLD_CREDITS) & M_VC0_CPLD_CREDITS)
+
+#define A_PCIE_CORE_VC1_POSTED_RECEIVE_QUEUE_CONTROL 0x5754
+
+#define S_VC1_TLP_ORDERING    30
+#define V_VC1_TLP_ORDERING(x) ((x) << S_VC1_TLP_ORDERING)
+#define F_VC1_TLP_ORDERING    V_VC1_TLP_ORDERING(1U)
+
+#define S_VC1_PTLP_QUEUE_MODE    21
+#define M_VC1_PTLP_QUEUE_MODE    0x7U
+#define V_VC1_PTLP_QUEUE_MODE(x) ((x) << S_VC1_PTLP_QUEUE_MODE)
+#define G_VC1_PTLP_QUEUE_MODE(x) (((x) >> S_VC1_PTLP_QUEUE_MODE) & M_VC1_PTLP_QUEUE_MODE)
+
+#define S_VC1_PH_CREDITS    12
+#define M_VC1_PH_CREDITS    0xffU
+#define V_VC1_PH_CREDITS(x) ((x) << S_VC1_PH_CREDITS)
+#define G_VC1_PH_CREDITS(x) (((x) >> S_VC1_PH_CREDITS) & M_VC1_PH_CREDITS)
+
+#define S_VC1_PD_CREDITS    0
+#define M_VC1_PD_CREDITS    0xfffU
+#define V_VC1_PD_CREDITS(x) ((x) << S_VC1_PD_CREDITS)
+#define G_VC1_PD_CREDITS(x) (((x) >> S_VC1_PD_CREDITS) & M_VC1_PD_CREDITS)
+
+#define A_PCIE_CORE_VC1_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x5758
+
+#define S_VC1_NPTLP_QUEUE_MODE    21
+#define M_VC1_NPTLP_QUEUE_MODE    0x7U
+#define V_VC1_NPTLP_QUEUE_MODE(x) ((x) << S_VC1_NPTLP_QUEUE_MODE)
+#define G_VC1_NPTLP_QUEUE_MODE(x) (((x) >> S_VC1_NPTLP_QUEUE_MODE) & M_VC1_NPTLP_QUEUE_MODE)
+
+#define S_VC1_NPH_CREDITS    12
+#define M_VC1_NPH_CREDITS    0xffU
+#define V_VC1_NPH_CREDITS(x) ((x) << S_VC1_NPH_CREDITS)
+#define G_VC1_NPH_CREDITS(x) (((x) >> S_VC1_NPH_CREDITS) & M_VC1_NPH_CREDITS)
+
+#define S_VC1_NPD_CREDITS    0
+#define M_VC1_NPD_CREDITS    0xfffU
+#define V_VC1_NPD_CREDITS(x) ((x) << S_VC1_NPD_CREDITS)
+#define G_VC1_NPD_CREDITS(x) (((x) >> S_VC1_NPD_CREDITS) & M_VC1_NPD_CREDITS)
+
+#define A_PCIE_CORE_VC1_COMPLETION_RECEIVE_QUEUE_CONTROL 0x575c
+
+#define S_VC1_CPLTLP_QUEUE_MODE    21
+#define M_VC1_CPLTLP_QUEUE_MODE    0x7U
+#define V_VC1_CPLTLP_QUEUE_MODE(x) ((x) << S_VC1_CPLTLP_QUEUE_MODE)
+#define G_VC1_CPLTLP_QUEUE_MODE(x) (((x) >> S_VC1_CPLTLP_QUEUE_MODE) & M_VC1_CPLTLP_QUEUE_MODE)
+
+#define S_VC1_CPLH_CREDITS    12
+#define M_VC1_CPLH_CREDITS    0xffU
+#define V_VC1_CPLH_CREDITS(x) ((x) << S_VC1_CPLH_CREDITS)
+#define G_VC1_CPLH_CREDITS(x) (((x) >> S_VC1_CPLH_CREDITS) & M_VC1_CPLH_CREDITS)
+
+#define S_VC1_CPLD_CREDITS    0
+#define M_VC1_CPLD_CREDITS    0xfffU
+#define V_VC1_CPLD_CREDITS(x) ((x) << S_VC1_CPLD_CREDITS)
+#define G_VC1_CPLD_CREDITS(x) (((x) >> S_VC1_CPLD_CREDITS) & M_VC1_CPLD_CREDITS)
+
+#define A_PCIE_CORE_LINK_WIDTH_SPEED_CHANGE 0x580c
+
+#define S_SEL_DEEMPHASIS    20
+#define V_SEL_DEEMPHASIS(x) ((x) << S_SEL_DEEMPHASIS)
+#define F_SEL_DEEMPHASIS    V_SEL_DEEMPHASIS(1U)
+
+#define S_TXCMPLRCV    19
+#define V_TXCMPLRCV(x) ((x) << S_TXCMPLRCV)
+#define F_TXCMPLRCV    V_TXCMPLRCV(1U)
+
+#define S_PHYTXSWING    18
+#define V_PHYTXSWING(x) ((x) << S_PHYTXSWING)
+#define F_PHYTXSWING    V_PHYTXSWING(1U)
+
+#define S_DIRSPDCHANGE    17
+#define V_DIRSPDCHANGE(x) ((x) << S_DIRSPDCHANGE)
+#define F_DIRSPDCHANGE    V_DIRSPDCHANGE(1U)
+
+#define S_NUM_LANES    8
+#define M_NUM_LANES    0x1ffU
+#define V_NUM_LANES(x) ((x) << S_NUM_LANES)
+#define G_NUM_LANES(x) (((x) >> S_NUM_LANES) & M_NUM_LANES)
+
+#define S_NFTS_GEN2_3    0
+#define M_NFTS_GEN2_3    0xffU
+#define V_NFTS_GEN2_3(x) ((x) << S_NFTS_GEN2_3)
+#define G_NFTS_GEN2_3(x) (((x) >> S_NFTS_GEN2_3) & M_NFTS_GEN2_3)
+
+#define S_AUTO_LANE_FLIP_CTRL_EN    16
+#define V_AUTO_LANE_FLIP_CTRL_EN(x) ((x) << S_AUTO_LANE_FLIP_CTRL_EN)
+#define F_AUTO_LANE_FLIP_CTRL_EN    V_AUTO_LANE_FLIP_CTRL_EN(1U)
+
+#define S_T6_NUM_LANES    8
+#define M_T6_NUM_LANES    0x1fU
+#define V_T6_NUM_LANES(x) ((x) << S_T6_NUM_LANES)
+#define G_T6_NUM_LANES(x) (((x) >> S_T6_NUM_LANES) & M_T6_NUM_LANES)
+
+#define A_PCIE_CORE_PHY_STATUS 0x5810
+#define A_PCIE_CORE_PHY_CONTROL 0x5814
+#define A_PCIE_CORE_GEN3_CONTROL 0x5890
+
+#define S_DC_BALANCE_DISABLE    18
+#define V_DC_BALANCE_DISABLE(x) ((x) << S_DC_BALANCE_DISABLE)
+#define F_DC_BALANCE_DISABLE    V_DC_BALANCE_DISABLE(1U)
+
+#define S_DLLP_DELAY_DISABLE    17
+#define V_DLLP_DELAY_DISABLE(x) ((x) << S_DLLP_DELAY_DISABLE)
+#define F_DLLP_DELAY_DISABLE    V_DLLP_DELAY_DISABLE(1U)
+
+#define S_EQL_DISABLE    16
+#define V_EQL_DISABLE(x) ((x) << S_EQL_DISABLE)
+#define F_EQL_DISABLE    V_EQL_DISABLE(1U)
+
+#define S_EQL_REDO_DISABLE    11
+#define V_EQL_REDO_DISABLE(x) ((x) << S_EQL_REDO_DISABLE)
+#define F_EQL_REDO_DISABLE    V_EQL_REDO_DISABLE(1U)
+
+#define S_EQL_EIEOS_CNTRST_DISABLE    10
+#define V_EQL_EIEOS_CNTRST_DISABLE(x) ((x) << S_EQL_EIEOS_CNTRST_DISABLE)
+#define F_EQL_EIEOS_CNTRST_DISABLE    V_EQL_EIEOS_CNTRST_DISABLE(1U)
+
+#define S_EQL_PH2_PH3_DISABLE    9
+#define V_EQL_PH2_PH3_DISABLE(x) ((x) << S_EQL_PH2_PH3_DISABLE)
+#define F_EQL_PH2_PH3_DISABLE    V_EQL_PH2_PH3_DISABLE(1U)
+
+#define S_DISABLE_SCRAMBLER    8
+#define V_DISABLE_SCRAMBLER(x) ((x) << S_DISABLE_SCRAMBLER)
+#define F_DISABLE_SCRAMBLER    V_DISABLE_SCRAMBLER(1U)
+
+#define A_PCIE_CORE_GEN3_EQ_FS_LF 0x5894
+
+#define S_FULL_SWING    6
+#define M_FULL_SWING    0x3fU
+#define V_FULL_SWING(x) ((x) << S_FULL_SWING)
+#define G_FULL_SWING(x) (((x) >> S_FULL_SWING) & M_FULL_SWING)
+
+#define S_LOW_FREQUENCY    0
+#define M_LOW_FREQUENCY    0x3fU
+#define V_LOW_FREQUENCY(x) ((x) << S_LOW_FREQUENCY)
+#define G_LOW_FREQUENCY(x) (((x) >> S_LOW_FREQUENCY) & M_LOW_FREQUENCY)
+
+#define A_PCIE_CORE_GEN3_EQ_PRESET_COEFF 0x5898
+
+#define S_POSTCURSOR    12
+#define M_POSTCURSOR    0x3fU
+#define V_POSTCURSOR(x) ((x) << S_POSTCURSOR)
+#define G_POSTCURSOR(x) (((x) >> S_POSTCURSOR) & M_POSTCURSOR)
+
+#define S_CURSOR    6
+#define M_CURSOR    0x3fU
+#define V_CURSOR(x) ((x) << S_CURSOR)
+#define G_CURSOR(x) (((x) >> S_CURSOR) & M_CURSOR)
+
+#define S_PRECURSOR    0
+#define M_PRECURSOR    0x3fU
+#define V_PRECURSOR(x) ((x) << S_PRECURSOR)
+#define G_PRECURSOR(x) (((x) >> S_PRECURSOR) & M_PRECURSOR)
+
+#define A_PCIE_CORE_GEN3_EQ_PRESET_INDEX 0x589c
+
+#define S_INDEX    0
+#define M_INDEX    0xfU
+#define V_INDEX(x) ((x) << S_INDEX)
+#define G_INDEX(x) (((x) >> S_INDEX) & M_INDEX)
+
+#define A_PCIE_CORE_GEN3_EQ_STATUS 0x58a4
+
+#define S_LEGALITY_STATUS    0
+#define V_LEGALITY_STATUS(x) ((x) << S_LEGALITY_STATUS)
+#define F_LEGALITY_STATUS    V_LEGALITY_STATUS(1U)
+
+#define A_PCIE_CORE_GEN3_EQ_CONTROL 0x58a8
+
+#define S_INCLUDE_INITIAL_FOM    24
+#define V_INCLUDE_INITIAL_FOM(x) ((x) << S_INCLUDE_INITIAL_FOM)
+#define F_INCLUDE_INITIAL_FOM    V_INCLUDE_INITIAL_FOM(1U)
+
+#define S_PRESET_REQUEST_VECTOR    8
+#define M_PRESET_REQUEST_VECTOR    0xffffU
+#define V_PRESET_REQUEST_VECTOR(x) ((x) << S_PRESET_REQUEST_VECTOR)
+#define G_PRESET_REQUEST_VECTOR(x) (((x) >> S_PRESET_REQUEST_VECTOR) & M_PRESET_REQUEST_VECTOR)
+
+#define S_PHASE23_2MS_TIMEOUT_DISABLE    5
+#define V_PHASE23_2MS_TIMEOUT_DISABLE(x) ((x) << S_PHASE23_2MS_TIMEOUT_DISABLE)
+#define F_PHASE23_2MS_TIMEOUT_DISABLE    V_PHASE23_2MS_TIMEOUT_DISABLE(1U)
+
+#define S_AFTER24MS    4
+#define V_AFTER24MS(x) ((x) << S_AFTER24MS)
+#define F_AFTER24MS    V_AFTER24MS(1U)
+
+#define S_FEEDBACK_MODE    0
+#define M_FEEDBACK_MODE    0xfU
+#define V_FEEDBACK_MODE(x) ((x) << S_FEEDBACK_MODE)
+#define G_FEEDBACK_MODE(x) (((x) >> S_FEEDBACK_MODE) & M_FEEDBACK_MODE)
+
+#define A_PCIE_CORE_GEN3_EQ_DIRCHANGE_FEEDBACK 0x58ac
+
+#define S_WINAPERTURE_CPLUS1    14
+#define M_WINAPERTURE_CPLUS1    0xfU
+#define V_WINAPERTURE_CPLUS1(x) ((x) << S_WINAPERTURE_CPLUS1)
+#define G_WINAPERTURE_CPLUS1(x) (((x) >> S_WINAPERTURE_CPLUS1) & M_WINAPERTURE_CPLUS1)
+
+#define S_WINAPERTURE_CMINS1    10
+#define M_WINAPERTURE_CMINS1    0xfU
+#define V_WINAPERTURE_CMINS1(x) ((x) << S_WINAPERTURE_CMINS1)
+#define G_WINAPERTURE_CMINS1(x) (((x) >> S_WINAPERTURE_CMINS1) & M_WINAPERTURE_CMINS1)
+
+#define S_CONVERGENCE_WINDEPTH    5
+#define M_CONVERGENCE_WINDEPTH    0x1fU
+#define V_CONVERGENCE_WINDEPTH(x) ((x) << S_CONVERGENCE_WINDEPTH)
+#define G_CONVERGENCE_WINDEPTH(x) (((x) >> S_CONVERGENCE_WINDEPTH) & M_CONVERGENCE_WINDEPTH)
+
+#define S_EQMASTERPHASE_MINTIME    0
+#define M_EQMASTERPHASE_MINTIME    0x1fU
+#define V_EQMASTERPHASE_MINTIME(x) ((x) << S_EQMASTERPHASE_MINTIME)
+#define G_EQMASTERPHASE_MINTIME(x) (((x) >> S_EQMASTERPHASE_MINTIME) & M_EQMASTERPHASE_MINTIME)
+
+#define A_PCIE_CORE_PIPE_CONTROL 0x58b8
+
+#define S_PIPE_LOOPBACK_EN    0
+#define V_PIPE_LOOPBACK_EN(x) ((x) << S_PIPE_LOOPBACK_EN)
+#define F_PIPE_LOOPBACK_EN    V_PIPE_LOOPBACK_EN(1U)
+
+#define S_T6_PIPE_LOOPBACK_EN    31
+#define V_T6_PIPE_LOOPBACK_EN(x) ((x) << S_T6_PIPE_LOOPBACK_EN)
+#define F_T6_PIPE_LOOPBACK_EN    V_T6_PIPE_LOOPBACK_EN(1U)
+
+#define A_PCIE_CORE_DBI_RO_WE 0x58bc
+
+#define S_READONLY_WRITEEN    0
+#define V_READONLY_WRITEEN(x) ((x) << S_READONLY_WRITEEN)
+#define F_READONLY_WRITEEN    V_READONLY_WRITEEN(1U)
+
 #define A_PCIE_CORE_UTL_SYSTEM_BUS_CONTROL 0x5900
 
 #define S_SMTD    27
@@ -2489,6 +6240,139 @@
 #define V_BRVN(x) ((x) << S_BRVN)
 #define G_BRVN(x) (((x) >> S_BRVN) & M_BRVN)
 
+#define A_PCIE_T5_DMA_CFG 0x5940
+
+#define S_T5_DMA_MAXREQCNT    20
+#define M_T5_DMA_MAXREQCNT    0xffU
+#define V_T5_DMA_MAXREQCNT(x) ((x) << S_T5_DMA_MAXREQCNT)
+#define G_T5_DMA_MAXREQCNT(x) (((x) >> S_T5_DMA_MAXREQCNT) & M_T5_DMA_MAXREQCNT)
+
+#define S_T5_DMA_MAXRDREQSIZE    17
+#define M_T5_DMA_MAXRDREQSIZE    0x7U
+#define V_T5_DMA_MAXRDREQSIZE(x) ((x) << S_T5_DMA_MAXRDREQSIZE)
+#define G_T5_DMA_MAXRDREQSIZE(x) (((x) >> S_T5_DMA_MAXRDREQSIZE) & M_T5_DMA_MAXRDREQSIZE)
+
+#define S_T5_DMA_MAXRSPCNT    8
+#define M_T5_DMA_MAXRSPCNT    0x1ffU
+#define V_T5_DMA_MAXRSPCNT(x) ((x) << S_T5_DMA_MAXRSPCNT)
+#define G_T5_DMA_MAXRSPCNT(x) (((x) >> S_T5_DMA_MAXRSPCNT) & M_T5_DMA_MAXRSPCNT)
+
+#define S_SEQCHKDIS    7
+#define V_SEQCHKDIS(x) ((x) << S_SEQCHKDIS)
+#define F_SEQCHKDIS    V_SEQCHKDIS(1U)
+
+#define S_MINTAG    0
+#define M_MINTAG    0x7fU
+#define V_MINTAG(x) ((x) << S_MINTAG)
+#define G_MINTAG(x) (((x) >> S_MINTAG) & M_MINTAG)
+
+#define S_T6_T5_DMA_MAXREQCNT    20
+#define M_T6_T5_DMA_MAXREQCNT    0x7fU
+#define V_T6_T5_DMA_MAXREQCNT(x) ((x) << S_T6_T5_DMA_MAXREQCNT)
+#define G_T6_T5_DMA_MAXREQCNT(x) (((x) >> S_T6_T5_DMA_MAXREQCNT) & M_T6_T5_DMA_MAXREQCNT)
+
+#define S_T6_T5_DMA_MAXRSPCNT    9
+#define M_T6_T5_DMA_MAXRSPCNT    0xffU
+#define V_T6_T5_DMA_MAXRSPCNT(x) ((x) << S_T6_T5_DMA_MAXRSPCNT)
+#define G_T6_T5_DMA_MAXRSPCNT(x) (((x) >> S_T6_T5_DMA_MAXRSPCNT) & M_T6_T5_DMA_MAXRSPCNT)
+
+#define S_T6_SEQCHKDIS    8
+#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS)
+#define F_T6_SEQCHKDIS    V_T6_SEQCHKDIS(1U)
+
+#define S_T6_MINTAG    0
+#define M_T6_MINTAG    0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+
+#define A_PCIE_T5_DMA_STAT 0x5944
+
+#define S_DMA_RESPCNT    20
+#define M_DMA_RESPCNT    0xfffU
+#define V_DMA_RESPCNT(x) ((x) << S_DMA_RESPCNT)
+#define G_DMA_RESPCNT(x) (((x) >> S_DMA_RESPCNT) & M_DMA_RESPCNT)
+
+#define S_DMA_RDREQCNT    12
+#define M_DMA_RDREQCNT    0xffU
+#define V_DMA_RDREQCNT(x) ((x) << S_DMA_RDREQCNT)
+#define G_DMA_RDREQCNT(x) (((x) >> S_DMA_RDREQCNT) & M_DMA_RDREQCNT)
+
+#define S_DMA_WRREQCNT    0
+#define M_DMA_WRREQCNT    0x7ffU
+#define V_DMA_WRREQCNT(x) ((x) << S_DMA_WRREQCNT)
+#define G_DMA_WRREQCNT(x) (((x) >> S_DMA_WRREQCNT) & M_DMA_WRREQCNT)
+
+#define S_T6_DMA_RESPCNT    20
+#define M_T6_DMA_RESPCNT    0x3ffU
+#define V_T6_DMA_RESPCNT(x) ((x) << S_T6_DMA_RESPCNT)
+#define G_T6_DMA_RESPCNT(x) (((x) >> S_T6_DMA_RESPCNT) & M_T6_DMA_RESPCNT)
+
+#define S_T6_DMA_RDREQCNT    12
+#define M_T6_DMA_RDREQCNT    0x3fU
+#define V_T6_DMA_RDREQCNT(x) ((x) << S_T6_DMA_RDREQCNT)
+#define G_T6_DMA_RDREQCNT(x) (((x) >> S_T6_DMA_RDREQCNT) & M_T6_DMA_RDREQCNT)
+
+#define S_T6_DMA_WRREQCNT    0
+#define M_T6_DMA_WRREQCNT    0x1ffU
+#define V_T6_DMA_WRREQCNT(x) ((x) << S_T6_DMA_WRREQCNT)
+#define G_T6_DMA_WRREQCNT(x) (((x) >> S_T6_DMA_WRREQCNT) & M_T6_DMA_WRREQCNT)
+
+#define A_PCIE_T5_DMA_STAT2 0x5948
+
+#define S_COOKIECNT    24
+#define M_COOKIECNT    0xfU
+#define V_COOKIECNT(x) ((x) << S_COOKIECNT)
+#define G_COOKIECNT(x) (((x) >> S_COOKIECNT) & M_COOKIECNT)
+
+#define S_RDSEQNUMUPDCNT    20
+#define M_RDSEQNUMUPDCNT    0xfU
+#define V_RDSEQNUMUPDCNT(x) ((x) << S_RDSEQNUMUPDCNT)
+#define G_RDSEQNUMUPDCNT(x) (((x) >> S_RDSEQNUMUPDCNT) & M_RDSEQNUMUPDCNT)
+
+#define S_SIREQCNT    16
+#define M_SIREQCNT    0xfU
+#define V_SIREQCNT(x) ((x) << S_SIREQCNT)
+#define G_SIREQCNT(x) (((x) >> S_SIREQCNT) & M_SIREQCNT)
+
+#define S_WREOPMATCHSOP    12
+#define V_WREOPMATCHSOP(x) ((x) << S_WREOPMATCHSOP)
+#define F_WREOPMATCHSOP    V_WREOPMATCHSOP(1U)
+
+#define S_WRSOPCNT    8
+#define M_WRSOPCNT    0xfU
+#define V_WRSOPCNT(x) ((x) << S_WRSOPCNT)
+#define G_WRSOPCNT(x) (((x) >> S_WRSOPCNT) & M_WRSOPCNT)
+
+#define S_RDSOPCNT    0
+#define M_RDSOPCNT    0xffU
+#define V_RDSOPCNT(x) ((x) << S_RDSOPCNT)
+#define G_RDSOPCNT(x) (((x) >> S_RDSOPCNT) & M_RDSOPCNT)
+
+#define A_PCIE_T5_DMA_STAT3 0x594c
+
+#define S_ATMREQSOPCNT    24
+#define M_ATMREQSOPCNT    0xffU
+#define V_ATMREQSOPCNT(x) ((x) << S_ATMREQSOPCNT)
+#define G_ATMREQSOPCNT(x) (((x) >> S_ATMREQSOPCNT) & M_ATMREQSOPCNT)
+
+#define S_ATMEOPMATCHSOP    17
+#define V_ATMEOPMATCHSOP(x) ((x) << S_ATMEOPMATCHSOP)
+#define F_ATMEOPMATCHSOP    V_ATMEOPMATCHSOP(1U)
+
+#define S_RSPEOPMATCHSOP    16
+#define V_RSPEOPMATCHSOP(x) ((x) << S_RSPEOPMATCHSOP)
+#define F_RSPEOPMATCHSOP    V_RSPEOPMATCHSOP(1U)
+
+#define S_RSPERRCNT    8
+#define M_RSPERRCNT    0xffU
+#define V_RSPERRCNT(x) ((x) << S_RSPERRCNT)
+#define G_RSPERRCNT(x) (((x) >> S_RSPERRCNT) & M_RSPERRCNT)
+
+#define S_RSPSOPCNT    0
+#define M_RSPSOPCNT    0xffU
+#define V_RSPSOPCNT(x) ((x) << S_RSPSOPCNT)
+#define G_RSPSOPCNT(x) (((x) >> S_RSPSOPCNT) & M_RSPSOPCNT)
+
 #define A_PCIE_CORE_OUTBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5960
 
 #define S_OP0H    24
@@ -2599,6 +6483,58 @@
 #define V_ON3H(x) ((x) << S_ON3H)
 #define G_ON3H(x) (((x) >> S_ON3H) & M_ON3H)
 
+#define A_PCIE_T5_CMD_CFG 0x5980
+
+#define S_T5_CMD_MAXRDREQSIZE    17
+#define M_T5_CMD_MAXRDREQSIZE    0x7U
+#define V_T5_CMD_MAXRDREQSIZE(x) ((x) << S_T5_CMD_MAXRDREQSIZE)
+#define G_T5_CMD_MAXRDREQSIZE(x) (((x) >> S_T5_CMD_MAXRDREQSIZE) & M_T5_CMD_MAXRDREQSIZE)
+
+#define S_T5_CMD_MAXRSPCNT    8
+#define M_T5_CMD_MAXRSPCNT    0xffU
+#define V_T5_CMD_MAXRSPCNT(x) ((x) << S_T5_CMD_MAXRSPCNT)
+#define G_T5_CMD_MAXRSPCNT(x) (((x) >> S_T5_CMD_MAXRSPCNT) & M_T5_CMD_MAXRSPCNT)
+
+#define S_USECMDPOOL    7
+#define V_USECMDPOOL(x) ((x) << S_USECMDPOOL)
+#define F_USECMDPOOL    V_USECMDPOOL(1U)
+
+#define S_T6_T5_CMD_MAXRSPCNT    9
+#define M_T6_T5_CMD_MAXRSPCNT    0x3fU
+#define V_T6_T5_CMD_MAXRSPCNT(x) ((x) << S_T6_T5_CMD_MAXRSPCNT)
+#define G_T6_T5_CMD_MAXRSPCNT(x) (((x) >> S_T6_T5_CMD_MAXRSPCNT) & M_T6_T5_CMD_MAXRSPCNT)
+
+#define S_T6_USECMDPOOL    8
+#define V_T6_USECMDPOOL(x) ((x) << S_T6_USECMDPOOL)
+#define F_T6_USECMDPOOL    V_T6_USECMDPOOL(1U)
+
+#define S_T6_MINTAG    0
+#define M_T6_MINTAG    0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+
+#define A_PCIE_T5_CMD_STAT 0x5984
+
+#define S_T5_STAT_RSPCNT    20
+#define M_T5_STAT_RSPCNT    0x7ffU
+#define V_T5_STAT_RSPCNT(x) ((x) << S_T5_STAT_RSPCNT)
+#define G_T5_STAT_RSPCNT(x) (((x) >> S_T5_STAT_RSPCNT) & M_T5_STAT_RSPCNT)
+
+#define S_RDREQCNT    12
+#define M_RDREQCNT    0x1fU
+#define V_RDREQCNT(x) ((x) << S_RDREQCNT)
+#define G_RDREQCNT(x) (((x) >> S_RDREQCNT) & M_RDREQCNT)
+
+#define S_T6_T5_STAT_RSPCNT    20
+#define M_T6_T5_STAT_RSPCNT    0xffU
+#define V_T6_T5_STAT_RSPCNT(x) ((x) << S_T6_T5_STAT_RSPCNT)
+#define G_T6_T5_STAT_RSPCNT(x) (((x) >> S_T6_T5_STAT_RSPCNT) & M_T6_T5_STAT_RSPCNT)
+
+#define S_T6_RDREQCNT    12
+#define M_T6_RDREQCNT    0xfU
+#define V_T6_RDREQCNT(x) ((x) << S_T6_RDREQCNT)
+#define G_T6_RDREQCNT(x) (((x) >> S_T6_RDREQCNT) & M_T6_RDREQCNT)
+
 #define A_PCIE_CORE_INBOUND_NON_POSTED_REQUESTS_BUFFER_ALLOCATION 0x5988
 
 #define S_IN0H    24
@@ -2621,6 +6557,8 @@
 #define V_IN3H(x) ((x) << S_IN3H)
 #define G_IN3H(x) (((x) >> S_IN3H) & M_IN3H)
 
+#define A_PCIE_T5_CMD_STAT2 0x5988
+#define A_PCIE_T5_CMD_STAT3 0x598c
 #define A_PCIE_CORE_PCI_EXPRESS_TAGS_ALLOCATION 0x5990
 
 #define S_OC0T    24
@@ -2904,6 +6842,42 @@
 #define V_CRSE(x) ((x) << S_CRSE)
 #define F_CRSE    V_CRSE(1U)
 
+#define A_PCIE_T5_HMA_CFG 0x59b0
+
+#define S_HMA_MAXREQCNT    20
+#define M_HMA_MAXREQCNT    0x1fU
+#define V_HMA_MAXREQCNT(x) ((x) << S_HMA_MAXREQCNT)
+#define G_HMA_MAXREQCNT(x) (((x) >> S_HMA_MAXREQCNT) & M_HMA_MAXREQCNT)
+
+#define S_T5_HMA_MAXRDREQSIZE    17
+#define M_T5_HMA_MAXRDREQSIZE    0x7U
+#define V_T5_HMA_MAXRDREQSIZE(x) ((x) << S_T5_HMA_MAXRDREQSIZE)
+#define G_T5_HMA_MAXRDREQSIZE(x) (((x) >> S_T5_HMA_MAXRDREQSIZE) & M_T5_HMA_MAXRDREQSIZE)
+
+#define S_T5_HMA_MAXRSPCNT    8
+#define M_T5_HMA_MAXRSPCNT    0x1fU
+#define V_T5_HMA_MAXRSPCNT(x) ((x) << S_T5_HMA_MAXRSPCNT)
+#define G_T5_HMA_MAXRSPCNT(x) (((x) >> S_T5_HMA_MAXRSPCNT) & M_T5_HMA_MAXRSPCNT)
+
+#define S_T6_HMA_MAXREQCNT    20
+#define M_T6_HMA_MAXREQCNT    0x7fU
+#define V_T6_HMA_MAXREQCNT(x) ((x) << S_T6_HMA_MAXREQCNT)
+#define G_T6_HMA_MAXREQCNT(x) (((x) >> S_T6_HMA_MAXREQCNT) & M_T6_HMA_MAXREQCNT)
+
+#define S_T6_T5_HMA_MAXRSPCNT    9
+#define M_T6_T5_HMA_MAXRSPCNT    0xffU
+#define V_T6_T5_HMA_MAXRSPCNT(x) ((x) << S_T6_T5_HMA_MAXRSPCNT)
+#define G_T6_T5_HMA_MAXRSPCNT(x) (((x) >> S_T6_T5_HMA_MAXRSPCNT) & M_T6_T5_HMA_MAXRSPCNT)
+
+#define S_T6_SEQCHKDIS    8
+#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS)
+#define F_T6_SEQCHKDIS    V_T6_SEQCHKDIS(1U)
+
+#define S_T6_MINTAG    0
+#define M_T6_MINTAG    0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+
 #define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4
 
 #define S_RLCS    31
@@ -2950,6 +6924,28 @@
 #define V_CRSS(x) ((x) << S_CRSS)
 #define F_CRSS    V_CRSS(1U)
 
+#define A_PCIE_T5_HMA_STAT 0x59b4
+
+#define S_HMA_RESPCNT    20
+#define M_HMA_RESPCNT    0x1ffU
+#define V_HMA_RESPCNT(x) ((x) << S_HMA_RESPCNT)
+#define G_HMA_RESPCNT(x) (((x) >> S_HMA_RESPCNT) & M_HMA_RESPCNT)
+
+#define S_HMA_RDREQCNT    12
+#define M_HMA_RDREQCNT    0x3fU
+#define V_HMA_RDREQCNT(x) ((x) << S_HMA_RDREQCNT)
+#define G_HMA_RDREQCNT(x) (((x) >> S_HMA_RDREQCNT) & M_HMA_RDREQCNT)
+
+#define S_HMA_WRREQCNT    0
+#define M_HMA_WRREQCNT    0x1ffU
+#define V_HMA_WRREQCNT(x) ((x) << S_HMA_WRREQCNT)
+#define G_HMA_WRREQCNT(x) (((x) >> S_HMA_WRREQCNT) & M_HMA_WRREQCNT)
+
+#define S_T6_HMA_RESPCNT    20
+#define M_T6_HMA_RESPCNT    0x3ffU
+#define V_T6_HMA_RESPCNT(x) ((x) << S_T6_HMA_RESPCNT)
+#define G_T6_HMA_RESPCNT(x) (((x) >> S_T6_HMA_RESPCNT) & M_T6_HMA_RESPCNT)
+
 #define A_PCIE_CORE_ROOT_COMPLEX_INTERRUPT_ENABLE 0x59b8
 
 #define S_RLCI    31
@@ -2996,6 +6992,7 @@
 #define V_CRSI(x) ((x) << S_CRSI)
 #define F_CRSI    V_CRSI(1U)
 
+#define A_PCIE_T5_HMA_STAT2 0x59b8
 #define A_PCIE_CORE_ENDPOINT_STATUS 0x59bc
 
 #define S_PTOM    31
@@ -3038,6 +7035,7 @@
 #define V_PMC7(x) ((x) << S_PMC7)
 #define F_PMC7    V_PMC7(1U)
 
+#define A_PCIE_T5_HMA_STAT3 0x59bc
 #define A_PCIE_CORE_ENDPOINT_ERROR_SEVERITY 0x59c0
 
 #define S_PTOS    31
@@ -3112,6 +7110,84 @@
 #define V_PME7(x) ((x) << S_PME7)
 #define F_PME7    V_PME7(1U)
 
+#define A_PCIE_CGEN 0x59c0
+
+#define S_VPD_DYNAMIC_CGEN    26
+#define V_VPD_DYNAMIC_CGEN(x) ((x) << S_VPD_DYNAMIC_CGEN)
+#define F_VPD_DYNAMIC_CGEN    V_VPD_DYNAMIC_CGEN(1U)
+
+#define S_MA_DYNAMIC_CGEN    25
+#define V_MA_DYNAMIC_CGEN(x) ((x) << S_MA_DYNAMIC_CGEN)
+#define F_MA_DYNAMIC_CGEN    V_MA_DYNAMIC_CGEN(1U)
+
+#define S_TAGQ_DYNAMIC_CGEN    24
+#define V_TAGQ_DYNAMIC_CGEN(x) ((x) << S_TAGQ_DYNAMIC_CGEN)
+#define F_TAGQ_DYNAMIC_CGEN    V_TAGQ_DYNAMIC_CGEN(1U)
+
+#define S_REQCTL_DYNAMIC_CGEN    23
+#define V_REQCTL_DYNAMIC_CGEN(x) ((x) << S_REQCTL_DYNAMIC_CGEN)
+#define F_REQCTL_DYNAMIC_CGEN    V_REQCTL_DYNAMIC_CGEN(1U)
+
+#define S_RSPDATAPROC_DYNAMIC_CGEN    22
+#define V_RSPDATAPROC_DYNAMIC_CGEN(x) ((x) << S_RSPDATAPROC_DYNAMIC_CGEN)
+#define F_RSPDATAPROC_DYNAMIC_CGEN    V_RSPDATAPROC_DYNAMIC_CGEN(1U)
+
+#define S_RSPRDQ_DYNAMIC_CGEN    21
+#define V_RSPRDQ_DYNAMIC_CGEN(x) ((x) << S_RSPRDQ_DYNAMIC_CGEN)
+#define F_RSPRDQ_DYNAMIC_CGEN    V_RSPRDQ_DYNAMIC_CGEN(1U)
+
+#define S_RSPIPIF_DYNAMIC_CGEN    20
+#define V_RSPIPIF_DYNAMIC_CGEN(x) ((x) << S_RSPIPIF_DYNAMIC_CGEN)
+#define F_RSPIPIF_DYNAMIC_CGEN    V_RSPIPIF_DYNAMIC_CGEN(1U)
+
+#define S_HMA_STATIC_CGEN    19
+#define V_HMA_STATIC_CGEN(x) ((x) << S_HMA_STATIC_CGEN)
+#define F_HMA_STATIC_CGEN    V_HMA_STATIC_CGEN(1U)
+
+#define S_HMA_DYNAMIC_CGEN    18
+#define V_HMA_DYNAMIC_CGEN(x) ((x) << S_HMA_DYNAMIC_CGEN)
+#define F_HMA_DYNAMIC_CGEN    V_HMA_DYNAMIC_CGEN(1U)
+
+#define S_CMD_STATIC_CGEN    16
+#define V_CMD_STATIC_CGEN(x) ((x) << S_CMD_STATIC_CGEN)
+#define F_CMD_STATIC_CGEN    V_CMD_STATIC_CGEN(1U)
+
+#define S_CMD_DYNAMIC_CGEN    15
+#define V_CMD_DYNAMIC_CGEN(x) ((x) << S_CMD_DYNAMIC_CGEN)
+#define F_CMD_DYNAMIC_CGEN    V_CMD_DYNAMIC_CGEN(1U)
+
+#define S_DMA_STATIC_CGEN    13
+#define V_DMA_STATIC_CGEN(x) ((x) << S_DMA_STATIC_CGEN)
+#define F_DMA_STATIC_CGEN    V_DMA_STATIC_CGEN(1U)
+
+#define S_DMA_DYNAMIC_CGEN    12
+#define V_DMA_DYNAMIC_CGEN(x) ((x) << S_DMA_DYNAMIC_CGEN)
+#define F_DMA_DYNAMIC_CGEN    V_DMA_DYNAMIC_CGEN(1U)
+
+#define S_VFID_SLEEPSTATUS    10
+#define V_VFID_SLEEPSTATUS(x) ((x) << S_VFID_SLEEPSTATUS)
+#define F_VFID_SLEEPSTATUS    V_VFID_SLEEPSTATUS(1U)
+
+#define S_VC1_SLEEPSTATUS    9
+#define V_VC1_SLEEPSTATUS(x) ((x) << S_VC1_SLEEPSTATUS)
+#define F_VC1_SLEEPSTATUS    V_VC1_SLEEPSTATUS(1U)
+
+#define S_STI_SLEEPSTATUS    8
+#define V_STI_SLEEPSTATUS(x) ((x) << S_STI_SLEEPSTATUS)
+#define F_STI_SLEEPSTATUS    V_STI_SLEEPSTATUS(1U)
+
+#define S_VFID_SLEEPREQ    2
+#define V_VFID_SLEEPREQ(x) ((x) << S_VFID_SLEEPREQ)
+#define F_VFID_SLEEPREQ    V_VFID_SLEEPREQ(1U)
+
+#define S_VC1_SLEEPREQ    1
+#define V_VC1_SLEEPREQ(x) ((x) << S_VC1_SLEEPREQ)
+#define F_VC1_SLEEPREQ    V_VC1_SLEEPREQ(1U)
+
+#define S_STI_SLEEPREQ    0
+#define V_STI_SLEEPREQ(x) ((x) << S_STI_SLEEPREQ)
+#define F_STI_SLEEPREQ    V_STI_SLEEPREQ(1U)
+
 #define A_PCIE_CORE_ENDPOINT_INTERRUPT_ENABLE 0x59c4
 
 #define S_PTOI    31
@@ -3154,6 +7230,21 @@
 #define V_PC7I(x) ((x) << S_PC7I)
 #define F_PC7I    V_PC7I(1U)
 
+#define A_PCIE_MA_RSP 0x59c4
+
+#define S_TIMERVALUE    8
+#define M_TIMERVALUE    0xffffffU
+#define V_TIMERVALUE(x) ((x) << S_TIMERVALUE)
+#define G_TIMERVALUE(x) (((x) >> S_TIMERVALUE) & M_TIMERVALUE)
+
+#define S_MAREQTIMEREN    1
+#define V_MAREQTIMEREN(x) ((x) << S_MAREQTIMEREN)
+#define F_MAREQTIMEREN    V_MAREQTIMEREN(1U)
+
+#define S_MARSPTIMEREN    0
+#define V_MARSPTIMEREN(x) ((x) << S_MARSPTIMEREN)
+#define F_MARSPTIMEREN    V_MARSPTIMEREN(1U)
+
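A_PCIE_MA_RSP pairs a 24-bit TIMERVALUE with separate request and response timer enables. A hedged sketch of arming both timers (the tick value is arbitrary and the timer semantics are inferred from the field names only):

    t4_write_reg(sc, A_PCIE_MA_RSP,
        V_TIMERVALUE(0x4000) | F_MAREQTIMEREN | F_MARSPTIMEREN);
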
 #define A_PCIE_CORE_PCI_POWER_MANAGEMENT_CONTROL_1 0x59c8
 
 #define S_TOAK    31
@@ -3176,6 +7267,52 @@
 #define V_ALET(x) ((x) << S_ALET)
 #define F_ALET    V_ALET(1U)
 
+#define A_PCIE_HPRD 0x59c8
+
+#define S_NPH_CREDITSAVAILVC0    19
+#define M_NPH_CREDITSAVAILVC0    0x3U
+#define V_NPH_CREDITSAVAILVC0(x) ((x) << S_NPH_CREDITSAVAILVC0)
+#define G_NPH_CREDITSAVAILVC0(x) (((x) >> S_NPH_CREDITSAVAILVC0) & M_NPH_CREDITSAVAILVC0)
+
+#define S_NPD_CREDITSAVAILVC0    17
+#define M_NPD_CREDITSAVAILVC0    0x3U
+#define V_NPD_CREDITSAVAILVC0(x) ((x) << S_NPD_CREDITSAVAILVC0)
+#define G_NPD_CREDITSAVAILVC0(x) (((x) >> S_NPD_CREDITSAVAILVC0) & M_NPD_CREDITSAVAILVC0)
+
+#define S_NPH_CREDITSAVAILVC1    15
+#define M_NPH_CREDITSAVAILVC1    0x3U
+#define V_NPH_CREDITSAVAILVC1(x) ((x) << S_NPH_CREDITSAVAILVC1)
+#define G_NPH_CREDITSAVAILVC1(x) (((x) >> S_NPH_CREDITSAVAILVC1) & M_NPH_CREDITSAVAILVC1)
+
+#define S_NPD_CREDITSAVAILVC1    13
+#define M_NPD_CREDITSAVAILVC1    0x3U
+#define V_NPD_CREDITSAVAILVC1(x) ((x) << S_NPD_CREDITSAVAILVC1)
+#define G_NPD_CREDITSAVAILVC1(x) (((x) >> S_NPD_CREDITSAVAILVC1) & M_NPD_CREDITSAVAILVC1)
+
+#define S_NPH_CREDITSREQUIRED    11
+#define M_NPH_CREDITSREQUIRED    0x3U
+#define V_NPH_CREDITSREQUIRED(x) ((x) << S_NPH_CREDITSREQUIRED)
+#define G_NPH_CREDITSREQUIRED(x) (((x) >> S_NPH_CREDITSREQUIRED) & M_NPH_CREDITSREQUIRED)
+
+#define S_NPD_CREDITSREQUIRED    9
+#define M_NPD_CREDITSREQUIRED    0x3U
+#define V_NPD_CREDITSREQUIRED(x) ((x) << S_NPD_CREDITSREQUIRED)
+#define G_NPD_CREDITSREQUIRED(x) (((x) >> S_NPD_CREDITSREQUIRED) & M_NPD_CREDITSREQUIRED)
+
+#define S_REQBURSTCOUNT    5
+#define M_REQBURSTCOUNT    0xfU
+#define V_REQBURSTCOUNT(x) ((x) << S_REQBURSTCOUNT)
+#define G_REQBURSTCOUNT(x) (((x) >> S_REQBURSTCOUNT) & M_REQBURSTCOUNT)
+
+#define S_REQBURSTFREQUENCY    1
+#define M_REQBURSTFREQUENCY    0xfU
+#define V_REQBURSTFREQUENCY(x) ((x) << S_REQBURSTFREQUENCY)
+#define G_REQBURSTFREQUENCY(x) (((x) >> S_REQBURSTFREQUENCY) & M_REQBURSTFREQUENCY)
+
+#define S_ENABLEVC1    0
+#define V_ENABLEVC1(x) ((x) << S_ENABLEVC1)
+#define F_ENABLEVC1    V_ENABLEVC1(1U)
+
 #define A_PCIE_CORE_PCI_POWER_MANAGEMENT_CONTROL_2 0x59cc
 
 #define S_CPM0    30
@@ -3259,7 +7396,234 @@
 #define G_OPM7(x) (((x) >> S_OPM7) & M_OPM7)
 
 #define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_1 0x59d0
+#define A_PCIE_PERR_GROUP 0x59d0
+
+#define S_MST_DATAPATHPERR    25
+#define V_MST_DATAPATHPERR(x) ((x) << S_MST_DATAPATHPERR)
+#define F_MST_DATAPATHPERR    V_MST_DATAPATHPERR(1U)
+
+#define S_MST_RSPRDQPERR    24
+#define V_MST_RSPRDQPERR(x) ((x) << S_MST_RSPRDQPERR)
+#define F_MST_RSPRDQPERR    V_MST_RSPRDQPERR(1U)
+
+#define S_IP_RXPERR    23
+#define V_IP_RXPERR(x) ((x) << S_IP_RXPERR)
+#define F_IP_RXPERR    V_IP_RXPERR(1U)
+
+#define S_IP_BACKTXPERR    22
+#define V_IP_BACKTXPERR(x) ((x) << S_IP_BACKTXPERR)
+#define F_IP_BACKTXPERR    V_IP_BACKTXPERR(1U)
+
+#define S_IP_FRONTTXPERR    21
+#define V_IP_FRONTTXPERR(x) ((x) << S_IP_FRONTTXPERR)
+#define F_IP_FRONTTXPERR    V_IP_FRONTTXPERR(1U)
+
+#define S_TRGT1_FIDLKUPHDRPERR    20
+#define V_TRGT1_FIDLKUPHDRPERR(x) ((x) << S_TRGT1_FIDLKUPHDRPERR)
+#define F_TRGT1_FIDLKUPHDRPERR    V_TRGT1_FIDLKUPHDRPERR(1U)
+
+#define S_TRGT1_ALINDDATAPERR    19
+#define V_TRGT1_ALINDDATAPERR(x) ((x) << S_TRGT1_ALINDDATAPERR)
+#define F_TRGT1_ALINDDATAPERR    V_TRGT1_ALINDDATAPERR(1U)
+
+#define S_TRGT1_UNALINDATAPERR    18
+#define V_TRGT1_UNALINDATAPERR(x) ((x) << S_TRGT1_UNALINDATAPERR)
+#define F_TRGT1_UNALINDATAPERR    V_TRGT1_UNALINDATAPERR(1U)
+
+#define S_TRGT1_REQDATAPERR    17
+#define V_TRGT1_REQDATAPERR(x) ((x) << S_TRGT1_REQDATAPERR)
+#define F_TRGT1_REQDATAPERR    V_TRGT1_REQDATAPERR(1U)
+
+#define S_TRGT1_REQHDRPERR    16
+#define V_TRGT1_REQHDRPERR(x) ((x) << S_TRGT1_REQHDRPERR)
+#define F_TRGT1_REQHDRPERR    V_TRGT1_REQHDRPERR(1U)
+
+#define S_IPRXDATA_VC1PERR    15
+#define V_IPRXDATA_VC1PERR(x) ((x) << S_IPRXDATA_VC1PERR)
+#define F_IPRXDATA_VC1PERR    V_IPRXDATA_VC1PERR(1U)
+
+#define S_IPRXDATA_VC0PERR    14
+#define V_IPRXDATA_VC0PERR(x) ((x) << S_IPRXDATA_VC0PERR)
+#define F_IPRXDATA_VC0PERR    V_IPRXDATA_VC0PERR(1U)
+
+#define S_IPRXHDR_VC1PERR    13
+#define V_IPRXHDR_VC1PERR(x) ((x) << S_IPRXHDR_VC1PERR)
+#define F_IPRXHDR_VC1PERR    V_IPRXHDR_VC1PERR(1U)
+
+#define S_IPRXHDR_VC0PERR    12
+#define V_IPRXHDR_VC0PERR(x) ((x) << S_IPRXHDR_VC0PERR)
+#define F_IPRXHDR_VC0PERR    V_IPRXHDR_VC0PERR(1U)
+
+#define S_MA_RSPDATAPERR    11
+#define V_MA_RSPDATAPERR(x) ((x) << S_MA_RSPDATAPERR)
+#define F_MA_RSPDATAPERR    V_MA_RSPDATAPERR(1U)
+
+#define S_MA_CPLTAGQPERR    10
+#define V_MA_CPLTAGQPERR(x) ((x) << S_MA_CPLTAGQPERR)
+#define F_MA_CPLTAGQPERR    V_MA_CPLTAGQPERR(1U)
+
+#define S_MA_REQTAGQPERR    9
+#define V_MA_REQTAGQPERR(x) ((x) << S_MA_REQTAGQPERR)
+#define F_MA_REQTAGQPERR    V_MA_REQTAGQPERR(1U)
+
+#define S_PIOREQ_BAR2CTLPERR    8
+#define V_PIOREQ_BAR2CTLPERR(x) ((x) << S_PIOREQ_BAR2CTLPERR)
+#define F_PIOREQ_BAR2CTLPERR    V_PIOREQ_BAR2CTLPERR(1U)
+
+#define S_PIOREQ_MEMCTLPERR    7
+#define V_PIOREQ_MEMCTLPERR(x) ((x) << S_PIOREQ_MEMCTLPERR)
+#define F_PIOREQ_MEMCTLPERR    V_PIOREQ_MEMCTLPERR(1U)
+
+#define S_PIOREQ_PLMCTLPERR    6
+#define V_PIOREQ_PLMCTLPERR(x) ((x) << S_PIOREQ_PLMCTLPERR)
+#define F_PIOREQ_PLMCTLPERR    V_PIOREQ_PLMCTLPERR(1U)
+
+#define S_PIOREQ_BAR2DATAPERR    5
+#define V_PIOREQ_BAR2DATAPERR(x) ((x) << S_PIOREQ_BAR2DATAPERR)
+#define F_PIOREQ_BAR2DATAPERR    V_PIOREQ_BAR2DATAPERR(1U)
+
+#define S_PIOREQ_MEMDATAPERR    4
+#define V_PIOREQ_MEMDATAPERR(x) ((x) << S_PIOREQ_MEMDATAPERR)
+#define F_PIOREQ_MEMDATAPERR    V_PIOREQ_MEMDATAPERR(1U)
+
+#define S_PIOREQ_PLMDATAPERR    3
+#define V_PIOREQ_PLMDATAPERR(x) ((x) << S_PIOREQ_PLMDATAPERR)
+#define F_PIOREQ_PLMDATAPERR    V_PIOREQ_PLMDATAPERR(1U)
+
+#define S_PIOCPL_CTLPERR    2
+#define V_PIOCPL_CTLPERR(x) ((x) << S_PIOCPL_CTLPERR)
+#define F_PIOCPL_CTLPERR    V_PIOCPL_CTLPERR(1U)
+
+#define S_PIOCPL_DATAPERR    1
+#define V_PIOCPL_DATAPERR(x) ((x) << S_PIOCPL_DATAPERR)
+#define F_PIOCPL_DATAPERR    V_PIOCPL_DATAPERR(1U)
+
+#define S_PIOCPL_PLMRSPPERR    0
+#define V_PIOCPL_PLMRSPPERR(x) ((x) << S_PIOCPL_PLMRSPPERR)
+#define F_PIOCPL_PLMRSPPERR    V_PIOCPL_PLMRSPPERR(1U)
+
+#define S_MA_RSPCTLPERR    26
+#define V_MA_RSPCTLPERR(x) ((x) << S_MA_RSPCTLPERR)
+#define F_MA_RSPCTLPERR    V_MA_RSPCTLPERR(1U)
+
+#define S_T6_IPRXDATA_VC0PERR    15
+#define V_T6_IPRXDATA_VC0PERR(x) ((x) << S_T6_IPRXDATA_VC0PERR)
+#define F_T6_IPRXDATA_VC0PERR    V_T6_IPRXDATA_VC0PERR(1U)
+
+#define S_T6_IPRXHDR_VC0PERR    14
+#define V_T6_IPRXHDR_VC0PERR(x) ((x) << S_T6_IPRXHDR_VC0PERR)
+#define F_T6_IPRXHDR_VC0PERR    V_T6_IPRXHDR_VC0PERR(1U)
+
+#define S_PIOCPL_VDMTXCTLPERR    13
+#define V_PIOCPL_VDMTXCTLPERR(x) ((x) << S_PIOCPL_VDMTXCTLPERR)
+#define F_PIOCPL_VDMTXCTLPERR    V_PIOCPL_VDMTXCTLPERR(1U)
+
+#define S_PIOCPL_VDMTXDATAPERR    12
+#define V_PIOCPL_VDMTXDATAPERR(x) ((x) << S_PIOCPL_VDMTXDATAPERR)
+#define F_PIOCPL_VDMTXDATAPERR    V_PIOCPL_VDMTXDATAPERR(1U)
+
 #define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4
+#define A_PCIE_RSP_ERR_INT_LOG_EN 0x59d4
+
+#define S_CPLSTATUSINTEN    12
+#define V_CPLSTATUSINTEN(x) ((x) << S_CPLSTATUSINTEN)
+#define F_CPLSTATUSINTEN    V_CPLSTATUSINTEN(1U)
+
+#define S_REQTIMEOUTINTEN    11
+#define V_REQTIMEOUTINTEN(x) ((x) << S_REQTIMEOUTINTEN)
+#define F_REQTIMEOUTINTEN    V_REQTIMEOUTINTEN(1U)
+
+#define S_DISABLEDINTEN    10
+#define V_DISABLEDINTEN(x) ((x) << S_DISABLEDINTEN)
+#define F_DISABLEDINTEN    V_DISABLEDINTEN(1U)
+
+#define S_RSPDROPFLRINTEN    9
+#define V_RSPDROPFLRINTEN(x) ((x) << S_RSPDROPFLRINTEN)
+#define F_RSPDROPFLRINTEN    V_RSPDROPFLRINTEN(1U)
+
+#define S_REQUNDERFLRINTEN    8
+#define V_REQUNDERFLRINTEN(x) ((x) << S_REQUNDERFLRINTEN)
+#define F_REQUNDERFLRINTEN    V_REQUNDERFLRINTEN(1U)
+
+#define S_CPLSTATUSLOGEN    4
+#define V_CPLSTATUSLOGEN(x) ((x) << S_CPLSTATUSLOGEN)
+#define F_CPLSTATUSLOGEN    V_CPLSTATUSLOGEN(1U)
+
+#define S_TIMEOUTLOGEN    3
+#define V_TIMEOUTLOGEN(x) ((x) << S_TIMEOUTLOGEN)
+#define F_TIMEOUTLOGEN    V_TIMEOUTLOGEN(1U)
+
+#define S_DISABLEDLOGEN    2
+#define V_DISABLEDLOGEN(x) ((x) << S_DISABLEDLOGEN)
+#define F_DISABLEDLOGEN    V_DISABLEDLOGEN(1U)
+
+#define S_RSPDROPFLRLOGEN    1
+#define V_RSPDROPFLRLOGEN(x) ((x) << S_RSPDROPFLRLOGEN)
+#define F_RSPDROPFLRLOGEN    V_RSPDROPFLRLOGEN(1U)
+
+#define S_REQUNDERFLRLOGEN    0
+#define V_REQUNDERFLRLOGEN(x) ((x) << S_REQUNDERFLRLOGEN)
+#define F_REQUNDERFLRLOGEN    V_REQUNDERFLRLOGEN(1U)
+
+#define A_PCIE_RSP_ERR_LOG1 0x59d8
+
+#define S_REQTAG    25
+#define M_REQTAG    0x7fU
+#define V_REQTAG(x) ((x) << S_REQTAG)
+#define G_REQTAG(x) (((x) >> S_REQTAG) & M_REQTAG)
+
+#define S_CID    22
+#define M_CID    0x7U
+#define V_CID(x) ((x) << S_CID)
+#define G_CID(x) (((x) >> S_CID) & M_CID)
+
+#define S_CHNUM    19
+#define M_CHNUM    0x7U
+#define V_CHNUM(x) ((x) << S_CHNUM)
+#define G_CHNUM(x) (((x) >> S_CHNUM) & M_CHNUM)
+
+#define S_BYTELEN    6
+#define M_BYTELEN    0x1fffU
+#define V_BYTELEN(x) ((x) << S_BYTELEN)
+#define G_BYTELEN(x) (((x) >> S_BYTELEN) & M_BYTELEN)
+
+#define S_REASON    3
+#define M_REASON    0x7U
+#define V_REASON(x) ((x) << S_REASON)
+#define G_REASON(x) (((x) >> S_REASON) & M_REASON)
+
+#define S_CPLSTATUS    0
+#define M_CPLSTATUS    0x7U
+#define V_CPLSTATUS(x) ((x) << S_CPLSTATUS)
+#define G_CPLSTATUS(x) (((x) >> S_CPLSTATUS) & M_CPLSTATUS)
+
+#define A_PCIE_RSP_ERR_LOG2 0x59dc
+
+#define S_LOGVALID    31
+#define V_LOGVALID(x) ((x) << S_LOGVALID)
+#define F_LOGVALID    V_LOGVALID(1U)
+
+#define S_ADDR10B    8
+#define M_ADDR10B    0x3ffU
+#define V_ADDR10B(x) ((x) << S_ADDR10B)
+#define G_ADDR10B(x) (((x) >> S_ADDR10B) & M_ADDR10B)
+
+#define S_REQVFID    0
+#define M_REQVFID    0xffU
+#define V_REQVFID(x) ((x) << S_REQVFID)
+#define G_REQVFID(x) (((x) >> S_REQVFID) & M_REQVFID)
+
+#define S_T6_ADDR10B    9
+#define M_T6_ADDR10B    0x3ffU
+#define V_T6_ADDR10B(x) ((x) << S_T6_ADDR10B)
+#define G_T6_ADDR10B(x) (((x) >> S_T6_ADDR10B) & M_T6_ADDR10B)
+
+#define S_T6_REQVFID    0
+#define M_T6_REQVFID    0x1ffU
+#define V_T6_REQVFID(x) ((x) << S_T6_REQVFID)
+#define G_T6_REQVFID(x) (((x) >> S_T6_REQVFID) & M_T6_REQVFID)
+
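LOG1 carries the request tag, channel, drop reason and completion status of a logged response error; LOG2 holds the valid bit and the requester address/VFID. A plausible decode using the T5 field layout (T6 parts would use the T6_* variants; accessor and softc name assumed as above):

    u32 log1 = t4_read_reg(sc, A_PCIE_RSP_ERR_LOG1);
    u32 log2 = t4_read_reg(sc, A_PCIE_RSP_ERR_LOG2);

    if (log2 & F_LOGVALID)
        printf("rsp err: tag %u reason %u cplstatus %u vfid %u\n",
            G_REQTAG(log1), G_REASON(log1), G_CPLSTATUS(log1),
            G_REQVFID(log2));
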
+#define A_PCIE_CHANGESET 0x59fc
 #define A_PCIE_REVISION 0x5a00
 #define A_PCIE_PDEBUG_INDEX 0x5a04
 
@@ -3273,6 +7637,16 @@
 #define V_PDEBUGSELL(x) ((x) << S_PDEBUGSELL)
 #define G_PDEBUGSELL(x) (((x) >> S_PDEBUGSELL) & M_PDEBUGSELL)
 
+#define S_T6_PDEBUGSELH    16
+#define M_T6_PDEBUGSELH    0x7fU
+#define V_T6_PDEBUGSELH(x) ((x) << S_T6_PDEBUGSELH)
+#define G_T6_PDEBUGSELH(x) (((x) >> S_T6_PDEBUGSELH) & M_T6_PDEBUGSELH)
+
+#define S_T6_PDEBUGSELL    0
+#define M_T6_PDEBUGSELL    0x7fU
+#define V_T6_PDEBUGSELL(x) ((x) << S_T6_PDEBUGSELL)
+#define G_T6_PDEBUGSELL(x) (((x) >> S_T6_PDEBUGSELL) & M_T6_PDEBUGSELL)
+
 #define A_PCIE_PDEBUG_DATA_HIGH 0x5a08
 #define A_PCIE_PDEBUG_DATA_LOW 0x5a0c
 #define A_PCIE_CDEBUG_INDEX 0x5a10
@@ -3402,6 +7776,12 @@
 #define A_PCIE_BUS_MST_STAT_2 0x5a68
 #define A_PCIE_BUS_MST_STAT_3 0x5a6c
 #define A_PCIE_BUS_MST_STAT_4 0x5a70
+
+#define S_BUSMST_135_128    0
+#define M_BUSMST_135_128    0xffU
+#define V_BUSMST_135_128(x) ((x) << S_BUSMST_135_128)
+#define G_BUSMST_135_128(x) (((x) >> S_BUSMST_135_128) & M_BUSMST_135_128)
+
 #define A_PCIE_BUS_MST_STAT_5 0x5a74
 #define A_PCIE_BUS_MST_STAT_6 0x5a78
 #define A_PCIE_BUS_MST_STAT_7 0x5a7c
@@ -3410,9 +7790,81 @@
 #define A_PCIE_RSP_ERR_STAT_2 0x5a88
 #define A_PCIE_RSP_ERR_STAT_3 0x5a8c
 #define A_PCIE_RSP_ERR_STAT_4 0x5a90
+
+#define S_RSPERR_135_128    0
+#define M_RSPERR_135_128    0xffU
+#define V_RSPERR_135_128(x) ((x) << S_RSPERR_135_128)
+#define G_RSPERR_135_128(x) (((x) >> S_RSPERR_135_128) & M_RSPERR_135_128)
+
 #define A_PCIE_RSP_ERR_STAT_5 0x5a94
+#define A_PCIE_DBI_TIMEOUT_CTL 0x5a94
+
+#define S_DBI_TIMER    0
+#define M_DBI_TIMER    0xffffU
+#define V_DBI_TIMER(x) ((x) << S_DBI_TIMER)
+#define G_DBI_TIMER(x) (((x) >> S_DBI_TIMER) & M_DBI_TIMER)
+
 #define A_PCIE_RSP_ERR_STAT_6 0x5a98
+#define A_PCIE_DBI_TIMEOUT_STATUS0 0x5a98
 #define A_PCIE_RSP_ERR_STAT_7 0x5a9c
+#define A_PCIE_DBI_TIMEOUT_STATUS1 0x5a9c
+
+#define S_SOURCE    16
+#define M_SOURCE    0x3U
+#define V_SOURCE(x) ((x) << S_SOURCE)
+#define G_SOURCE(x) (((x) >> S_SOURCE) & M_SOURCE)
+
+#define S_DBI_WRITE    12
+#define M_DBI_WRITE    0xfU
+#define V_DBI_WRITE(x) ((x) << S_DBI_WRITE)
+#define G_DBI_WRITE(x) (((x) >> S_DBI_WRITE) & M_DBI_WRITE)
+
+#define S_DBI_CS2    11
+#define V_DBI_CS2(x) ((x) << S_DBI_CS2)
+#define F_DBI_CS2    V_DBI_CS2(1U)
+
+#define S_DBI_PF    8
+#define M_DBI_PF    0x7U
+#define V_DBI_PF(x) ((x) << S_DBI_PF)
+#define G_DBI_PF(x) (((x) >> S_DBI_PF) & M_DBI_PF)
+
+#define S_PL_TOVFVLD    7
+#define V_PL_TOVFVLD(x) ((x) << S_PL_TOVFVLD)
+#define F_PL_TOVFVLD    V_PL_TOVFVLD(1U)
+
+#define S_PL_TOVF    0
+#define M_PL_TOVF    0x7fU
+#define V_PL_TOVF(x) ((x) << S_PL_TOVF)
+#define G_PL_TOVF(x) (((x) >> S_PL_TOVF) & M_PL_TOVF)
+
+#define S_T6_SOURCE    17
+#define M_T6_SOURCE    0x3U
+#define V_T6_SOURCE(x) ((x) << S_T6_SOURCE)
+#define G_T6_SOURCE(x) (((x) >> S_T6_SOURCE) & M_T6_SOURCE)
+
+#define S_T6_DBI_WRITE    13
+#define M_T6_DBI_WRITE    0xfU
+#define V_T6_DBI_WRITE(x) ((x) << S_T6_DBI_WRITE)
+#define G_T6_DBI_WRITE(x) (((x) >> S_T6_DBI_WRITE) & M_T6_DBI_WRITE)
+
+#define S_T6_DBI_CS2    12
+#define V_T6_DBI_CS2(x) ((x) << S_T6_DBI_CS2)
+#define F_T6_DBI_CS2    V_T6_DBI_CS2(1U)
+
+#define S_T6_DBI_PF    9
+#define M_T6_DBI_PF    0x7U
+#define V_T6_DBI_PF(x) ((x) << S_T6_DBI_PF)
+#define G_T6_DBI_PF(x) (((x) >> S_T6_DBI_PF) & M_T6_DBI_PF)
+
+#define S_T6_PL_TOVFVLD    8
+#define V_T6_PL_TOVFVLD(x) ((x) << S_T6_PL_TOVFVLD)
+#define F_T6_PL_TOVFVLD    V_T6_PL_TOVFVLD(1U)
+
+#define S_T6_PL_TOVF    0
+#define M_T6_PL_TOVF    0xffU
+#define V_T6_PL_TOVF(x) ((x) << S_T6_PL_TOVF)
+#define G_T6_PL_TOVF(x) (((x) >> S_T6_PL_TOVF) & M_T6_PL_TOVF)
+
 #define A_PCIE_MSI_EN_0 0x5aa0
 #define A_PCIE_MSI_EN_1 0x5aa4
 #define A_PCIE_MSI_EN_2 0x5aa8
@@ -3446,6 +7898,3598 @@
 #define V_MAXBUFWRREQ(x) ((x) << S_MAXBUFWRREQ)
 #define G_MAXBUFWRREQ(x) (((x) >> S_MAXBUFWRREQ) & M_MAXBUFWRREQ)
 
+#define A_PCIE_PB_CTL 0x5b94
+
+#define S_PB_SEL    16
+#define M_PB_SEL    0xffU
+#define V_PB_SEL(x) ((x) << S_PB_SEL)
+#define G_PB_SEL(x) (((x) >> S_PB_SEL) & M_PB_SEL)
+
+#define S_PB_SELREG    8
+#define M_PB_SELREG    0xffU
+#define V_PB_SELREG(x) ((x) << S_PB_SELREG)
+#define G_PB_SELREG(x) (((x) >> S_PB_SELREG) & M_PB_SELREG)
+
+#define S_PB_FUNC    0
+#define M_PB_FUNC    0x7U
+#define V_PB_FUNC(x) ((x) << S_PB_FUNC)
+#define G_PB_FUNC(x) (((x) >> S_PB_FUNC) & M_PB_FUNC)
+
+#define A_PCIE_PB_DATA 0x5b98
+#define A_PCIE_CUR_LINK 0x5b9c
+
+#define S_CFGINITCOEFFDONESEEN    22
+#define V_CFGINITCOEFFDONESEEN(x) ((x) << S_CFGINITCOEFFDONESEEN)
+#define F_CFGINITCOEFFDONESEEN    V_CFGINITCOEFFDONESEEN(1U)
+
+#define S_CFGINITCOEFFDONE    21
+#define V_CFGINITCOEFFDONE(x) ((x) << S_CFGINITCOEFFDONE)
+#define F_CFGINITCOEFFDONE    V_CFGINITCOEFFDONE(1U)
+
+#define S_XMLH_LINK_UP    20
+#define V_XMLH_LINK_UP(x) ((x) << S_XMLH_LINK_UP)
+#define F_XMLH_LINK_UP    V_XMLH_LINK_UP(1U)
+
+#define S_PM_LINKST_IN_L0S    19
+#define V_PM_LINKST_IN_L0S(x) ((x) << S_PM_LINKST_IN_L0S)
+#define F_PM_LINKST_IN_L0S    V_PM_LINKST_IN_L0S(1U)
+
+#define S_PM_LINKST_IN_L1    18
+#define V_PM_LINKST_IN_L1(x) ((x) << S_PM_LINKST_IN_L1)
+#define F_PM_LINKST_IN_L1    V_PM_LINKST_IN_L1(1U)
+
+#define S_PM_LINKST_IN_L2    17
+#define V_PM_LINKST_IN_L2(x) ((x) << S_PM_LINKST_IN_L2)
+#define F_PM_LINKST_IN_L2    V_PM_LINKST_IN_L2(1U)
+
+#define S_PM_LINKST_L2_EXIT    16
+#define V_PM_LINKST_L2_EXIT(x) ((x) << S_PM_LINKST_L2_EXIT)
+#define F_PM_LINKST_L2_EXIT    V_PM_LINKST_L2_EXIT(1U)
+
+#define S_XMLH_IN_RL0S    15
+#define V_XMLH_IN_RL0S(x) ((x) << S_XMLH_IN_RL0S)
+#define F_XMLH_IN_RL0S    V_XMLH_IN_RL0S(1U)
+
+#define S_XMLH_LTSSM_STATE_RCVRY_EQ    14
+#define V_XMLH_LTSSM_STATE_RCVRY_EQ(x) ((x) << S_XMLH_LTSSM_STATE_RCVRY_EQ)
+#define F_XMLH_LTSSM_STATE_RCVRY_EQ    V_XMLH_LTSSM_STATE_RCVRY_EQ(1U)
+
+#define S_NEGOTIATEDWIDTH    8
+#define M_NEGOTIATEDWIDTH    0x3fU
+#define V_NEGOTIATEDWIDTH(x) ((x) << S_NEGOTIATEDWIDTH)
+#define G_NEGOTIATEDWIDTH(x) (((x) >> S_NEGOTIATEDWIDTH) & M_NEGOTIATEDWIDTH)
+
+#define S_ACTIVELANES    0
+#define M_ACTIVELANES    0xffU
+#define V_ACTIVELANES(x) ((x) << S_ACTIVELANES)
+#define G_ACTIVELANES(x) (((x) >> S_ACTIVELANES) & M_ACTIVELANES)
+
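A_PCIE_CUR_LINK exposes LTSSM and power-state flags alongside the negotiated width and per-lane activity bits. A small sketch that reports the link once it is up (same assumed accessor and softc):

    u32 lnk = t4_read_reg(sc, A_PCIE_CUR_LINK);

    if (lnk & F_XMLH_LINK_UP)
        printf("PCIe link up: width x%u, active lanes 0x%02x\n",
            G_NEGOTIATEDWIDTH(lnk), G_ACTIVELANES(lnk));
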
+#define A_PCIE_PHY_REQRXPWR 0x5ba0
+
+#define S_LNH_RXSTATEDONE    31
+#define V_LNH_RXSTATEDONE(x) ((x) << S_LNH_RXSTATEDONE)
+#define F_LNH_RXSTATEDONE    V_LNH_RXSTATEDONE(1U)
+
+#define S_LNH_RXSTATEREQ    30
+#define V_LNH_RXSTATEREQ(x) ((x) << S_LNH_RXSTATEREQ)
+#define F_LNH_RXSTATEREQ    V_LNH_RXSTATEREQ(1U)
+
+#define S_LNH_RXPWRSTATE    28
+#define M_LNH_RXPWRSTATE    0x3U
+#define V_LNH_RXPWRSTATE(x) ((x) << S_LNH_RXPWRSTATE)
+#define G_LNH_RXPWRSTATE(x) (((x) >> S_LNH_RXPWRSTATE) & M_LNH_RXPWRSTATE)
+
+#define S_LNG_RXSTATEDONE    27
+#define V_LNG_RXSTATEDONE(x) ((x) << S_LNG_RXSTATEDONE)
+#define F_LNG_RXSTATEDONE    V_LNG_RXSTATEDONE(1U)
+
+#define S_LNG_RXSTATEREQ    26
+#define V_LNG_RXSTATEREQ(x) ((x) << S_LNG_RXSTATEREQ)
+#define F_LNG_RXSTATEREQ    V_LNG_RXSTATEREQ(1U)
+
+#define S_LNG_RXPWRSTATE    24
+#define M_LNG_RXPWRSTATE    0x3U
+#define V_LNG_RXPWRSTATE(x) ((x) << S_LNG_RXPWRSTATE)
+#define G_LNG_RXPWRSTATE(x) (((x) >> S_LNG_RXPWRSTATE) & M_LNG_RXPWRSTATE)
+
+#define S_LNF_RXSTATEDONE    23
+#define V_LNF_RXSTATEDONE(x) ((x) << S_LNF_RXSTATEDONE)
+#define F_LNF_RXSTATEDONE    V_LNF_RXSTATEDONE(1U)
+
+#define S_LNF_RXSTATEREQ    22
+#define V_LNF_RXSTATEREQ(x) ((x) << S_LNF_RXSTATEREQ)
+#define F_LNF_RXSTATEREQ    V_LNF_RXSTATEREQ(1U)
+
+#define S_LNF_RXPWRSTATE    20
+#define M_LNF_RXPWRSTATE    0x3U
+#define V_LNF_RXPWRSTATE(x) ((x) << S_LNF_RXPWRSTATE)
+#define G_LNF_RXPWRSTATE(x) (((x) >> S_LNF_RXPWRSTATE) & M_LNF_RXPWRSTATE)
+
+#define S_LNE_RXSTATEDONE    19
+#define V_LNE_RXSTATEDONE(x) ((x) << S_LNE_RXSTATEDONE)
+#define F_LNE_RXSTATEDONE    V_LNE_RXSTATEDONE(1U)
+
+#define S_LNE_RXSTATEREQ    18
+#define V_LNE_RXSTATEREQ(x) ((x) << S_LNE_RXSTATEREQ)
+#define F_LNE_RXSTATEREQ    V_LNE_RXSTATEREQ(1U)
+
+#define S_LNE_RXPWRSTATE    16
+#define M_LNE_RXPWRSTATE    0x3U
+#define V_LNE_RXPWRSTATE(x) ((x) << S_LNE_RXPWRSTATE)
+#define G_LNE_RXPWRSTATE(x) (((x) >> S_LNE_RXPWRSTATE) & M_LNE_RXPWRSTATE)
+
+#define S_LND_RXSTATEDONE    15
+#define V_LND_RXSTATEDONE(x) ((x) << S_LND_RXSTATEDONE)
+#define F_LND_RXSTATEDONE    V_LND_RXSTATEDONE(1U)
+
+#define S_LND_RXSTATEREQ    14
+#define V_LND_RXSTATEREQ(x) ((x) << S_LND_RXSTATEREQ)
+#define F_LND_RXSTATEREQ    V_LND_RXSTATEREQ(1U)
+
+#define S_LND_RXPWRSTATE    12
+#define M_LND_RXPWRSTATE    0x3U
+#define V_LND_RXPWRSTATE(x) ((x) << S_LND_RXPWRSTATE)
+#define G_LND_RXPWRSTATE(x) (((x) >> S_LND_RXPWRSTATE) & M_LND_RXPWRSTATE)
+
+#define S_LNC_RXSTATEDONE    11
+#define V_LNC_RXSTATEDONE(x) ((x) << S_LNC_RXSTATEDONE)
+#define F_LNC_RXSTATEDONE    V_LNC_RXSTATEDONE(1U)
+
+#define S_LNC_RXSTATEREQ    10
+#define V_LNC_RXSTATEREQ(x) ((x) << S_LNC_RXSTATEREQ)
+#define F_LNC_RXSTATEREQ    V_LNC_RXSTATEREQ(1U)
+
+#define S_LNC_RXPWRSTATE    8
+#define M_LNC_RXPWRSTATE    0x3U
+#define V_LNC_RXPWRSTATE(x) ((x) << S_LNC_RXPWRSTATE)
+#define G_LNC_RXPWRSTATE(x) (((x) >> S_LNC_RXPWRSTATE) & M_LNC_RXPWRSTATE)
+
+#define S_LNB_RXSTATEDONE    7
+#define V_LNB_RXSTATEDONE(x) ((x) << S_LNB_RXSTATEDONE)
+#define F_LNB_RXSTATEDONE    V_LNB_RXSTATEDONE(1U)
+
+#define S_LNB_RXSTATEREQ    6
+#define V_LNB_RXSTATEREQ(x) ((x) << S_LNB_RXSTATEREQ)
+#define F_LNB_RXSTATEREQ    V_LNB_RXSTATEREQ(1U)
+
+#define S_LNB_RXPWRSTATE    4
+#define M_LNB_RXPWRSTATE    0x3U
+#define V_LNB_RXPWRSTATE(x) ((x) << S_LNB_RXPWRSTATE)
+#define G_LNB_RXPWRSTATE(x) (((x) >> S_LNB_RXPWRSTATE) & M_LNB_RXPWRSTATE)
+
+#define S_LNA_RXSTATEDONE    3
+#define V_LNA_RXSTATEDONE(x) ((x) << S_LNA_RXSTATEDONE)
+#define F_LNA_RXSTATEDONE    V_LNA_RXSTATEDONE(1U)
+
+#define S_LNA_RXSTATEREQ    2
+#define V_LNA_RXSTATEREQ(x) ((x) << S_LNA_RXSTATEREQ)
+#define F_LNA_RXSTATEREQ    V_LNA_RXSTATEREQ(1U)
+
+#define S_LNA_RXPWRSTATE    0
+#define M_LNA_RXPWRSTATE    0x3U
+#define V_LNA_RXPWRSTATE(x) ((x) << S_LNA_RXPWRSTATE)
+#define G_LNA_RXPWRSTATE(x) (((x) >> S_LNA_RXPWRSTATE) & M_LNA_RXPWRSTATE)
+
+#define S_REQ_LNH_RXSTATEDONE    31
+#define V_REQ_LNH_RXSTATEDONE(x) ((x) << S_REQ_LNH_RXSTATEDONE)
+#define F_REQ_LNH_RXSTATEDONE    V_REQ_LNH_RXSTATEDONE(1U)
+
+#define S_REQ_LNH_RXSTATEREQ    30
+#define V_REQ_LNH_RXSTATEREQ(x) ((x) << S_REQ_LNH_RXSTATEREQ)
+#define F_REQ_LNH_RXSTATEREQ    V_REQ_LNH_RXSTATEREQ(1U)
+
+#define S_REQ_LNH_RXPWRSTATE    28
+#define M_REQ_LNH_RXPWRSTATE    0x3U
+#define V_REQ_LNH_RXPWRSTATE(x) ((x) << S_REQ_LNH_RXPWRSTATE)
+#define G_REQ_LNH_RXPWRSTATE(x) (((x) >> S_REQ_LNH_RXPWRSTATE) & M_REQ_LNH_RXPWRSTATE)
+
+#define S_REQ_LNG_RXSTATEDONE    27
+#define V_REQ_LNG_RXSTATEDONE(x) ((x) << S_REQ_LNG_RXSTATEDONE)
+#define F_REQ_LNG_RXSTATEDONE    V_REQ_LNG_RXSTATEDONE(1U)
+
+#define S_REQ_LNG_RXSTATEREQ    26
+#define V_REQ_LNG_RXSTATEREQ(x) ((x) << S_REQ_LNG_RXSTATEREQ)
+#define F_REQ_LNG_RXSTATEREQ    V_REQ_LNG_RXSTATEREQ(1U)
+
+#define S_REQ_LNG_RXPWRSTATE    24
+#define M_REQ_LNG_RXPWRSTATE    0x3U
+#define V_REQ_LNG_RXPWRSTATE(x) ((x) << S_REQ_LNG_RXPWRSTATE)
+#define G_REQ_LNG_RXPWRSTATE(x) (((x) >> S_REQ_LNG_RXPWRSTATE) & M_REQ_LNG_RXPWRSTATE)
+
+#define S_REQ_LNF_RXSTATEDONE    23
+#define V_REQ_LNF_RXSTATEDONE(x) ((x) << S_REQ_LNF_RXSTATEDONE)
+#define F_REQ_LNF_RXSTATEDONE    V_REQ_LNF_RXSTATEDONE(1U)
+
+#define S_REQ_LNF_RXSTATEREQ    22
+#define V_REQ_LNF_RXSTATEREQ(x) ((x) << S_REQ_LNF_RXSTATEREQ)
+#define F_REQ_LNF_RXSTATEREQ    V_REQ_LNF_RXSTATEREQ(1U)
+
+#define S_REQ_LNF_RXPWRSTATE    20
+#define M_REQ_LNF_RXPWRSTATE    0x3U
+#define V_REQ_LNF_RXPWRSTATE(x) ((x) << S_REQ_LNF_RXPWRSTATE)
+#define G_REQ_LNF_RXPWRSTATE(x) (((x) >> S_REQ_LNF_RXPWRSTATE) & M_REQ_LNF_RXPWRSTATE)
+
+#define S_REQ_LNE_RXSTATEDONE    19
+#define V_REQ_LNE_RXSTATEDONE(x) ((x) << S_REQ_LNE_RXSTATEDONE)
+#define F_REQ_LNE_RXSTATEDONE    V_REQ_LNE_RXSTATEDONE(1U)
+
+#define S_REQ_LNE_RXSTATEREQ    18
+#define V_REQ_LNE_RXSTATEREQ(x) ((x) << S_REQ_LNE_RXSTATEREQ)
+#define F_REQ_LNE_RXSTATEREQ    V_REQ_LNE_RXSTATEREQ(1U)
+
+#define S_REQ_LNE_RXPWRSTATE    16
+#define M_REQ_LNE_RXPWRSTATE    0x3U
+#define V_REQ_LNE_RXPWRSTATE(x) ((x) << S_REQ_LNE_RXPWRSTATE)
+#define G_REQ_LNE_RXPWRSTATE(x) (((x) >> S_REQ_LNE_RXPWRSTATE) & M_REQ_LNE_RXPWRSTATE)
+
+#define S_REQ_LND_RXSTATEDONE    15
+#define V_REQ_LND_RXSTATEDONE(x) ((x) << S_REQ_LND_RXSTATEDONE)
+#define F_REQ_LND_RXSTATEDONE    V_REQ_LND_RXSTATEDONE(1U)
+
+#define S_REQ_LND_RXSTATEREQ    14
+#define V_REQ_LND_RXSTATEREQ(x) ((x) << S_REQ_LND_RXSTATEREQ)
+#define F_REQ_LND_RXSTATEREQ    V_REQ_LND_RXSTATEREQ(1U)
+
+#define S_REQ_LND_RXPWRSTATE    12
+#define M_REQ_LND_RXPWRSTATE    0x3U
+#define V_REQ_LND_RXPWRSTATE(x) ((x) << S_REQ_LND_RXPWRSTATE)
+#define G_REQ_LND_RXPWRSTATE(x) (((x) >> S_REQ_LND_RXPWRSTATE) & M_REQ_LND_RXPWRSTATE)
+
+#define S_REQ_LNC_RXSTATEDONE    11
+#define V_REQ_LNC_RXSTATEDONE(x) ((x) << S_REQ_LNC_RXSTATEDONE)
+#define F_REQ_LNC_RXSTATEDONE    V_REQ_LNC_RXSTATEDONE(1U)
+
+#define S_REQ_LNC_RXSTATEREQ    10
+#define V_REQ_LNC_RXSTATEREQ(x) ((x) << S_REQ_LNC_RXSTATEREQ)
+#define F_REQ_LNC_RXSTATEREQ    V_REQ_LNC_RXSTATEREQ(1U)
+
+#define S_REQ_LNC_RXPWRSTATE    8
+#define M_REQ_LNC_RXPWRSTATE    0x3U
+#define V_REQ_LNC_RXPWRSTATE(x) ((x) << S_REQ_LNC_RXPWRSTATE)
+#define G_REQ_LNC_RXPWRSTATE(x) (((x) >> S_REQ_LNC_RXPWRSTATE) & M_REQ_LNC_RXPWRSTATE)
+
+#define S_REQ_LNB_RXSTATEDONE    7
+#define V_REQ_LNB_RXSTATEDONE(x) ((x) << S_REQ_LNB_RXSTATEDONE)
+#define F_REQ_LNB_RXSTATEDONE    V_REQ_LNB_RXSTATEDONE(1U)
+
+#define S_REQ_LNB_RXSTATEREQ    6
+#define V_REQ_LNB_RXSTATEREQ(x) ((x) << S_REQ_LNB_RXSTATEREQ)
+#define F_REQ_LNB_RXSTATEREQ    V_REQ_LNB_RXSTATEREQ(1U)
+
+#define S_REQ_LNB_RXPWRSTATE    4
+#define M_REQ_LNB_RXPWRSTATE    0x3U
+#define V_REQ_LNB_RXPWRSTATE(x) ((x) << S_REQ_LNB_RXPWRSTATE)
+#define G_REQ_LNB_RXPWRSTATE(x) (((x) >> S_REQ_LNB_RXPWRSTATE) & M_REQ_LNB_RXPWRSTATE)
+
+#define S_REQ_LNA_RXSTATEDONE    3
+#define V_REQ_LNA_RXSTATEDONE(x) ((x) << S_REQ_LNA_RXSTATEDONE)
+#define F_REQ_LNA_RXSTATEDONE    V_REQ_LNA_RXSTATEDONE(1U)
+
+#define S_REQ_LNA_RXSTATEREQ    2
+#define V_REQ_LNA_RXSTATEREQ(x) ((x) << S_REQ_LNA_RXSTATEREQ)
+#define F_REQ_LNA_RXSTATEREQ    V_REQ_LNA_RXSTATEREQ(1U)
+
+#define S_REQ_LNA_RXPWRSTATE    0
+#define M_REQ_LNA_RXPWRSTATE    0x3U
+#define V_REQ_LNA_RXPWRSTATE(x) ((x) << S_REQ_LNA_RXPWRSTATE)
+#define G_REQ_LNA_RXPWRSTATE(x) (((x) >> S_REQ_LNA_RXPWRSTATE) & M_REQ_LNA_RXPWRSTATE)
+
+#define A_PCIE_PHY_CURRXPWR 0x5ba4
+
+#define S_T5_LNH_RXPWRSTATE    28
+#define M_T5_LNH_RXPWRSTATE    0x7U
+#define V_T5_LNH_RXPWRSTATE(x) ((x) << S_T5_LNH_RXPWRSTATE)
+#define G_T5_LNH_RXPWRSTATE(x) (((x) >> S_T5_LNH_RXPWRSTATE) & M_T5_LNH_RXPWRSTATE)
+
+#define S_T5_LNG_RXPWRSTATE    24
+#define M_T5_LNG_RXPWRSTATE    0x7U
+#define V_T5_LNG_RXPWRSTATE(x) ((x) << S_T5_LNG_RXPWRSTATE)
+#define G_T5_LNG_RXPWRSTATE(x) (((x) >> S_T5_LNG_RXPWRSTATE) & M_T5_LNG_RXPWRSTATE)
+
+#define S_T5_LNF_RXPWRSTATE    20
+#define M_T5_LNF_RXPWRSTATE    0x7U
+#define V_T5_LNF_RXPWRSTATE(x) ((x) << S_T5_LNF_RXPWRSTATE)
+#define G_T5_LNF_RXPWRSTATE(x) (((x) >> S_T5_LNF_RXPWRSTATE) & M_T5_LNF_RXPWRSTATE)
+
+#define S_T5_LNE_RXPWRSTATE    16
+#define M_T5_LNE_RXPWRSTATE    0x7U
+#define V_T5_LNE_RXPWRSTATE(x) ((x) << S_T5_LNE_RXPWRSTATE)
+#define G_T5_LNE_RXPWRSTATE(x) (((x) >> S_T5_LNE_RXPWRSTATE) & M_T5_LNE_RXPWRSTATE)
+
+#define S_T5_LND_RXPWRSTATE    12
+#define M_T5_LND_RXPWRSTATE    0x7U
+#define V_T5_LND_RXPWRSTATE(x) ((x) << S_T5_LND_RXPWRSTATE)
+#define G_T5_LND_RXPWRSTATE(x) (((x) >> S_T5_LND_RXPWRSTATE) & M_T5_LND_RXPWRSTATE)
+
+#define S_T5_LNC_RXPWRSTATE    8
+#define M_T5_LNC_RXPWRSTATE    0x7U
+#define V_T5_LNC_RXPWRSTATE(x) ((x) << S_T5_LNC_RXPWRSTATE)
+#define G_T5_LNC_RXPWRSTATE(x) (((x) >> S_T5_LNC_RXPWRSTATE) & M_T5_LNC_RXPWRSTATE)
+
+#define S_T5_LNB_RXPWRSTATE    4
+#define M_T5_LNB_RXPWRSTATE    0x7U
+#define V_T5_LNB_RXPWRSTATE(x) ((x) << S_T5_LNB_RXPWRSTATE)
+#define G_T5_LNB_RXPWRSTATE(x) (((x) >> S_T5_LNB_RXPWRSTATE) & M_T5_LNB_RXPWRSTATE)
+
+#define S_T5_LNA_RXPWRSTATE    0
+#define M_T5_LNA_RXPWRSTATE    0x7U
+#define V_T5_LNA_RXPWRSTATE(x) ((x) << S_T5_LNA_RXPWRSTATE)
+#define G_T5_LNA_RXPWRSTATE(x) (((x) >> S_T5_LNA_RXPWRSTATE) & M_T5_LNA_RXPWRSTATE)
+
+#define S_CUR_LNH_RXPWRSTATE    28
+#define M_CUR_LNH_RXPWRSTATE    0x7U
+#define V_CUR_LNH_RXPWRSTATE(x) ((x) << S_CUR_LNH_RXPWRSTATE)
+#define G_CUR_LNH_RXPWRSTATE(x) (((x) >> S_CUR_LNH_RXPWRSTATE) & M_CUR_LNH_RXPWRSTATE)
+
+#define S_CUR_LNG_RXPWRSTATE    24
+#define M_CUR_LNG_RXPWRSTATE    0x7U
+#define V_CUR_LNG_RXPWRSTATE(x) ((x) << S_CUR_LNG_RXPWRSTATE)
+#define G_CUR_LNG_RXPWRSTATE(x) (((x) >> S_CUR_LNG_RXPWRSTATE) & M_CUR_LNG_RXPWRSTATE)
+
+#define S_CUR_LNF_RXPWRSTATE    20
+#define M_CUR_LNF_RXPWRSTATE    0x7U
+#define V_CUR_LNF_RXPWRSTATE(x) ((x) << S_CUR_LNF_RXPWRSTATE)
+#define G_CUR_LNF_RXPWRSTATE(x) (((x) >> S_CUR_LNF_RXPWRSTATE) & M_CUR_LNF_RXPWRSTATE)
+
+#define S_CUR_LNE_RXPWRSTATE    16
+#define M_CUR_LNE_RXPWRSTATE    0x7U
+#define V_CUR_LNE_RXPWRSTATE(x) ((x) << S_CUR_LNE_RXPWRSTATE)
+#define G_CUR_LNE_RXPWRSTATE(x) (((x) >> S_CUR_LNE_RXPWRSTATE) & M_CUR_LNE_RXPWRSTATE)
+
+#define S_CUR_LND_RXPWRSTATE    12
+#define M_CUR_LND_RXPWRSTATE    0x7U
+#define V_CUR_LND_RXPWRSTATE(x) ((x) << S_CUR_LND_RXPWRSTATE)
+#define G_CUR_LND_RXPWRSTATE(x) (((x) >> S_CUR_LND_RXPWRSTATE) & M_CUR_LND_RXPWRSTATE)
+
+#define S_CUR_LNC_RXPWRSTATE    8
+#define M_CUR_LNC_RXPWRSTATE    0x7U
+#define V_CUR_LNC_RXPWRSTATE(x) ((x) << S_CUR_LNC_RXPWRSTATE)
+#define G_CUR_LNC_RXPWRSTATE(x) (((x) >> S_CUR_LNC_RXPWRSTATE) & M_CUR_LNC_RXPWRSTATE)
+
+#define S_CUR_LNB_RXPWRSTATE    4
+#define M_CUR_LNB_RXPWRSTATE    0x7U
+#define V_CUR_LNB_RXPWRSTATE(x) ((x) << S_CUR_LNB_RXPWRSTATE)
+#define G_CUR_LNB_RXPWRSTATE(x) (((x) >> S_CUR_LNB_RXPWRSTATE) & M_CUR_LNB_RXPWRSTATE)
+
+#define S_CUR_LNA_RXPWRSTATE    0
+#define M_CUR_LNA_RXPWRSTATE    0x7U
+#define V_CUR_LNA_RXPWRSTATE(x) ((x) << S_CUR_LNA_RXPWRSTATE)
+#define G_CUR_LNA_RXPWRSTATE(x) (((x) >> S_CUR_LNA_RXPWRSTATE) & M_CUR_LNA_RXPWRSTATE)
+
+#define A_PCIE_PHY_GEN3_AE0 0x5ba8
+
+#define S_LND_STAT    28
+#define M_LND_STAT    0x7U
+#define V_LND_STAT(x) ((x) << S_LND_STAT)
+#define G_LND_STAT(x) (((x) >> S_LND_STAT) & M_LND_STAT)
+
+#define S_LND_CMD    24
+#define M_LND_CMD    0x7U
+#define V_LND_CMD(x) ((x) << S_LND_CMD)
+#define G_LND_CMD(x) (((x) >> S_LND_CMD) & M_LND_CMD)
+
+#define S_LNC_STAT    20
+#define M_LNC_STAT    0x7U
+#define V_LNC_STAT(x) ((x) << S_LNC_STAT)
+#define G_LNC_STAT(x) (((x) >> S_LNC_STAT) & M_LNC_STAT)
+
+#define S_LNC_CMD    16
+#define M_LNC_CMD    0x7U
+#define V_LNC_CMD(x) ((x) << S_LNC_CMD)
+#define G_LNC_CMD(x) (((x) >> S_LNC_CMD) & M_LNC_CMD)
+
+#define S_LNB_STAT    12
+#define M_LNB_STAT    0x7U
+#define V_LNB_STAT(x) ((x) << S_LNB_STAT)
+#define G_LNB_STAT(x) (((x) >> S_LNB_STAT) & M_LNB_STAT)
+
+#define S_LNB_CMD    8
+#define M_LNB_CMD    0x7U
+#define V_LNB_CMD(x) ((x) << S_LNB_CMD)
+#define G_LNB_CMD(x) (((x) >> S_LNB_CMD) & M_LNB_CMD)
+
+#define S_LNA_STAT    4
+#define M_LNA_STAT    0x7U
+#define V_LNA_STAT(x) ((x) << S_LNA_STAT)
+#define G_LNA_STAT(x) (((x) >> S_LNA_STAT) & M_LNA_STAT)
+
+#define S_LNA_CMD    0
+#define M_LNA_CMD    0x7U
+#define V_LNA_CMD(x) ((x) << S_LNA_CMD)
+#define G_LNA_CMD(x) (((x) >> S_LNA_CMD) & M_LNA_CMD)
+
+#define A_PCIE_PHY_GEN3_AE1 0x5bac
+
+#define S_LNH_STAT    28
+#define M_LNH_STAT    0x7U
+#define V_LNH_STAT(x) ((x) << S_LNH_STAT)
+#define G_LNH_STAT(x) (((x) >> S_LNH_STAT) & M_LNH_STAT)
+
+#define S_LNH_CMD    24
+#define M_LNH_CMD    0x7U
+#define V_LNH_CMD(x) ((x) << S_LNH_CMD)
+#define G_LNH_CMD(x) (((x) >> S_LNH_CMD) & M_LNH_CMD)
+
+#define S_LNG_STAT    20
+#define M_LNG_STAT    0x7U
+#define V_LNG_STAT(x) ((x) << S_LNG_STAT)
+#define G_LNG_STAT(x) (((x) >> S_LNG_STAT) & M_LNG_STAT)
+
+#define S_LNG_CMD    16
+#define M_LNG_CMD    0x7U
+#define V_LNG_CMD(x) ((x) << S_LNG_CMD)
+#define G_LNG_CMD(x) (((x) >> S_LNG_CMD) & M_LNG_CMD)
+
+#define S_LNF_STAT    12
+#define M_LNF_STAT    0x7U
+#define V_LNF_STAT(x) ((x) << S_LNF_STAT)
+#define G_LNF_STAT(x) (((x) >> S_LNF_STAT) & M_LNF_STAT)
+
+#define S_LNF_CMD    8
+#define M_LNF_CMD    0x7U
+#define V_LNF_CMD(x) ((x) << S_LNF_CMD)
+#define G_LNF_CMD(x) (((x) >> S_LNF_CMD) & M_LNF_CMD)
+
+#define S_LNE_STAT    4
+#define M_LNE_STAT    0x7U
+#define V_LNE_STAT(x) ((x) << S_LNE_STAT)
+#define G_LNE_STAT(x) (((x) >> S_LNE_STAT) & M_LNE_STAT)
+
+#define S_LNE_CMD    0
+#define M_LNE_CMD    0x7U
+#define V_LNE_CMD(x) ((x) << S_LNE_CMD)
+#define G_LNE_CMD(x) (((x) >> S_LNE_CMD) & M_LNE_CMD)
+
+#define A_PCIE_PHY_FS_LF0 0x5bb0
+
+#define S_LANE1LF    24
+#define M_LANE1LF    0x3fU
+#define V_LANE1LF(x) ((x) << S_LANE1LF)
+#define G_LANE1LF(x) (((x) >> S_LANE1LF) & M_LANE1LF)
+
+#define S_LANE1FS    16
+#define M_LANE1FS    0x3fU
+#define V_LANE1FS(x) ((x) << S_LANE1FS)
+#define G_LANE1FS(x) (((x) >> S_LANE1FS) & M_LANE1FS)
+
+#define S_LANE0LF    8
+#define M_LANE0LF    0x3fU
+#define V_LANE0LF(x) ((x) << S_LANE0LF)
+#define G_LANE0LF(x) (((x) >> S_LANE0LF) & M_LANE0LF)
+
+#define S_LANE0FS    0
+#define M_LANE0FS    0x3fU
+#define V_LANE0FS(x) ((x) << S_LANE0FS)
+#define G_LANE0FS(x) (((x) >> S_LANE0FS) & M_LANE0FS)
+
+#define A_PCIE_PHY_FS_LF1 0x5bb4
+
+#define S_LANE3LF    24
+#define M_LANE3LF    0x3fU
+#define V_LANE3LF(x) ((x) << S_LANE3LF)
+#define G_LANE3LF(x) (((x) >> S_LANE3LF) & M_LANE3LF)
+
+#define S_LANE3FS    16
+#define M_LANE3FS    0x3fU
+#define V_LANE3FS(x) ((x) << S_LANE3FS)
+#define G_LANE3FS(x) (((x) >> S_LANE3FS) & M_LANE3FS)
+
+#define S_LANE2LF    8
+#define M_LANE2LF    0x3fU
+#define V_LANE2LF(x) ((x) << S_LANE2LF)
+#define G_LANE2LF(x) (((x) >> S_LANE2LF) & M_LANE2LF)
+
+#define S_LANE2FS    0
+#define M_LANE2FS    0x3fU
+#define V_LANE2FS(x) ((x) << S_LANE2FS)
+#define G_LANE2FS(x) (((x) >> S_LANE2FS) & M_LANE2FS)
+
+#define A_PCIE_PHY_FS_LF2 0x5bb8
+
+#define S_LANE5LF    24
+#define M_LANE5LF    0x3fU
+#define V_LANE5LF(x) ((x) << S_LANE5LF)
+#define G_LANE5LF(x) (((x) >> S_LANE5LF) & M_LANE5LF)
+
+#define S_LANE5FS    16
+#define M_LANE5FS    0x3fU
+#define V_LANE5FS(x) ((x) << S_LANE5FS)
+#define G_LANE5FS(x) (((x) >> S_LANE5FS) & M_LANE5FS)
+
+#define S_LANE4LF    8
+#define M_LANE4LF    0x3fU
+#define V_LANE4LF(x) ((x) << S_LANE4LF)
+#define G_LANE4LF(x) (((x) >> S_LANE4LF) & M_LANE4LF)
+
+#define S_LANE4FS    0
+#define M_LANE4FS    0x3fU
+#define V_LANE4FS(x) ((x) << S_LANE4FS)
+#define G_LANE4FS(x) (((x) >> S_LANE4FS) & M_LANE4FS)
+
+#define A_PCIE_PHY_FS_LF3 0x5bbc
+
+#define S_LANE7LF    24
+#define M_LANE7LF    0x3fU
+#define V_LANE7LF(x) ((x) << S_LANE7LF)
+#define G_LANE7LF(x) (((x) >> S_LANE7LF) & M_LANE7LF)
+
+#define S_LANE7FS    16
+#define M_LANE7FS    0x3fU
+#define V_LANE7FS(x) ((x) << S_LANE7FS)
+#define G_LANE7FS(x) (((x) >> S_LANE7FS) & M_LANE7FS)
+
+#define S_LANE6LF    8
+#define M_LANE6LF    0x3fU
+#define V_LANE6LF(x) ((x) << S_LANE6LF)
+#define G_LANE6LF(x) (((x) >> S_LANE6LF) & M_LANE6LF)
+
+#define S_LANE6FS    0
+#define M_LANE6FS    0x3fU
+#define V_LANE6FS(x) ((x) << S_LANE6FS)
+#define G_LANE6FS(x) (((x) >> S_LANE6FS) & M_LANE6FS)
+
+#define A_PCIE_PHY_PRESET_REQ 0x5bc0
+
+#define S_COEFFDONE    16
+#define V_COEFFDONE(x) ((x) << S_COEFFDONE)
+#define F_COEFFDONE    V_COEFFDONE(1U)
+
+#define S_COEFFLANE    8
+#define M_COEFFLANE    0x7U
+#define V_COEFFLANE(x) ((x) << S_COEFFLANE)
+#define G_COEFFLANE(x) (((x) >> S_COEFFLANE) & M_COEFFLANE)
+
+#define S_COEFFSTART    0
+#define V_COEFFSTART(x) ((x) << S_COEFFSTART)
+#define F_COEFFSTART    V_COEFFSTART(1U)
+
+#define S_T6_COEFFLANE    8
+#define M_T6_COEFFLANE    0xfU
+#define V_T6_COEFFLANE(x) ((x) << S_T6_COEFFLANE)
+#define G_T6_COEFFLANE(x) (((x) >> S_T6_COEFFLANE) & M_T6_COEFFLANE)
+
+#define A_PCIE_PHY_PRESET_COEFF 0x5bc4
+
+#define S_COEFF    0
+#define M_COEFF    0x3ffffU
+#define V_COEFF(x) ((x) << S_COEFF)
+#define G_COEFF(x) (((x) >> S_COEFF) & M_COEFF)
+
+#define A_PCIE_PHY_INDIR_REQ 0x5bf0
+
+#define S_PHYENABLE    31
+#define V_PHYENABLE(x) ((x) << S_PHYENABLE)
+#define F_PHYENABLE    V_PHYENABLE(1U)
+
+#define S_PCIE_PHY_REGADDR    0
+#define M_PCIE_PHY_REGADDR    0xffffU
+#define V_PCIE_PHY_REGADDR(x) ((x) << S_PCIE_PHY_REGADDR)
+#define G_PCIE_PHY_REGADDR(x) (((x) >> S_PCIE_PHY_REGADDR) & M_PCIE_PHY_REGADDR)
+
+#define A_PCIE_PHY_INDIR_DATA 0x5bf4
+#define A_PCIE_STATIC_SPARE1 0x5bf8
+#define A_PCIE_STATIC_SPARE2 0x5bfc
+#define A_PCIE_KDOORBELL_GTS_PF_BASE_LEN 0x5c10
+
+#define S_KDB_PF_LEN    24
+#define M_KDB_PF_LEN    0x1fU
+#define V_KDB_PF_LEN(x) ((x) << S_KDB_PF_LEN)
+#define G_KDB_PF_LEN(x) (((x) >> S_KDB_PF_LEN) & M_KDB_PF_LEN)
+
+#define S_KDB_PF_BASEADDR    0
+#define M_KDB_PF_BASEADDR    0xfffffU
+#define V_KDB_PF_BASEADDR(x) ((x) << S_KDB_PF_BASEADDR)
+#define G_KDB_PF_BASEADDR(x) (((x) >> S_KDB_PF_BASEADDR) & M_KDB_PF_BASEADDR)
+
+#define A_PCIE_KDOORBELL_GTS_VF_BASE_LEN 0x5c14
+
+#define S_KDB_VF_LEN    24
+#define M_KDB_VF_LEN    0x1fU
+#define V_KDB_VF_LEN(x) ((x) << S_KDB_VF_LEN)
+#define G_KDB_VF_LEN(x) (((x) >> S_KDB_VF_LEN) & M_KDB_VF_LEN)
+
+#define S_KDB_VF_BASEADDR    0
+#define M_KDB_VF_BASEADDR    0xfffffU
+#define V_KDB_VF_BASEADDR(x) ((x) << S_KDB_VF_BASEADDR)
+#define G_KDB_VF_BASEADDR(x) (((x) >> S_KDB_VF_BASEADDR) & M_KDB_VF_BASEADDR)
+
+#define A_PCIE_KDOORBELL_GTS_VF_OFFSET 0x5c18
+
+#define S_KDB_VF_MODOFST    0
+#define M_KDB_VF_MODOFST    0xfffU
+#define V_KDB_VF_MODOFST(x) ((x) << S_KDB_VF_MODOFST)
+#define G_KDB_VF_MODOFST(x) (((x) >> S_KDB_VF_MODOFST) & M_KDB_VF_MODOFST)
+
+#define A_PCIE_PHY_REQRXPWR1 0x5c1c
+
+#define S_REQ_LNP_RXSTATEDONE    31
+#define V_REQ_LNP_RXSTATEDONE(x) ((x) << S_REQ_LNP_RXSTATEDONE)
+#define F_REQ_LNP_RXSTATEDONE    V_REQ_LNP_RXSTATEDONE(1U)
+
+#define S_REQ_LNP_RXSTATEREQ    30
+#define V_REQ_LNP_RXSTATEREQ(x) ((x) << S_REQ_LNP_RXSTATEREQ)
+#define F_REQ_LNP_RXSTATEREQ    V_REQ_LNP_RXSTATEREQ(1U)
+
+#define S_REQ_LNP_RXPWRSTATE    28
+#define M_REQ_LNP_RXPWRSTATE    0x3U
+#define V_REQ_LNP_RXPWRSTATE(x) ((x) << S_REQ_LNP_RXPWRSTATE)
+#define G_REQ_LNP_RXPWRSTATE(x) (((x) >> S_REQ_LNP_RXPWRSTATE) & M_REQ_LNP_RXPWRSTATE)
+
+#define S_REQ_LNO_RXSTATEDONE    27
+#define V_REQ_LNO_RXSTATEDONE(x) ((x) << S_REQ_LNO_RXSTATEDONE)
+#define F_REQ_LNO_RXSTATEDONE    V_REQ_LNO_RXSTATEDONE(1U)
+
+#define S_REQ_LNO_RXSTATEREQ    26
+#define V_REQ_LNO_RXSTATEREQ(x) ((x) << S_REQ_LNO_RXSTATEREQ)
+#define F_REQ_LNO_RXSTATEREQ    V_REQ_LNO_RXSTATEREQ(1U)
+
+#define S_REQ_LNO_RXPWRSTATE    24
+#define M_REQ_LNO_RXPWRSTATE    0x3U
+#define V_REQ_LNO_RXPWRSTATE(x) ((x) << S_REQ_LNO_RXPWRSTATE)
+#define G_REQ_LNO_RXPWRSTATE(x) (((x) >> S_REQ_LNO_RXPWRSTATE) & M_REQ_LNO_RXPWRSTATE)
+
+#define S_REQ_LNN_RXSTATEDONE    23
+#define V_REQ_LNN_RXSTATEDONE(x) ((x) << S_REQ_LNN_RXSTATEDONE)
+#define F_REQ_LNN_RXSTATEDONE    V_REQ_LNN_RXSTATEDONE(1U)
+
+#define S_REQ_LNN_RXSTATEREQ    22
+#define V_REQ_LNN_RXSTATEREQ(x) ((x) << S_REQ_LNN_RXSTATEREQ)
+#define F_REQ_LNN_RXSTATEREQ    V_REQ_LNN_RXSTATEREQ(1U)
+
+#define S_REQ_LNN_RXPWRSTATE    20
+#define M_REQ_LNN_RXPWRSTATE    0x3U
+#define V_REQ_LNN_RXPWRSTATE(x) ((x) << S_REQ_LNN_RXPWRSTATE)
+#define G_REQ_LNN_RXPWRSTATE(x) (((x) >> S_REQ_LNN_RXPWRSTATE) & M_REQ_LNN_RXPWRSTATE)
+
+#define S_REQ_LNM_RXSTATEDONE    19
+#define V_REQ_LNM_RXSTATEDONE(x) ((x) << S_REQ_LNM_RXSTATEDONE)
+#define F_REQ_LNM_RXSTATEDONE    V_REQ_LNM_RXSTATEDONE(1U)
+
+#define S_REQ_LNM_RXSTATEREQ    18
+#define V_REQ_LNM_RXSTATEREQ(x) ((x) << S_REQ_LNM_RXSTATEREQ)
+#define F_REQ_LNM_RXSTATEREQ    V_REQ_LNM_RXSTATEREQ(1U)
+
+#define S_REQ_LNM_RXPWRSTATE    16
+#define M_REQ_LNM_RXPWRSTATE    0x3U
+#define V_REQ_LNM_RXPWRSTATE(x) ((x) << S_REQ_LNM_RXPWRSTATE)
+#define G_REQ_LNM_RXPWRSTATE(x) (((x) >> S_REQ_LNM_RXPWRSTATE) & M_REQ_LNM_RXPWRSTATE)
+
+#define S_REQ_LNL_RXSTATEDONE    15
+#define V_REQ_LNL_RXSTATEDONE(x) ((x) << S_REQ_LNL_RXSTATEDONE)
+#define F_REQ_LNL_RXSTATEDONE    V_REQ_LNL_RXSTATEDONE(1U)
+
+#define S_REQ_LNL_RXSTATEREQ    14
+#define V_REQ_LNL_RXSTATEREQ(x) ((x) << S_REQ_LNL_RXSTATEREQ)
+#define F_REQ_LNL_RXSTATEREQ    V_REQ_LNL_RXSTATEREQ(1U)
+
+#define S_REQ_LNL_RXPWRSTATE    12
+#define M_REQ_LNL_RXPWRSTATE    0x3U
+#define V_REQ_LNL_RXPWRSTATE(x) ((x) << S_REQ_LNL_RXPWRSTATE)
+#define G_REQ_LNL_RXPWRSTATE(x) (((x) >> S_REQ_LNL_RXPWRSTATE) & M_REQ_LNL_RXPWRSTATE)
+
+#define S_REQ_LNK_RXSTATEDONE    11
+#define V_REQ_LNK_RXSTATEDONE(x) ((x) << S_REQ_LNK_RXSTATEDONE)
+#define F_REQ_LNK_RXSTATEDONE    V_REQ_LNK_RXSTATEDONE(1U)
+
+#define S_REQ_LNK_RXSTATEREQ    10
+#define V_REQ_LNK_RXSTATEREQ(x) ((x) << S_REQ_LNK_RXSTATEREQ)
+#define F_REQ_LNK_RXSTATEREQ    V_REQ_LNK_RXSTATEREQ(1U)
+
+#define S_REQ_LNK_RXPWRSTATE    8
+#define M_REQ_LNK_RXPWRSTATE    0x3U
+#define V_REQ_LNK_RXPWRSTATE(x) ((x) << S_REQ_LNK_RXPWRSTATE)
+#define G_REQ_LNK_RXPWRSTATE(x) (((x) >> S_REQ_LNK_RXPWRSTATE) & M_REQ_LNK_RXPWRSTATE)
+
+#define S_REQ_LNJ_RXSTATEDONE    7
+#define V_REQ_LNJ_RXSTATEDONE(x) ((x) << S_REQ_LNJ_RXSTATEDONE)
+#define F_REQ_LNJ_RXSTATEDONE    V_REQ_LNJ_RXSTATEDONE(1U)
+
+#define S_REQ_LNJ_RXSTATEREQ    6
+#define V_REQ_LNJ_RXSTATEREQ(x) ((x) << S_REQ_LNJ_RXSTATEREQ)
+#define F_REQ_LNJ_RXSTATEREQ    V_REQ_LNJ_RXSTATEREQ(1U)
+
+#define S_REQ_LNJ_RXPWRSTATE    4
+#define M_REQ_LNJ_RXPWRSTATE    0x3U
+#define V_REQ_LNJ_RXPWRSTATE(x) ((x) << S_REQ_LNJ_RXPWRSTATE)
+#define G_REQ_LNJ_RXPWRSTATE(x) (((x) >> S_REQ_LNJ_RXPWRSTATE) & M_REQ_LNJ_RXPWRSTATE)
+
+#define S_REQ_LNI_RXSTATEDONE    3
+#define V_REQ_LNI_RXSTATEDONE(x) ((x) << S_REQ_LNI_RXSTATEDONE)
+#define F_REQ_LNI_RXSTATEDONE    V_REQ_LNI_RXSTATEDONE(1U)
+
+#define S_REQ_LNI_RXSTATEREQ    2
+#define V_REQ_LNI_RXSTATEREQ(x) ((x) << S_REQ_LNI_RXSTATEREQ)
+#define F_REQ_LNI_RXSTATEREQ    V_REQ_LNI_RXSTATEREQ(1U)
+
+#define S_REQ_LNI_RXPWRSTATE    0
+#define M_REQ_LNI_RXPWRSTATE    0x3U
+#define V_REQ_LNI_RXPWRSTATE(x) ((x) << S_REQ_LNI_RXPWRSTATE)
+#define G_REQ_LNI_RXPWRSTATE(x) (((x) >> S_REQ_LNI_RXPWRSTATE) & M_REQ_LNI_RXPWRSTATE)
+
+#define A_PCIE_PHY_CURRXPWR1 0x5c20
+
+#define S_CUR_LNP_RXPWRSTATE    28
+#define M_CUR_LNP_RXPWRSTATE    0x7U
+#define V_CUR_LNP_RXPWRSTATE(x) ((x) << S_CUR_LNP_RXPWRSTATE)
+#define G_CUR_LNP_RXPWRSTATE(x) (((x) >> S_CUR_LNP_RXPWRSTATE) & M_CUR_LNP_RXPWRSTATE)
+
+#define S_CUR_LNO_RXPWRSTATE    24
+#define M_CUR_LNO_RXPWRSTATE    0x7U
+#define V_CUR_LNO_RXPWRSTATE(x) ((x) << S_CUR_LNO_RXPWRSTATE)
+#define G_CUR_LNO_RXPWRSTATE(x) (((x) >> S_CUR_LNO_RXPWRSTATE) & M_CUR_LNO_RXPWRSTATE)
+
+#define S_CUR_LNN_RXPWRSTATE    20
+#define M_CUR_LNN_RXPWRSTATE    0x7U
+#define V_CUR_LNN_RXPWRSTATE(x) ((x) << S_CUR_LNN_RXPWRSTATE)
+#define G_CUR_LNN_RXPWRSTATE(x) (((x) >> S_CUR_LNN_RXPWRSTATE) & M_CUR_LNN_RXPWRSTATE)
+
+#define S_CUR_LNM_RXPWRSTATE    16
+#define M_CUR_LNM_RXPWRSTATE    0x7U
+#define V_CUR_LNM_RXPWRSTATE(x) ((x) << S_CUR_LNM_RXPWRSTATE)
+#define G_CUR_LNM_RXPWRSTATE(x) (((x) >> S_CUR_LNM_RXPWRSTATE) & M_CUR_LNM_RXPWRSTATE)
+
+#define S_CUR_LNL_RXPWRSTATE    12
+#define M_CUR_LNL_RXPWRSTATE    0x7U
+#define V_CUR_LNL_RXPWRSTATE(x) ((x) << S_CUR_LNL_RXPWRSTATE)
+#define G_CUR_LNL_RXPWRSTATE(x) (((x) >> S_CUR_LNL_RXPWRSTATE) & M_CUR_LNL_RXPWRSTATE)
+
+#define S_CUR_LNK_RXPWRSTATE    8
+#define M_CUR_LNK_RXPWRSTATE    0x7U
+#define V_CUR_LNK_RXPWRSTATE(x) ((x) << S_CUR_LNK_RXPWRSTATE)
+#define G_CUR_LNK_RXPWRSTATE(x) (((x) >> S_CUR_LNK_RXPWRSTATE) & M_CUR_LNK_RXPWRSTATE)
+
+#define S_CUR_LNJ_RXPWRSTATE    4
+#define M_CUR_LNJ_RXPWRSTATE    0x7U
+#define V_CUR_LNJ_RXPWRSTATE(x) ((x) << S_CUR_LNJ_RXPWRSTATE)
+#define G_CUR_LNJ_RXPWRSTATE(x) (((x) >> S_CUR_LNJ_RXPWRSTATE) & M_CUR_LNJ_RXPWRSTATE)
+
+#define S_CUR_LNI_RXPWRSTATE    0
+#define M_CUR_LNI_RXPWRSTATE    0x7U
+#define V_CUR_LNI_RXPWRSTATE(x) ((x) << S_CUR_LNI_RXPWRSTATE)
+#define G_CUR_LNI_RXPWRSTATE(x) (((x) >> S_CUR_LNI_RXPWRSTATE) & M_CUR_LNI_RXPWRSTATE)
+
+#define A_PCIE_PHY_GEN3_AE2 0x5c24
+
+#define S_LNL_STAT    28
+#define M_LNL_STAT    0x7U
+#define V_LNL_STAT(x) ((x) << S_LNL_STAT)
+#define G_LNL_STAT(x) (((x) >> S_LNL_STAT) & M_LNL_STAT)
+
+#define S_LNL_CMD    24
+#define M_LNL_CMD    0x7U
+#define V_LNL_CMD(x) ((x) << S_LNL_CMD)
+#define G_LNL_CMD(x) (((x) >> S_LNL_CMD) & M_LNL_CMD)
+
+#define S_LNK_STAT    20
+#define M_LNK_STAT    0x7U
+#define V_LNK_STAT(x) ((x) << S_LNK_STAT)
+#define G_LNK_STAT(x) (((x) >> S_LNK_STAT) & M_LNK_STAT)
+
+#define S_LNK_CMD    16
+#define M_LNK_CMD    0x7U
+#define V_LNK_CMD(x) ((x) << S_LNK_CMD)
+#define G_LNK_CMD(x) (((x) >> S_LNK_CMD) & M_LNK_CMD)
+
+#define S_LNJ_STAT    12
+#define M_LNJ_STAT    0x7U
+#define V_LNJ_STAT(x) ((x) << S_LNJ_STAT)
+#define G_LNJ_STAT(x) (((x) >> S_LNJ_STAT) & M_LNJ_STAT)
+
+#define S_LNJ_CMD    8
+#define M_LNJ_CMD    0x7U
+#define V_LNJ_CMD(x) ((x) << S_LNJ_CMD)
+#define G_LNJ_CMD(x) (((x) >> S_LNJ_CMD) & M_LNJ_CMD)
+
+#define S_LNI_STAT    4
+#define M_LNI_STAT    0x7U
+#define V_LNI_STAT(x) ((x) << S_LNI_STAT)
+#define G_LNI_STAT(x) (((x) >> S_LNI_STAT) & M_LNI_STAT)
+
+#define S_LNI_CMD    0
+#define M_LNI_CMD    0x7U
+#define V_LNI_CMD(x) ((x) << S_LNI_CMD)
+#define G_LNI_CMD(x) (((x) >> S_LNI_CMD) & M_LNI_CMD)
+
+#define A_PCIE_PHY_GEN3_AE3 0x5c28
+
+#define S_LNP_STAT    28
+#define M_LNP_STAT    0x7U
+#define V_LNP_STAT(x) ((x) << S_LNP_STAT)
+#define G_LNP_STAT(x) (((x) >> S_LNP_STAT) & M_LNP_STAT)
+
+#define S_LNP_CMD    24
+#define M_LNP_CMD    0x7U
+#define V_LNP_CMD(x) ((x) << S_LNP_CMD)
+#define G_LNP_CMD(x) (((x) >> S_LNP_CMD) & M_LNP_CMD)
+
+#define S_LNO_STAT    20
+#define M_LNO_STAT    0x7U
+#define V_LNO_STAT(x) ((x) << S_LNO_STAT)
+#define G_LNO_STAT(x) (((x) >> S_LNO_STAT) & M_LNO_STAT)
+
+#define S_LNO_CMD    16
+#define M_LNO_CMD    0x7U
+#define V_LNO_CMD(x) ((x) << S_LNO_CMD)
+#define G_LNO_CMD(x) (((x) >> S_LNO_CMD) & M_LNO_CMD)
+
+#define S_LNN_STAT    12
+#define M_LNN_STAT    0x7U
+#define V_LNN_STAT(x) ((x) << S_LNN_STAT)
+#define G_LNN_STAT(x) (((x) >> S_LNN_STAT) & M_LNN_STAT)
+
+#define S_LNN_CMD    8
+#define M_LNN_CMD    0x7U
+#define V_LNN_CMD(x) ((x) << S_LNN_CMD)
+#define G_LNN_CMD(x) (((x) >> S_LNN_CMD) & M_LNN_CMD)
+
+#define S_LNM_STAT    4
+#define M_LNM_STAT    0x7U
+#define V_LNM_STAT(x) ((x) << S_LNM_STAT)
+#define G_LNM_STAT(x) (((x) >> S_LNM_STAT) & M_LNM_STAT)
+
+#define S_LNM_CMD    0
+#define M_LNM_CMD    0x7U
+#define V_LNM_CMD(x) ((x) << S_LNM_CMD)
+#define G_LNM_CMD(x) (((x) >> S_LNM_CMD) & M_LNM_CMD)
+
+#define A_PCIE_PHY_FS_LF4 0x5c2c
+
+#define S_LANE9LF    24
+#define M_LANE9LF    0x3fU
+#define V_LANE9LF(x) ((x) << S_LANE9LF)
+#define G_LANE9LF(x) (((x) >> S_LANE9LF) & M_LANE9LF)
+
+#define S_LANE9FS    16
+#define M_LANE9FS    0x3fU
+#define V_LANE9FS(x) ((x) << S_LANE9FS)
+#define G_LANE9FS(x) (((x) >> S_LANE9FS) & M_LANE9FS)
+
+#define S_LANE8LF    8
+#define M_LANE8LF    0x3fU
+#define V_LANE8LF(x) ((x) << S_LANE8LF)
+#define G_LANE8LF(x) (((x) >> S_LANE8LF) & M_LANE8LF)
+
+#define S_LANE8FS    0
+#define M_LANE8FS    0x3fU
+#define V_LANE8FS(x) ((x) << S_LANE8FS)
+#define G_LANE8FS(x) (((x) >> S_LANE8FS) & M_LANE8FS)
+
+#define A_PCIE_PHY_FS_LF5 0x5c30
+
+#define S_LANE11LF    24
+#define M_LANE11LF    0x3fU
+#define V_LANE11LF(x) ((x) << S_LANE11LF)
+#define G_LANE11LF(x) (((x) >> S_LANE11LF) & M_LANE11LF)
+
+#define S_LANE11FS    16
+#define M_LANE11FS    0x3fU
+#define V_LANE11FS(x) ((x) << S_LANE11FS)
+#define G_LANE11FS(x) (((x) >> S_LANE11FS) & M_LANE11FS)
+
+#define S_LANE10LF    8
+#define M_LANE10LF    0x3fU
+#define V_LANE10LF(x) ((x) << S_LANE10LF)
+#define G_LANE10LF(x) (((x) >> S_LANE10LF) & M_LANE10LF)
+
+#define S_LANE10FS    0
+#define M_LANE10FS    0x3fU
+#define V_LANE10FS(x) ((x) << S_LANE10FS)
+#define G_LANE10FS(x) (((x) >> S_LANE10FS) & M_LANE10FS)
+
+#define A_PCIE_PHY_FS_LF6 0x5c34
+
+#define S_LANE13LF    24
+#define M_LANE13LF    0x3fU
+#define V_LANE13LF(x) ((x) << S_LANE13LF)
+#define G_LANE13LF(x) (((x) >> S_LANE13LF) & M_LANE13LF)
+
+#define S_LANE13FS    16
+#define M_LANE13FS    0x3fU
+#define V_LANE13FS(x) ((x) << S_LANE13FS)
+#define G_LANE13FS(x) (((x) >> S_LANE13FS) & M_LANE13FS)
+
+#define S_LANE12LF    8
+#define M_LANE12LF    0x3fU
+#define V_LANE12LF(x) ((x) << S_LANE12LF)
+#define G_LANE12LF(x) (((x) >> S_LANE12LF) & M_LANE12LF)
+
+#define S_LANE12FS    0
+#define M_LANE12FS    0x3fU
+#define V_LANE12FS(x) ((x) << S_LANE12FS)
+#define G_LANE12FS(x) (((x) >> S_LANE12FS) & M_LANE12FS)
+
+#define A_PCIE_PHY_FS_LF7 0x5c38
+
+#define S_LANE15LF    24
+#define M_LANE15LF    0x3fU
+#define V_LANE15LF(x) ((x) << S_LANE15LF)
+#define G_LANE15LF(x) (((x) >> S_LANE15LF) & M_LANE15LF)
+
+#define S_LANE15FS    16
+#define M_LANE15FS    0x3fU
+#define V_LANE15FS(x) ((x) << S_LANE15FS)
+#define G_LANE15FS(x) (((x) >> S_LANE15FS) & M_LANE15FS)
+
+#define S_LANE14LF    8
+#define M_LANE14LF    0x3fU
+#define V_LANE14LF(x) ((x) << S_LANE14LF)
+#define G_LANE14LF(x) (((x) >> S_LANE14LF) & M_LANE14LF)
+
+#define S_LANE14FS    0
+#define M_LANE14FS    0x3fU
+#define V_LANE14FS(x) ((x) << S_LANE14FS)
+#define G_LANE14FS(x) (((x) >> S_LANE14FS) & M_LANE14FS)
+
+#define A_PCIE_MULTI_PHY_INDIR_REQ 0x5c3c
+
+#define S_PHY_REG_ENABLE    31
+#define V_PHY_REG_ENABLE(x) ((x) << S_PHY_REG_ENABLE)
+#define F_PHY_REG_ENABLE    V_PHY_REG_ENABLE(1U)
+
+#define S_PHY_REG_SELECT    22
+#define M_PHY_REG_SELECT    0x3U
+#define V_PHY_REG_SELECT(x) ((x) << S_PHY_REG_SELECT)
+#define G_PHY_REG_SELECT(x) (((x) >> S_PHY_REG_SELECT) & M_PHY_REG_SELECT)
+
+#define S_PHY_REG_REGADDR    0
+#define M_PHY_REG_REGADDR    0xffffU
+#define V_PHY_REG_REGADDR(x) ((x) << S_PHY_REG_REGADDR)
+#define G_PHY_REG_REGADDR(x) (((x) >> S_PHY_REG_REGADDR) & M_PHY_REG_REGADDR)
+
+#define A_PCIE_MULTI_PHY_INDIR_DATA 0x5c40
+
+#define S_PHY_REG_DATA    0
+#define M_PHY_REG_DATA    0xffffU
+#define V_PHY_REG_DATA(x) ((x) << S_PHY_REG_DATA)
+#define G_PHY_REG_DATA(x) (((x) >> S_PHY_REG_DATA) & M_PHY_REG_DATA)
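/*
 * A minimal usage sketch, assuming A_PCIE_MULTI_PHY_INDIR_REQ and
 * A_PCIE_MULTI_PHY_INDIR_DATA behave as the usual indirect request/data
 * pair: post the request with the enable flag, PHY select, and register
 * address, then pull the 16-bit result out of the data register.
 * t4_read_reg()/t4_write_reg() are the driver's register accessors; the
 * exact handshake (e.g. whether a completion poll is needed before the
 * data read) is an assumption here, not something this header specifies.
 *
 *	t4_write_reg(sc, A_PCIE_MULTI_PHY_INDIR_REQ,
 *	    F_PHY_REG_ENABLE | V_PHY_REG_SELECT(phy) |
 *	    V_PHY_REG_REGADDR(regaddr));
 *	val = G_PHY_REG_DATA(t4_read_reg(sc, A_PCIE_MULTI_PHY_INDIR_DATA));
 */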
+
+#define A_PCIE_VF_INT_INDIR_REQ 0x5c44
+
+#define S_ENABLE_VF    24
+#define V_ENABLE_VF(x) ((x) << S_ENABLE_VF)
+#define F_ENABLE_VF    V_ENABLE_VF(1U)
+
+#define S_AI_VF    23
+#define V_AI_VF(x) ((x) << S_AI_VF)
+#define F_AI_VF    V_AI_VF(1U)
+
+#define S_VFID_PCIE    0
+#define M_VFID_PCIE    0x3ffU
+#define V_VFID_PCIE(x) ((x) << S_VFID_PCIE)
+#define G_VFID_PCIE(x) (((x) >> S_VFID_PCIE) & M_VFID_PCIE)
+
+#define A_PCIE_VF_INT_INDIR_DATA 0x5c48
+#define A_PCIE_VF_256_INT_CFG2 0x5c4c
+#define A_PCIE_VF_MSI_EN_4 0x5e50
+#define A_PCIE_VF_MSI_EN_5 0x5e54
+#define A_PCIE_VF_MSI_EN_6 0x5e58
+#define A_PCIE_VF_MSI_EN_7 0x5e5c
+#define A_PCIE_VF_MSIX_EN_4 0x5e60
+#define A_PCIE_VF_MSIX_EN_5 0x5e64
+#define A_PCIE_VF_MSIX_EN_6 0x5e68
+#define A_PCIE_VF_MSIX_EN_7 0x5e6c
+#define A_PCIE_FLR_VF4_STATUS 0x5e70
+#define A_PCIE_FLR_VF5_STATUS 0x5e74
+#define A_PCIE_FLR_VF6_STATUS 0x5e78
+#define A_PCIE_FLR_VF7_STATUS 0x5e7c
+#define A_T6_PCIE_BUS_MST_STAT_4 0x5e80
+#define A_T6_PCIE_BUS_MST_STAT_5 0x5e84
+#define A_T6_PCIE_BUS_MST_STAT_6 0x5e88
+#define A_T6_PCIE_BUS_MST_STAT_7 0x5e8c
+#define A_PCIE_BUS_MST_STAT_8 0x5e90
+
+#define S_BUSMST_263_256    0
+#define M_BUSMST_263_256    0xffU
+#define V_BUSMST_263_256(x) ((x) << S_BUSMST_263_256)
+#define G_BUSMST_263_256(x) (((x) >> S_BUSMST_263_256) & M_BUSMST_263_256)
+
+#define A_PCIE_TGT_SKID_FIFO 0x5e94
+
+#define S_HDRFREECNT    16
+#define M_HDRFREECNT    0xfffU
+#define V_HDRFREECNT(x) ((x) << S_HDRFREECNT)
+#define G_HDRFREECNT(x) (((x) >> S_HDRFREECNT) & M_HDRFREECNT)
+
+#define S_DATAFREECNT    0
+#define M_DATAFREECNT    0xfffU
+#define V_DATAFREECNT(x) ((x) << S_DATAFREECNT)
+#define G_DATAFREECNT(x) (((x) >> S_DATAFREECNT) & M_DATAFREECNT)
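/*
 * A minimal sketch of the S_/M_/V_/F_/G_ naming convention used throughout
 * this header: S_* is a field's bit offset, M_* its mask, V_*(x) positions
 * a value at the field, G_*(x) extracts one, and F_* is the one-bit flag
 * form of V_*.  Assuming the driver's t4_read_reg() accessor, the two
 * 12-bit free counts in A_PCIE_TGT_SKID_FIFO could be decoded as:
 *
 *	uint32_t v = t4_read_reg(sc, A_PCIE_TGT_SKID_FIFO);
 *	u_int hdr_free  = G_HDRFREECNT(v);	 free header entries
 *	u_int data_free = G_DATAFREECNT(v);	 free data entries
 */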
+
+#define A_T6_PCIE_RSP_ERR_STAT_4 0x5ea0
+#define A_T6_PCIE_RSP_ERR_STAT_5 0x5ea4
+#define A_T6_PCIE_RSP_ERR_STAT_6 0x5ea8
+#define A_T6_PCIE_RSP_ERR_STAT_7 0x5eac
+#define A_PCIE_RSP_ERR_STAT_8 0x5eb0
+
+#define S_RSPERR_263_256    0
+#define M_RSPERR_263_256    0xffU
+#define V_RSPERR_263_256(x) ((x) << S_RSPERR_263_256)
+#define G_RSPERR_263_256(x) (((x) >> S_RSPERR_263_256) & M_RSPERR_263_256)
+
+#define A_PCIE_PHY_STAT1 0x5ec0
+
+#define S_PHY0_RTUNE_ACK    31
+#define V_PHY0_RTUNE_ACK(x) ((x) << S_PHY0_RTUNE_ACK)
+#define F_PHY0_RTUNE_ACK    V_PHY0_RTUNE_ACK(1U)
+
+#define S_PHY1_RTUNE_ACK    30
+#define V_PHY1_RTUNE_ACK(x) ((x) << S_PHY1_RTUNE_ACK)
+#define F_PHY1_RTUNE_ACK    V_PHY1_RTUNE_ACK(1U)
+
+#define A_PCIE_PHY_CTRL1 0x5ec4
+
+#define S_PHY0_RTUNE_REQ    31
+#define V_PHY0_RTUNE_REQ(x) ((x) << S_PHY0_RTUNE_REQ)
+#define F_PHY0_RTUNE_REQ    V_PHY0_RTUNE_REQ(1U)
+
+#define S_PHY1_RTUNE_REQ    30
+#define V_PHY1_RTUNE_REQ(x) ((x) << S_PHY1_RTUNE_REQ)
+#define F_PHY1_RTUNE_REQ    V_PHY1_RTUNE_REQ(1U)
+
+#define S_TXDEEMPH_GEN1    16
+#define M_TXDEEMPH_GEN1    0xffU
+#define V_TXDEEMPH_GEN1(x) ((x) << S_TXDEEMPH_GEN1)
+#define G_TXDEEMPH_GEN1(x) (((x) >> S_TXDEEMPH_GEN1) & M_TXDEEMPH_GEN1)
+
+#define S_TXDEEMPH_GEN2_3P5DB    8
+#define M_TXDEEMPH_GEN2_3P5DB    0xffU
+#define V_TXDEEMPH_GEN2_3P5DB(x) ((x) << S_TXDEEMPH_GEN2_3P5DB)
+#define G_TXDEEMPH_GEN2_3P5DB(x) (((x) >> S_TXDEEMPH_GEN2_3P5DB) & M_TXDEEMPH_GEN2_3P5DB)
+
+#define S_TXDEEMPH_GEN2_6DB    0
+#define M_TXDEEMPH_GEN2_6DB    0xffU
+#define V_TXDEEMPH_GEN2_6DB(x) ((x) << S_TXDEEMPH_GEN2_6DB)
+#define G_TXDEEMPH_GEN2_6DB(x) (((x) >> S_TXDEEMPH_GEN2_6DB) & M_TXDEEMPH_GEN2_6DB)
+
+#define A_PCIE_PCIE_SPARE0 0x5ec8
+#define A_PCIE_RESET_STAT 0x5ecc
+
+#define S_PON_RST_STATE_FLAG    11
+#define V_PON_RST_STATE_FLAG(x) ((x) << S_PON_RST_STATE_FLAG)
+#define F_PON_RST_STATE_FLAG    V_PON_RST_STATE_FLAG(1U)
+
+#define S_BUS_RST_STATE_FLAG    10
+#define V_BUS_RST_STATE_FLAG(x) ((x) << S_BUS_RST_STATE_FLAG)
+#define F_BUS_RST_STATE_FLAG    V_BUS_RST_STATE_FLAG(1U)
+
+#define S_DL_DOWN_PCIECRST_MODE0_STATE_FLAG    9
+#define V_DL_DOWN_PCIECRST_MODE0_STATE_FLAG(x) ((x) << S_DL_DOWN_PCIECRST_MODE0_STATE_FLAG)
+#define F_DL_DOWN_PCIECRST_MODE0_STATE_FLAG    V_DL_DOWN_PCIECRST_MODE0_STATE_FLAG(1U)
+
+#define S_DL_DOWN_PCIECRST_MODE1_STATE_FLAG    8
+#define V_DL_DOWN_PCIECRST_MODE1_STATE_FLAG(x) ((x) << S_DL_DOWN_PCIECRST_MODE1_STATE_FLAG)
+#define F_DL_DOWN_PCIECRST_MODE1_STATE_FLAG    V_DL_DOWN_PCIECRST_MODE1_STATE_FLAG(1U)
+
+#define S_PCIE_WARM_RST_MODE0_STATE_FLAG    7
+#define V_PCIE_WARM_RST_MODE0_STATE_FLAG(x) ((x) << S_PCIE_WARM_RST_MODE0_STATE_FLAG)
+#define F_PCIE_WARM_RST_MODE0_STATE_FLAG    V_PCIE_WARM_RST_MODE0_STATE_FLAG(1U)
+
+#define S_PCIE_WARM_RST_MODE1_STATE_FLAG    6
+#define V_PCIE_WARM_RST_MODE1_STATE_FLAG(x) ((x) << S_PCIE_WARM_RST_MODE1_STATE_FLAG)
+#define F_PCIE_WARM_RST_MODE1_STATE_FLAG    V_PCIE_WARM_RST_MODE1_STATE_FLAG(1U)
+
+#define S_PIO_WARM_RST_MODE0_STATE_FLAG    5
+#define V_PIO_WARM_RST_MODE0_STATE_FLAG(x) ((x) << S_PIO_WARM_RST_MODE0_STATE_FLAG)
+#define F_PIO_WARM_RST_MODE0_STATE_FLAG    V_PIO_WARM_RST_MODE0_STATE_FLAG(1U)
+
+#define S_PIO_WARM_RST_MODE1_STATE_FLAG    4
+#define V_PIO_WARM_RST_MODE1_STATE_FLAG(x) ((x) << S_PIO_WARM_RST_MODE1_STATE_FLAG)
+#define F_PIO_WARM_RST_MODE1_STATE_FLAG    V_PIO_WARM_RST_MODE1_STATE_FLAG(1U)
+
+#define S_LASTRESETSTATE    0
+#define M_LASTRESETSTATE    0x7U
+#define V_LASTRESETSTATE(x) ((x) << S_LASTRESETSTATE)
+#define G_LASTRESETSTATE(x) (((x) >> S_LASTRESETSTATE) & M_LASTRESETSTATE)
+
+#define A_PCIE_FUNC_DSTATE 0x5ed0
+
+#define S_PF7_DSTATE    21
+#define M_PF7_DSTATE    0x7U
+#define V_PF7_DSTATE(x) ((x) << S_PF7_DSTATE)
+#define G_PF7_DSTATE(x) (((x) >> S_PF7_DSTATE) & M_PF7_DSTATE)
+
+#define S_PF6_DSTATE    18
+#define M_PF6_DSTATE    0x7U
+#define V_PF6_DSTATE(x) ((x) << S_PF6_DSTATE)
+#define G_PF6_DSTATE(x) (((x) >> S_PF6_DSTATE) & M_PF6_DSTATE)
+
+#define S_PF5_DSTATE    15
+#define M_PF5_DSTATE    0x7U
+#define V_PF5_DSTATE(x) ((x) << S_PF5_DSTATE)
+#define G_PF5_DSTATE(x) (((x) >> S_PF5_DSTATE) & M_PF5_DSTATE)
+
+#define S_PF4_DSTATE    12
+#define M_PF4_DSTATE    0x7U
+#define V_PF4_DSTATE(x) ((x) << S_PF4_DSTATE)
+#define G_PF4_DSTATE(x) (((x) >> S_PF4_DSTATE) & M_PF4_DSTATE)
+
+#define S_PF3_DSTATE    9
+#define M_PF3_DSTATE    0x7U
+#define V_PF3_DSTATE(x) ((x) << S_PF3_DSTATE)
+#define G_PF3_DSTATE(x) (((x) >> S_PF3_DSTATE) & M_PF3_DSTATE)
+
+#define S_PF2_DSTATE    6
+#define M_PF2_DSTATE    0x7U
+#define V_PF2_DSTATE(x) ((x) << S_PF2_DSTATE)
+#define G_PF2_DSTATE(x) (((x) >> S_PF2_DSTATE) & M_PF2_DSTATE)
+
+#define S_PF1_DSTATE    3
+#define M_PF1_DSTATE    0x7U
+#define V_PF1_DSTATE(x) ((x) << S_PF1_DSTATE)
+#define G_PF1_DSTATE(x) (((x) >> S_PF1_DSTATE) & M_PF1_DSTATE)
+
+#define S_PF0_DSTATE    0
+#define M_PF0_DSTATE    0x7U
+#define V_PF0_DSTATE(x) ((x) << S_PF0_DSTATE)
+#define G_PF0_DSTATE(x) (((x) >> S_PF0_DSTATE) & M_PF0_DSTATE)
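/*
 * Sketch: A_PCIE_FUNC_DSTATE packs one 3-bit D-state per PF (offsets 0, 3,
 * ..., 21 per the S_PFn_DSTATE definitions above), so all eight PFs can be
 * decoded from a single read.  The loop below assumes that regular 3-bit
 * stride rather than spelling out each G_PFn_DSTATE() macro individually.
 *
 *	uint32_t v = t4_read_reg(sc, A_PCIE_FUNC_DSTATE);
 *	for (int pf = 0; pf < 8; pf++)
 *		printf("PF%d D-state: %u\n", pf,
 *		    (v >> (pf * 3)) & M_PF0_DSTATE);
 */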
+
+#define A_PCIE_DEBUG_ADDR_RANGE1 0x5ee0
+#define A_PCIE_DEBUG_ADDR_RANGE2 0x5ef0
+#define A_PCIE_DEBUG_ADDR_RANGE_CNT 0x5f00
+#define A_PCIE_PDEBUG_REG_0X0 0x0
+#define A_PCIE_PDEBUG_REG_0X1 0x1
+#define A_PCIE_PDEBUG_REG_0X2 0x2
+
+#define S_TAGQ_CH0_TAGS_USED    11
+#define M_TAGQ_CH0_TAGS_USED    0xffU
+#define V_TAGQ_CH0_TAGS_USED(x) ((x) << S_TAGQ_CH0_TAGS_USED)
+#define G_TAGQ_CH0_TAGS_USED(x) (((x) >> S_TAGQ_CH0_TAGS_USED) & M_TAGQ_CH0_TAGS_USED)
+
+#define S_REQ_CH0_DATA_EMPTY    10
+#define V_REQ_CH0_DATA_EMPTY(x) ((x) << S_REQ_CH0_DATA_EMPTY)
+#define F_REQ_CH0_DATA_EMPTY    V_REQ_CH0_DATA_EMPTY(1U)
+
+#define S_RDQ_CH0_REQ_EMPTY    9
+#define V_RDQ_CH0_REQ_EMPTY(x) ((x) << S_RDQ_CH0_REQ_EMPTY)
+#define F_RDQ_CH0_REQ_EMPTY    V_RDQ_CH0_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_CMD    V_REQ_CTL_RD_CH0_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH0_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X3 0x3
+
+#define S_TAGQ_CH1_TAGS_USED    11
+#define M_TAGQ_CH1_TAGS_USED    0xffU
+#define V_TAGQ_CH1_TAGS_USED(x) ((x) << S_TAGQ_CH1_TAGS_USED)
+#define G_TAGQ_CH1_TAGS_USED(x) (((x) >> S_TAGQ_CH1_TAGS_USED) & M_TAGQ_CH1_TAGS_USED)
+
+#define S_REQ_CH1_DATA_EMPTY    10
+#define V_REQ_CH1_DATA_EMPTY(x) ((x) << S_REQ_CH1_DATA_EMPTY)
+#define F_REQ_CH1_DATA_EMPTY    V_REQ_CH1_DATA_EMPTY(1U)
+
+#define S_RDQ_CH1_REQ_EMPTY    9
+#define V_RDQ_CH1_REQ_EMPTY(x) ((x) << S_RDQ_CH1_REQ_EMPTY)
+#define F_RDQ_CH1_REQ_EMPTY    V_RDQ_CH1_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_CMD    V_REQ_CTL_RD_CH1_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH1_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X4 0x4
+
+#define S_TAGQ_CH2_TAGS_USED    11
+#define M_TAGQ_CH2_TAGS_USED    0xffU
+#define V_TAGQ_CH2_TAGS_USED(x) ((x) << S_TAGQ_CH2_TAGS_USED)
+#define G_TAGQ_CH2_TAGS_USED(x) (((x) >> S_TAGQ_CH2_TAGS_USED) & M_TAGQ_CH2_TAGS_USED)
+
+#define S_REQ_CH2_DATA_EMPTY    10
+#define V_REQ_CH2_DATA_EMPTY(x) ((x) << S_REQ_CH2_DATA_EMPTY)
+#define F_REQ_CH2_DATA_EMPTY    V_REQ_CH2_DATA_EMPTY(1U)
+
+#define S_RDQ_CH2_REQ_EMPTY    9
+#define V_RDQ_CH2_REQ_EMPTY(x) ((x) << S_RDQ_CH2_REQ_EMPTY)
+#define F_RDQ_CH2_REQ_EMPTY    V_RDQ_CH2_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_CMD    V_REQ_CTL_RD_CH2_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH2_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X5 0x5
+
+#define S_TAGQ_CH3_TAGS_USED    11
+#define M_TAGQ_CH3_TAGS_USED    0xffU
+#define V_TAGQ_CH3_TAGS_USED(x) ((x) << S_TAGQ_CH3_TAGS_USED)
+#define G_TAGQ_CH3_TAGS_USED(x) (((x) >> S_TAGQ_CH3_TAGS_USED) & M_TAGQ_CH3_TAGS_USED)
+
+#define S_REQ_CH3_DATA_EMPTY    10
+#define V_REQ_CH3_DATA_EMPTY(x) ((x) << S_REQ_CH3_DATA_EMPTY)
+#define F_REQ_CH3_DATA_EMPTY    V_REQ_CH3_DATA_EMPTY(1U)
+
+#define S_RDQ_CH3_REQ_EMPTY    9
+#define V_RDQ_CH3_REQ_EMPTY(x) ((x) << S_RDQ_CH3_REQ_EMPTY)
+#define F_RDQ_CH3_REQ_EMPTY    V_RDQ_CH3_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_CMD    V_REQ_CTL_RD_CH3_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH3_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X6 0x6
+
+#define S_TAGQ_CH4_TAGS_USED    11
+#define M_TAGQ_CH4_TAGS_USED    0xffU
+#define V_TAGQ_CH4_TAGS_USED(x) ((x) << S_TAGQ_CH4_TAGS_USED)
+#define G_TAGQ_CH4_TAGS_USED(x) (((x) >> S_TAGQ_CH4_TAGS_USED) & M_TAGQ_CH4_TAGS_USED)
+
+#define S_REQ_CH4_DATA_EMPTY    10
+#define V_REQ_CH4_DATA_EMPTY(x) ((x) << S_REQ_CH4_DATA_EMPTY)
+#define F_REQ_CH4_DATA_EMPTY    V_REQ_CH4_DATA_EMPTY(1U)
+
+#define S_RDQ_CH4_REQ_EMPTY    9
+#define V_RDQ_CH4_REQ_EMPTY(x) ((x) << S_RDQ_CH4_REQ_EMPTY)
+#define F_RDQ_CH4_REQ_EMPTY    V_RDQ_CH4_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_CMD    V_REQ_CTL_RD_CH4_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH4_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X7 0x7
+
+#define S_TAGQ_CH5_TAGS_USED    11
+#define M_TAGQ_CH5_TAGS_USED    0xffU
+#define V_TAGQ_CH5_TAGS_USED(x) ((x) << S_TAGQ_CH5_TAGS_USED)
+#define G_TAGQ_CH5_TAGS_USED(x) (((x) >> S_TAGQ_CH5_TAGS_USED) & M_TAGQ_CH5_TAGS_USED)
+
+#define S_REQ_CH5_DATA_EMPTY    10
+#define V_REQ_CH5_DATA_EMPTY(x) ((x) << S_REQ_CH5_DATA_EMPTY)
+#define F_REQ_CH5_DATA_EMPTY    V_REQ_CH5_DATA_EMPTY(1U)
+
+#define S_RDQ_CH5_REQ_EMPTY    9
+#define V_RDQ_CH5_REQ_EMPTY(x) ((x) << S_RDQ_CH5_REQ_EMPTY)
+#define F_RDQ_CH5_REQ_EMPTY    V_RDQ_CH5_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_CMD    V_REQ_CTL_RD_CH5_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH5_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X8 0x8
+
+#define S_TAGQ_CH6_TAGS_USED    11
+#define M_TAGQ_CH6_TAGS_USED    0xffU
+#define V_TAGQ_CH6_TAGS_USED(x) ((x) << S_TAGQ_CH6_TAGS_USED)
+#define G_TAGQ_CH6_TAGS_USED(x) (((x) >> S_TAGQ_CH6_TAGS_USED) & M_TAGQ_CH6_TAGS_USED)
+
+#define S_REQ_CH6_DATA_EMPTY    10
+#define V_REQ_CH6_DATA_EMPTY(x) ((x) << S_REQ_CH6_DATA_EMPTY)
+#define F_REQ_CH6_DATA_EMPTY    V_REQ_CH6_DATA_EMPTY(1U)
+
+#define S_RDQ_CH6_REQ_EMPTY    9
+#define V_RDQ_CH6_REQ_EMPTY(x) ((x) << S_RDQ_CH6_REQ_EMPTY)
+#define F_RDQ_CH6_REQ_EMPTY    V_RDQ_CH6_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_CMD    V_REQ_CTL_RD_CH6_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH6_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X9 0x9
+
+#define S_TAGQ_CH7_TAGS_USED    11
+#define M_TAGQ_CH7_TAGS_USED    0xffU
+#define V_TAGQ_CH7_TAGS_USED(x) ((x) << S_TAGQ_CH7_TAGS_USED)
+#define G_TAGQ_CH7_TAGS_USED(x) (((x) >> S_TAGQ_CH7_TAGS_USED) & M_TAGQ_CH7_TAGS_USED)
+
+#define S_REQ_CH7_DATA_EMPTY    10
+#define V_REQ_CH7_DATA_EMPTY(x) ((x) << S_REQ_CH7_DATA_EMPTY)
+#define F_REQ_CH7_DATA_EMPTY    V_REQ_CH7_DATA_EMPTY(1U)
+
+#define S_RDQ_CH7_REQ_EMPTY    9
+#define V_RDQ_CH7_REQ_EMPTY(x) ((x) << S_RDQ_CH7_REQ_EMPTY)
+#define F_RDQ_CH7_REQ_EMPTY    V_RDQ_CH7_REQ_EMPTY(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ    8
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ    V_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_CMD    7
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_CMD)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_CMD    V_REQ_CTL_RD_CH7_WAIT_FOR_CMD(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM    6
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM    V_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_RDQ    5
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_RDQ)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_RDQ    V_REQ_CTL_RD_CH7_WAIT_FOR_RDQ(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO    4
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO    V_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO(1U)
+
+#define S_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED    V_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED    V_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE    V_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA    V_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0XA 0xa
+
+#define S_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM    27
+#define V_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM)
+#define F_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM    V_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM(1U)
+
+#define S_REQ_CTL_WR_CH0_SEQNUM    19
+#define M_REQ_CTL_WR_CH0_SEQNUM    0xffU
+#define V_REQ_CTL_WR_CH0_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH0_SEQNUM)
+#define G_REQ_CTL_WR_CH0_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH0_SEQNUM) & M_REQ_CTL_WR_CH0_SEQNUM)
+
+#define S_REQ_CTL_RD_CH0_SEQNUM    11
+#define M_REQ_CTL_RD_CH0_SEQNUM    0xffU
+#define V_REQ_CTL_RD_CH0_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH0_SEQNUM)
+#define G_REQ_CTL_RD_CH0_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH0_SEQNUM) & M_REQ_CTL_RD_CH0_SEQNUM)
+
+#define S_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO    4
+#define V_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO)
+#define F_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO    V_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO(1U)
+
+#define S_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED    V_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED    V_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE    V_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA    V_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0XB 0xb
+
+#define S_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM    27
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM)
+#define F_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM    V_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM(1U)
+
+#define S_REQ_CTL_WR_CH1_SEQNUM    19
+#define M_REQ_CTL_WR_CH1_SEQNUM    0xffU
+#define V_REQ_CTL_WR_CH1_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH1_SEQNUM)
+#define G_REQ_CTL_WR_CH1_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH1_SEQNUM) & M_REQ_CTL_WR_CH1_SEQNUM)
+
+#define S_REQ_CTL_RD_CH1_SEQNUM    11
+#define M_REQ_CTL_RD_CH1_SEQNUM    0xffU
+#define V_REQ_CTL_RD_CH1_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH1_SEQNUM)
+#define G_REQ_CTL_RD_CH1_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH1_SEQNUM) & M_REQ_CTL_RD_CH1_SEQNUM)
+
+#define S_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO    4
+#define V_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO)
+#define F_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO    V_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO(1U)
+
+#define S_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED    V_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED    V_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE    V_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA    V_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0XC 0xc
+
+#define S_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM    27
+#define V_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM)
+#define F_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM    V_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM(1U)
+
+#define S_REQ_CTL_WR_CH2_SEQNUM    19
+#define M_REQ_CTL_WR_CH2_SEQNUM    0xffU
+#define V_REQ_CTL_WR_CH2_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH2_SEQNUM)
+#define G_REQ_CTL_WR_CH2_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH2_SEQNUM) & M_REQ_CTL_WR_CH2_SEQNUM)
+
+#define S_REQ_CTL_RD_CH2_SEQNUM    11
+#define M_REQ_CTL_RD_CH2_SEQNUM    0xffU
+#define V_REQ_CTL_RD_CH2_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH2_SEQNUM)
+#define G_REQ_CTL_RD_CH2_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH2_SEQNUM) & M_REQ_CTL_RD_CH2_SEQNUM)
+
+#define S_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO    4
+#define V_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO)
+#define F_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO    V_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO(1U)
+
+#define S_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED    V_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED    V_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE    V_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA    V_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0XD 0xd
+
+#define S_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM    27
+#define V_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM)
+#define F_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM    V_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM(1U)
+
+#define S_REQ_CTL_WR_CH3_SEQNUM    19
+#define M_REQ_CTL_WR_CH3_SEQNUM    0xffU
+#define V_REQ_CTL_WR_CH3_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH3_SEQNUM)
+#define G_REQ_CTL_WR_CH3_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH3_SEQNUM) & M_REQ_CTL_WR_CH3_SEQNUM)
+
+#define S_REQ_CTL_RD_CH3_SEQNUM    11
+#define M_REQ_CTL_RD_CH3_SEQNUM    0xffU
+#define V_REQ_CTL_RD_CH3_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH3_SEQNUM)
+#define G_REQ_CTL_RD_CH3_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH3_SEQNUM) & M_REQ_CTL_RD_CH3_SEQNUM)
+
+#define S_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO    4
+#define V_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO)
+#define F_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO    V_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO(1U)
+
+#define S_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED    V_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED    V_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE    V_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA    V_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0XE 0xe
+
+#define S_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM    27
+#define V_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM)
+#define F_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM    V_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM(1U)
+
+#define S_REQ_CTL_WR_CH4_SEQNUM    19
+#define M_REQ_CTL_WR_CH4_SEQNUM    0xffU
+#define V_REQ_CTL_WR_CH4_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH4_SEQNUM)
+#define G_REQ_CTL_WR_CH4_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH4_SEQNUM) & M_REQ_CTL_WR_CH4_SEQNUM)
+
+#define S_REQ_CTL_RD_CH4_SEQNUM    11
+#define M_REQ_CTL_RD_CH4_SEQNUM    0xffU
+#define V_REQ_CTL_RD_CH4_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH4_SEQNUM)
+#define G_REQ_CTL_RD_CH4_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH4_SEQNUM) & M_REQ_CTL_RD_CH4_SEQNUM)
+
+#define S_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO    4
+#define V_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO)
+#define F_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO    V_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO(1U)
+
+#define S_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED    3
+#define V_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED)
+#define F_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED    V_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED    2
+#define V_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED)
+#define F_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED    V_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED(1U)
+
+#define S_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE    1
+#define V_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE)
+#define F_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE    V_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE(1U)
+
+#define S_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA    0
+#define V_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA)
+#define F_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA    V_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA(1U)
+
+#define A_PCIE_PDEBUG_REG_0XF 0xf
+#define A_PCIE_PDEBUG_REG_0X10 0x10
+
+#define S_PIPE0_TX3_DATAK_0    31
+#define V_PIPE0_TX3_DATAK_0(x) ((x) << S_PIPE0_TX3_DATAK_0)
+#define F_PIPE0_TX3_DATAK_0    V_PIPE0_TX3_DATAK_0(1U)
+
+#define S_PIPE0_TX3_DATA_6_0    24
+#define M_PIPE0_TX3_DATA_6_0    0x7fU
+#define V_PIPE0_TX3_DATA_6_0(x) ((x) << S_PIPE0_TX3_DATA_6_0)
+#define G_PIPE0_TX3_DATA_6_0(x) (((x) >> S_PIPE0_TX3_DATA_6_0) & M_PIPE0_TX3_DATA_6_0)
+
+#define S_PIPE0_TX2_DATA_7_0    16
+#define M_PIPE0_TX2_DATA_7_0    0xffU
+#define V_PIPE0_TX2_DATA_7_0(x) ((x) << S_PIPE0_TX2_DATA_7_0)
+#define G_PIPE0_TX2_DATA_7_0(x) (((x) >> S_PIPE0_TX2_DATA_7_0) & M_PIPE0_TX2_DATA_7_0)
+
+#define S_PIPE0_TX1_DATA_7_0    8
+#define M_PIPE0_TX1_DATA_7_0    0xffU
+#define V_PIPE0_TX1_DATA_7_0(x) ((x) << S_PIPE0_TX1_DATA_7_0)
+#define G_PIPE0_TX1_DATA_7_0(x) (((x) >> S_PIPE0_TX1_DATA_7_0) & M_PIPE0_TX1_DATA_7_0)
+
+#define S_PIPE0_TX0_DATAK_0    7
+#define V_PIPE0_TX0_DATAK_0(x) ((x) << S_PIPE0_TX0_DATAK_0)
+#define F_PIPE0_TX0_DATAK_0    V_PIPE0_TX0_DATAK_0(1U)
+
+#define S_PIPE0_TX0_DATA_6_0    0
+#define M_PIPE0_TX0_DATA_6_0    0x7fU
+#define V_PIPE0_TX0_DATA_6_0(x) ((x) << S_PIPE0_TX0_DATA_6_0)
+#define G_PIPE0_TX0_DATA_6_0(x) (((x) >> S_PIPE0_TX0_DATA_6_0) & M_PIPE0_TX0_DATA_6_0)
+
+#define A_PCIE_PDEBUG_REG_0X11 0x11
+
+#define S_PIPE0_TX3_DATAK_1    31
+#define V_PIPE0_TX3_DATAK_1(x) ((x) << S_PIPE0_TX3_DATAK_1)
+#define F_PIPE0_TX3_DATAK_1    V_PIPE0_TX3_DATAK_1(1U)
+
+#define S_PIPE0_TX3_DATA_14_8    24
+#define M_PIPE0_TX3_DATA_14_8    0x7fU
+#define V_PIPE0_TX3_DATA_14_8(x) ((x) << S_PIPE0_TX3_DATA_14_8)
+#define G_PIPE0_TX3_DATA_14_8(x) (((x) >> S_PIPE0_TX3_DATA_14_8) & M_PIPE0_TX3_DATA_14_8)
+
+#define S_PIPE0_TX2_DATA_15_8    16
+#define M_PIPE0_TX2_DATA_15_8    0xffU
+#define V_PIPE0_TX2_DATA_15_8(x) ((x) << S_PIPE0_TX2_DATA_15_8)
+#define G_PIPE0_TX2_DATA_15_8(x) (((x) >> S_PIPE0_TX2_DATA_15_8) & M_PIPE0_TX2_DATA_15_8)
+
+#define S_PIPE0_TX1_DATA_15_8    8
+#define M_PIPE0_TX1_DATA_15_8    0xffU
+#define V_PIPE0_TX1_DATA_15_8(x) ((x) << S_PIPE0_TX1_DATA_15_8)
+#define G_PIPE0_TX1_DATA_15_8(x) (((x) >> S_PIPE0_TX1_DATA_15_8) & M_PIPE0_TX1_DATA_15_8)
+
+#define S_PIPE0_TX0_DATAK_1    7
+#define V_PIPE0_TX0_DATAK_1(x) ((x) << S_PIPE0_TX0_DATAK_1)
+#define F_PIPE0_TX0_DATAK_1    V_PIPE0_TX0_DATAK_1(1U)
+
+#define S_PIPE0_TX0_DATA_14_8    0
+#define M_PIPE0_TX0_DATA_14_8    0x7fU
+#define V_PIPE0_TX0_DATA_14_8(x) ((x) << S_PIPE0_TX0_DATA_14_8)
+#define G_PIPE0_TX0_DATA_14_8(x) (((x) >> S_PIPE0_TX0_DATA_14_8) & M_PIPE0_TX0_DATA_14_8)
+
+#define A_PCIE_PDEBUG_REG_0X12 0x12
+
+#define S_PIPE0_TX7_DATAK_0    31
+#define V_PIPE0_TX7_DATAK_0(x) ((x) << S_PIPE0_TX7_DATAK_0)
+#define F_PIPE0_TX7_DATAK_0    V_PIPE0_TX7_DATAK_0(1U)
+
+#define S_PIPE0_TX7_DATA_6_0    24
+#define M_PIPE0_TX7_DATA_6_0    0x7fU
+#define V_PIPE0_TX7_DATA_6_0(x) ((x) << S_PIPE0_TX7_DATA_6_0)
+#define G_PIPE0_TX7_DATA_6_0(x) (((x) >> S_PIPE0_TX7_DATA_6_0) & M_PIPE0_TX7_DATA_6_0)
+
+#define S_PIPE0_TX6_DATA_7_0    16
+#define M_PIPE0_TX6_DATA_7_0    0xffU
+#define V_PIPE0_TX6_DATA_7_0(x) ((x) << S_PIPE0_TX6_DATA_7_0)
+#define G_PIPE0_TX6_DATA_7_0(x) (((x) >> S_PIPE0_TX6_DATA_7_0) & M_PIPE0_TX6_DATA_7_0)
+
+#define S_PIPE0_TX5_DATA_7_0    8
+#define M_PIPE0_TX5_DATA_7_0    0xffU
+#define V_PIPE0_TX5_DATA_7_0(x) ((x) << S_PIPE0_TX5_DATA_7_0)
+#define G_PIPE0_TX5_DATA_7_0(x) (((x) >> S_PIPE0_TX5_DATA_7_0) & M_PIPE0_TX5_DATA_7_0)
+
+#define S_PIPE0_TX4_DATAK_0    7
+#define V_PIPE0_TX4_DATAK_0(x) ((x) << S_PIPE0_TX4_DATAK_0)
+#define F_PIPE0_TX4_DATAK_0    V_PIPE0_TX4_DATAK_0(1U)
+
+#define S_PIPE0_TX4_DATA_6_0    0
+#define M_PIPE0_TX4_DATA_6_0    0x7fU
+#define V_PIPE0_TX4_DATA_6_0(x) ((x) << S_PIPE0_TX4_DATA_6_0)
+#define G_PIPE0_TX4_DATA_6_0(x) (((x) >> S_PIPE0_TX4_DATA_6_0) & M_PIPE0_TX4_DATA_6_0)
+
+#define A_PCIE_PDEBUG_REG_0X13 0x13
+
+#define S_PIPE0_TX7_DATAK_1    31
+#define V_PIPE0_TX7_DATAK_1(x) ((x) << S_PIPE0_TX7_DATAK_1)
+#define F_PIPE0_TX7_DATAK_1    V_PIPE0_TX7_DATAK_1(1U)
+
+#define S_PIPE0_TX7_DATA_14_8    24
+#define M_PIPE0_TX7_DATA_14_8    0x7fU
+#define V_PIPE0_TX7_DATA_14_8(x) ((x) << S_PIPE0_TX7_DATA_14_8)
+#define G_PIPE0_TX7_DATA_14_8(x) (((x) >> S_PIPE0_TX7_DATA_14_8) & M_PIPE0_TX7_DATA_14_8)
+
+#define S_PIPE0_TX6_DATA_15_8    16
+#define M_PIPE0_TX6_DATA_15_8    0xffU
+#define V_PIPE0_TX6_DATA_15_8(x) ((x) << S_PIPE0_TX6_DATA_15_8)
+#define G_PIPE0_TX6_DATA_15_8(x) (((x) >> S_PIPE0_TX6_DATA_15_8) & M_PIPE0_TX6_DATA_15_8)
+
+#define S_PIPE0_TX5_DATA_15_8    8
+#define M_PIPE0_TX5_DATA_15_8    0xffU
+#define V_PIPE0_TX5_DATA_15_8(x) ((x) << S_PIPE0_TX5_DATA_15_8)
+#define G_PIPE0_TX5_DATA_15_8(x) (((x) >> S_PIPE0_TX5_DATA_15_8) & M_PIPE0_TX5_DATA_15_8)
+
+#define S_PIPE0_TX4_DATAK_1    7
+#define V_PIPE0_TX4_DATAK_1(x) ((x) << S_PIPE0_TX4_DATAK_1)
+#define F_PIPE0_TX4_DATAK_1    V_PIPE0_TX4_DATAK_1(1U)
+
+#define S_PIPE0_TX4_DATA_14_8    0
+#define M_PIPE0_TX4_DATA_14_8    0x7fU
+#define V_PIPE0_TX4_DATA_14_8(x) ((x) << S_PIPE0_TX4_DATA_14_8)
+#define G_PIPE0_TX4_DATA_14_8(x) (((x) >> S_PIPE0_TX4_DATA_14_8) & M_PIPE0_TX4_DATA_14_8)
+
+#define A_PCIE_PDEBUG_REG_0X14 0x14
+
+#define S_PIPE0_RX3_VALID_14    31
+#define V_PIPE0_RX3_VALID_14(x) ((x) << S_PIPE0_RX3_VALID_14)
+#define F_PIPE0_RX3_VALID_14    V_PIPE0_RX3_VALID_14(1U)
+
+#define S_PIPE0_RX3_VALID2_14    24
+#define M_PIPE0_RX3_VALID2_14    0x7fU
+#define V_PIPE0_RX3_VALID2_14(x) ((x) << S_PIPE0_RX3_VALID2_14)
+#define G_PIPE0_RX3_VALID2_14(x) (((x) >> S_PIPE0_RX3_VALID2_14) & M_PIPE0_RX3_VALID2_14)
+
+#define S_PIPE0_RX2_VALID_14    16
+#define M_PIPE0_RX2_VALID_14    0xffU
+#define V_PIPE0_RX2_VALID_14(x) ((x) << S_PIPE0_RX2_VALID_14)
+#define G_PIPE0_RX2_VALID_14(x) (((x) >> S_PIPE0_RX2_VALID_14) & M_PIPE0_RX2_VALID_14)
+
+#define S_PIPE0_RX1_VALID_14    8
+#define M_PIPE0_RX1_VALID_14    0xffU
+#define V_PIPE0_RX1_VALID_14(x) ((x) << S_PIPE0_RX1_VALID_14)
+#define G_PIPE0_RX1_VALID_14(x) (((x) >> S_PIPE0_RX1_VALID_14) & M_PIPE0_RX1_VALID_14)
+
+#define S_PIPE0_RX0_VALID_14    7
+#define V_PIPE0_RX0_VALID_14(x) ((x) << S_PIPE0_RX0_VALID_14)
+#define F_PIPE0_RX0_VALID_14    V_PIPE0_RX0_VALID_14(1U)
+
+#define S_PIPE0_RX0_VALID2_14    0
+#define M_PIPE0_RX0_VALID2_14    0x7fU
+#define V_PIPE0_RX0_VALID2_14(x) ((x) << S_PIPE0_RX0_VALID2_14)
+#define G_PIPE0_RX0_VALID2_14(x) (((x) >> S_PIPE0_RX0_VALID2_14) & M_PIPE0_RX0_VALID2_14)
+
+#define A_PCIE_PDEBUG_REG_0X15 0x15
+
+#define S_PIPE0_RX3_VALID_15    31
+#define V_PIPE0_RX3_VALID_15(x) ((x) << S_PIPE0_RX3_VALID_15)
+#define F_PIPE0_RX3_VALID_15    V_PIPE0_RX3_VALID_15(1U)
+
+#define S_PIPE0_RX3_VALID2_15    24
+#define M_PIPE0_RX3_VALID2_15    0x7fU
+#define V_PIPE0_RX3_VALID2_15(x) ((x) << S_PIPE0_RX3_VALID2_15)
+#define G_PIPE0_RX3_VALID2_15(x) (((x) >> S_PIPE0_RX3_VALID2_15) & M_PIPE0_RX3_VALID2_15)
+
+#define S_PIPE0_RX2_VALID_15    16
+#define M_PIPE0_RX2_VALID_15    0xffU
+#define V_PIPE0_RX2_VALID_15(x) ((x) << S_PIPE0_RX2_VALID_15)
+#define G_PIPE0_RX2_VALID_15(x) (((x) >> S_PIPE0_RX2_VALID_15) & M_PIPE0_RX2_VALID_15)
+
+#define S_PIPE0_RX1_VALID_15    8
+#define M_PIPE0_RX1_VALID_15    0xffU
+#define V_PIPE0_RX1_VALID_15(x) ((x) << S_PIPE0_RX1_VALID_15)
+#define G_PIPE0_RX1_VALID_15(x) (((x) >> S_PIPE0_RX1_VALID_15) & M_PIPE0_RX1_VALID_15)
+
+#define S_PIPE0_RX0_VALID_15    7
+#define V_PIPE0_RX0_VALID_15(x) ((x) << S_PIPE0_RX0_VALID_15)
+#define F_PIPE0_RX0_VALID_15    V_PIPE0_RX0_VALID_15(1U)
+
+#define S_PIPE0_RX0_VALID2_15    0
+#define M_PIPE0_RX0_VALID2_15    0x7fU
+#define V_PIPE0_RX0_VALID2_15(x) ((x) << S_PIPE0_RX0_VALID2_15)
+#define G_PIPE0_RX0_VALID2_15(x) (((x) >> S_PIPE0_RX0_VALID2_15) & M_PIPE0_RX0_VALID2_15)
+
+#define A_PCIE_PDEBUG_REG_0X16 0x16
+
+#define S_PIPE0_RX7_VALID_16    31
+#define V_PIPE0_RX7_VALID_16(x) ((x) << S_PIPE0_RX7_VALID_16)
+#define F_PIPE0_RX7_VALID_16    V_PIPE0_RX7_VALID_16(1U)
+
+#define S_PIPE0_RX7_VALID2_16    24
+#define M_PIPE0_RX7_VALID2_16    0x7fU
+#define V_PIPE0_RX7_VALID2_16(x) ((x) << S_PIPE0_RX7_VALID2_16)
+#define G_PIPE0_RX7_VALID2_16(x) (((x) >> S_PIPE0_RX7_VALID2_16) & M_PIPE0_RX7_VALID2_16)
+
+#define S_PIPE0_RX6_VALID_16    16
+#define M_PIPE0_RX6_VALID_16    0xffU
+#define V_PIPE0_RX6_VALID_16(x) ((x) << S_PIPE0_RX6_VALID_16)
+#define G_PIPE0_RX6_VALID_16(x) (((x) >> S_PIPE0_RX6_VALID_16) & M_PIPE0_RX6_VALID_16)
+
+#define S_PIPE0_RX5_VALID_16    8
+#define M_PIPE0_RX5_VALID_16    0xffU
+#define V_PIPE0_RX5_VALID_16(x) ((x) << S_PIPE0_RX5_VALID_16)
+#define G_PIPE0_RX5_VALID_16(x) (((x) >> S_PIPE0_RX5_VALID_16) & M_PIPE0_RX5_VALID_16)
+
+#define S_PIPE0_RX4_VALID_16    7
+#define V_PIPE0_RX4_VALID_16(x) ((x) << S_PIPE0_RX4_VALID_16)
+#define F_PIPE0_RX4_VALID_16    V_PIPE0_RX4_VALID_16(1U)
+
+#define S_PIPE0_RX4_VALID2_16    0
+#define M_PIPE0_RX4_VALID2_16    0x7fU
+#define V_PIPE0_RX4_VALID2_16(x) ((x) << S_PIPE0_RX4_VALID2_16)
+#define G_PIPE0_RX4_VALID2_16(x) (((x) >> S_PIPE0_RX4_VALID2_16) & M_PIPE0_RX4_VALID2_16)
+
+#define A_PCIE_PDEBUG_REG_0X17 0x17
+
+#define S_PIPE0_RX7_VALID_17    31
+#define V_PIPE0_RX7_VALID_17(x) ((x) << S_PIPE0_RX7_VALID_17)
+#define F_PIPE0_RX7_VALID_17    V_PIPE0_RX7_VALID_17(1U)
+
+#define S_PIPE0_RX7_VALID2_17    24
+#define M_PIPE0_RX7_VALID2_17    0x7fU
+#define V_PIPE0_RX7_VALID2_17(x) ((x) << S_PIPE0_RX7_VALID2_17)
+#define G_PIPE0_RX7_VALID2_17(x) (((x) >> S_PIPE0_RX7_VALID2_17) & M_PIPE0_RX7_VALID2_17)
+
+#define S_PIPE0_RX6_VALID_17    16
+#define M_PIPE0_RX6_VALID_17    0xffU
+#define V_PIPE0_RX6_VALID_17(x) ((x) << S_PIPE0_RX6_VALID_17)
+#define G_PIPE0_RX6_VALID_17(x) (((x) >> S_PIPE0_RX6_VALID_17) & M_PIPE0_RX6_VALID_17)
+
+#define S_PIPE0_RX5_VALID_17    8
+#define M_PIPE0_RX5_VALID_17    0xffU
+#define V_PIPE0_RX5_VALID_17(x) ((x) << S_PIPE0_RX5_VALID_17)
+#define G_PIPE0_RX5_VALID_17(x) (((x) >> S_PIPE0_RX5_VALID_17) & M_PIPE0_RX5_VALID_17)
+
+#define S_PIPE0_RX4_VALID_17    7
+#define V_PIPE0_RX4_VALID_17(x) ((x) << S_PIPE0_RX4_VALID_17)
+#define F_PIPE0_RX4_VALID_17    V_PIPE0_RX4_VALID_17(1U)
+
+#define S_PIPE0_RX4_VALID2_17    0
+#define M_PIPE0_RX4_VALID2_17    0x7fU
+#define V_PIPE0_RX4_VALID2_17(x) ((x) << S_PIPE0_RX4_VALID2_17)
+#define G_PIPE0_RX4_VALID2_17(x) (((x) >> S_PIPE0_RX4_VALID2_17) & M_PIPE0_RX4_VALID2_17)
+
+#define A_PCIE_PDEBUG_REG_0X18 0x18
+
+#define S_PIPE0_RX7_POLARITY    31
+#define V_PIPE0_RX7_POLARITY(x) ((x) << S_PIPE0_RX7_POLARITY)
+#define F_PIPE0_RX7_POLARITY    V_PIPE0_RX7_POLARITY(1U)
+
+#define S_PIPE0_RX7_STATUS    28
+#define M_PIPE0_RX7_STATUS    0x7U
+#define V_PIPE0_RX7_STATUS(x) ((x) << S_PIPE0_RX7_STATUS)
+#define G_PIPE0_RX7_STATUS(x) (((x) >> S_PIPE0_RX7_STATUS) & M_PIPE0_RX7_STATUS)
+
+#define S_PIPE0_RX6_POLARITY    27
+#define V_PIPE0_RX6_POLARITY(x) ((x) << S_PIPE0_RX6_POLARITY)
+#define F_PIPE0_RX6_POLARITY    V_PIPE0_RX6_POLARITY(1U)
+
+#define S_PIPE0_RX6_STATUS    24
+#define M_PIPE0_RX6_STATUS    0x7U
+#define V_PIPE0_RX6_STATUS(x) ((x) << S_PIPE0_RX6_STATUS)
+#define G_PIPE0_RX6_STATUS(x) (((x) >> S_PIPE0_RX6_STATUS) & M_PIPE0_RX6_STATUS)
+
+#define S_PIPE0_RX5_POLARITY    23
+#define V_PIPE0_RX5_POLARITY(x) ((x) << S_PIPE0_RX5_POLARITY)
+#define F_PIPE0_RX5_POLARITY    V_PIPE0_RX5_POLARITY(1U)
+
+#define S_PIPE0_RX5_STATUS    20
+#define M_PIPE0_RX5_STATUS    0x7U
+#define V_PIPE0_RX5_STATUS(x) ((x) << S_PIPE0_RX5_STATUS)
+#define G_PIPE0_RX5_STATUS(x) (((x) >> S_PIPE0_RX5_STATUS) & M_PIPE0_RX5_STATUS)
+
+#define S_PIPE0_RX4_POLARITY    19
+#define V_PIPE0_RX4_POLARITY(x) ((x) << S_PIPE0_RX4_POLARITY)
+#define F_PIPE0_RX4_POLARITY    V_PIPE0_RX4_POLARITY(1U)
+
+#define S_PIPE0_RX4_STATUS    16
+#define M_PIPE0_RX4_STATUS    0x7U
+#define V_PIPE0_RX4_STATUS(x) ((x) << S_PIPE0_RX4_STATUS)
+#define G_PIPE0_RX4_STATUS(x) (((x) >> S_PIPE0_RX4_STATUS) & M_PIPE0_RX4_STATUS)
+
+#define S_PIPE0_RX3_POLARITY    15
+#define V_PIPE0_RX3_POLARITY(x) ((x) << S_PIPE0_RX3_POLARITY)
+#define F_PIPE0_RX3_POLARITY    V_PIPE0_RX3_POLARITY(1U)
+
+#define S_PIPE0_RX3_STATUS    12
+#define M_PIPE0_RX3_STATUS    0x7U
+#define V_PIPE0_RX3_STATUS(x) ((x) << S_PIPE0_RX3_STATUS)
+#define G_PIPE0_RX3_STATUS(x) (((x) >> S_PIPE0_RX3_STATUS) & M_PIPE0_RX3_STATUS)
+
+#define S_PIPE0_RX2_POLARITY    11
+#define V_PIPE0_RX2_POLARITY(x) ((x) << S_PIPE0_RX2_POLARITY)
+#define F_PIPE0_RX2_POLARITY    V_PIPE0_RX2_POLARITY(1U)
+
+#define S_PIPE0_RX2_STATUS    8
+#define M_PIPE0_RX2_STATUS    0x7U
+#define V_PIPE0_RX2_STATUS(x) ((x) << S_PIPE0_RX2_STATUS)
+#define G_PIPE0_RX2_STATUS(x) (((x) >> S_PIPE0_RX2_STATUS) & M_PIPE0_RX2_STATUS)
+
+#define S_PIPE0_RX1_POLARITY    7
+#define V_PIPE0_RX1_POLARITY(x) ((x) << S_PIPE0_RX1_POLARITY)
+#define F_PIPE0_RX1_POLARITY    V_PIPE0_RX1_POLARITY(1U)
+
+#define S_PIPE0_RX1_STATUS    4
+#define M_PIPE0_RX1_STATUS    0x7U
+#define V_PIPE0_RX1_STATUS(x) ((x) << S_PIPE0_RX1_STATUS)
+#define G_PIPE0_RX1_STATUS(x) (((x) >> S_PIPE0_RX1_STATUS) & M_PIPE0_RX1_STATUS)
+
+#define S_PIPE0_RX0_POLARITY    3
+#define V_PIPE0_RX0_POLARITY(x) ((x) << S_PIPE0_RX0_POLARITY)
+#define F_PIPE0_RX0_POLARITY    V_PIPE0_RX0_POLARITY(1U)
+
+#define S_PIPE0_RX0_STATUS    0
+#define M_PIPE0_RX0_STATUS    0x7U
+#define V_PIPE0_RX0_STATUS(x) ((x) << S_PIPE0_RX0_STATUS)
+#define G_PIPE0_RX0_STATUS(x) (((x) >> S_PIPE0_RX0_STATUS) & M_PIPE0_RX0_STATUS)
+
+#define A_PCIE_PDEBUG_REG_0X19 0x19
+
+#define S_PIPE0_TX7_COMPLIANCE    31
+#define V_PIPE0_TX7_COMPLIANCE(x) ((x) << S_PIPE0_TX7_COMPLIANCE)
+#define F_PIPE0_TX7_COMPLIANCE    V_PIPE0_TX7_COMPLIANCE(1U)
+
+#define S_PIPE0_TX6_COMPLIANCE    30
+#define V_PIPE0_TX6_COMPLIANCE(x) ((x) << S_PIPE0_TX6_COMPLIANCE)
+#define F_PIPE0_TX6_COMPLIANCE    V_PIPE0_TX6_COMPLIANCE(1U)
+
+#define S_PIPE0_TX5_COMPLIANCE    29
+#define V_PIPE0_TX5_COMPLIANCE(x) ((x) << S_PIPE0_TX5_COMPLIANCE)
+#define F_PIPE0_TX5_COMPLIANCE    V_PIPE0_TX5_COMPLIANCE(1U)
+
+#define S_PIPE0_TX4_COMPLIANCE    28
+#define V_PIPE0_TX4_COMPLIANCE(x) ((x) << S_PIPE0_TX4_COMPLIANCE)
+#define F_PIPE0_TX4_COMPLIANCE    V_PIPE0_TX4_COMPLIANCE(1U)
+
+#define S_PIPE0_TX3_COMPLIANCE    27
+#define V_PIPE0_TX3_COMPLIANCE(x) ((x) << S_PIPE0_TX3_COMPLIANCE)
+#define F_PIPE0_TX3_COMPLIANCE    V_PIPE0_TX3_COMPLIANCE(1U)
+
+#define S_PIPE0_TX2_COMPLIANCE    26
+#define V_PIPE0_TX2_COMPLIANCE(x) ((x) << S_PIPE0_TX2_COMPLIANCE)
+#define F_PIPE0_TX2_COMPLIANCE    V_PIPE0_TX2_COMPLIANCE(1U)
+
+#define S_PIPE0_TX1_COMPLIANCE    25
+#define V_PIPE0_TX1_COMPLIANCE(x) ((x) << S_PIPE0_TX1_COMPLIANCE)
+#define F_PIPE0_TX1_COMPLIANCE    V_PIPE0_TX1_COMPLIANCE(1U)
+
+#define S_PIPE0_TX0_COMPLIANCE    24
+#define V_PIPE0_TX0_COMPLIANCE(x) ((x) << S_PIPE0_TX0_COMPLIANCE)
+#define F_PIPE0_TX0_COMPLIANCE    V_PIPE0_TX0_COMPLIANCE(1U)
+
+#define S_PIPE0_TX7_ELECIDLE    23
+#define V_PIPE0_TX7_ELECIDLE(x) ((x) << S_PIPE0_TX7_ELECIDLE)
+#define F_PIPE0_TX7_ELECIDLE    V_PIPE0_TX7_ELECIDLE(1U)
+
+#define S_PIPE0_TX6_ELECIDLE    22
+#define V_PIPE0_TX6_ELECIDLE(x) ((x) << S_PIPE0_TX6_ELECIDLE)
+#define F_PIPE0_TX6_ELECIDLE    V_PIPE0_TX6_ELECIDLE(1U)
+
+#define S_PIPE0_TX5_ELECIDLE    21
+#define V_PIPE0_TX5_ELECIDLE(x) ((x) << S_PIPE0_TX5_ELECIDLE)
+#define F_PIPE0_TX5_ELECIDLE    V_PIPE0_TX5_ELECIDLE(1U)
+
+#define S_PIPE0_TX4_ELECIDLE    20
+#define V_PIPE0_TX4_ELECIDLE(x) ((x) << S_PIPE0_TX4_ELECIDLE)
+#define F_PIPE0_TX4_ELECIDLE    V_PIPE0_TX4_ELECIDLE(1U)
+
+#define S_PIPE0_TX3_ELECIDLE    19
+#define V_PIPE0_TX3_ELECIDLE(x) ((x) << S_PIPE0_TX3_ELECIDLE)
+#define F_PIPE0_TX3_ELECIDLE    V_PIPE0_TX3_ELECIDLE(1U)
+
+#define S_PIPE0_TX2_ELECIDLE    18
+#define V_PIPE0_TX2_ELECIDLE(x) ((x) << S_PIPE0_TX2_ELECIDLE)
+#define F_PIPE0_TX2_ELECIDLE    V_PIPE0_TX2_ELECIDLE(1U)
+
+#define S_PIPE0_TX1_ELECIDLE    17
+#define V_PIPE0_TX1_ELECIDLE(x) ((x) << S_PIPE0_TX1_ELECIDLE)
+#define F_PIPE0_TX1_ELECIDLE    V_PIPE0_TX1_ELECIDLE(1U)
+
+#define S_PIPE0_TX0_ELECIDLE    16
+#define V_PIPE0_TX0_ELECIDLE(x) ((x) << S_PIPE0_TX0_ELECIDLE)
+#define F_PIPE0_TX0_ELECIDLE    V_PIPE0_TX0_ELECIDLE(1U)
+
+#define S_PIPE0_RX7_POLARITY_19    15
+#define V_PIPE0_RX7_POLARITY_19(x) ((x) << S_PIPE0_RX7_POLARITY_19)
+#define F_PIPE0_RX7_POLARITY_19    V_PIPE0_RX7_POLARITY_19(1U)
+
+#define S_PIPE0_RX6_POLARITY_19    14
+#define V_PIPE0_RX6_POLARITY_19(x) ((x) << S_PIPE0_RX6_POLARITY_19)
+#define F_PIPE0_RX6_POLARITY_19    V_PIPE0_RX6_POLARITY_19(1U)
+
+#define S_PIPE0_RX5_POLARITY_19    13
+#define V_PIPE0_RX5_POLARITY_19(x) ((x) << S_PIPE0_RX5_POLARITY_19)
+#define F_PIPE0_RX5_POLARITY_19    V_PIPE0_RX5_POLARITY_19(1U)
+
+#define S_PIPE0_RX4_POLARITY_19    12
+#define V_PIPE0_RX4_POLARITY_19(x) ((x) << S_PIPE0_RX4_POLARITY_19)
+#define F_PIPE0_RX4_POLARITY_19    V_PIPE0_RX4_POLARITY_19(1U)
+
+#define S_PIPE0_RX3_POLARITY_19    11
+#define V_PIPE0_RX3_POLARITY_19(x) ((x) << S_PIPE0_RX3_POLARITY_19)
+#define F_PIPE0_RX3_POLARITY_19    V_PIPE0_RX3_POLARITY_19(1U)
+
+#define S_PIPE0_RX2_POLARITY_19    10
+#define V_PIPE0_RX2_POLARITY_19(x) ((x) << S_PIPE0_RX2_POLARITY_19)
+#define F_PIPE0_RX2_POLARITY_19    V_PIPE0_RX2_POLARITY_19(1U)
+
+#define S_PIPE0_RX1_POLARITY_19    9
+#define V_PIPE0_RX1_POLARITY_19(x) ((x) << S_PIPE0_RX1_POLARITY_19)
+#define F_PIPE0_RX1_POLARITY_19    V_PIPE0_RX1_POLARITY_19(1U)
+
+#define S_PIPE0_RX0_POLARITY_19    8
+#define V_PIPE0_RX0_POLARITY_19(x) ((x) << S_PIPE0_RX0_POLARITY_19)
+#define F_PIPE0_RX0_POLARITY_19    V_PIPE0_RX0_POLARITY_19(1U)
+
+#define S_PIPE0_RX7_ELECIDLE    7
+#define V_PIPE0_RX7_ELECIDLE(x) ((x) << S_PIPE0_RX7_ELECIDLE)
+#define F_PIPE0_RX7_ELECIDLE    V_PIPE0_RX7_ELECIDLE(1U)
+
+#define S_PIPE0_RX6_ELECIDLE    6
+#define V_PIPE0_RX6_ELECIDLE(x) ((x) << S_PIPE0_RX6_ELECIDLE)
+#define F_PIPE0_RX6_ELECIDLE    V_PIPE0_RX6_ELECIDLE(1U)
+
+#define S_PIPE0_RX5_ELECIDLE    5
+#define V_PIPE0_RX5_ELECIDLE(x) ((x) << S_PIPE0_RX5_ELECIDLE)
+#define F_PIPE0_RX5_ELECIDLE    V_PIPE0_RX5_ELECIDLE(1U)
+
+#define S_PIPE0_RX4_ELECIDLE    4
+#define V_PIPE0_RX4_ELECIDLE(x) ((x) << S_PIPE0_RX4_ELECIDLE)
+#define F_PIPE0_RX4_ELECIDLE    V_PIPE0_RX4_ELECIDLE(1U)
+
+#define S_PIPE0_RX3_ELECIDLE    3
+#define V_PIPE0_RX3_ELECIDLE(x) ((x) << S_PIPE0_RX3_ELECIDLE)
+#define F_PIPE0_RX3_ELECIDLE    V_PIPE0_RX3_ELECIDLE(1U)
+
+#define S_PIPE0_RX2_ELECIDLE    2
+#define V_PIPE0_RX2_ELECIDLE(x) ((x) << S_PIPE0_RX2_ELECIDLE)
+#define F_PIPE0_RX2_ELECIDLE    V_PIPE0_RX2_ELECIDLE(1U)
+
+#define S_PIPE0_RX1_ELECIDLE    1
+#define V_PIPE0_RX1_ELECIDLE(x) ((x) << S_PIPE0_RX1_ELECIDLE)
+#define F_PIPE0_RX1_ELECIDLE    V_PIPE0_RX1_ELECIDLE(1U)
+
+#define S_PIPE0_RX0_ELECIDLE    0
+#define V_PIPE0_RX0_ELECIDLE(x) ((x) << S_PIPE0_RX0_ELECIDLE)
+#define F_PIPE0_RX0_ELECIDLE    V_PIPE0_RX0_ELECIDLE(1U)
+
+#define A_PCIE_PDEBUG_REG_0X1A 0x1a
+
+#define S_PIPE0_RESET_N    21
+#define V_PIPE0_RESET_N(x) ((x) << S_PIPE0_RESET_N)
+#define F_PIPE0_RESET_N    V_PIPE0_RESET_N(1U)
+
+#define S_PCS_COMMON_CLOCKS    20
+#define V_PCS_COMMON_CLOCKS(x) ((x) << S_PCS_COMMON_CLOCKS)
+#define F_PCS_COMMON_CLOCKS    V_PCS_COMMON_CLOCKS(1U)
+
+#define S_PCS_CLK_REQ    19
+#define V_PCS_CLK_REQ(x) ((x) << S_PCS_CLK_REQ)
+#define F_PCS_CLK_REQ    V_PCS_CLK_REQ(1U)
+
+#define S_PIPE_CLKREQ_N    18
+#define V_PIPE_CLKREQ_N(x) ((x) << S_PIPE_CLKREQ_N)
+#define F_PIPE_CLKREQ_N    V_PIPE_CLKREQ_N(1U)
+
+#define S_MAC_CLKREQ_N_TO_MUX    17
+#define V_MAC_CLKREQ_N_TO_MUX(x) ((x) << S_MAC_CLKREQ_N_TO_MUX)
+#define F_MAC_CLKREQ_N_TO_MUX    V_MAC_CLKREQ_N_TO_MUX(1U)
+
+#define S_PIPE0_TX2RX_LOOPBK    16
+#define V_PIPE0_TX2RX_LOOPBK(x) ((x) << S_PIPE0_TX2RX_LOOPBK)
+#define F_PIPE0_TX2RX_LOOPBK    V_PIPE0_TX2RX_LOOPBK(1U)
+
+#define S_PIPE0_TX_SWING    15
+#define V_PIPE0_TX_SWING(x) ((x) << S_PIPE0_TX_SWING)
+#define F_PIPE0_TX_SWING    V_PIPE0_TX_SWING(1U)
+
+#define S_PIPE0_TX_MARGIN    12
+#define M_PIPE0_TX_MARGIN    0x7U
+#define V_PIPE0_TX_MARGIN(x) ((x) << S_PIPE0_TX_MARGIN)
+#define G_PIPE0_TX_MARGIN(x) (((x) >> S_PIPE0_TX_MARGIN) & M_PIPE0_TX_MARGIN)
+
+#define S_PIPE0_TX_DEEMPH    11
+#define V_PIPE0_TX_DEEMPH(x) ((x) << S_PIPE0_TX_DEEMPH)
+#define F_PIPE0_TX_DEEMPH    V_PIPE0_TX_DEEMPH(1U)
+
+#define S_PIPE0_TX_DETECTRX    10
+#define V_PIPE0_TX_DETECTRX(x) ((x) << S_PIPE0_TX_DETECTRX)
+#define F_PIPE0_TX_DETECTRX    V_PIPE0_TX_DETECTRX(1U)
+
+#define S_PIPE0_POWERDOWN    8
+#define M_PIPE0_POWERDOWN    0x3U
+#define V_PIPE0_POWERDOWN(x) ((x) << S_PIPE0_POWERDOWN)
+#define G_PIPE0_POWERDOWN(x) (((x) >> S_PIPE0_POWERDOWN) & M_PIPE0_POWERDOWN)
+
+#define S_PHY_MAC_PHYSTATUS    0
+#define M_PHY_MAC_PHYSTATUS    0xffU
+#define V_PHY_MAC_PHYSTATUS(x) ((x) << S_PHY_MAC_PHYSTATUS)
+#define G_PHY_MAC_PHYSTATUS(x) (((x) >> S_PHY_MAC_PHYSTATUS) & M_PHY_MAC_PHYSTATUS)
+
+#define A_PCIE_PDEBUG_REG_0X1B 0x1b
+
+#define S_PIPE0_RX7_EQ_IN_PROG    31
+#define V_PIPE0_RX7_EQ_IN_PROG(x) ((x) << S_PIPE0_RX7_EQ_IN_PROG)
+#define F_PIPE0_RX7_EQ_IN_PROG    V_PIPE0_RX7_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX7_EQ_INVLD_REQ    30
+#define V_PIPE0_RX7_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX7_EQ_INVLD_REQ)
+#define F_PIPE0_RX7_EQ_INVLD_REQ    V_PIPE0_RX7_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX7_SYNCHEADER    28
+#define M_PIPE0_RX7_SYNCHEADER    0x3U
+#define V_PIPE0_RX7_SYNCHEADER(x) ((x) << S_PIPE0_RX7_SYNCHEADER)
+#define G_PIPE0_RX7_SYNCHEADER(x) (((x) >> S_PIPE0_RX7_SYNCHEADER) & M_PIPE0_RX7_SYNCHEADER)
+
+#define S_PIPE0_RX6_EQ_IN_PROG    27
+#define V_PIPE0_RX6_EQ_IN_PROG(x) ((x) << S_PIPE0_RX6_EQ_IN_PROG)
+#define F_PIPE0_RX6_EQ_IN_PROG    V_PIPE0_RX6_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX6_EQ_INVLD_REQ    26
+#define V_PIPE0_RX6_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX6_EQ_INVLD_REQ)
+#define F_PIPE0_RX6_EQ_INVLD_REQ    V_PIPE0_RX6_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX6_SYNCHEADER    24
+#define M_PIPE0_RX6_SYNCHEADER    0x3U
+#define V_PIPE0_RX6_SYNCHEADER(x) ((x) << S_PIPE0_RX6_SYNCHEADER)
+#define G_PIPE0_RX6_SYNCHEADER(x) (((x) >> S_PIPE0_RX6_SYNCHEADER) & M_PIPE0_RX6_SYNCHEADER)
+
+#define S_PIPE0_RX5_EQ_IN_PROG    23
+#define V_PIPE0_RX5_EQ_IN_PROG(x) ((x) << S_PIPE0_RX5_EQ_IN_PROG)
+#define F_PIPE0_RX5_EQ_IN_PROG    V_PIPE0_RX5_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX5_EQ_INVLD_REQ    22
+#define V_PIPE0_RX5_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX5_EQ_INVLD_REQ)
+#define F_PIPE0_RX5_EQ_INVLD_REQ    V_PIPE0_RX5_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX5_SYNCHEADER    20
+#define M_PIPE0_RX5_SYNCHEADER    0x3U
+#define V_PIPE0_RX5_SYNCHEADER(x) ((x) << S_PIPE0_RX5_SYNCHEADER)
+#define G_PIPE0_RX5_SYNCHEADER(x) (((x) >> S_PIPE0_RX5_SYNCHEADER) & M_PIPE0_RX5_SYNCHEADER)
+
+#define S_PIPE0_RX4_EQ_IN_PROG    19
+#define V_PIPE0_RX4_EQ_IN_PROG(x) ((x) << S_PIPE0_RX4_EQ_IN_PROG)
+#define F_PIPE0_RX4_EQ_IN_PROG    V_PIPE0_RX4_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX4_EQ_INVLD_REQ    18
+#define V_PIPE0_RX4_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX4_EQ_INVLD_REQ)
+#define F_PIPE0_RX4_EQ_INVLD_REQ    V_PIPE0_RX4_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX4_SYNCHEADER    16
+#define M_PIPE0_RX4_SYNCHEADER    0x3U
+#define V_PIPE0_RX4_SYNCHEADER(x) ((x) << S_PIPE0_RX4_SYNCHEADER)
+#define G_PIPE0_RX4_SYNCHEADER(x) (((x) >> S_PIPE0_RX4_SYNCHEADER) & M_PIPE0_RX4_SYNCHEADER)
+
+#define S_PIPE0_RX3_EQ_IN_PROG    15
+#define V_PIPE0_RX3_EQ_IN_PROG(x) ((x) << S_PIPE0_RX3_EQ_IN_PROG)
+#define F_PIPE0_RX3_EQ_IN_PROG    V_PIPE0_RX3_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX3_EQ_INVLD_REQ    14
+#define V_PIPE0_RX3_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX3_EQ_INVLD_REQ)
+#define F_PIPE0_RX3_EQ_INVLD_REQ    V_PIPE0_RX3_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX3_SYNCHEADER    12
+#define M_PIPE0_RX3_SYNCHEADER    0x3U
+#define V_PIPE0_RX3_SYNCHEADER(x) ((x) << S_PIPE0_RX3_SYNCHEADER)
+#define G_PIPE0_RX3_SYNCHEADER(x) (((x) >> S_PIPE0_RX3_SYNCHEADER) & M_PIPE0_RX3_SYNCHEADER)
+
+#define S_PIPE0_RX2_EQ_IN_PROG    11
+#define V_PIPE0_RX2_EQ_IN_PROG(x) ((x) << S_PIPE0_RX2_EQ_IN_PROG)
+#define F_PIPE0_RX2_EQ_IN_PROG    V_PIPE0_RX2_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX2_EQ_INVLD_REQ    10
+#define V_PIPE0_RX2_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX2_EQ_INVLD_REQ)
+#define F_PIPE0_RX2_EQ_INVLD_REQ    V_PIPE0_RX2_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX2_SYNCHEADER    8
+#define M_PIPE0_RX2_SYNCHEADER    0x3U
+#define V_PIPE0_RX2_SYNCHEADER(x) ((x) << S_PIPE0_RX2_SYNCHEADER)
+#define G_PIPE0_RX2_SYNCHEADER(x) (((x) >> S_PIPE0_RX2_SYNCHEADER) & M_PIPE0_RX2_SYNCHEADER)
+
+#define S_PIPE0_RX1_EQ_IN_PROG    7
+#define V_PIPE0_RX1_EQ_IN_PROG(x) ((x) << S_PIPE0_RX1_EQ_IN_PROG)
+#define F_PIPE0_RX1_EQ_IN_PROG    V_PIPE0_RX1_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX1_EQ_INVLD_REQ    6
+#define V_PIPE0_RX1_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX1_EQ_INVLD_REQ)
+#define F_PIPE0_RX1_EQ_INVLD_REQ    V_PIPE0_RX1_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX1_SYNCHEADER    4
+#define M_PIPE0_RX1_SYNCHEADER    0x3U
+#define V_PIPE0_RX1_SYNCHEADER(x) ((x) << S_PIPE0_RX1_SYNCHEADER)
+#define G_PIPE0_RX1_SYNCHEADER(x) (((x) >> S_PIPE0_RX1_SYNCHEADER) & M_PIPE0_RX1_SYNCHEADER)
+
+#define S_PIPE0_RX0_EQ_IN_PROG    3
+#define V_PIPE0_RX0_EQ_IN_PROG(x) ((x) << S_PIPE0_RX0_EQ_IN_PROG)
+#define F_PIPE0_RX0_EQ_IN_PROG    V_PIPE0_RX0_EQ_IN_PROG(1U)
+
+#define S_PIPE0_RX0_EQ_INVLD_REQ    2
+#define V_PIPE0_RX0_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX0_EQ_INVLD_REQ)
+#define F_PIPE0_RX0_EQ_INVLD_REQ    V_PIPE0_RX0_EQ_INVLD_REQ(1U)
+
+#define S_PIPE0_RX0_SYNCHEADER    0
+#define M_PIPE0_RX0_SYNCHEADER    0x3U
+#define V_PIPE0_RX0_SYNCHEADER(x) ((x) << S_PIPE0_RX0_SYNCHEADER)
+#define G_PIPE0_RX0_SYNCHEADER(x) (((x) >> S_PIPE0_RX0_SYNCHEADER) & M_PIPE0_RX0_SYNCHEADER)
+
+#define A_PCIE_PDEBUG_REG_0X1C 0x1c
+
+#define S_SI_REQVFID    24
+#define M_SI_REQVFID    0xffU
+#define V_SI_REQVFID(x) ((x) << S_SI_REQVFID)
+#define G_SI_REQVFID(x) (((x) >> S_SI_REQVFID) & M_SI_REQVFID)
+
+#define S_SI_REQVEC    13
+#define M_SI_REQVEC    0x7ffU
+#define V_SI_REQVEC(x) ((x) << S_SI_REQVEC)
+#define G_SI_REQVEC(x) (((x) >> S_SI_REQVEC) & M_SI_REQVEC)
+
+#define S_SI_REQTCVAL    10
+#define M_SI_REQTCVAL    0x7U
+#define V_SI_REQTCVAL(x) ((x) << S_SI_REQTCVAL)
+#define G_SI_REQTCVAL(x) (((x) >> S_SI_REQTCVAL) & M_SI_REQTCVAL)
+
+#define S_SI_REQRDY    9
+#define V_SI_REQRDY(x) ((x) << S_SI_REQRDY)
+#define F_SI_REQRDY    V_SI_REQRDY(1U)
+
+#define S_SI_REQVLD    8
+#define V_SI_REQVLD(x) ((x) << S_SI_REQVLD)
+#define F_SI_REQVLD    V_SI_REQVLD(1U)
+
+#define S_T5_AI    0
+#define M_T5_AI    0xffU
+#define V_T5_AI(x) ((x) << S_T5_AI)
+#define G_T5_AI(x) (((x) >> S_T5_AI) & M_T5_AI)
+
+#define A_PCIE_PDEBUG_REG_0X1D 0x1d
+
+#define S_GNTSI    31
+#define V_GNTSI(x) ((x) << S_GNTSI)
+#define F_GNTSI    V_GNTSI(1U)
+
+#define S_DROPINTFORFLR    30
+#define V_DROPINTFORFLR(x) ((x) << S_DROPINTFORFLR)
+#define F_DROPINTFORFLR    V_DROPINTFORFLR(1U)
+
+#define S_SMARB    27
+#define M_SMARB    0x7U
+#define V_SMARB(x) ((x) << S_SMARB)
+#define G_SMARB(x) (((x) >> S_SMARB) & M_SMARB)
+
+#define S_SMDEFR    24
+#define M_SMDEFR    0x7U
+#define V_SMDEFR(x) ((x) << S_SMDEFR)
+#define G_SMDEFR(x) (((x) >> S_SMDEFR) & M_SMDEFR)
+
+#define S_SYS_INT    16
+#define M_SYS_INT    0xffU
+#define V_SYS_INT(x) ((x) << S_SYS_INT)
+#define G_SYS_INT(x) (((x) >> S_SYS_INT) & M_SYS_INT)
+
+#define S_CFG_INTXCLR    8
+#define M_CFG_INTXCLR    0xffU
+#define V_CFG_INTXCLR(x) ((x) << S_CFG_INTXCLR)
+#define G_CFG_INTXCLR(x) (((x) >> S_CFG_INTXCLR) & M_CFG_INTXCLR)
+
+#define S_PIO_INTXCLR    0
+#define M_PIO_INTXCLR    0xffU
+#define V_PIO_INTXCLR(x) ((x) << S_PIO_INTXCLR)
+#define G_PIO_INTXCLR(x) (((x) >> S_PIO_INTXCLR) & M_PIO_INTXCLR)
+
+#define A_PCIE_PDEBUG_REG_0X1E 0x1e
+
+#define S_PLI_TABDATWREN    31
+#define V_PLI_TABDATWREN(x) ((x) << S_PLI_TABDATWREN)
+#define F_PLI_TABDATWREN    V_PLI_TABDATWREN(1U)
+
+#define S_TAB_RDENA    30
+#define V_TAB_RDENA(x) ((x) << S_TAB_RDENA)
+#define F_TAB_RDENA    V_TAB_RDENA(1U)
+
+#define S_TAB_RDENA2    19
+#define M_TAB_RDENA2    0x7ffU
+#define V_TAB_RDENA2(x) ((x) << S_TAB_RDENA2)
+#define G_TAB_RDENA2(x) (((x) >> S_TAB_RDENA2) & M_TAB_RDENA2)
+
+#define S_PLI_REQADDR    10
+#define M_PLI_REQADDR    0x1ffU
+#define V_PLI_REQADDR(x) ((x) << S_PLI_REQADDR)
+#define G_PLI_REQADDR(x) (((x) >> S_PLI_REQADDR) & M_PLI_REQADDR)
+
+#define S_PLI_REQVFID    2
+#define M_PLI_REQVFID    0xffU
+#define V_PLI_REQVFID(x) ((x) << S_PLI_REQVFID)
+#define G_PLI_REQVFID(x) (((x) >> S_PLI_REQVFID) & M_PLI_REQVFID)
+
+#define S_PLI_REQTABHIT    1
+#define V_PLI_REQTABHIT(x) ((x) << S_PLI_REQTABHIT)
+#define F_PLI_REQTABHIT    V_PLI_REQTABHIT(1U)
+
+#define S_PLI_REQRDVLD    0
+#define V_PLI_REQRDVLD(x) ((x) << S_PLI_REQRDVLD)
+#define F_PLI_REQRDVLD    V_PLI_REQRDVLD(1U)
+
+#define A_PCIE_PDEBUG_REG_0X1F 0x1f
+#define A_PCIE_PDEBUG_REG_0X20 0x20
+#define A_PCIE_PDEBUG_REG_0X21 0x21
+
+#define S_PLI_REQPBASTART    20
+#define M_PLI_REQPBASTART    0xfffU
+#define V_PLI_REQPBASTART(x) ((x) << S_PLI_REQPBASTART)
+#define G_PLI_REQPBASTART(x) (((x) >> S_PLI_REQPBASTART) & M_PLI_REQPBASTART)
+
+#define S_PLI_REQPBAEND    9
+#define M_PLI_REQPBAEND    0x7ffU
+#define V_PLI_REQPBAEND(x) ((x) << S_PLI_REQPBAEND)
+#define G_PLI_REQPBAEND(x) (((x) >> S_PLI_REQPBAEND) & M_PLI_REQPBAEND)
+
+#define S_T5_PLI_REQVFID    2
+#define M_T5_PLI_REQVFID    0x7fU
+#define V_T5_PLI_REQVFID(x) ((x) << S_T5_PLI_REQVFID)
+#define G_T5_PLI_REQVFID(x) (((x) >> S_T5_PLI_REQVFID) & M_T5_PLI_REQVFID)
+
+#define S_PLI_REQPBAHIT    1
+#define V_PLI_REQPBAHIT(x) ((x) << S_PLI_REQPBAHIT)
+#define F_PLI_REQPBAHIT    V_PLI_REQPBAHIT(1U)
+
+#define A_PCIE_PDEBUG_REG_0X22 0x22
+
+#define S_GNTSI1    31
+#define V_GNTSI1(x) ((x) << S_GNTSI1)
+#define F_GNTSI1    V_GNTSI1(1U)
+
+#define S_GNTSI2    30
+#define V_GNTSI2(x) ((x) << S_GNTSI2)
+#define F_GNTSI2    V_GNTSI2(1U)
+
+#define S_GNTSI3    27
+#define M_GNTSI3    0x7U
+#define V_GNTSI3(x) ((x) << S_GNTSI3)
+#define G_GNTSI3(x) (((x) >> S_GNTSI3) & M_GNTSI3)
+
+#define S_GNTSI4    16
+#define M_GNTSI4    0x7ffU
+#define V_GNTSI4(x) ((x) << S_GNTSI4)
+#define G_GNTSI4(x) (((x) >> S_GNTSI4) & M_GNTSI4)
+
+#define S_GNTSI5    8
+#define M_GNTSI5    0xffU
+#define V_GNTSI5(x) ((x) << S_GNTSI5)
+#define G_GNTSI5(x) (((x) >> S_GNTSI5) & M_GNTSI5)
+
+#define S_GNTSI6    7
+#define V_GNTSI6(x) ((x) << S_GNTSI6)
+#define F_GNTSI6    V_GNTSI6(1U)
+
+#define S_GNTSI7    6
+#define V_GNTSI7(x) ((x) << S_GNTSI7)
+#define F_GNTSI7    V_GNTSI7(1U)
+
+#define S_GNTSI8    5
+#define V_GNTSI8(x) ((x) << S_GNTSI8)
+#define F_GNTSI8    V_GNTSI8(1U)
+
+#define S_GNTSI9    4
+#define V_GNTSI9(x) ((x) << S_GNTSI9)
+#define F_GNTSI9    V_GNTSI9(1U)
+
+#define S_GNTSIA    3
+#define V_GNTSIA(x) ((x) << S_GNTSIA)
+#define F_GNTSIA    V_GNTSIA(1U)
+
+#define S_GNTAI    2
+#define V_GNTAI(x) ((x) << S_GNTAI)
+#define F_GNTAI    V_GNTAI(1U)
+
+#define S_GNTDB    1
+#define V_GNTDB(x) ((x) << S_GNTDB)
+#define F_GNTDB    V_GNTDB(1U)
+
+#define S_GNTDI    0
+#define V_GNTDI(x) ((x) << S_GNTDI)
+#define F_GNTDI    V_GNTDI(1U)
+
+#define A_PCIE_PDEBUG_REG_0X23 0x23
+
+#define S_DI_REQVLD    31
+#define V_DI_REQVLD(x) ((x) << S_DI_REQVLD)
+#define F_DI_REQVLD    V_DI_REQVLD(1U)
+
+#define S_DI_REQRDY    30
+#define V_DI_REQRDY(x) ((x) << S_DI_REQRDY)
+#define F_DI_REQRDY    V_DI_REQRDY(1U)
+
+#define S_DI_REQWREN    19
+#define M_DI_REQWREN    0x7ffU
+#define V_DI_REQWREN(x) ((x) << S_DI_REQWREN)
+#define G_DI_REQWREN(x) (((x) >> S_DI_REQWREN) & M_DI_REQWREN)
+
+#define S_DI_REQMSIEN    18
+#define V_DI_REQMSIEN(x) ((x) << S_DI_REQMSIEN)
+#define F_DI_REQMSIEN    V_DI_REQMSIEN(1U)
+
+#define S_DI_REQMSXEN    17
+#define V_DI_REQMSXEN(x) ((x) << S_DI_REQMSXEN)
+#define F_DI_REQMSXEN    V_DI_REQMSXEN(1U)
+
+#define S_DI_REQMSXVFIDMSK    16
+#define V_DI_REQMSXVFIDMSK(x) ((x) << S_DI_REQMSXVFIDMSK)
+#define F_DI_REQMSXVFIDMSK    V_DI_REQMSXVFIDMSK(1U)
+
+#define S_DI_REQWREN2    2
+#define M_DI_REQWREN2    0x3fffU
+#define V_DI_REQWREN2(x) ((x) << S_DI_REQWREN2)
+#define G_DI_REQWREN2(x) (((x) >> S_DI_REQWREN2) & M_DI_REQWREN2)
+
+#define S_DI_REQRDEN    1
+#define V_DI_REQRDEN(x) ((x) << S_DI_REQRDEN)
+#define F_DI_REQRDEN    V_DI_REQRDEN(1U)
+
+#define S_DI_REQWREN3    0
+#define V_DI_REQWREN3(x) ((x) << S_DI_REQWREN3)
+#define F_DI_REQWREN3    V_DI_REQWREN3(1U)
+
+#define A_PCIE_PDEBUG_REG_0X24 0x24
+#define A_PCIE_PDEBUG_REG_0X25 0x25
+#define A_PCIE_PDEBUG_REG_0X26 0x26
+#define A_PCIE_PDEBUG_REG_0X27 0x27
+
+#define S_FID_STI_RSPVLD    31
+#define V_FID_STI_RSPVLD(x) ((x) << S_FID_STI_RSPVLD)
+#define F_FID_STI_RSPVLD    V_FID_STI_RSPVLD(1U)
+
+#define S_TAB_STIRDENA    30
+#define V_TAB_STIRDENA(x) ((x) << S_TAB_STIRDENA)
+#define F_TAB_STIRDENA    V_TAB_STIRDENA(1U)
+
+#define S_TAB_STIWRENA    29
+#define V_TAB_STIWRENA(x) ((x) << S_TAB_STIWRENA)
+#define F_TAB_STIWRENA    V_TAB_STIWRENA(1U)
+
+#define S_TAB_STIRDENA2    18
+#define M_TAB_STIRDENA2    0x7ffU
+#define V_TAB_STIRDENA2(x) ((x) << S_TAB_STIRDENA2)
+#define G_TAB_STIRDENA2(x) (((x) >> S_TAB_STIRDENA2) & M_TAB_STIRDENA2)
+
+#define S_T5_PLI_REQTABHIT    7
+#define M_T5_PLI_REQTABHIT    0x7ffU
+#define V_T5_PLI_REQTABHIT(x) ((x) << S_T5_PLI_REQTABHIT)
+#define G_T5_PLI_REQTABHIT(x) (((x) >> S_T5_PLI_REQTABHIT) & M_T5_PLI_REQTABHIT)
+
+#define S_T5_GNTSI    0
+#define M_T5_GNTSI    0x7fU
+#define V_T5_GNTSI(x) ((x) << S_T5_GNTSI)
+#define G_T5_GNTSI(x) (((x) >> S_T5_GNTSI) & M_T5_GNTSI)
+
+#define A_PCIE_PDEBUG_REG_0X28 0x28
+
+#define S_PLI_REQWRVLD    31
+#define V_PLI_REQWRVLD(x) ((x) << S_PLI_REQWRVLD)
+#define F_PLI_REQWRVLD    V_PLI_REQWRVLD(1U)
+
+#define S_T5_PLI_REQPBAHIT    30
+#define V_T5_PLI_REQPBAHIT(x) ((x) << S_T5_PLI_REQPBAHIT)
+#define F_T5_PLI_REQPBAHIT    V_T5_PLI_REQPBAHIT(1U)
+
+#define S_PLI_TABADDRLWREN    29
+#define V_PLI_TABADDRLWREN(x) ((x) << S_PLI_TABADDRLWREN)
+#define F_PLI_TABADDRLWREN    V_PLI_TABADDRLWREN(1U)
+
+#define S_PLI_TABADDRHWREN    28
+#define V_PLI_TABADDRHWREN(x) ((x) << S_PLI_TABADDRHWREN)
+#define F_PLI_TABADDRHWREN    V_PLI_TABADDRHWREN(1U)
+
+#define S_T5_PLI_TABDATWREN    27
+#define V_T5_PLI_TABDATWREN(x) ((x) << S_T5_PLI_TABDATWREN)
+#define F_T5_PLI_TABDATWREN    V_T5_PLI_TABDATWREN(1U)
+
+#define S_PLI_TABMSKWREN    26
+#define V_PLI_TABMSKWREN(x) ((x) << S_PLI_TABMSKWREN)
+#define F_PLI_TABMSKWREN    V_PLI_TABMSKWREN(1U)
+
+#define S_AI_REQVLD    23
+#define M_AI_REQVLD    0x7U
+#define V_AI_REQVLD(x) ((x) << S_AI_REQVLD)
+#define G_AI_REQVLD(x) (((x) >> S_AI_REQVLD) & M_AI_REQVLD)
+
+#define S_AI_REQVLD2    22
+#define V_AI_REQVLD2(x) ((x) << S_AI_REQVLD2)
+#define F_AI_REQVLD2    V_AI_REQVLD2(1U)
+
+#define S_AI_REQRDY    21
+#define V_AI_REQRDY(x) ((x) << S_AI_REQRDY)
+#define F_AI_REQRDY    V_AI_REQRDY(1U)
+
+#define S_VEN_MSI_REQ_28    18
+#define M_VEN_MSI_REQ_28    0x7U
+#define V_VEN_MSI_REQ_28(x) ((x) << S_VEN_MSI_REQ_28)
+#define G_VEN_MSI_REQ_28(x) (((x) >> S_VEN_MSI_REQ_28) & M_VEN_MSI_REQ_28)
+
+#define S_VEN_MSI_REQ2    11
+#define M_VEN_MSI_REQ2    0x7fU
+#define V_VEN_MSI_REQ2(x) ((x) << S_VEN_MSI_REQ2)
+#define G_VEN_MSI_REQ2(x) (((x) >> S_VEN_MSI_REQ2) & M_VEN_MSI_REQ2)
+
+#define S_VEN_MSI_REQ3    6
+#define M_VEN_MSI_REQ3    0x1fU
+#define V_VEN_MSI_REQ3(x) ((x) << S_VEN_MSI_REQ3)
+#define G_VEN_MSI_REQ3(x) (((x) >> S_VEN_MSI_REQ3) & M_VEN_MSI_REQ3)
+
+#define S_VEN_MSI_REQ4    3
+#define M_VEN_MSI_REQ4    0x7U
+#define V_VEN_MSI_REQ4(x) ((x) << S_VEN_MSI_REQ4)
+#define G_VEN_MSI_REQ4(x) (((x) >> S_VEN_MSI_REQ4) & M_VEN_MSI_REQ4)
+
+#define S_VEN_MSI_REQ5    2
+#define V_VEN_MSI_REQ5(x) ((x) << S_VEN_MSI_REQ5)
+#define F_VEN_MSI_REQ5    V_VEN_MSI_REQ5(1U)
+
+#define S_VEN_MSI_GRANT    1
+#define V_VEN_MSI_GRANT(x) ((x) << S_VEN_MSI_GRANT)
+#define F_VEN_MSI_GRANT    V_VEN_MSI_GRANT(1U)
+
+#define S_VEN_MSI_REQ6    0
+#define V_VEN_MSI_REQ6(x) ((x) << S_VEN_MSI_REQ6)
+#define F_VEN_MSI_REQ6    V_VEN_MSI_REQ6(1U)
+
+#define A_PCIE_PDEBUG_REG_0X29 0x29
+
+#define S_TRGT1_REQDATAVLD    16
+#define M_TRGT1_REQDATAVLD    0xffffU
+#define V_TRGT1_REQDATAVLD(x) ((x) << S_TRGT1_REQDATAVLD)
+#define G_TRGT1_REQDATAVLD(x) (((x) >> S_TRGT1_REQDATAVLD) & M_TRGT1_REQDATAVLD)
+
+#define S_TRGT1_REQDATAVLD2    12
+#define M_TRGT1_REQDATAVLD2    0xfU
+#define V_TRGT1_REQDATAVLD2(x) ((x) << S_TRGT1_REQDATAVLD2)
+#define G_TRGT1_REQDATAVLD2(x) (((x) >> S_TRGT1_REQDATAVLD2) & M_TRGT1_REQDATAVLD2)
+
+#define S_TRGT1_REQDATAVLD3    11
+#define V_TRGT1_REQDATAVLD3(x) ((x) << S_TRGT1_REQDATAVLD3)
+#define F_TRGT1_REQDATAVLD3    V_TRGT1_REQDATAVLD3(1U)
+
+#define S_TRGT1_REQDATAVLD4    10
+#define V_TRGT1_REQDATAVLD4(x) ((x) << S_TRGT1_REQDATAVLD4)
+#define F_TRGT1_REQDATAVLD4    V_TRGT1_REQDATAVLD4(1U)
+
+#define S_TRGT1_REQDATAVLD5    9
+#define V_TRGT1_REQDATAVLD5(x) ((x) << S_TRGT1_REQDATAVLD5)
+#define F_TRGT1_REQDATAVLD5    V_TRGT1_REQDATAVLD5(1U)
+
+#define S_TRGT1_REQDATAVLD6    8
+#define V_TRGT1_REQDATAVLD6(x) ((x) << S_TRGT1_REQDATAVLD6)
+#define F_TRGT1_REQDATAVLD6    V_TRGT1_REQDATAVLD6(1U)
+
+#define S_TRGT1_REQDATAVLD7    4
+#define M_TRGT1_REQDATAVLD7    0xfU
+#define V_TRGT1_REQDATAVLD7(x) ((x) << S_TRGT1_REQDATAVLD7)
+#define G_TRGT1_REQDATAVLD7(x) (((x) >> S_TRGT1_REQDATAVLD7) & M_TRGT1_REQDATAVLD7)
+
+#define S_TRGT1_REQDATAVLD8    2
+#define M_TRGT1_REQDATAVLD8    0x3U
+#define V_TRGT1_REQDATAVLD8(x) ((x) << S_TRGT1_REQDATAVLD8)
+#define G_TRGT1_REQDATAVLD8(x) (((x) >> S_TRGT1_REQDATAVLD8) & M_TRGT1_REQDATAVLD8)
+
+#define S_TRGT1_REQDATARDY    1
+#define V_TRGT1_REQDATARDY(x) ((x) << S_TRGT1_REQDATARDY)
+#define F_TRGT1_REQDATARDY    V_TRGT1_REQDATARDY(1U)
+
+#define S_TRGT1_REQDATAVLD0    0
+#define V_TRGT1_REQDATAVLD0(x) ((x) << S_TRGT1_REQDATAVLD0)
+#define F_TRGT1_REQDATAVLD0    V_TRGT1_REQDATAVLD0(1U)
+
+#define A_PCIE_PDEBUG_REG_0X2A 0x2a
+#define A_PCIE_PDEBUG_REG_0X2B 0x2b
+
+#define S_RADM_TRGT1_ADDR    20
+#define M_RADM_TRGT1_ADDR    0xfffU
+#define V_RADM_TRGT1_ADDR(x) ((x) << S_RADM_TRGT1_ADDR)
+#define G_RADM_TRGT1_ADDR(x) (((x) >> S_RADM_TRGT1_ADDR) & M_RADM_TRGT1_ADDR)
+
+#define S_RADM_TRGT1_DWEN    16
+#define M_RADM_TRGT1_DWEN    0xfU
+#define V_RADM_TRGT1_DWEN(x) ((x) << S_RADM_TRGT1_DWEN)
+#define G_RADM_TRGT1_DWEN(x) (((x) >> S_RADM_TRGT1_DWEN) & M_RADM_TRGT1_DWEN)
+
+#define S_RADM_TRGT1_FMT    14
+#define M_RADM_TRGT1_FMT    0x3U
+#define V_RADM_TRGT1_FMT(x) ((x) << S_RADM_TRGT1_FMT)
+#define G_RADM_TRGT1_FMT(x) (((x) >> S_RADM_TRGT1_FMT) & M_RADM_TRGT1_FMT)
+
+#define S_RADM_TRGT1_TYPE    9
+#define M_RADM_TRGT1_TYPE    0x1fU
+#define V_RADM_TRGT1_TYPE(x) ((x) << S_RADM_TRGT1_TYPE)
+#define G_RADM_TRGT1_TYPE(x) (((x) >> S_RADM_TRGT1_TYPE) & M_RADM_TRGT1_TYPE)
+
+#define S_RADM_TRGT1_IN_MEMBAR_RANGE    6
+#define M_RADM_TRGT1_IN_MEMBAR_RANGE    0x7U
+#define V_RADM_TRGT1_IN_MEMBAR_RANGE(x) ((x) << S_RADM_TRGT1_IN_MEMBAR_RANGE)
+#define G_RADM_TRGT1_IN_MEMBAR_RANGE(x) (((x) >> S_RADM_TRGT1_IN_MEMBAR_RANGE) & M_RADM_TRGT1_IN_MEMBAR_RANGE)
+
+#define S_RADM_TRGT1_ECRC_ERR    5
+#define V_RADM_TRGT1_ECRC_ERR(x) ((x) << S_RADM_TRGT1_ECRC_ERR)
+#define F_RADM_TRGT1_ECRC_ERR    V_RADM_TRGT1_ECRC_ERR(1U)
+
+#define S_RADM_TRGT1_DLLP_ABORT    4
+#define V_RADM_TRGT1_DLLP_ABORT(x) ((x) << S_RADM_TRGT1_DLLP_ABORT)
+#define F_RADM_TRGT1_DLLP_ABORT    V_RADM_TRGT1_DLLP_ABORT(1U)
+
+#define S_RADM_TRGT1_TLP_ABORT    3
+#define V_RADM_TRGT1_TLP_ABORT(x) ((x) << S_RADM_TRGT1_TLP_ABORT)
+#define F_RADM_TRGT1_TLP_ABORT    V_RADM_TRGT1_TLP_ABORT(1U)
+
+#define S_RADM_TRGT1_EOT    2
+#define V_RADM_TRGT1_EOT(x) ((x) << S_RADM_TRGT1_EOT)
+#define F_RADM_TRGT1_EOT    V_RADM_TRGT1_EOT(1U)
+
+#define S_RADM_TRGT1_DV_2B    1
+#define V_RADM_TRGT1_DV_2B(x) ((x) << S_RADM_TRGT1_DV_2B)
+#define F_RADM_TRGT1_DV_2B    V_RADM_TRGT1_DV_2B(1U)
+
+#define S_RADM_TRGT1_HV_2B    0
+#define V_RADM_TRGT1_HV_2B(x) ((x) << S_RADM_TRGT1_HV_2B)
+#define F_RADM_TRGT1_HV_2B    V_RADM_TRGT1_HV_2B(1U)
+
+#define A_PCIE_PDEBUG_REG_0X2C 0x2c
+
+#define S_STATEMPIO    29
+#define M_STATEMPIO    0x7U
+#define V_STATEMPIO(x) ((x) << S_STATEMPIO)
+#define G_STATEMPIO(x) (((x) >> S_STATEMPIO) & M_STATEMPIO)
+
+#define S_STATECPL    25
+#define M_STATECPL    0xfU
+#define V_STATECPL(x) ((x) << S_STATECPL)
+#define G_STATECPL(x) (((x) >> S_STATECPL) & M_STATECPL)
+
+#define S_STATEALIN    22
+#define M_STATEALIN    0x7U
+#define V_STATEALIN(x) ((x) << S_STATEALIN)
+#define G_STATEALIN(x) (((x) >> S_STATEALIN) & M_STATEALIN)
+
+#define S_STATEPL    19
+#define M_STATEPL    0x7U
+#define V_STATEPL(x) ((x) << S_STATEPL)
+#define G_STATEPL(x) (((x) >> S_STATEPL) & M_STATEPL)
+
+#define S_STATEMARSP    18
+#define V_STATEMARSP(x) ((x) << S_STATEMARSP)
+#define F_STATEMARSP    V_STATEMARSP(1U)
+
+#define S_MA_TAGSINUSE    11
+#define M_MA_TAGSINUSE    0x7fU
+#define V_MA_TAGSINUSE(x) ((x) << S_MA_TAGSINUSE)
+#define G_MA_TAGSINUSE(x) (((x) >> S_MA_TAGSINUSE) & M_MA_TAGSINUSE)
+
+#define S_RADM_TRGT1_HSRDY    10
+#define V_RADM_TRGT1_HSRDY(x) ((x) << S_RADM_TRGT1_HSRDY)
+#define F_RADM_TRGT1_HSRDY    V_RADM_TRGT1_HSRDY(1U)
+
+#define S_RADM_TRGT1_DSRDY    9
+#define V_RADM_TRGT1_DSRDY(x) ((x) << S_RADM_TRGT1_DSRDY)
+#define F_RADM_TRGT1_DSRDY    V_RADM_TRGT1_DSRDY(1U)
+
+#define S_ALIND_REQWRDATAVLD    8
+#define V_ALIND_REQWRDATAVLD(x) ((x) << S_ALIND_REQWRDATAVLD)
+#define F_ALIND_REQWRDATAVLD    V_ALIND_REQWRDATAVLD(1U)
+
+#define S_FID_LKUPWRHDRVLD    7
+#define V_FID_LKUPWRHDRVLD(x) ((x) << S_FID_LKUPWRHDRVLD)
+#define F_FID_LKUPWRHDRVLD    V_FID_LKUPWRHDRVLD(1U)
+
+#define S_MPIO_WRVLD    6
+#define V_MPIO_WRVLD(x) ((x) << S_MPIO_WRVLD)
+#define F_MPIO_WRVLD    V_MPIO_WRVLD(1U)
+
+#define S_TRGT1_RADM_HALT    5
+#define V_TRGT1_RADM_HALT(x) ((x) << S_TRGT1_RADM_HALT)
+#define F_TRGT1_RADM_HALT    V_TRGT1_RADM_HALT(1U)
+
+#define S_RADM_TRGT1_DV_2C    4
+#define V_RADM_TRGT1_DV_2C(x) ((x) << S_RADM_TRGT1_DV_2C)
+#define F_RADM_TRGT1_DV_2C    V_RADM_TRGT1_DV_2C(1U)
+
+#define S_RADM_TRGT1_DV_2C_2    3
+#define V_RADM_TRGT1_DV_2C_2(x) ((x) << S_RADM_TRGT1_DV_2C_2)
+#define F_RADM_TRGT1_DV_2C_2    V_RADM_TRGT1_DV_2C_2(1U)
+
+#define S_RADM_TRGT1_TLP_ABORT_2C    2
+#define V_RADM_TRGT1_TLP_ABORT_2C(x) ((x) << S_RADM_TRGT1_TLP_ABORT_2C)
+#define F_RADM_TRGT1_TLP_ABORT_2C    V_RADM_TRGT1_TLP_ABORT_2C(1U)
+
+#define S_RADM_TRGT1_DLLP_ABORT_2C    1
+#define V_RADM_TRGT1_DLLP_ABORT_2C(x) ((x) << S_RADM_TRGT1_DLLP_ABORT_2C)
+#define F_RADM_TRGT1_DLLP_ABORT_2C    V_RADM_TRGT1_DLLP_ABORT_2C(1U)
+
+#define S_RADM_TRGT1_ECRC_ERR_2C    0
+#define V_RADM_TRGT1_ECRC_ERR_2C(x) ((x) << S_RADM_TRGT1_ECRC_ERR_2C)
+#define F_RADM_TRGT1_ECRC_ERR_2C    V_RADM_TRGT1_ECRC_ERR_2C(1U)
+
+#define A_PCIE_PDEBUG_REG_0X2D 0x2d
+
+#define S_RADM_TRGT1_HV_2D    31
+#define V_RADM_TRGT1_HV_2D(x) ((x) << S_RADM_TRGT1_HV_2D)
+#define F_RADM_TRGT1_HV_2D    V_RADM_TRGT1_HV_2D(1U)
+
+#define S_RADM_TRGT1_DV_2D    30
+#define V_RADM_TRGT1_DV_2D(x) ((x) << S_RADM_TRGT1_DV_2D)
+#define F_RADM_TRGT1_DV_2D    V_RADM_TRGT1_DV_2D(1U)
+
+#define S_RADM_TRGT1_HV2    23
+#define M_RADM_TRGT1_HV2    0x7fU
+#define V_RADM_TRGT1_HV2(x) ((x) << S_RADM_TRGT1_HV2)
+#define G_RADM_TRGT1_HV2(x) (((x) >> S_RADM_TRGT1_HV2) & M_RADM_TRGT1_HV2)
+
+#define S_RADM_TRGT1_HV3    20
+#define M_RADM_TRGT1_HV3    0x7U
+#define V_RADM_TRGT1_HV3(x) ((x) << S_RADM_TRGT1_HV3)
+#define G_RADM_TRGT1_HV3(x) (((x) >> S_RADM_TRGT1_HV3) & M_RADM_TRGT1_HV3)
+
+#define S_RADM_TRGT1_HV4    16
+#define M_RADM_TRGT1_HV4    0xfU
+#define V_RADM_TRGT1_HV4(x) ((x) << S_RADM_TRGT1_HV4)
+#define G_RADM_TRGT1_HV4(x) (((x) >> S_RADM_TRGT1_HV4) & M_RADM_TRGT1_HV4)
+
+#define S_RADM_TRGT1_HV5    12
+#define M_RADM_TRGT1_HV5    0xfU
+#define V_RADM_TRGT1_HV5(x) ((x) << S_RADM_TRGT1_HV5)
+#define G_RADM_TRGT1_HV5(x) (((x) >> S_RADM_TRGT1_HV5) & M_RADM_TRGT1_HV5)
+
+#define S_RADM_TRGT1_HV6    11
+#define V_RADM_TRGT1_HV6(x) ((x) << S_RADM_TRGT1_HV6)
+#define F_RADM_TRGT1_HV6    V_RADM_TRGT1_HV6(1U)
+
+#define S_RADM_TRGT1_HV7    10
+#define V_RADM_TRGT1_HV7(x) ((x) << S_RADM_TRGT1_HV7)
+#define F_RADM_TRGT1_HV7    V_RADM_TRGT1_HV7(1U)
+
+#define S_RADM_TRGT1_HV8    7
+#define M_RADM_TRGT1_HV8    0x7U
+#define V_RADM_TRGT1_HV8(x) ((x) << S_RADM_TRGT1_HV8)
+#define G_RADM_TRGT1_HV8(x) (((x) >> S_RADM_TRGT1_HV8) & M_RADM_TRGT1_HV8)
+
+#define S_RADM_TRGT1_HV9    6
+#define V_RADM_TRGT1_HV9(x) ((x) << S_RADM_TRGT1_HV9)
+#define F_RADM_TRGT1_HV9    V_RADM_TRGT1_HV9(1U)
+
+#define S_RADM_TRGT1_HVA    5
+#define V_RADM_TRGT1_HVA(x) ((x) << S_RADM_TRGT1_HVA)
+#define F_RADM_TRGT1_HVA    V_RADM_TRGT1_HVA(1U)
+
+#define S_RADM_TRGT1_DSRDY_2D    4
+#define V_RADM_TRGT1_DSRDY_2D(x) ((x) << S_RADM_TRGT1_DSRDY_2D)
+#define F_RADM_TRGT1_DSRDY_2D    V_RADM_TRGT1_DSRDY_2D(1U)
+
+#define S_RADM_TRGT1_WRCNT    0
+#define M_RADM_TRGT1_WRCNT    0xfU
+#define V_RADM_TRGT1_WRCNT(x) ((x) << S_RADM_TRGT1_WRCNT)
+#define G_RADM_TRGT1_WRCNT(x) (((x) >> S_RADM_TRGT1_WRCNT) & M_RADM_TRGT1_WRCNT)
+
+#define A_PCIE_PDEBUG_REG_0X2E 0x2e
+
+#define S_RADM_TRGT1_HV_2E    30
+#define M_RADM_TRGT1_HV_2E    0x3U
+#define V_RADM_TRGT1_HV_2E(x) ((x) << S_RADM_TRGT1_HV_2E)
+#define G_RADM_TRGT1_HV_2E(x) (((x) >> S_RADM_TRGT1_HV_2E) & M_RADM_TRGT1_HV_2E)
+
+#define S_RADM_TRGT1_HV_2E_2    20
+#define M_RADM_TRGT1_HV_2E_2    0x3ffU
+#define V_RADM_TRGT1_HV_2E_2(x) ((x) << S_RADM_TRGT1_HV_2E_2)
+#define G_RADM_TRGT1_HV_2E_2(x) (((x) >> S_RADM_TRGT1_HV_2E_2) & M_RADM_TRGT1_HV_2E_2)
+
+#define S_RADM_TRGT1_HV_WE_3    12
+#define M_RADM_TRGT1_HV_WE_3    0xffU
+#define V_RADM_TRGT1_HV_WE_3(x) ((x) << S_RADM_TRGT1_HV_WE_3)
+#define G_RADM_TRGT1_HV_WE_3(x) (((x) >> S_RADM_TRGT1_HV_WE_3) & M_RADM_TRGT1_HV_WE_3)
+
+#define S_ALIN_REQDATAVLD4    8
+#define M_ALIN_REQDATAVLD4    0xfU
+#define V_ALIN_REQDATAVLD4(x) ((x) << S_ALIN_REQDATAVLD4)
+#define G_ALIN_REQDATAVLD4(x) (((x) >> S_ALIN_REQDATAVLD4) & M_ALIN_REQDATAVLD4)
+
+#define S_ALIN_REQDATAVLD5    7
+#define V_ALIN_REQDATAVLD5(x) ((x) << S_ALIN_REQDATAVLD5)
+#define F_ALIN_REQDATAVLD5    V_ALIN_REQDATAVLD5(1U)
+
+#define S_ALIN_REQDATAVLD6    6
+#define V_ALIN_REQDATAVLD6(x) ((x) << S_ALIN_REQDATAVLD6)
+#define F_ALIN_REQDATAVLD6    V_ALIN_REQDATAVLD6(1U)
+
+#define S_ALIN_REQDATAVLD7    4
+#define M_ALIN_REQDATAVLD7    0x3U
+#define V_ALIN_REQDATAVLD7(x) ((x) << S_ALIN_REQDATAVLD7)
+#define G_ALIN_REQDATAVLD7(x) (((x) >> S_ALIN_REQDATAVLD7) & M_ALIN_REQDATAVLD7)
+
+#define S_ALIN_REQDATAVLD8    3
+#define V_ALIN_REQDATAVLD8(x) ((x) << S_ALIN_REQDATAVLD8)
+#define F_ALIN_REQDATAVLD8    V_ALIN_REQDATAVLD8(1U)
+
+#define S_ALIN_REQDATAVLD9    2
+#define V_ALIN_REQDATAVLD9(x) ((x) << S_ALIN_REQDATAVLD9)
+#define F_ALIN_REQDATAVLD9    V_ALIN_REQDATAVLD9(1U)
+
+#define S_ALIN_REQDATARDY    1
+#define V_ALIN_REQDATARDY(x) ((x) << S_ALIN_REQDATARDY)
+#define F_ALIN_REQDATARDY    V_ALIN_REQDATARDY(1U)
+
+#define S_ALIN_REQDATAVLDA    0
+#define V_ALIN_REQDATAVLDA(x) ((x) << S_ALIN_REQDATAVLDA)
+#define F_ALIN_REQDATAVLDA    V_ALIN_REQDATAVLDA(1U)
+
+#define A_PCIE_PDEBUG_REG_0X2F 0x2f
+#define A_PCIE_PDEBUG_REG_0X30 0x30
+
+#define S_RADM_TRGT1_HV_30    25
+#define M_RADM_TRGT1_HV_30    0x7fU
+#define V_RADM_TRGT1_HV_30(x) ((x) << S_RADM_TRGT1_HV_30)
+#define G_RADM_TRGT1_HV_30(x) (((x) >> S_RADM_TRGT1_HV_30) & M_RADM_TRGT1_HV_30)
+
+#define S_PIO_WRCNT    15
+#define M_PIO_WRCNT    0x3ffU
+#define V_PIO_WRCNT(x) ((x) << S_PIO_WRCNT)
+#define G_PIO_WRCNT(x) (((x) >> S_PIO_WRCNT) & M_PIO_WRCNT)
+
+#define S_ALIND_REQWRCNT    12
+#define M_ALIND_REQWRCNT    0x7U
+#define V_ALIND_REQWRCNT(x) ((x) << S_ALIND_REQWRCNT)
+#define G_ALIND_REQWRCNT(x) (((x) >> S_ALIND_REQWRCNT) & M_ALIND_REQWRCNT)
+
+#define S_FID_LKUPWRCNT    9
+#define M_FID_LKUPWRCNT    0x7U
+#define V_FID_LKUPWRCNT(x) ((x) << S_FID_LKUPWRCNT)
+#define G_FID_LKUPWRCNT(x) (((x) >> S_FID_LKUPWRCNT) & M_FID_LKUPWRCNT)
+
+#define S_ALIND_REQRDDATAVLD    8
+#define V_ALIND_REQRDDATAVLD(x) ((x) << S_ALIND_REQRDDATAVLD)
+#define F_ALIND_REQRDDATAVLD    V_ALIND_REQRDDATAVLD(1U)
+
+#define S_ALIND_REQRDDATARDY    7
+#define V_ALIND_REQRDDATARDY(x) ((x) << S_ALIND_REQRDDATARDY)
+#define F_ALIND_REQRDDATARDY    V_ALIND_REQRDDATARDY(1U)
+
+#define S_ALIND_REQRDDATAVLD2    6
+#define V_ALIND_REQRDDATAVLD2(x) ((x) << S_ALIND_REQRDDATAVLD2)
+#define F_ALIND_REQRDDATAVLD2    V_ALIND_REQRDDATAVLD2(1U)
+
+#define S_ALIND_REQWRDATAVLD3    3
+#define M_ALIND_REQWRDATAVLD3    0x7U
+#define V_ALIND_REQWRDATAVLD3(x) ((x) << S_ALIND_REQWRDATAVLD3)
+#define G_ALIND_REQWRDATAVLD3(x) (((x) >> S_ALIND_REQWRDATAVLD3) & M_ALIND_REQWRDATAVLD3)
+
+#define S_ALIND_REQWRDATAVLD4    2
+#define V_ALIND_REQWRDATAVLD4(x) ((x) << S_ALIND_REQWRDATAVLD4)
+#define F_ALIND_REQWRDATAVLD4    V_ALIND_REQWRDATAVLD4(1U)
+
+#define S_ALIND_REQWRDATARDYOPEN    1
+#define V_ALIND_REQWRDATARDYOPEN(x) ((x) << S_ALIND_REQWRDATARDYOPEN)
+#define F_ALIND_REQWRDATARDYOPEN    V_ALIND_REQWRDATARDYOPEN(1U)
+
+#define S_ALIND_REQWRDATAVLD5    0
+#define V_ALIND_REQWRDATAVLD5(x) ((x) << S_ALIND_REQWRDATAVLD5)
+#define F_ALIND_REQWRDATAVLD5    V_ALIND_REQWRDATAVLD5(1U)
+
+#define A_PCIE_PDEBUG_REG_0X31 0x31
+#define A_PCIE_PDEBUG_REG_0X32 0x32
+#define A_PCIE_PDEBUG_REG_0X33 0x33
+#define A_PCIE_PDEBUG_REG_0X34 0x34
+#define A_PCIE_PDEBUG_REG_0X35 0x35
+
+#define S_T5_MPIO_WRVLD    19
+#define M_T5_MPIO_WRVLD    0x1fffU
+#define V_T5_MPIO_WRVLD(x) ((x) << S_T5_MPIO_WRVLD)
+#define G_T5_MPIO_WRVLD(x) (((x) >> S_T5_MPIO_WRVLD) & M_T5_MPIO_WRVLD)
+
+#define S_FID_LKUPRDHDRVLD    18
+#define V_FID_LKUPRDHDRVLD(x) ((x) << S_FID_LKUPRDHDRVLD)
+#define F_FID_LKUPRDHDRVLD    V_FID_LKUPRDHDRVLD(1U)
+
+#define S_FID_LKUPRDHDRVLD2    17
+#define V_FID_LKUPRDHDRVLD2(x) ((x) << S_FID_LKUPRDHDRVLD2)
+#define F_FID_LKUPRDHDRVLD2    V_FID_LKUPRDHDRVLD2(1U)
+
+#define S_FID_LKUPRDHDRVLD3    16
+#define V_FID_LKUPRDHDRVLD3(x) ((x) << S_FID_LKUPRDHDRVLD3)
+#define F_FID_LKUPRDHDRVLD3    V_FID_LKUPRDHDRVLD3(1U)
+
+#define S_FID_LKUPRDHDRVLD4    15
+#define V_FID_LKUPRDHDRVLD4(x) ((x) << S_FID_LKUPRDHDRVLD4)
+#define F_FID_LKUPRDHDRVLD4    V_FID_LKUPRDHDRVLD4(1U)
+
+#define S_FID_LKUPRDHDRVLD5    14
+#define V_FID_LKUPRDHDRVLD5(x) ((x) << S_FID_LKUPRDHDRVLD5)
+#define F_FID_LKUPRDHDRVLD5    V_FID_LKUPRDHDRVLD5(1U)
+
+#define S_FID_LKUPRDHDRVLD6    13
+#define V_FID_LKUPRDHDRVLD6(x) ((x) << S_FID_LKUPRDHDRVLD6)
+#define F_FID_LKUPRDHDRVLD6    V_FID_LKUPRDHDRVLD6(1U)
+
+#define S_FID_LKUPRDHDRVLD7    12
+#define V_FID_LKUPRDHDRVLD7(x) ((x) << S_FID_LKUPRDHDRVLD7)
+#define F_FID_LKUPRDHDRVLD7    V_FID_LKUPRDHDRVLD7(1U)
+
+#define S_FID_LKUPRDHDRVLD8    11
+#define V_FID_LKUPRDHDRVLD8(x) ((x) << S_FID_LKUPRDHDRVLD8)
+#define F_FID_LKUPRDHDRVLD8    V_FID_LKUPRDHDRVLD8(1U)
+
+#define S_FID_LKUPRDHDRVLD9    10
+#define V_FID_LKUPRDHDRVLD9(x) ((x) << S_FID_LKUPRDHDRVLD9)
+#define F_FID_LKUPRDHDRVLD9    V_FID_LKUPRDHDRVLD9(1U)
+
+#define S_FID_LKUPRDHDRVLDA    9
+#define V_FID_LKUPRDHDRVLDA(x) ((x) << S_FID_LKUPRDHDRVLDA)
+#define F_FID_LKUPRDHDRVLDA    V_FID_LKUPRDHDRVLDA(1U)
+
+#define S_FID_LKUPRDHDRVLDB    8
+#define V_FID_LKUPRDHDRVLDB(x) ((x) << S_FID_LKUPRDHDRVLDB)
+#define F_FID_LKUPRDHDRVLDB    V_FID_LKUPRDHDRVLDB(1U)
+
+#define S_FID_LKUPRDHDRVLDC    7
+#define V_FID_LKUPRDHDRVLDC(x) ((x) << S_FID_LKUPRDHDRVLDC)
+#define F_FID_LKUPRDHDRVLDC    V_FID_LKUPRDHDRVLDC(1U)
+
+#define S_MPIO_WRVLD1    6
+#define V_MPIO_WRVLD1(x) ((x) << S_MPIO_WRVLD1)
+#define F_MPIO_WRVLD1    V_MPIO_WRVLD1(1U)
+
+#define S_MPIO_WRVLD2    5
+#define V_MPIO_WRVLD2(x) ((x) << S_MPIO_WRVLD2)
+#define F_MPIO_WRVLD2    V_MPIO_WRVLD2(1U)
+
+#define S_MPIO_WRVLD3    4
+#define V_MPIO_WRVLD3(x) ((x) << S_MPIO_WRVLD3)
+#define F_MPIO_WRVLD3    V_MPIO_WRVLD3(1U)
+
+#define S_MPIO_WRVLD4    0
+#define M_MPIO_WRVLD4    0xfU
+#define V_MPIO_WRVLD4(x) ((x) << S_MPIO_WRVLD4)
+#define G_MPIO_WRVLD4(x) (((x) >> S_MPIO_WRVLD4) & M_MPIO_WRVLD4)
+
+#define A_PCIE_PDEBUG_REG_0X36 0x36
+#define A_PCIE_PDEBUG_REG_0X37 0x37
+#define A_PCIE_PDEBUG_REG_0X38 0x38
+#define A_PCIE_PDEBUG_REG_0X39 0x39
+#define A_PCIE_PDEBUG_REG_0X3A 0x3a
+
+#define S_CLIENT0_TLP_VFUNC_ACTIVE    31
+#define V_CLIENT0_TLP_VFUNC_ACTIVE(x) ((x) << S_CLIENT0_TLP_VFUNC_ACTIVE)
+#define F_CLIENT0_TLP_VFUNC_ACTIVE    V_CLIENT0_TLP_VFUNC_ACTIVE(1U)
+
+#define S_CLIENT0_TLP_VFUNC_NUM    24
+#define M_CLIENT0_TLP_VFUNC_NUM    0x7fU
+#define V_CLIENT0_TLP_VFUNC_NUM(x) ((x) << S_CLIENT0_TLP_VFUNC_NUM)
+#define G_CLIENT0_TLP_VFUNC_NUM(x) (((x) >> S_CLIENT0_TLP_VFUNC_NUM) & M_CLIENT0_TLP_VFUNC_NUM)
+
+#define S_CLIENT0_TLP_FUNC_NUM    21
+#define M_CLIENT0_TLP_FUNC_NUM    0x7U
+#define V_CLIENT0_TLP_FUNC_NUM(x) ((x) << S_CLIENT0_TLP_FUNC_NUM)
+#define G_CLIENT0_TLP_FUNC_NUM(x) (((x) >> S_CLIENT0_TLP_FUNC_NUM) & M_CLIENT0_TLP_FUNC_NUM)
+
+#define S_CLIENT0_TLP_BYTE_EN    13
+#define M_CLIENT0_TLP_BYTE_EN    0xffU
+#define V_CLIENT0_TLP_BYTE_EN(x) ((x) << S_CLIENT0_TLP_BYTE_EN)
+#define G_CLIENT0_TLP_BYTE_EN(x) (((x) >> S_CLIENT0_TLP_BYTE_EN) & M_CLIENT0_TLP_BYTE_EN)
+
+#define S_CLIENT0_TLP_BYTE_LEN    0
+#define M_CLIENT0_TLP_BYTE_LEN    0x1fffU
+#define V_CLIENT0_TLP_BYTE_LEN(x) ((x) << S_CLIENT0_TLP_BYTE_LEN)
+#define G_CLIENT0_TLP_BYTE_LEN(x) (((x) >> S_CLIENT0_TLP_BYTE_LEN) & M_CLIENT0_TLP_BYTE_LEN)
+
+#define A_PCIE_PDEBUG_REG_0X3B 0x3b
+
+#define S_XADM_CLIENT0_HALT    31
+#define V_XADM_CLIENT0_HALT(x) ((x) << S_XADM_CLIENT0_HALT)
+#define F_XADM_CLIENT0_HALT    V_XADM_CLIENT0_HALT(1U)
+
+#define S_CLIENT0_TLP_DV    30
+#define V_CLIENT0_TLP_DV(x) ((x) << S_CLIENT0_TLP_DV)
+#define F_CLIENT0_TLP_DV    V_CLIENT0_TLP_DV(1U)
+
+#define S_CLIENT0_ADDR_ALIGN_EN    29
+#define V_CLIENT0_ADDR_ALIGN_EN(x) ((x) << S_CLIENT0_ADDR_ALIGN_EN)
+#define F_CLIENT0_ADDR_ALIGN_EN    V_CLIENT0_ADDR_ALIGN_EN(1U)
+
+#define S_CLIENT0_CPL_BCM    28
+#define V_CLIENT0_CPL_BCM(x) ((x) << S_CLIENT0_CPL_BCM)
+#define F_CLIENT0_CPL_BCM    V_CLIENT0_CPL_BCM(1U)
+
+#define S_CLIENT0_TLP_EP    27
+#define V_CLIENT0_TLP_EP(x) ((x) << S_CLIENT0_TLP_EP)
+#define F_CLIENT0_TLP_EP    V_CLIENT0_TLP_EP(1U)
+
+#define S_CLIENT0_CPL_STATUS    24
+#define M_CLIENT0_CPL_STATUS    0x7U
+#define V_CLIENT0_CPL_STATUS(x) ((x) << S_CLIENT0_CPL_STATUS)
+#define G_CLIENT0_CPL_STATUS(x) (((x) >> S_CLIENT0_CPL_STATUS) & M_CLIENT0_CPL_STATUS)
+
+#define S_CLIENT0_TLP_TD    23
+#define V_CLIENT0_TLP_TD(x) ((x) << S_CLIENT0_TLP_TD)
+#define F_CLIENT0_TLP_TD    V_CLIENT0_TLP_TD(1U)
+
+#define S_CLIENT0_TLP_TYPE    18
+#define M_CLIENT0_TLP_TYPE    0x1fU
+#define V_CLIENT0_TLP_TYPE(x) ((x) << S_CLIENT0_TLP_TYPE)
+#define G_CLIENT0_TLP_TYPE(x) (((x) >> S_CLIENT0_TLP_TYPE) & M_CLIENT0_TLP_TYPE)
+
+#define S_CLIENT0_TLP_FMT    16
+#define M_CLIENT0_TLP_FMT    0x3U
+#define V_CLIENT0_TLP_FMT(x) ((x) << S_CLIENT0_TLP_FMT)
+#define G_CLIENT0_TLP_FMT(x) (((x) >> S_CLIENT0_TLP_FMT) & M_CLIENT0_TLP_FMT)
+
+#define S_CLIENT0_TLP_BAD_EOT    15
+#define V_CLIENT0_TLP_BAD_EOT(x) ((x) << S_CLIENT0_TLP_BAD_EOT)
+#define F_CLIENT0_TLP_BAD_EOT    V_CLIENT0_TLP_BAD_EOT(1U)
+
+#define S_CLIENT0_TLP_EOT    14
+#define V_CLIENT0_TLP_EOT(x) ((x) << S_CLIENT0_TLP_EOT)
+#define F_CLIENT0_TLP_EOT    V_CLIENT0_TLP_EOT(1U)
+
+#define S_CLIENT0_TLP_ATTR    11
+#define M_CLIENT0_TLP_ATTR    0x7U
+#define V_CLIENT0_TLP_ATTR(x) ((x) << S_CLIENT0_TLP_ATTR)
+#define G_CLIENT0_TLP_ATTR(x) (((x) >> S_CLIENT0_TLP_ATTR) & M_CLIENT0_TLP_ATTR)
+
+#define S_CLIENT0_TLP_TC    8
+#define M_CLIENT0_TLP_TC    0x7U
+#define V_CLIENT0_TLP_TC(x) ((x) << S_CLIENT0_TLP_TC)
+#define G_CLIENT0_TLP_TC(x) (((x) >> S_CLIENT0_TLP_TC) & M_CLIENT0_TLP_TC)
+
+#define S_CLIENT0_TLP_TID    0
+#define M_CLIENT0_TLP_TID    0xffU
+#define V_CLIENT0_TLP_TID(x) ((x) << S_CLIENT0_TLP_TID)
+#define G_CLIENT0_TLP_TID(x) (((x) >> S_CLIENT0_TLP_TID) & M_CLIENT0_TLP_TID)
+
+#define A_PCIE_PDEBUG_REG_0X3C 0x3c
+
+#define S_MEM_RSPRRAVLD    31
+#define V_MEM_RSPRRAVLD(x) ((x) << S_MEM_RSPRRAVLD)
+#define F_MEM_RSPRRAVLD    V_MEM_RSPRRAVLD(1U)
+
+#define S_MEM_RSPRRARDY    30
+#define V_MEM_RSPRRARDY(x) ((x) << S_MEM_RSPRRARDY)
+#define F_MEM_RSPRRARDY    V_MEM_RSPRRARDY(1U)
+
+#define S_PIO_RSPRRAVLD    29
+#define V_PIO_RSPRRAVLD(x) ((x) << S_PIO_RSPRRAVLD)
+#define F_PIO_RSPRRAVLD    V_PIO_RSPRRAVLD(1U)
+
+#define S_PIO_RSPRRARDY    28
+#define V_PIO_RSPRRARDY(x) ((x) << S_PIO_RSPRRARDY)
+#define F_PIO_RSPRRARDY    V_PIO_RSPRRARDY(1U)
+
+#define S_MEM_RSPRDVLD    27
+#define V_MEM_RSPRDVLD(x) ((x) << S_MEM_RSPRDVLD)
+#define F_MEM_RSPRDVLD    V_MEM_RSPRDVLD(1U)
+
+#define S_MEM_RSPRDRRARDY    26
+#define V_MEM_RSPRDRRARDY(x) ((x) << S_MEM_RSPRDRRARDY)
+#define F_MEM_RSPRDRRARDY    V_MEM_RSPRDRRARDY(1U)
+
+#define S_PIO_RSPRDVLD    25
+#define V_PIO_RSPRDVLD(x) ((x) << S_PIO_RSPRDVLD)
+#define F_PIO_RSPRDVLD    V_PIO_RSPRDVLD(1U)
+
+#define S_PIO_RSPRDRRARDY    24
+#define V_PIO_RSPRDRRARDY(x) ((x) << S_PIO_RSPRDRRARDY)
+#define F_PIO_RSPRDRRARDY    V_PIO_RSPRDRRARDY(1U)
+
+#define S_TGT_TAGQ_RDVLD    16
+#define M_TGT_TAGQ_RDVLD    0xffU
+#define V_TGT_TAGQ_RDVLD(x) ((x) << S_TGT_TAGQ_RDVLD)
+#define G_TGT_TAGQ_RDVLD(x) (((x) >> S_TGT_TAGQ_RDVLD) & M_TGT_TAGQ_RDVLD)
+
+#define S_CPLTXNDISABLE    8
+#define M_CPLTXNDISABLE    0xffU
+#define V_CPLTXNDISABLE(x) ((x) << S_CPLTXNDISABLE)
+#define G_CPLTXNDISABLE(x) (((x) >> S_CPLTXNDISABLE) & M_CPLTXNDISABLE)
+
+#define S_CPLTXNDISABLE2    7
+#define V_CPLTXNDISABLE2(x) ((x) << S_CPLTXNDISABLE2)
+#define F_CPLTXNDISABLE2    V_CPLTXNDISABLE2(1U)
+
+#define S_CLIENT0_TLP_HV    0
+#define M_CLIENT0_TLP_HV    0x7fU
+#define V_CLIENT0_TLP_HV(x) ((x) << S_CLIENT0_TLP_HV)
+#define G_CLIENT0_TLP_HV(x) (((x) >> S_CLIENT0_TLP_HV) & M_CLIENT0_TLP_HV)
+
+#define A_PCIE_PDEBUG_REG_0X3D 0x3d
+#define A_PCIE_PDEBUG_REG_0X3E 0x3e
+#define A_PCIE_PDEBUG_REG_0X3F 0x3f
+#define A_PCIE_PDEBUG_REG_0X40 0x40
+#define A_PCIE_PDEBUG_REG_0X41 0x41
+#define A_PCIE_PDEBUG_REG_0X42 0x42
+#define A_PCIE_PDEBUG_REG_0X43 0x43
+#define A_PCIE_PDEBUG_REG_0X44 0x44
+#define A_PCIE_PDEBUG_REG_0X45 0x45
+#define A_PCIE_PDEBUG_REG_0X46 0x46
+#define A_PCIE_PDEBUG_REG_0X47 0x47
+#define A_PCIE_PDEBUG_REG_0X48 0x48
+#define A_PCIE_PDEBUG_REG_0X49 0x49
+#define A_PCIE_PDEBUG_REG_0X4A 0x4a
+#define A_PCIE_PDEBUG_REG_0X4B 0x4b
+#define A_PCIE_PDEBUG_REG_0X4C 0x4c
+#define A_PCIE_PDEBUG_REG_0X4D 0x4d
+#define A_PCIE_PDEBUG_REG_0X4E 0x4e
+#define A_PCIE_PDEBUG_REG_0X4F 0x4f
+#define A_PCIE_PDEBUG_REG_0X50 0x50
+#define A_PCIE_CDEBUG_REG_0X0 0x0
+#define A_PCIE_CDEBUG_REG_0X1 0x1
+#define A_PCIE_CDEBUG_REG_0X2 0x2
+
+#define S_FLR_REQVLD    31
+#define V_FLR_REQVLD(x) ((x) << S_FLR_REQVLD)
+#define F_FLR_REQVLD    V_FLR_REQVLD(1U)
+
+#define S_D_RSPVLD    28
+#define M_D_RSPVLD    0x7U
+#define V_D_RSPVLD(x) ((x) << S_D_RSPVLD)
+#define G_D_RSPVLD(x) (((x) >> S_D_RSPVLD) & M_D_RSPVLD)
+
+#define S_D_RSPVLD2    27
+#define V_D_RSPVLD2(x) ((x) << S_D_RSPVLD2)
+#define F_D_RSPVLD2    V_D_RSPVLD2(1U)
+
+#define S_D_RSPVLD3    26
+#define V_D_RSPVLD3(x) ((x) << S_D_RSPVLD3)
+#define F_D_RSPVLD3    V_D_RSPVLD3(1U)
+
+#define S_D_RSPVLD4    25
+#define V_D_RSPVLD4(x) ((x) << S_D_RSPVLD4)
+#define F_D_RSPVLD4    V_D_RSPVLD4(1U)
+
+#define S_D_RSPVLD5    24
+#define V_D_RSPVLD5(x) ((x) << S_D_RSPVLD5)
+#define F_D_RSPVLD5    V_D_RSPVLD5(1U)
+
+#define S_D_RSPVLD6    20
+#define M_D_RSPVLD6    0xfU
+#define V_D_RSPVLD6(x) ((x) << S_D_RSPVLD6)
+#define G_D_RSPVLD6(x) (((x) >> S_D_RSPVLD6) & M_D_RSPVLD6)
+
+#define S_D_RSPAFULL    16
+#define M_D_RSPAFULL    0xfU
+#define V_D_RSPAFULL(x) ((x) << S_D_RSPAFULL)
+#define G_D_RSPAFULL(x) (((x) >> S_D_RSPAFULL) & M_D_RSPAFULL)
+
+#define S_D_RDREQVLD    12
+#define M_D_RDREQVLD    0xfU
+#define V_D_RDREQVLD(x) ((x) << S_D_RDREQVLD)
+#define G_D_RDREQVLD(x) (((x) >> S_D_RDREQVLD) & M_D_RDREQVLD)
+
+#define S_D_RDREQAFULL    8
+#define M_D_RDREQAFULL    0xfU
+#define V_D_RDREQAFULL(x) ((x) << S_D_RDREQAFULL)
+#define G_D_RDREQAFULL(x) (((x) >> S_D_RDREQAFULL) & M_D_RDREQAFULL)
+
+#define S_D_WRREQVLD    4
+#define M_D_WRREQVLD    0xfU
+#define V_D_WRREQVLD(x) ((x) << S_D_WRREQVLD)
+#define G_D_WRREQVLD(x) (((x) >> S_D_WRREQVLD) & M_D_WRREQVLD)
+
+#define S_D_WRREQAFULL    0
+#define M_D_WRREQAFULL    0xfU
+#define V_D_WRREQAFULL(x) ((x) << S_D_WRREQAFULL)
+#define G_D_WRREQAFULL(x) (((x) >> S_D_WRREQAFULL) & M_D_WRREQAFULL)
+
+#define A_PCIE_CDEBUG_REG_0X3 0x3
+
+#define S_C_REQVLD    19
+#define M_C_REQVLD    0x1fffU
+#define V_C_REQVLD(x) ((x) << S_C_REQVLD)
+#define G_C_REQVLD(x) (((x) >> S_C_REQVLD) & M_C_REQVLD)
+
+#define S_C_RSPVLD2    16
+#define M_C_RSPVLD2    0x7U
+#define V_C_RSPVLD2(x) ((x) << S_C_RSPVLD2)
+#define G_C_RSPVLD2(x) (((x) >> S_C_RSPVLD2) & M_C_RSPVLD2)
+
+#define S_C_RSPVLD3    15
+#define V_C_RSPVLD3(x) ((x) << S_C_RSPVLD3)
+#define F_C_RSPVLD3    V_C_RSPVLD3(1U)
+
+#define S_C_RSPVLD4    14
+#define V_C_RSPVLD4(x) ((x) << S_C_RSPVLD4)
+#define F_C_RSPVLD4    V_C_RSPVLD4(1U)
+
+#define S_C_RSPVLD5    13
+#define V_C_RSPVLD5(x) ((x) << S_C_RSPVLD5)
+#define F_C_RSPVLD5    V_C_RSPVLD5(1U)
+
+#define S_C_RSPVLD6    12
+#define V_C_RSPVLD6(x) ((x) << S_C_RSPVLD6)
+#define F_C_RSPVLD6    V_C_RSPVLD6(1U)
+
+#define S_C_RSPVLD7    9
+#define M_C_RSPVLD7    0x7U
+#define V_C_RSPVLD7(x) ((x) << S_C_RSPVLD7)
+#define G_C_RSPVLD7(x) (((x) >> S_C_RSPVLD7) & M_C_RSPVLD7)
+
+#define S_C_RSPAFULL    6
+#define M_C_RSPAFULL    0x7U
+#define V_C_RSPAFULL(x) ((x) << S_C_RSPAFULL)
+#define G_C_RSPAFULL(x) (((x) >> S_C_RSPAFULL) & M_C_RSPAFULL)
+
+#define S_C_REQVLD8    3
+#define M_C_REQVLD8    0x7U
+#define V_C_REQVLD8(x) ((x) << S_C_REQVLD8)
+#define G_C_REQVLD8(x) (((x) >> S_C_REQVLD8) & M_C_REQVLD8)
+
+#define S_C_REQAFULL    0
+#define M_C_REQAFULL    0x7U
+#define V_C_REQAFULL(x) ((x) << S_C_REQAFULL)
+#define G_C_REQAFULL(x) (((x) >> S_C_REQAFULL) & M_C_REQAFULL)
+
+#define A_PCIE_CDEBUG_REG_0X4 0x4
+
+#define S_H_REQVLD    7
+#define M_H_REQVLD    0x1ffffffU
+#define V_H_REQVLD(x) ((x) << S_H_REQVLD)
+#define G_H_REQVLD(x) (((x) >> S_H_REQVLD) & M_H_REQVLD)
+
+#define S_H_RSPVLD    6
+#define V_H_RSPVLD(x) ((x) << S_H_RSPVLD)
+#define F_H_RSPVLD    V_H_RSPVLD(1U)
+
+#define S_H_RSPVLD2    5
+#define V_H_RSPVLD2(x) ((x) << S_H_RSPVLD2)
+#define F_H_RSPVLD2    V_H_RSPVLD2(1U)
+
+#define S_H_RSPVLD3    4
+#define V_H_RSPVLD3(x) ((x) << S_H_RSPVLD3)
+#define F_H_RSPVLD3    V_H_RSPVLD3(1U)
+
+#define S_H_RSPVLD4    3
+#define V_H_RSPVLD4(x) ((x) << S_H_RSPVLD4)
+#define F_H_RSPVLD4    V_H_RSPVLD4(1U)
+
+#define S_H_RSPAFULL    2
+#define V_H_RSPAFULL(x) ((x) << S_H_RSPAFULL)
+#define F_H_RSPAFULL    V_H_RSPAFULL(1U)
+
+#define S_H_REQVLD2    1
+#define V_H_REQVLD2(x) ((x) << S_H_REQVLD2)
+#define F_H_REQVLD2    V_H_REQVLD2(1U)
+
+#define S_H_REQAFULL    0
+#define V_H_REQAFULL(x) ((x) << S_H_REQAFULL)
+#define F_H_REQAFULL    V_H_REQAFULL(1U)
+
+#define A_PCIE_CDEBUG_REG_0X5 0x5
+
+#define S_ER_RSPVLD    16
+#define M_ER_RSPVLD    0xffffU
+#define V_ER_RSPVLD(x) ((x) << S_ER_RSPVLD)
+#define G_ER_RSPVLD(x) (((x) >> S_ER_RSPVLD) & M_ER_RSPVLD)
+
+#define S_ER_REQVLD2    5
+#define M_ER_REQVLD2    0x7ffU
+#define V_ER_REQVLD2(x) ((x) << S_ER_REQVLD2)
+#define G_ER_REQVLD2(x) (((x) >> S_ER_REQVLD2) & M_ER_REQVLD2)
+
+#define S_ER_REQVLD3    2
+#define M_ER_REQVLD3    0x7U
+#define V_ER_REQVLD3(x) ((x) << S_ER_REQVLD3)
+#define G_ER_REQVLD3(x) (((x) >> S_ER_REQVLD3) & M_ER_REQVLD3)
+
+#define S_ER_RSPVLD4    1
+#define V_ER_RSPVLD4(x) ((x) << S_ER_RSPVLD4)
+#define F_ER_RSPVLD4    V_ER_RSPVLD4(1U)
+
+#define S_ER_REQVLD5    0
+#define V_ER_REQVLD5(x) ((x) << S_ER_REQVLD5)
+#define F_ER_REQVLD5    V_ER_REQVLD5(1U)
+
+#define A_PCIE_CDEBUG_REG_0X6 0x6
+
+#define S_PL_BAR2_REQVLD    4
+#define M_PL_BAR2_REQVLD    0xfffffffU
+#define V_PL_BAR2_REQVLD(x) ((x) << S_PL_BAR2_REQVLD)
+#define G_PL_BAR2_REQVLD(x) (((x) >> S_PL_BAR2_REQVLD) & M_PL_BAR2_REQVLD)
+
+#define S_PL_BAR2_REQVLD2    3
+#define V_PL_BAR2_REQVLD2(x) ((x) << S_PL_BAR2_REQVLD2)
+#define F_PL_BAR2_REQVLD2    V_PL_BAR2_REQVLD2(1U)
+
+#define S_PL_BAR2_REQVLDE    2
+#define V_PL_BAR2_REQVLDE(x) ((x) << S_PL_BAR2_REQVLDE)
+#define F_PL_BAR2_REQVLDE    V_PL_BAR2_REQVLDE(1U)
+
+#define S_PL_BAR2_REQFULL    1
+#define V_PL_BAR2_REQFULL(x) ((x) << S_PL_BAR2_REQFULL)
+#define F_PL_BAR2_REQFULL    V_PL_BAR2_REQFULL(1U)
+
+#define S_PL_BAR2_REQVLD4    0
+#define V_PL_BAR2_REQVLD4(x) ((x) << S_PL_BAR2_REQVLD4)
+#define F_PL_BAR2_REQVLD4    V_PL_BAR2_REQVLD4(1U)
+
+#define A_PCIE_CDEBUG_REG_0X7 0x7
+#define A_PCIE_CDEBUG_REG_0X8 0x8
+#define A_PCIE_CDEBUG_REG_0X9 0x9
+#define A_PCIE_CDEBUG_REG_0XA 0xa
+
+#define S_VPD_RSPVLD    20
+#define M_VPD_RSPVLD    0xfffU
+#define V_VPD_RSPVLD(x) ((x) << S_VPD_RSPVLD)
+#define G_VPD_RSPVLD(x) (((x) >> S_VPD_RSPVLD) & M_VPD_RSPVLD)
+
+#define S_VPD_REQVLD2    9
+#define M_VPD_REQVLD2    0x7ffU
+#define V_VPD_REQVLD2(x) ((x) << S_VPD_REQVLD2)
+#define G_VPD_REQVLD2(x) (((x) >> S_VPD_REQVLD2) & M_VPD_REQVLD2)
+
+#define S_VPD_REQVLD3    6
+#define M_VPD_REQVLD3    0x7U
+#define V_VPD_REQVLD3(x) ((x) << S_VPD_REQVLD3)
+#define G_VPD_REQVLD3(x) (((x) >> S_VPD_REQVLD3) & M_VPD_REQVLD3)
+
+#define S_VPD_REQVLD4    5
+#define V_VPD_REQVLD4(x) ((x) << S_VPD_REQVLD4)
+#define F_VPD_REQVLD4    V_VPD_REQVLD4(1U)
+
+#define S_VPD_REQVLD5    3
+#define M_VPD_REQVLD5    0x3U
+#define V_VPD_REQVLD5(x) ((x) << S_VPD_REQVLD5)
+#define G_VPD_REQVLD5(x) (((x) >> S_VPD_REQVLD5) & M_VPD_REQVLD5)
+
+#define S_VPD_RSPVLD2    2
+#define V_VPD_RSPVLD2(x) ((x) << S_VPD_RSPVLD2)
+#define F_VPD_RSPVLD2    V_VPD_RSPVLD2(1U)
+
+#define S_VPD_RSPVLD3    1
+#define V_VPD_RSPVLD3(x) ((x) << S_VPD_RSPVLD3)
+#define F_VPD_RSPVLD3    V_VPD_RSPVLD3(1U)
+
+#define S_VPD_REQVLD6    0
+#define V_VPD_REQVLD6(x) ((x) << S_VPD_REQVLD6)
+#define F_VPD_REQVLD6    V_VPD_REQVLD6(1U)
+
+#define A_PCIE_CDEBUG_REG_0XB 0xb
+
+#define S_MA_REQDATAVLD    28
+#define M_MA_REQDATAVLD    0xfU
+#define V_MA_REQDATAVLD(x) ((x) << S_MA_REQDATAVLD)
+#define G_MA_REQDATAVLD(x) (((x) >> S_MA_REQDATAVLD) & M_MA_REQDATAVLD)
+
+#define S_MA_REQADDRVLD    27
+#define V_MA_REQADDRVLD(x) ((x) << S_MA_REQADDRVLD)
+#define F_MA_REQADDRVLD    V_MA_REQADDRVLD(1U)
+
+#define S_MA_REQADDRVLD2    26
+#define V_MA_REQADDRVLD2(x) ((x) << S_MA_REQADDRVLD2)
+#define F_MA_REQADDRVLD2    V_MA_REQADDRVLD2(1U)
+
+#define S_MA_RSPDATAVLD2    22
+#define M_MA_RSPDATAVLD2    0xfU
+#define V_MA_RSPDATAVLD2(x) ((x) << S_MA_RSPDATAVLD2)
+#define G_MA_RSPDATAVLD2(x) (((x) >> S_MA_RSPDATAVLD2) & M_MA_RSPDATAVLD2)
+
+#define S_MA_REQADDRVLD3    20
+#define M_MA_REQADDRVLD3    0x3U
+#define V_MA_REQADDRVLD3(x) ((x) << S_MA_REQADDRVLD3)
+#define G_MA_REQADDRVLD3(x) (((x) >> S_MA_REQADDRVLD3) & M_MA_REQADDRVLD3)
+
+#define S_MA_REQADDRVLD4    4
+#define M_MA_REQADDRVLD4    0xffffU
+#define V_MA_REQADDRVLD4(x) ((x) << S_MA_REQADDRVLD4)
+#define G_MA_REQADDRVLD4(x) (((x) >> S_MA_REQADDRVLD4) & M_MA_REQADDRVLD4)
+
+#define S_MA_REQADDRVLD5    3
+#define V_MA_REQADDRVLD5(x) ((x) << S_MA_REQADDRVLD5)
+#define F_MA_REQADDRVLD5    V_MA_REQADDRVLD5(1U)
+
+#define S_MA_REQADDRVLD6    2
+#define V_MA_REQADDRVLD6(x) ((x) << S_MA_REQADDRVLD6)
+#define F_MA_REQADDRVLD6    V_MA_REQADDRVLD6(1U)
+
+#define S_MA_REQADDRRDY    1
+#define V_MA_REQADDRRDY(x) ((x) << S_MA_REQADDRRDY)
+#define F_MA_REQADDRRDY    V_MA_REQADDRRDY(1U)
+
+#define S_MA_REQADDRVLD7    0
+#define V_MA_REQADDRVLD7(x) ((x) << S_MA_REQADDRVLD7)
+#define F_MA_REQADDRVLD7    V_MA_REQADDRVLD7(1U)
+
+#define A_PCIE_CDEBUG_REG_0XC 0xc
+#define A_PCIE_CDEBUG_REG_0XD 0xd
+#define A_PCIE_CDEBUG_REG_0XE 0xe
+#define A_PCIE_CDEBUG_REG_0XF 0xf
+#define A_PCIE_CDEBUG_REG_0X10 0x10
+#define A_PCIE_CDEBUG_REG_0X11 0x11
+#define A_PCIE_CDEBUG_REG_0X12 0x12
+#define A_PCIE_CDEBUG_REG_0X13 0x13
+#define A_PCIE_CDEBUG_REG_0X14 0x14
+#define A_PCIE_CDEBUG_REG_0X15 0x15
+
+#define S_PLM_REQVLD    19
+#define M_PLM_REQVLD    0x1fffU
+#define V_PLM_REQVLD(x) ((x) << S_PLM_REQVLD)
+#define G_PLM_REQVLD(x) (((x) >> S_PLM_REQVLD) & M_PLM_REQVLD)
+
+#define S_PLM_REQVLD2    18
+#define V_PLM_REQVLD2(x) ((x) << S_PLM_REQVLD2)
+#define F_PLM_REQVLD2    V_PLM_REQVLD2(1U)
+
+#define S_PLM_RSPVLD3    17
+#define V_PLM_RSPVLD3(x) ((x) << S_PLM_RSPVLD3)
+#define F_PLM_RSPVLD3    V_PLM_RSPVLD3(1U)
+
+#define S_PLM_REQVLD4    16
+#define V_PLM_REQVLD4(x) ((x) << S_PLM_REQVLD4)
+#define F_PLM_REQVLD4    V_PLM_REQVLD4(1U)
+
+#define S_PLM_REQVLD5    15
+#define V_PLM_REQVLD5(x) ((x) << S_PLM_REQVLD5)
+#define F_PLM_REQVLD5    V_PLM_REQVLD5(1U)
+
+#define S_PLM_REQVLD6    14
+#define V_PLM_REQVLD6(x) ((x) << S_PLM_REQVLD6)
+#define F_PLM_REQVLD6    V_PLM_REQVLD6(1U)
+
+#define S_PLM_REQVLD7    13
+#define V_PLM_REQVLD7(x) ((x) << S_PLM_REQVLD7)
+#define F_PLM_REQVLD7    V_PLM_REQVLD7(1U)
+
+#define S_PLM_REQVLD8    12
+#define V_PLM_REQVLD8(x) ((x) << S_PLM_REQVLD8)
+#define F_PLM_REQVLD8    V_PLM_REQVLD8(1U)
+
+#define S_PLM_REQVLD9    4
+#define M_PLM_REQVLD9    0xffU
+#define V_PLM_REQVLD9(x) ((x) << S_PLM_REQVLD9)
+#define G_PLM_REQVLD9(x) (((x) >> S_PLM_REQVLD9) & M_PLM_REQVLD9)
+
+#define S_PLM_REQVLDA    1
+#define M_PLM_REQVLDA    0x7U
+#define V_PLM_REQVLDA(x) ((x) << S_PLM_REQVLDA)
+#define G_PLM_REQVLDA(x) (((x) >> S_PLM_REQVLDA) & M_PLM_REQVLDA)
+
+#define S_PLM_REQVLDB    0
+#define V_PLM_REQVLDB(x) ((x) << S_PLM_REQVLDB)
+#define F_PLM_REQVLDB    V_PLM_REQVLDB(1U)
+
+#define A_PCIE_CDEBUG_REG_0X16 0x16
+#define A_PCIE_CDEBUG_REG_0X17 0x17
+#define A_PCIE_CDEBUG_REG_0X18 0x18
+#define A_PCIE_CDEBUG_REG_0X19 0x19
+#define A_PCIE_CDEBUG_REG_0X1A 0x1a
+#define A_PCIE_CDEBUG_REG_0X1B 0x1b
+#define A_PCIE_CDEBUG_REG_0X1C 0x1c
+#define A_PCIE_CDEBUG_REG_0X1D 0x1d
+#define A_PCIE_CDEBUG_REG_0X1E 0x1e
+#define A_PCIE_CDEBUG_REG_0X1F 0x1f
+#define A_PCIE_CDEBUG_REG_0X20 0x20
+#define A_PCIE_CDEBUG_REG_0X21 0x21
+#define A_PCIE_CDEBUG_REG_0X22 0x22
+#define A_PCIE_CDEBUG_REG_0X23 0x23
+#define A_PCIE_CDEBUG_REG_0X24 0x24
+#define A_PCIE_CDEBUG_REG_0X25 0x25
+#define A_PCIE_CDEBUG_REG_0X26 0x26
+#define A_PCIE_CDEBUG_REG_0X27 0x27
+#define A_PCIE_CDEBUG_REG_0X28 0x28
+#define A_PCIE_CDEBUG_REG_0X29 0x29
+#define A_PCIE_CDEBUG_REG_0X2A 0x2a
+#define A_PCIE_CDEBUG_REG_0X2B 0x2b
+#define A_PCIE_CDEBUG_REG_0X2C 0x2c
+#define A_PCIE_CDEBUG_REG_0X2D 0x2d
+#define A_PCIE_CDEBUG_REG_0X2E 0x2e
+#define A_PCIE_CDEBUG_REG_0X2F 0x2f
+#define A_PCIE_CDEBUG_REG_0X30 0x30
+#define A_PCIE_CDEBUG_REG_0X31 0x31
+#define A_PCIE_CDEBUG_REG_0X32 0x32
+#define A_PCIE_CDEBUG_REG_0X33 0x33
+#define A_PCIE_CDEBUG_REG_0X34 0x34
+#define A_PCIE_CDEBUG_REG_0X35 0x35
+#define A_PCIE_CDEBUG_REG_0X36 0x36
+#define A_PCIE_CDEBUG_REG_0X37 0x37
+
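(Aside, not part of the committed header: the generated names above follow Chelsio's fixed bitfield-macro convention. S_<FIELD> is the bit offset within the 32-bit register word, M_<FIELD> is the unshifted field mask, V_<FIELD>(x) shifts a value into position, F_<FIELD> is the shifted mask of a single-bit field, and G_<FIELD>(x) extracts the field from a raw register word. A minimal standalone sketch, assuming this header is in scope and operating on a caller-supplied sample of PCIE_PDEBUG register 0x1a:

/*
 * Illustrative only.  Decodes a few of the A_PCIE_PDEBUG_REG_0X1A
 * fields defined above from a raw 32-bit debug-register sample.
 */
#include <stdint.h>
#include <stdio.h>

static void
decode_pdebug_0x1a(uint32_t val)
{
	/* F_ macros are pre-shifted single-bit masks; test directly. */
	if (val & F_PIPE0_RESET_N)
		printf("PIPE0 out of reset\n");
	/* G_ macros shift down and mask, yielding the field value. */
	printf("powerdown state: %u\n", G_PIPE0_POWERDOWN(val));
	printf("phystatus:       0x%02x\n", G_PHY_MAC_PHYSTATUS(val));
}

The same pattern holds for every register in this file, which is why the definitions read so uniformly.)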
 /* registers for module DBG */
 #define DBG_BASE_ADDR 0x6000
 
@@ -3490,6 +11534,11 @@
 
 #define A_DBG_DBG1_CFG 0x6008
 #define A_DBG_DBG1_EN 0x600c
+
+#define S_CLK_EN_ON_DBG1    20
+#define V_CLK_EN_ON_DBG1(x) ((x) << S_CLK_EN_ON_DBG1)
+#define F_CLK_EN_ON_DBG1    V_CLK_EN_ON_DBG1(1U)
+
 #define A_DBG_GPIO_EN 0x6010
 
 #define S_GPIO15_OEN    31
@@ -3856,6 +11905,22 @@
 #define V_GPIO0(x) ((x) << S_GPIO0)
 #define F_GPIO0    V_GPIO0(1U)
 
+#define S_GPIO19    29
+#define V_GPIO19(x) ((x) << S_GPIO19)
+#define F_GPIO19    V_GPIO19(1U)
+
+#define S_GPIO18    28
+#define V_GPIO18(x) ((x) << S_GPIO18)
+#define F_GPIO18    V_GPIO18(1U)
+
+#define S_GPIO17    27
+#define V_GPIO17(x) ((x) << S_GPIO17)
+#define F_GPIO17    V_GPIO17(1U)
+
+#define S_GPIO16    26
+#define V_GPIO16(x) ((x) << S_GPIO16)
+#define F_GPIO16    V_GPIO16(1U)
+
 #define A_DBG_INT_CAUSE 0x601c
 
 #define S_IBM_FDL_FAIL_INT_CAUSE    25
@@ -4029,6 +12094,22 @@
 #define V_GPIO0_ACT_LOW(x) ((x) << S_GPIO0_ACT_LOW)
 #define F_GPIO0_ACT_LOW    V_GPIO0_ACT_LOW(1U)
 
+#define S_GPIO19_ACT_LOW    25
+#define V_GPIO19_ACT_LOW(x) ((x) << S_GPIO19_ACT_LOW)
+#define F_GPIO19_ACT_LOW    V_GPIO19_ACT_LOW(1U)
+
+#define S_GPIO18_ACT_LOW    24
+#define V_GPIO18_ACT_LOW(x) ((x) << S_GPIO18_ACT_LOW)
+#define F_GPIO18_ACT_LOW    V_GPIO18_ACT_LOW(1U)
+
+#define S_GPIO17_ACT_LOW    23
+#define V_GPIO17_ACT_LOW(x) ((x) << S_GPIO17_ACT_LOW)
+#define F_GPIO17_ACT_LOW    V_GPIO17_ACT_LOW(1U)
+
+#define S_GPIO16_ACT_LOW    22
+#define V_GPIO16_ACT_LOW(x) ((x) << S_GPIO16_ACT_LOW)
+#define F_GPIO16_ACT_LOW    V_GPIO16_ACT_LOW(1U)
+
 #define A_DBG_EFUSE_BYTE0_3 0x6034
 #define A_DBG_EFUSE_BYTE4_7 0x6038
 #define A_DBG_EFUSE_BYTE8_11 0x603c
@@ -4294,6 +12375,16 @@
 #define V_KR_OCLK_MUXSEL(x) ((x) << S_KR_OCLK_MUXSEL)
 #define G_KR_OCLK_MUXSEL(x) (((x) >> S_KR_OCLK_MUXSEL) & M_KR_OCLK_MUXSEL)
 
+#define S_T5_P_OCLK_MUXSEL    13
+#define M_T5_P_OCLK_MUXSEL    0xfU
+#define V_T5_P_OCLK_MUXSEL(x) ((x) << S_T5_P_OCLK_MUXSEL)
+#define G_T5_P_OCLK_MUXSEL(x) (((x) >> S_T5_P_OCLK_MUXSEL) & M_T5_P_OCLK_MUXSEL)
+
+#define S_T6_P_OCLK_MUXSEL    13
+#define M_T6_P_OCLK_MUXSEL    0xfU
+#define V_T6_P_OCLK_MUXSEL(x) ((x) << S_T6_P_OCLK_MUXSEL)
+#define G_T6_P_OCLK_MUXSEL(x) (((x) >> S_T6_P_OCLK_MUXSEL) & M_T6_P_OCLK_MUXSEL)
+
 #define A_DBG_TRACE0_CONF_COMPREG0 0x6060
 #define A_DBG_TRACE0_CONF_COMPREG1 0x6064
 #define A_DBG_TRACE1_CONF_COMPREG0 0x6068
@@ -4367,6 +12458,26 @@
 #define V_RD_EN0(x) ((x) << S_RD_EN0)
 #define F_RD_EN0    V_RD_EN0(1U)
 
+#define S_T5_RD_ADDR1    11
+#define M_T5_RD_ADDR1    0x1ffU
+#define V_T5_RD_ADDR1(x) ((x) << S_T5_RD_ADDR1)
+#define G_T5_RD_ADDR1(x) (((x) >> S_T5_RD_ADDR1) & M_T5_RD_ADDR1)
+
+#define S_T5_RD_ADDR0    2
+#define M_T5_RD_ADDR0    0x1ffU
+#define V_T5_RD_ADDR0(x) ((x) << S_T5_RD_ADDR0)
+#define G_T5_RD_ADDR0(x) (((x) >> S_T5_RD_ADDR0) & M_T5_RD_ADDR0)
+
+#define S_T6_RD_ADDR1    11
+#define M_T6_RD_ADDR1    0x1ffU
+#define V_T6_RD_ADDR1(x) ((x) << S_T6_RD_ADDR1)
+#define G_T6_RD_ADDR1(x) (((x) >> S_T6_RD_ADDR1) & M_T6_RD_ADDR1)
+
+#define S_T6_RD_ADDR0    2
+#define M_T6_RD_ADDR0    0x1ffU
+#define V_T6_RD_ADDR0(x) ((x) << S_T6_RD_ADDR0)
+#define G_T6_RD_ADDR0(x) (((x) >> S_T6_RD_ADDR0) & M_T6_RD_ADDR0)
+
 #define A_DBG_TRACE_WRADDR 0x6090
 
 #define S_WR_POINTER_ADDR1    16
@@ -4379,8 +12490,1030 @@
 #define V_WR_POINTER_ADDR0(x) ((x) << S_WR_POINTER_ADDR0)
 #define G_WR_POINTER_ADDR0(x) (((x) >> S_WR_POINTER_ADDR0) & M_WR_POINTER_ADDR0)
 
+#define S_T5_WR_POINTER_ADDR1    16
+#define M_T5_WR_POINTER_ADDR1    0x1ffU
+#define V_T5_WR_POINTER_ADDR1(x) ((x) << S_T5_WR_POINTER_ADDR1)
+#define G_T5_WR_POINTER_ADDR1(x) (((x) >> S_T5_WR_POINTER_ADDR1) & M_T5_WR_POINTER_ADDR1)
+
+#define S_T5_WR_POINTER_ADDR0    0
+#define M_T5_WR_POINTER_ADDR0    0x1ffU
+#define V_T5_WR_POINTER_ADDR0(x) ((x) << S_T5_WR_POINTER_ADDR0)
+#define G_T5_WR_POINTER_ADDR0(x) (((x) >> S_T5_WR_POINTER_ADDR0) & M_T5_WR_POINTER_ADDR0)
+
+#define S_T6_WR_POINTER_ADDR1    16
+#define M_T6_WR_POINTER_ADDR1    0x1ffU
+#define V_T6_WR_POINTER_ADDR1(x) ((x) << S_T6_WR_POINTER_ADDR1)
+#define G_T6_WR_POINTER_ADDR1(x) (((x) >> S_T6_WR_POINTER_ADDR1) & M_T6_WR_POINTER_ADDR1)
+
+#define S_T6_WR_POINTER_ADDR0    0
+#define M_T6_WR_POINTER_ADDR0    0x1ffU
+#define V_T6_WR_POINTER_ADDR0(x) ((x) << S_T6_WR_POINTER_ADDR0)
+#define G_T6_WR_POINTER_ADDR0(x) (((x) >> S_T6_WR_POINTER_ADDR0) & M_T6_WR_POINTER_ADDR0)
+
 #define A_DBG_TRACE0_DATA_OUT 0x6094
 #define A_DBG_TRACE1_DATA_OUT 0x6098
+#define A_DBG_FUSE_SENSE_DONE 0x609c
+
+#define S_STATIC_JTAG_VERSIONNR    5
+#define M_STATIC_JTAG_VERSIONNR    0xfU
+#define V_STATIC_JTAG_VERSIONNR(x) ((x) << S_STATIC_JTAG_VERSIONNR)
+#define G_STATIC_JTAG_VERSIONNR(x) (((x) >> S_STATIC_JTAG_VERSIONNR) & M_STATIC_JTAG_VERSIONNR)
+
+#define S_UNQ0    1
+#define M_UNQ0    0xfU
+#define V_UNQ0(x) ((x) << S_UNQ0)
+#define G_UNQ0(x) (((x) >> S_UNQ0) & M_UNQ0)
+
+#define S_FUSE_DONE_SENSE    0
+#define V_FUSE_DONE_SENSE(x) ((x) << S_FUSE_DONE_SENSE)
+#define F_FUSE_DONE_SENSE    V_FUSE_DONE_SENSE(1U)
+
+#define A_DBG_TVSENSE_EN 0x60a8
+
+#define S_MCIMPED1_OUT    29
+#define V_MCIMPED1_OUT(x) ((x) << S_MCIMPED1_OUT)
+#define F_MCIMPED1_OUT    V_MCIMPED1_OUT(1U)
+
+#define S_MCIMPED2_OUT    28
+#define V_MCIMPED2_OUT(x) ((x) << S_MCIMPED2_OUT)
+#define F_MCIMPED2_OUT    V_MCIMPED2_OUT(1U)
+
+#define S_TVSENSE_SNSOUT    17
+#define M_TVSENSE_SNSOUT    0x1ffU
+#define V_TVSENSE_SNSOUT(x) ((x) << S_TVSENSE_SNSOUT)
+#define G_TVSENSE_SNSOUT(x) (((x) >> S_TVSENSE_SNSOUT) & M_TVSENSE_SNSOUT)
+
+#define S_TVSENSE_OUTPUTVALID    16
+#define V_TVSENSE_OUTPUTVALID(x) ((x) << S_TVSENSE_OUTPUTVALID)
+#define F_TVSENSE_OUTPUTVALID    V_TVSENSE_OUTPUTVALID(1U)
+
+#define S_TVSENSE_SLEEP    10
+#define V_TVSENSE_SLEEP(x) ((x) << S_TVSENSE_SLEEP)
+#define F_TVSENSE_SLEEP    V_TVSENSE_SLEEP(1U)
+
+#define S_TVSENSE_SENSV    9
+#define V_TVSENSE_SENSV(x) ((x) << S_TVSENSE_SENSV)
+#define F_TVSENSE_SENSV    V_TVSENSE_SENSV(1U)
+
+#define S_TVSENSE_RST    8
+#define V_TVSENSE_RST(x) ((x) << S_TVSENSE_RST)
+#define F_TVSENSE_RST    V_TVSENSE_RST(1U)
+
+#define S_TVSENSE_RATIO    0
+#define M_TVSENSE_RATIO    0xffU
+#define V_TVSENSE_RATIO(x) ((x) << S_TVSENSE_RATIO)
+#define G_TVSENSE_RATIO(x) (((x) >> S_TVSENSE_RATIO) & M_TVSENSE_RATIO)
+
+#define S_T6_TVSENSE_SLEEP    11
+#define V_T6_TVSENSE_SLEEP(x) ((x) << S_T6_TVSENSE_SLEEP)
+#define F_T6_TVSENSE_SLEEP    V_T6_TVSENSE_SLEEP(1U)
+
+#define S_T6_TVSENSE_SENSV    10
+#define V_T6_TVSENSE_SENSV(x) ((x) << S_T6_TVSENSE_SENSV)
+#define F_T6_TVSENSE_SENSV    V_T6_TVSENSE_SENSV(1U)
+
+#define S_T6_TVSENSE_RST    9
+#define V_T6_TVSENSE_RST(x) ((x) << S_T6_TVSENSE_RST)
+#define F_T6_TVSENSE_RST    V_T6_TVSENSE_RST(1U)
+
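
The TVSENSE fields above mix one-bit flags with multi-bit values, which is where the G_ accessors earn their keep. A hedged sketch of the usual read pattern, assuming the driver's t4_read_reg() accessor and an adapter softc sc: poll until the sensor output is valid, then pull out the 9-bit reading:

    uint32_t v;
    unsigned int snsout;

    do {
            v = t4_read_reg(sc, A_DBG_TVSENSE_EN);
    } while (!(v & F_TVSENSE_OUTPUTVALID));

    snsout = G_TVSENSE_SNSOUT(v);   /* 9-bit sense output, bits 25:17 */
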
+#define A_DBG_CUST_EFUSE_OUT_EN 0x60ac
+#define A_DBG_CUST_EFUSE_SEL1_EN 0x60b0
+#define A_DBG_CUST_EFUSE_SEL2_EN 0x60b4
+
+#define S_DBG_FEENABLE    29
+#define V_DBG_FEENABLE(x) ((x) << S_DBG_FEENABLE)
+#define F_DBG_FEENABLE    V_DBG_FEENABLE(1U)
+
+#define S_DBG_FEF    23
+#define M_DBG_FEF    0x3fU
+#define V_DBG_FEF(x) ((x) << S_DBG_FEF)
+#define G_DBG_FEF(x) (((x) >> S_DBG_FEF) & M_DBG_FEF)
+
+#define S_DBG_FEMIMICN    22
+#define V_DBG_FEMIMICN(x) ((x) << S_DBG_FEMIMICN)
+#define F_DBG_FEMIMICN    V_DBG_FEMIMICN(1U)
+
+#define S_DBG_FEGATEC    21
+#define V_DBG_FEGATEC(x) ((x) << S_DBG_FEGATEC)
+#define F_DBG_FEGATEC    V_DBG_FEGATEC(1U)
+
+#define S_DBG_FEPROGP    20
+#define V_DBG_FEPROGP(x) ((x) << S_DBG_FEPROGP)
+#define F_DBG_FEPROGP    V_DBG_FEPROGP(1U)
+
+#define S_DBG_FEREADCLK    19
+#define V_DBG_FEREADCLK(x) ((x) << S_DBG_FEREADCLK)
+#define F_DBG_FEREADCLK    V_DBG_FEREADCLK(1U)
+
+#define S_DBG_FERSEL    3
+#define M_DBG_FERSEL    0xffffU
+#define V_DBG_FERSEL(x) ((x) << S_DBG_FERSEL)
+#define G_DBG_FERSEL(x) (((x) >> S_DBG_FERSEL) & M_DBG_FERSEL)
+
+#define S_DBG_FETIME    0
+#define M_DBG_FETIME    0x7U
+#define V_DBG_FETIME(x) ((x) << S_DBG_FETIME)
+#define G_DBG_FETIME(x) (((x) >> S_DBG_FETIME) & M_DBG_FETIME)
+
+#define A_DBG_T5_STATIC_M_PLL_CONF1 0x60b8
+
+#define S_T5_STATIC_M_PLL_MULTFRAC    8
+#define M_T5_STATIC_M_PLL_MULTFRAC    0xffffffU
+#define V_T5_STATIC_M_PLL_MULTFRAC(x) ((x) << S_T5_STATIC_M_PLL_MULTFRAC)
+#define G_T5_STATIC_M_PLL_MULTFRAC(x) (((x) >> S_T5_STATIC_M_PLL_MULTFRAC) & M_T5_STATIC_M_PLL_MULTFRAC)
+
+#define S_T5_STATIC_M_PLL_FFSLEWRATE    0
+#define M_T5_STATIC_M_PLL_FFSLEWRATE    0xffU
+#define V_T5_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_T5_STATIC_M_PLL_FFSLEWRATE)
+#define G_T5_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_T5_STATIC_M_PLL_FFSLEWRATE) & M_T5_STATIC_M_PLL_FFSLEWRATE)
+
+#define A_DBG_STATIC_M_PLL_CONF1 0x60b8
+
+#define S_STATIC_M_PLL_MULTFRAC    8
+#define M_STATIC_M_PLL_MULTFRAC    0xffffffU
+#define V_STATIC_M_PLL_MULTFRAC(x) ((x) << S_STATIC_M_PLL_MULTFRAC)
+#define G_STATIC_M_PLL_MULTFRAC(x) (((x) >> S_STATIC_M_PLL_MULTFRAC) & M_STATIC_M_PLL_MULTFRAC)
+
+#define S_STATIC_M_PLL_FFSLEWRATE    0
+#define M_STATIC_M_PLL_FFSLEWRATE    0xffU
+#define V_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_STATIC_M_PLL_FFSLEWRATE)
+#define G_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_M_PLL_FFSLEWRATE) & M_STATIC_M_PLL_FFSLEWRATE)
+
+#define A_DBG_T5_STATIC_M_PLL_CONF2 0x60bc
+
+#define S_T5_STATIC_M_PLL_DCO_BYPASS    23
+#define V_T5_STATIC_M_PLL_DCO_BYPASS(x) ((x) << S_T5_STATIC_M_PLL_DCO_BYPASS)
+#define F_T5_STATIC_M_PLL_DCO_BYPASS    V_T5_STATIC_M_PLL_DCO_BYPASS(1U)
+
+#define S_T5_STATIC_M_PLL_SDORDER    21
+#define M_T5_STATIC_M_PLL_SDORDER    0x3U
+#define V_T5_STATIC_M_PLL_SDORDER(x) ((x) << S_T5_STATIC_M_PLL_SDORDER)
+#define G_T5_STATIC_M_PLL_SDORDER(x) (((x) >> S_T5_STATIC_M_PLL_SDORDER) & M_T5_STATIC_M_PLL_SDORDER)
+
+#define S_T5_STATIC_M_PLL_FFENABLE    20
+#define V_T5_STATIC_M_PLL_FFENABLE(x) ((x) << S_T5_STATIC_M_PLL_FFENABLE)
+#define F_T5_STATIC_M_PLL_FFENABLE    V_T5_STATIC_M_PLL_FFENABLE(1U)
+
+#define S_T5_STATIC_M_PLL_STOPCLKB    19
+#define V_T5_STATIC_M_PLL_STOPCLKB(x) ((x) << S_T5_STATIC_M_PLL_STOPCLKB)
+#define F_T5_STATIC_M_PLL_STOPCLKB    V_T5_STATIC_M_PLL_STOPCLKB(1U)
+
+#define S_T5_STATIC_M_PLL_STOPCLKA    18
+#define V_T5_STATIC_M_PLL_STOPCLKA(x) ((x) << S_T5_STATIC_M_PLL_STOPCLKA)
+#define F_T5_STATIC_M_PLL_STOPCLKA    V_T5_STATIC_M_PLL_STOPCLKA(1U)
+
+#define S_T5_STATIC_M_PLL_SLEEP    17
+#define V_T5_STATIC_M_PLL_SLEEP(x) ((x) << S_T5_STATIC_M_PLL_SLEEP)
+#define F_T5_STATIC_M_PLL_SLEEP    V_T5_STATIC_M_PLL_SLEEP(1U)
+
+#define S_T5_STATIC_M_PLL_BYPASS    16
+#define V_T5_STATIC_M_PLL_BYPASS(x) ((x) << S_T5_STATIC_M_PLL_BYPASS)
+#define F_T5_STATIC_M_PLL_BYPASS    V_T5_STATIC_M_PLL_BYPASS(1U)
+
+#define S_T5_STATIC_M_PLL_LOCKTUNE    0
+#define M_T5_STATIC_M_PLL_LOCKTUNE    0xffffU
+#define V_T5_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_T5_STATIC_M_PLL_LOCKTUNE)
+#define G_T5_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_T5_STATIC_M_PLL_LOCKTUNE) & M_T5_STATIC_M_PLL_LOCKTUNE)
+
+#define A_DBG_STATIC_M_PLL_CONF2 0x60bc
+
+#define S_T6_STATIC_M_PLL_PREDIV    24
+#define M_T6_STATIC_M_PLL_PREDIV    0x3fU
+#define V_T6_STATIC_M_PLL_PREDIV(x) ((x) << S_T6_STATIC_M_PLL_PREDIV)
+#define G_T6_STATIC_M_PLL_PREDIV(x) (((x) >> S_T6_STATIC_M_PLL_PREDIV) & M_T6_STATIC_M_PLL_PREDIV)
+
+#define S_STATIC_M_PLL_DCO_BYPASS    23
+#define V_STATIC_M_PLL_DCO_BYPASS(x) ((x) << S_STATIC_M_PLL_DCO_BYPASS)
+#define F_STATIC_M_PLL_DCO_BYPASS    V_STATIC_M_PLL_DCO_BYPASS(1U)
+
+#define S_STATIC_M_PLL_SDORDER    21
+#define M_STATIC_M_PLL_SDORDER    0x3U
+#define V_STATIC_M_PLL_SDORDER(x) ((x) << S_STATIC_M_PLL_SDORDER)
+#define G_STATIC_M_PLL_SDORDER(x) (((x) >> S_STATIC_M_PLL_SDORDER) & M_STATIC_M_PLL_SDORDER)
+
+#define S_STATIC_M_PLL_FFENABLE    20
+#define V_STATIC_M_PLL_FFENABLE(x) ((x) << S_STATIC_M_PLL_FFENABLE)
+#define F_STATIC_M_PLL_FFENABLE    V_STATIC_M_PLL_FFENABLE(1U)
+
+#define S_STATIC_M_PLL_STOPCLKB    19
+#define V_STATIC_M_PLL_STOPCLKB(x) ((x) << S_STATIC_M_PLL_STOPCLKB)
+#define F_STATIC_M_PLL_STOPCLKB    V_STATIC_M_PLL_STOPCLKB(1U)
+
+#define S_STATIC_M_PLL_STOPCLKA    18
+#define V_STATIC_M_PLL_STOPCLKA(x) ((x) << S_STATIC_M_PLL_STOPCLKA)
+#define F_STATIC_M_PLL_STOPCLKA    V_STATIC_M_PLL_STOPCLKA(1U)
+
+#define S_T6_STATIC_M_PLL_SLEEP    17
+#define V_T6_STATIC_M_PLL_SLEEP(x) ((x) << S_T6_STATIC_M_PLL_SLEEP)
+#define F_T6_STATIC_M_PLL_SLEEP    V_T6_STATIC_M_PLL_SLEEP(1U)
+
+#define S_T6_STATIC_M_PLL_BYPASS    16
+#define V_T6_STATIC_M_PLL_BYPASS(x) ((x) << S_T6_STATIC_M_PLL_BYPASS)
+#define F_T6_STATIC_M_PLL_BYPASS    V_T6_STATIC_M_PLL_BYPASS(1U)
+
+#define S_STATIC_M_PLL_LOCKTUNE    0
+#define M_STATIC_M_PLL_LOCKTUNE    0x1fU
+#define V_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_STATIC_M_PLL_LOCKTUNE)
+#define G_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_STATIC_M_PLL_LOCKTUNE) & M_STATIC_M_PLL_LOCKTUNE)
+
+#define A_DBG_T5_STATIC_M_PLL_CONF3 0x60c0
+
+#define S_T5_STATIC_M_PLL_MULTPRE    30
+#define M_T5_STATIC_M_PLL_MULTPRE    0x3U
+#define V_T5_STATIC_M_PLL_MULTPRE(x) ((x) << S_T5_STATIC_M_PLL_MULTPRE)
+#define G_T5_STATIC_M_PLL_MULTPRE(x) (((x) >> S_T5_STATIC_M_PLL_MULTPRE) & M_T5_STATIC_M_PLL_MULTPRE)
+
+#define S_T5_STATIC_M_PLL_LOCKSEL    28
+#define M_T5_STATIC_M_PLL_LOCKSEL    0x3U
+#define V_T5_STATIC_M_PLL_LOCKSEL(x) ((x) << S_T5_STATIC_M_PLL_LOCKSEL)
+#define G_T5_STATIC_M_PLL_LOCKSEL(x) (((x) >> S_T5_STATIC_M_PLL_LOCKSEL) & M_T5_STATIC_M_PLL_LOCKSEL)
+
+#define S_T5_STATIC_M_PLL_FFTUNE    12
+#define M_T5_STATIC_M_PLL_FFTUNE    0xffffU
+#define V_T5_STATIC_M_PLL_FFTUNE(x) ((x) << S_T5_STATIC_M_PLL_FFTUNE)
+#define G_T5_STATIC_M_PLL_FFTUNE(x) (((x) >> S_T5_STATIC_M_PLL_FFTUNE) & M_T5_STATIC_M_PLL_FFTUNE)
+
+#define S_T5_STATIC_M_PLL_RANGEPRE    10
+#define M_T5_STATIC_M_PLL_RANGEPRE    0x3U
+#define V_T5_STATIC_M_PLL_RANGEPRE(x) ((x) << S_T5_STATIC_M_PLL_RANGEPRE)
+#define G_T5_STATIC_M_PLL_RANGEPRE(x) (((x) >> S_T5_STATIC_M_PLL_RANGEPRE) & M_T5_STATIC_M_PLL_RANGEPRE)
+
+#define S_T5_STATIC_M_PLL_RANGEB    5
+#define M_T5_STATIC_M_PLL_RANGEB    0x1fU
+#define V_T5_STATIC_M_PLL_RANGEB(x) ((x) << S_T5_STATIC_M_PLL_RANGEB)
+#define G_T5_STATIC_M_PLL_RANGEB(x) (((x) >> S_T5_STATIC_M_PLL_RANGEB) & M_T5_STATIC_M_PLL_RANGEB)
+
+#define S_T5_STATIC_M_PLL_RANGEA    0
+#define M_T5_STATIC_M_PLL_RANGEA    0x1fU
+#define V_T5_STATIC_M_PLL_RANGEA(x) ((x) << S_T5_STATIC_M_PLL_RANGEA)
+#define G_T5_STATIC_M_PLL_RANGEA(x) (((x) >> S_T5_STATIC_M_PLL_RANGEA) & M_T5_STATIC_M_PLL_RANGEA)
+
+#define A_DBG_STATIC_M_PLL_CONF3 0x60c0
+
+#define S_STATIC_M_PLL_MULTPRE    30
+#define M_STATIC_M_PLL_MULTPRE    0x3U
+#define V_STATIC_M_PLL_MULTPRE(x) ((x) << S_STATIC_M_PLL_MULTPRE)
+#define G_STATIC_M_PLL_MULTPRE(x) (((x) >> S_STATIC_M_PLL_MULTPRE) & M_STATIC_M_PLL_MULTPRE)
+
+#define S_STATIC_M_PLL_LOCKSEL    28
+#define V_STATIC_M_PLL_LOCKSEL(x) ((x) << S_STATIC_M_PLL_LOCKSEL)
+#define F_STATIC_M_PLL_LOCKSEL    V_STATIC_M_PLL_LOCKSEL(1U)
+
+#define S_STATIC_M_PLL_FFTUNE    12
+#define M_STATIC_M_PLL_FFTUNE    0xffffU
+#define V_STATIC_M_PLL_FFTUNE(x) ((x) << S_STATIC_M_PLL_FFTUNE)
+#define G_STATIC_M_PLL_FFTUNE(x) (((x) >> S_STATIC_M_PLL_FFTUNE) & M_STATIC_M_PLL_FFTUNE)
+
+#define S_STATIC_M_PLL_RANGEPRE    10
+#define M_STATIC_M_PLL_RANGEPRE    0x3U
+#define V_STATIC_M_PLL_RANGEPRE(x) ((x) << S_STATIC_M_PLL_RANGEPRE)
+#define G_STATIC_M_PLL_RANGEPRE(x) (((x) >> S_STATIC_M_PLL_RANGEPRE) & M_STATIC_M_PLL_RANGEPRE)
+
+#define S_T6_STATIC_M_PLL_RANGEB    5
+#define M_T6_STATIC_M_PLL_RANGEB    0x1fU
+#define V_T6_STATIC_M_PLL_RANGEB(x) ((x) << S_T6_STATIC_M_PLL_RANGEB)
+#define G_T6_STATIC_M_PLL_RANGEB(x) (((x) >> S_T6_STATIC_M_PLL_RANGEB) & M_T6_STATIC_M_PLL_RANGEB)
+
+#define S_T6_STATIC_M_PLL_RANGEA    0
+#define M_T6_STATIC_M_PLL_RANGEA    0x1fU
+#define V_T6_STATIC_M_PLL_RANGEA(x) ((x) << S_T6_STATIC_M_PLL_RANGEA)
+#define G_T6_STATIC_M_PLL_RANGEA(x) (((x) >> S_T6_STATIC_M_PLL_RANGEA) & M_T6_STATIC_M_PLL_RANGEA)
+
+#define A_DBG_T5_STATIC_M_PLL_CONF4 0x60c4
+#define A_DBG_STATIC_M_PLL_CONF4 0x60c4
+#define A_DBG_T5_STATIC_M_PLL_CONF5 0x60c8
+
+#define S_T5_STATIC_M_PLL_VCVTUNE    24
+#define M_T5_STATIC_M_PLL_VCVTUNE    0x7U
+#define V_T5_STATIC_M_PLL_VCVTUNE(x) ((x) << S_T5_STATIC_M_PLL_VCVTUNE)
+#define G_T5_STATIC_M_PLL_VCVTUNE(x) (((x) >> S_T5_STATIC_M_PLL_VCVTUNE) & M_T5_STATIC_M_PLL_VCVTUNE)
+
+#define S_T5_STATIC_M_PLL_RESET    23
+#define V_T5_STATIC_M_PLL_RESET(x) ((x) << S_T5_STATIC_M_PLL_RESET)
+#define F_T5_STATIC_M_PLL_RESET    V_T5_STATIC_M_PLL_RESET(1U)
+
+#define S_T5_STATIC_MPLL_REFCLK_SEL    22
+#define V_T5_STATIC_MPLL_REFCLK_SEL(x) ((x) << S_T5_STATIC_MPLL_REFCLK_SEL)
+#define F_T5_STATIC_MPLL_REFCLK_SEL    V_T5_STATIC_MPLL_REFCLK_SEL(1U)
+
+#define S_T5_STATIC_M_PLL_LFTUNE_32_40    13
+#define M_T5_STATIC_M_PLL_LFTUNE_32_40    0x1ffU
+#define V_T5_STATIC_M_PLL_LFTUNE_32_40(x) ((x) << S_T5_STATIC_M_PLL_LFTUNE_32_40)
+#define G_T5_STATIC_M_PLL_LFTUNE_32_40(x) (((x) >> S_T5_STATIC_M_PLL_LFTUNE_32_40) & M_T5_STATIC_M_PLL_LFTUNE_32_40)
+
+#define S_T5_STATIC_M_PLL_PREDIV    8
+#define M_T5_STATIC_M_PLL_PREDIV    0x1fU
+#define V_T5_STATIC_M_PLL_PREDIV(x) ((x) << S_T5_STATIC_M_PLL_PREDIV)
+#define G_T5_STATIC_M_PLL_PREDIV(x) (((x) >> S_T5_STATIC_M_PLL_PREDIV) & M_T5_STATIC_M_PLL_PREDIV)
+
+#define S_T5_STATIC_M_PLL_MULT    0
+#define M_T5_STATIC_M_PLL_MULT    0xffU
+#define V_T5_STATIC_M_PLL_MULT(x) ((x) << S_T5_STATIC_M_PLL_MULT)
+#define G_T5_STATIC_M_PLL_MULT(x) (((x) >> S_T5_STATIC_M_PLL_MULT) & M_T5_STATIC_M_PLL_MULT)
+
+#define A_DBG_STATIC_M_PLL_CONF5 0x60c8
+
+#define S_STATIC_M_PLL_VCVTUNE    24
+#define M_STATIC_M_PLL_VCVTUNE    0x7U
+#define V_STATIC_M_PLL_VCVTUNE(x) ((x) << S_STATIC_M_PLL_VCVTUNE)
+#define G_STATIC_M_PLL_VCVTUNE(x) (((x) >> S_STATIC_M_PLL_VCVTUNE) & M_STATIC_M_PLL_VCVTUNE)
+
+#define S_T6_STATIC_M_PLL_RESET    23
+#define V_T6_STATIC_M_PLL_RESET(x) ((x) << S_T6_STATIC_M_PLL_RESET)
+#define F_T6_STATIC_M_PLL_RESET    V_T6_STATIC_M_PLL_RESET(1U)
+
+#define S_STATIC_MPLL_REFCLK_SEL    22
+#define V_STATIC_MPLL_REFCLK_SEL(x) ((x) << S_STATIC_MPLL_REFCLK_SEL)
+#define F_STATIC_MPLL_REFCLK_SEL    V_STATIC_MPLL_REFCLK_SEL(1U)
+
+#define S_STATIC_M_PLL_LFTUNE_32_40    13
+#define M_STATIC_M_PLL_LFTUNE_32_40    0x1ffU
+#define V_STATIC_M_PLL_LFTUNE_32_40(x) ((x) << S_STATIC_M_PLL_LFTUNE_32_40)
+#define G_STATIC_M_PLL_LFTUNE_32_40(x) (((x) >> S_STATIC_M_PLL_LFTUNE_32_40) & M_STATIC_M_PLL_LFTUNE_32_40)
+
+#define S_T6_STATIC_M_PLL_MULT    0
+#define M_T6_STATIC_M_PLL_MULT    0xffU
+#define V_T6_STATIC_M_PLL_MULT(x) ((x) << S_T6_STATIC_M_PLL_MULT)
+#define G_T6_STATIC_M_PLL_MULT(x) (((x) >> S_T6_STATIC_M_PLL_MULT) & M_T6_STATIC_M_PLL_MULT)
+
+#define A_DBG_T5_STATIC_M_PLL_CONF6 0x60cc
+
+#define S_T5_STATIC_PHY0RECRST_    5
+#define V_T5_STATIC_PHY0RECRST_(x) ((x) << S_T5_STATIC_PHY0RECRST_)
+#define F_T5_STATIC_PHY0RECRST_    V_T5_STATIC_PHY0RECRST_(1U)
+
+#define S_T5_STATIC_PHY1RECRST_    4
+#define V_T5_STATIC_PHY1RECRST_(x) ((x) << S_T5_STATIC_PHY1RECRST_)
+#define F_T5_STATIC_PHY1RECRST_    V_T5_STATIC_PHY1RECRST_(1U)
+
+#define S_T5_STATIC_SWMC0RST_    3
+#define V_T5_STATIC_SWMC0RST_(x) ((x) << S_T5_STATIC_SWMC0RST_)
+#define F_T5_STATIC_SWMC0RST_    V_T5_STATIC_SWMC0RST_(1U)
+
+#define S_T5_STATIC_SWMC0CFGRST_    2
+#define V_T5_STATIC_SWMC0CFGRST_(x) ((x) << S_T5_STATIC_SWMC0CFGRST_)
+#define F_T5_STATIC_SWMC0CFGRST_    V_T5_STATIC_SWMC0CFGRST_(1U)
+
+#define S_T5_STATIC_SWMC1RST_    1
+#define V_T5_STATIC_SWMC1RST_(x) ((x) << S_T5_STATIC_SWMC1RST_)
+#define F_T5_STATIC_SWMC1RST_    V_T5_STATIC_SWMC1RST_(1U)
+
+#define S_T5_STATIC_SWMC1CFGRST_    0
+#define V_T5_STATIC_SWMC1CFGRST_(x) ((x) << S_T5_STATIC_SWMC1CFGRST_)
+#define F_T5_STATIC_SWMC1CFGRST_    V_T5_STATIC_SWMC1CFGRST_(1U)
+
+#define A_DBG_STATIC_M_PLL_CONF6 0x60cc
+
+#define S_STATIC_M_PLL_DIVCHANGE    30
+#define V_STATIC_M_PLL_DIVCHANGE(x) ((x) << S_STATIC_M_PLL_DIVCHANGE)
+#define F_STATIC_M_PLL_DIVCHANGE    V_STATIC_M_PLL_DIVCHANGE(1U)
+
+#define S_STATIC_M_PLL_FRAMESTOP    29
+#define V_STATIC_M_PLL_FRAMESTOP(x) ((x) << S_STATIC_M_PLL_FRAMESTOP)
+#define F_STATIC_M_PLL_FRAMESTOP    V_STATIC_M_PLL_FRAMESTOP(1U)
+
+#define S_STATIC_M_PLL_FASTSTOP    28
+#define V_STATIC_M_PLL_FASTSTOP(x) ((x) << S_STATIC_M_PLL_FASTSTOP)
+#define F_STATIC_M_PLL_FASTSTOP    V_STATIC_M_PLL_FASTSTOP(1U)
+
+#define S_STATIC_M_PLL_FFBYPASS    27
+#define V_STATIC_M_PLL_FFBYPASS(x) ((x) << S_STATIC_M_PLL_FFBYPASS)
+#define F_STATIC_M_PLL_FFBYPASS    V_STATIC_M_PLL_FFBYPASS(1U)
+
+#define S_STATIC_M_PLL_STARTUP    25
+#define M_STATIC_M_PLL_STARTUP    0x3U
+#define V_STATIC_M_PLL_STARTUP(x) ((x) << S_STATIC_M_PLL_STARTUP)
+#define G_STATIC_M_PLL_STARTUP(x) (((x) >> S_STATIC_M_PLL_STARTUP) & M_STATIC_M_PLL_STARTUP)
+
+#define S_STATIC_M_PLL_VREGTUNE    6
+#define M_STATIC_M_PLL_VREGTUNE    0x7ffffU
+#define V_STATIC_M_PLL_VREGTUNE(x) ((x) << S_STATIC_M_PLL_VREGTUNE)
+#define G_STATIC_M_PLL_VREGTUNE(x) (((x) >> S_STATIC_M_PLL_VREGTUNE) & M_STATIC_M_PLL_VREGTUNE)
+
+#define S_STATIC_PHY0RECRST_    5
+#define V_STATIC_PHY0RECRST_(x) ((x) << S_STATIC_PHY0RECRST_)
+#define F_STATIC_PHY0RECRST_    V_STATIC_PHY0RECRST_(1U)
+
+#define S_STATIC_PHY1RECRST_    4
+#define V_STATIC_PHY1RECRST_(x) ((x) << S_STATIC_PHY1RECRST_)
+#define F_STATIC_PHY1RECRST_    V_STATIC_PHY1RECRST_(1U)
+
+#define S_STATIC_SWMC0RST_    3
+#define V_STATIC_SWMC0RST_(x) ((x) << S_STATIC_SWMC0RST_)
+#define F_STATIC_SWMC0RST_    V_STATIC_SWMC0RST_(1U)
+
+#define S_STATIC_SWMC0CFGRST_    2
+#define V_STATIC_SWMC0CFGRST_(x) ((x) << S_STATIC_SWMC0CFGRST_)
+#define F_STATIC_SWMC0CFGRST_    V_STATIC_SWMC0CFGRST_(1U)
+
+#define S_STATIC_SWMC1RST_    1
+#define V_STATIC_SWMC1RST_(x) ((x) << S_STATIC_SWMC1RST_)
+#define F_STATIC_SWMC1RST_    V_STATIC_SWMC1RST_(1U)
+
+#define S_STATIC_SWMC1CFGRST_    0
+#define V_STATIC_SWMC1CFGRST_(x) ((x) << S_STATIC_SWMC1CFGRST_)
+#define F_STATIC_SWMC1CFGRST_    V_STATIC_SWMC1CFGRST_(1U)
+
+#define A_DBG_T5_STATIC_C_PLL_CONF1 0x60d0
+
+#define S_T5_STATIC_C_PLL_MULTFRAC    8
+#define M_T5_STATIC_C_PLL_MULTFRAC    0xffffffU
+#define V_T5_STATIC_C_PLL_MULTFRAC(x) ((x) << S_T5_STATIC_C_PLL_MULTFRAC)
+#define G_T5_STATIC_C_PLL_MULTFRAC(x) (((x) >> S_T5_STATIC_C_PLL_MULTFRAC) & M_T5_STATIC_C_PLL_MULTFRAC)
+
+#define S_T5_STATIC_C_PLL_FFSLEWRATE    0
+#define M_T5_STATIC_C_PLL_FFSLEWRATE    0xffU
+#define V_T5_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_T5_STATIC_C_PLL_FFSLEWRATE)
+#define G_T5_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_T5_STATIC_C_PLL_FFSLEWRATE) & M_T5_STATIC_C_PLL_FFSLEWRATE)
+
+#define A_DBG_STATIC_C_PLL_CONF1 0x60d0
+
+#define S_STATIC_C_PLL_MULTFRAC    8
+#define M_STATIC_C_PLL_MULTFRAC    0xffffffU
+#define V_STATIC_C_PLL_MULTFRAC(x) ((x) << S_STATIC_C_PLL_MULTFRAC)
+#define G_STATIC_C_PLL_MULTFRAC(x) (((x) >> S_STATIC_C_PLL_MULTFRAC) & M_STATIC_C_PLL_MULTFRAC)
+
+#define S_STATIC_C_PLL_FFSLEWRATE    0
+#define M_STATIC_C_PLL_FFSLEWRATE    0xffU
+#define V_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_STATIC_C_PLL_FFSLEWRATE)
+#define G_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_C_PLL_FFSLEWRATE) & M_STATIC_C_PLL_FFSLEWRATE)
+
+#define A_DBG_T5_STATIC_C_PLL_CONF2 0x60d4
+
+#define S_T5_STATIC_C_PLL_DCO_BYPASS    23
+#define V_T5_STATIC_C_PLL_DCO_BYPASS(x) ((x) << S_T5_STATIC_C_PLL_DCO_BYPASS)
+#define F_T5_STATIC_C_PLL_DCO_BYPASS    V_T5_STATIC_C_PLL_DCO_BYPASS(1U)
+
+#define S_T5_STATIC_C_PLL_SDORDER    21
+#define M_T5_STATIC_C_PLL_SDORDER    0x3U
+#define V_T5_STATIC_C_PLL_SDORDER(x) ((x) << S_T5_STATIC_C_PLL_SDORDER)
+#define G_T5_STATIC_C_PLL_SDORDER(x) (((x) >> S_T5_STATIC_C_PLL_SDORDER) & M_T5_STATIC_C_PLL_SDORDER)
+
+#define S_T5_STATIC_C_PLL_FFENABLE    20
+#define V_T5_STATIC_C_PLL_FFENABLE(x) ((x) << S_T5_STATIC_C_PLL_FFENABLE)
+#define F_T5_STATIC_C_PLL_FFENABLE    V_T5_STATIC_C_PLL_FFENABLE(1U)
+
+#define S_T5_STATIC_C_PLL_STOPCLKB    19
+#define V_T5_STATIC_C_PLL_STOPCLKB(x) ((x) << S_T5_STATIC_C_PLL_STOPCLKB)
+#define F_T5_STATIC_C_PLL_STOPCLKB    V_T5_STATIC_C_PLL_STOPCLKB(1U)
+
+#define S_T5_STATIC_C_PLL_STOPCLKA    18
+#define V_T5_STATIC_C_PLL_STOPCLKA(x) ((x) << S_T5_STATIC_C_PLL_STOPCLKA)
+#define F_T5_STATIC_C_PLL_STOPCLKA    V_T5_STATIC_C_PLL_STOPCLKA(1U)
+
+#define S_T5_STATIC_C_PLL_SLEEP    17
+#define V_T5_STATIC_C_PLL_SLEEP(x) ((x) << S_T5_STATIC_C_PLL_SLEEP)
+#define F_T5_STATIC_C_PLL_SLEEP    V_T5_STATIC_C_PLL_SLEEP(1U)
+
+#define S_T5_STATIC_C_PLL_BYPASS    16
+#define V_T5_STATIC_C_PLL_BYPASS(x) ((x) << S_T5_STATIC_C_PLL_BYPASS)
+#define F_T5_STATIC_C_PLL_BYPASS    V_T5_STATIC_C_PLL_BYPASS(1U)
+
+#define S_T5_STATIC_C_PLL_LOCKTUNE    0
+#define M_T5_STATIC_C_PLL_LOCKTUNE    0xffffU
+#define V_T5_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_T5_STATIC_C_PLL_LOCKTUNE)
+#define G_T5_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_T5_STATIC_C_PLL_LOCKTUNE) & M_T5_STATIC_C_PLL_LOCKTUNE)
+
+#define A_DBG_STATIC_C_PLL_CONF2 0x60d4
+
+#define S_T6_STATIC_C_PLL_PREDIV    26
+#define M_T6_STATIC_C_PLL_PREDIV    0x3fU
+#define V_T6_STATIC_C_PLL_PREDIV(x) ((x) << S_T6_STATIC_C_PLL_PREDIV)
+#define G_T6_STATIC_C_PLL_PREDIV(x) (((x) >> S_T6_STATIC_C_PLL_PREDIV) & M_T6_STATIC_C_PLL_PREDIV)
+
+#define S_STATIC_C_PLL_STARTUP    24
+#define M_STATIC_C_PLL_STARTUP    0x3U
+#define V_STATIC_C_PLL_STARTUP(x) ((x) << S_STATIC_C_PLL_STARTUP)
+#define G_STATIC_C_PLL_STARTUP(x) (((x) >> S_STATIC_C_PLL_STARTUP) & M_STATIC_C_PLL_STARTUP)
+
+#define S_STATIC_C_PLL_DCO_BYPASS    23
+#define V_STATIC_C_PLL_DCO_BYPASS(x) ((x) << S_STATIC_C_PLL_DCO_BYPASS)
+#define F_STATIC_C_PLL_DCO_BYPASS    V_STATIC_C_PLL_DCO_BYPASS(1U)
+
+#define S_STATIC_C_PLL_SDORDER    21
+#define M_STATIC_C_PLL_SDORDER    0x3U
+#define V_STATIC_C_PLL_SDORDER(x) ((x) << S_STATIC_C_PLL_SDORDER)
+#define G_STATIC_C_PLL_SDORDER(x) (((x) >> S_STATIC_C_PLL_SDORDER) & M_STATIC_C_PLL_SDORDER)
+
+#define S_STATIC_C_PLL_DIVCHANGE    20
+#define V_STATIC_C_PLL_DIVCHANGE(x) ((x) << S_STATIC_C_PLL_DIVCHANGE)
+#define F_STATIC_C_PLL_DIVCHANGE    V_STATIC_C_PLL_DIVCHANGE(1U)
+
+#define S_STATIC_C_PLL_STOPCLKB    19
+#define V_STATIC_C_PLL_STOPCLKB(x) ((x) << S_STATIC_C_PLL_STOPCLKB)
+#define F_STATIC_C_PLL_STOPCLKB    V_STATIC_C_PLL_STOPCLKB(1U)
+
+#define S_STATIC_C_PLL_STOPCLKA    18
+#define V_STATIC_C_PLL_STOPCLKA(x) ((x) << S_STATIC_C_PLL_STOPCLKA)
+#define F_STATIC_C_PLL_STOPCLKA    V_STATIC_C_PLL_STOPCLKA(1U)
+
+#define S_T6_STATIC_C_PLL_SLEEP    17
+#define V_T6_STATIC_C_PLL_SLEEP(x) ((x) << S_T6_STATIC_C_PLL_SLEEP)
+#define F_T6_STATIC_C_PLL_SLEEP    V_T6_STATIC_C_PLL_SLEEP(1U)
+
+#define S_T6_STATIC_C_PLL_BYPASS    16
+#define V_T6_STATIC_C_PLL_BYPASS(x) ((x) << S_T6_STATIC_C_PLL_BYPASS)
+#define F_T6_STATIC_C_PLL_BYPASS    V_T6_STATIC_C_PLL_BYPASS(1U)
+
+#define S_STATIC_C_PLL_LOCKTUNE    0
+#define M_STATIC_C_PLL_LOCKTUNE    0x1fU
+#define V_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_STATIC_C_PLL_LOCKTUNE)
+#define G_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_STATIC_C_PLL_LOCKTUNE) & M_STATIC_C_PLL_LOCKTUNE)
+
+#define A_DBG_T5_STATIC_C_PLL_CONF3 0x60d8
+
+#define S_T5_STATIC_C_PLL_MULTPRE    30
+#define M_T5_STATIC_C_PLL_MULTPRE    0x3U
+#define V_T5_STATIC_C_PLL_MULTPRE(x) ((x) << S_T5_STATIC_C_PLL_MULTPRE)
+#define G_T5_STATIC_C_PLL_MULTPRE(x) (((x) >> S_T5_STATIC_C_PLL_MULTPRE) & M_T5_STATIC_C_PLL_MULTPRE)
+
+#define S_T5_STATIC_C_PLL_LOCKSEL    28
+#define M_T5_STATIC_C_PLL_LOCKSEL    0x3U
+#define V_T5_STATIC_C_PLL_LOCKSEL(x) ((x) << S_T5_STATIC_C_PLL_LOCKSEL)
+#define G_T5_STATIC_C_PLL_LOCKSEL(x) (((x) >> S_T5_STATIC_C_PLL_LOCKSEL) & M_T5_STATIC_C_PLL_LOCKSEL)
+
+#define S_T5_STATIC_C_PLL_FFTUNE    12
+#define M_T5_STATIC_C_PLL_FFTUNE    0xffffU
+#define V_T5_STATIC_C_PLL_FFTUNE(x) ((x) << S_T5_STATIC_C_PLL_FFTUNE)
+#define G_T5_STATIC_C_PLL_FFTUNE(x) (((x) >> S_T5_STATIC_C_PLL_FFTUNE) & M_T5_STATIC_C_PLL_FFTUNE)
+
+#define S_T5_STATIC_C_PLL_RANGEPRE    10
+#define M_T5_STATIC_C_PLL_RANGEPRE    0x3U
+#define V_T5_STATIC_C_PLL_RANGEPRE(x) ((x) << S_T5_STATIC_C_PLL_RANGEPRE)
+#define G_T5_STATIC_C_PLL_RANGEPRE(x) (((x) >> S_T5_STATIC_C_PLL_RANGEPRE) & M_T5_STATIC_C_PLL_RANGEPRE)
+
+#define S_T5_STATIC_C_PLL_RANGEB    5
+#define M_T5_STATIC_C_PLL_RANGEB    0x1fU
+#define V_T5_STATIC_C_PLL_RANGEB(x) ((x) << S_T5_STATIC_C_PLL_RANGEB)
+#define G_T5_STATIC_C_PLL_RANGEB(x) (((x) >> S_T5_STATIC_C_PLL_RANGEB) & M_T5_STATIC_C_PLL_RANGEB)
+
+#define S_T5_STATIC_C_PLL_RANGEA    0
+#define M_T5_STATIC_C_PLL_RANGEA    0x1fU
+#define V_T5_STATIC_C_PLL_RANGEA(x) ((x) << S_T5_STATIC_C_PLL_RANGEA)
+#define G_T5_STATIC_C_PLL_RANGEA(x) (((x) >> S_T5_STATIC_C_PLL_RANGEA) & M_T5_STATIC_C_PLL_RANGEA)
+
+#define A_DBG_STATIC_C_PLL_CONF3 0x60d8
+
+#define S_STATIC_C_PLL_MULTPRE    30
+#define M_STATIC_C_PLL_MULTPRE    0x3U
+#define V_STATIC_C_PLL_MULTPRE(x) ((x) << S_STATIC_C_PLL_MULTPRE)
+#define G_STATIC_C_PLL_MULTPRE(x) (((x) >> S_STATIC_C_PLL_MULTPRE) & M_STATIC_C_PLL_MULTPRE)
+
+#define S_STATIC_C_PLL_LOCKSEL    28
+#define V_STATIC_C_PLL_LOCKSEL(x) ((x) << S_STATIC_C_PLL_LOCKSEL)
+#define F_STATIC_C_PLL_LOCKSEL    V_STATIC_C_PLL_LOCKSEL(1U)
+
+#define S_STATIC_C_PLL_FFTUNE    12
+#define M_STATIC_C_PLL_FFTUNE    0xffffU
+#define V_STATIC_C_PLL_FFTUNE(x) ((x) << S_STATIC_C_PLL_FFTUNE)
+#define G_STATIC_C_PLL_FFTUNE(x) (((x) >> S_STATIC_C_PLL_FFTUNE) & M_STATIC_C_PLL_FFTUNE)
+
+#define S_STATIC_C_PLL_RANGEPRE    10
+#define M_STATIC_C_PLL_RANGEPRE    0x3U
+#define V_STATIC_C_PLL_RANGEPRE(x) ((x) << S_STATIC_C_PLL_RANGEPRE)
+#define G_STATIC_C_PLL_RANGEPRE(x) (((x) >> S_STATIC_C_PLL_RANGEPRE) & M_STATIC_C_PLL_RANGEPRE)
+
+#define S_T6_STATIC_C_PLL_RANGEB    5
+#define M_T6_STATIC_C_PLL_RANGEB    0x1fU
+#define V_T6_STATIC_C_PLL_RANGEB(x) ((x) << S_T6_STATIC_C_PLL_RANGEB)
+#define G_T6_STATIC_C_PLL_RANGEB(x) (((x) >> S_T6_STATIC_C_PLL_RANGEB) & M_T6_STATIC_C_PLL_RANGEB)
+
+#define S_T6_STATIC_C_PLL_RANGEA    0
+#define M_T6_STATIC_C_PLL_RANGEA    0x1fU
+#define V_T6_STATIC_C_PLL_RANGEA(x) ((x) << S_T6_STATIC_C_PLL_RANGEA)
+#define G_T6_STATIC_C_PLL_RANGEA(x) (((x) >> S_T6_STATIC_C_PLL_RANGEA) & M_T6_STATIC_C_PLL_RANGEA)
+
+#define A_DBG_T5_STATIC_C_PLL_CONF4 0x60dc
+#define A_DBG_STATIC_C_PLL_CONF4 0x60dc
+#define A_DBG_T5_STATIC_C_PLL_CONF5 0x60e0
+
+#define S_T5_STATIC_C_PLL_VCVTUNE    22
+#define M_T5_STATIC_C_PLL_VCVTUNE    0x7U
+#define V_T5_STATIC_C_PLL_VCVTUNE(x) ((x) << S_T5_STATIC_C_PLL_VCVTUNE)
+#define G_T5_STATIC_C_PLL_VCVTUNE(x) (((x) >> S_T5_STATIC_C_PLL_VCVTUNE) & M_T5_STATIC_C_PLL_VCVTUNE)
+
+#define S_T5_STATIC_C_PLL_LFTUNE_32_40    13
+#define M_T5_STATIC_C_PLL_LFTUNE_32_40    0x1ffU
+#define V_T5_STATIC_C_PLL_LFTUNE_32_40(x) ((x) << S_T5_STATIC_C_PLL_LFTUNE_32_40)
+#define G_T5_STATIC_C_PLL_LFTUNE_32_40(x) (((x) >> S_T5_STATIC_C_PLL_LFTUNE_32_40) & M_T5_STATIC_C_PLL_LFTUNE_32_40)
+
+#define S_T5_STATIC_C_PLL_PREDIV    8
+#define M_T5_STATIC_C_PLL_PREDIV    0x1fU
+#define V_T5_STATIC_C_PLL_PREDIV(x) ((x) << S_T5_STATIC_C_PLL_PREDIV)
+#define G_T5_STATIC_C_PLL_PREDIV(x) (((x) >> S_T5_STATIC_C_PLL_PREDIV) & M_T5_STATIC_C_PLL_PREDIV)
+
+#define S_T5_STATIC_C_PLL_MULT    0
+#define M_T5_STATIC_C_PLL_MULT    0xffU
+#define V_T5_STATIC_C_PLL_MULT(x) ((x) << S_T5_STATIC_C_PLL_MULT)
+#define G_T5_STATIC_C_PLL_MULT(x) (((x) >> S_T5_STATIC_C_PLL_MULT) & M_T5_STATIC_C_PLL_MULT)
+
+#define A_DBG_STATIC_C_PLL_CONF5 0x60e0
+
+#define S_STATIC_C_PLL_FFBYPASS    27
+#define V_STATIC_C_PLL_FFBYPASS(x) ((x) << S_STATIC_C_PLL_FFBYPASS)
+#define F_STATIC_C_PLL_FFBYPASS    V_STATIC_C_PLL_FFBYPASS(1U)
+
+#define S_STATIC_C_PLL_FASTSTOP    26
+#define V_STATIC_C_PLL_FASTSTOP(x) ((x) << S_STATIC_C_PLL_FASTSTOP)
+#define F_STATIC_C_PLL_FASTSTOP    V_STATIC_C_PLL_FASTSTOP(1U)
+
+#define S_STATIC_C_PLL_FRAMESTOP    25
+#define V_STATIC_C_PLL_FRAMESTOP(x) ((x) << S_STATIC_C_PLL_FRAMESTOP)
+#define F_STATIC_C_PLL_FRAMESTOP    V_STATIC_C_PLL_FRAMESTOP(1U)
+
+#define S_STATIC_C_PLL_VCVTUNE    22
+#define M_STATIC_C_PLL_VCVTUNE    0x7U
+#define V_STATIC_C_PLL_VCVTUNE(x) ((x) << S_STATIC_C_PLL_VCVTUNE)
+#define G_STATIC_C_PLL_VCVTUNE(x) (((x) >> S_STATIC_C_PLL_VCVTUNE) & M_STATIC_C_PLL_VCVTUNE)
+
+#define S_STATIC_C_PLL_LFTUNE_32_40    13
+#define M_STATIC_C_PLL_LFTUNE_32_40    0x1ffU
+#define V_STATIC_C_PLL_LFTUNE_32_40(x) ((x) << S_STATIC_C_PLL_LFTUNE_32_40)
+#define G_STATIC_C_PLL_LFTUNE_32_40(x) (((x) >> S_STATIC_C_PLL_LFTUNE_32_40) & M_STATIC_C_PLL_LFTUNE_32_40)
+
+#define S_STATIC_C_PLL_PREDIV_CNF5    8
+#define M_STATIC_C_PLL_PREDIV_CNF5    0x1fU
+#define V_STATIC_C_PLL_PREDIV_CNF5(x) ((x) << S_STATIC_C_PLL_PREDIV_CNF5)
+#define G_STATIC_C_PLL_PREDIV_CNF5(x) (((x) >> S_STATIC_C_PLL_PREDIV_CNF5) & M_STATIC_C_PLL_PREDIV_CNF5)
+
+#define S_T6_STATIC_C_PLL_MULT    0
+#define M_T6_STATIC_C_PLL_MULT    0xffU
+#define V_T6_STATIC_C_PLL_MULT(x) ((x) << S_T6_STATIC_C_PLL_MULT)
+#define G_T6_STATIC_C_PLL_MULT(x) (((x) >> S_T6_STATIC_C_PLL_MULT) & M_T6_STATIC_C_PLL_MULT)
+
+#define A_DBG_T5_STATIC_U_PLL_CONF1 0x60e4
+
+#define S_T5_STATIC_U_PLL_MULTFRAC    8
+#define M_T5_STATIC_U_PLL_MULTFRAC    0xffffffU
+#define V_T5_STATIC_U_PLL_MULTFRAC(x) ((x) << S_T5_STATIC_U_PLL_MULTFRAC)
+#define G_T5_STATIC_U_PLL_MULTFRAC(x) (((x) >> S_T5_STATIC_U_PLL_MULTFRAC) & M_T5_STATIC_U_PLL_MULTFRAC)
+
+#define S_T5_STATIC_U_PLL_FFSLEWRATE    0
+#define M_T5_STATIC_U_PLL_FFSLEWRATE    0xffU
+#define V_T5_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_T5_STATIC_U_PLL_FFSLEWRATE)
+#define G_T5_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_T5_STATIC_U_PLL_FFSLEWRATE) & M_T5_STATIC_U_PLL_FFSLEWRATE)
+
+#define A_DBG_STATIC_U_PLL_CONF1 0x60e4
+
+#define S_STATIC_U_PLL_MULTFRAC    8
+#define M_STATIC_U_PLL_MULTFRAC    0xffffffU
+#define V_STATIC_U_PLL_MULTFRAC(x) ((x) << S_STATIC_U_PLL_MULTFRAC)
+#define G_STATIC_U_PLL_MULTFRAC(x) (((x) >> S_STATIC_U_PLL_MULTFRAC) & M_STATIC_U_PLL_MULTFRAC)
+
+#define S_STATIC_U_PLL_FFSLEWRATE    0
+#define M_STATIC_U_PLL_FFSLEWRATE    0xffU
+#define V_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_STATIC_U_PLL_FFSLEWRATE)
+#define G_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_U_PLL_FFSLEWRATE) & M_STATIC_U_PLL_FFSLEWRATE)
+
+#define A_DBG_T5_STATIC_U_PLL_CONF2 0x60e8
+
+#define S_T5_STATIC_U_PLL_DCO_BYPASS    23
+#define V_T5_STATIC_U_PLL_DCO_BYPASS(x) ((x) << S_T5_STATIC_U_PLL_DCO_BYPASS)
+#define F_T5_STATIC_U_PLL_DCO_BYPASS    V_T5_STATIC_U_PLL_DCO_BYPASS(1U)
+
+#define S_T5_STATIC_U_PLL_SDORDER    21
+#define M_T5_STATIC_U_PLL_SDORDER    0x3U
+#define V_T5_STATIC_U_PLL_SDORDER(x) ((x) << S_T5_STATIC_U_PLL_SDORDER)
+#define G_T5_STATIC_U_PLL_SDORDER(x) (((x) >> S_T5_STATIC_U_PLL_SDORDER) & M_T5_STATIC_U_PLL_SDORDER)
+
+#define S_T5_STATIC_U_PLL_FFENABLE    20
+#define V_T5_STATIC_U_PLL_FFENABLE(x) ((x) << S_T5_STATIC_U_PLL_FFENABLE)
+#define F_T5_STATIC_U_PLL_FFENABLE    V_T5_STATIC_U_PLL_FFENABLE(1U)
+
+#define S_T5_STATIC_U_PLL_STOPCLKB    19
+#define V_T5_STATIC_U_PLL_STOPCLKB(x) ((x) << S_T5_STATIC_U_PLL_STOPCLKB)
+#define F_T5_STATIC_U_PLL_STOPCLKB    V_T5_STATIC_U_PLL_STOPCLKB(1U)
+
+#define S_T5_STATIC_U_PLL_STOPCLKA    18
+#define V_T5_STATIC_U_PLL_STOPCLKA(x) ((x) << S_T5_STATIC_U_PLL_STOPCLKA)
+#define F_T5_STATIC_U_PLL_STOPCLKA    V_T5_STATIC_U_PLL_STOPCLKA(1U)
+
+#define S_T5_STATIC_U_PLL_SLEEP    17
+#define V_T5_STATIC_U_PLL_SLEEP(x) ((x) << S_T5_STATIC_U_PLL_SLEEP)
+#define F_T5_STATIC_U_PLL_SLEEP    V_T5_STATIC_U_PLL_SLEEP(1U)
+
+#define S_T5_STATIC_U_PLL_BYPASS    16
+#define V_T5_STATIC_U_PLL_BYPASS(x) ((x) << S_T5_STATIC_U_PLL_BYPASS)
+#define F_T5_STATIC_U_PLL_BYPASS    V_T5_STATIC_U_PLL_BYPASS(1U)
+
+#define S_T5_STATIC_U_PLL_LOCKTUNE    0
+#define M_T5_STATIC_U_PLL_LOCKTUNE    0xffffU
+#define V_T5_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_T5_STATIC_U_PLL_LOCKTUNE)
+#define G_T5_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_T5_STATIC_U_PLL_LOCKTUNE) & M_T5_STATIC_U_PLL_LOCKTUNE)
+
+#define A_DBG_STATIC_U_PLL_CONF2 0x60e8
+
+#define S_T6_STATIC_U_PLL_PREDIV    26
+#define M_T6_STATIC_U_PLL_PREDIV    0x3fU
+#define V_T6_STATIC_U_PLL_PREDIV(x) ((x) << S_T6_STATIC_U_PLL_PREDIV)
+#define G_T6_STATIC_U_PLL_PREDIV(x) (((x) >> S_T6_STATIC_U_PLL_PREDIV) & M_T6_STATIC_U_PLL_PREDIV)
+
+#define S_STATIC_U_PLL_STARTUP    24
+#define M_STATIC_U_PLL_STARTUP    0x3U
+#define V_STATIC_U_PLL_STARTUP(x) ((x) << S_STATIC_U_PLL_STARTUP)
+#define G_STATIC_U_PLL_STARTUP(x) (((x) >> S_STATIC_U_PLL_STARTUP) & M_STATIC_U_PLL_STARTUP)
+
+#define S_STATIC_U_PLL_DCO_BYPASS    23
+#define V_STATIC_U_PLL_DCO_BYPASS(x) ((x) << S_STATIC_U_PLL_DCO_BYPASS)
+#define F_STATIC_U_PLL_DCO_BYPASS    V_STATIC_U_PLL_DCO_BYPASS(1U)
+
+#define S_STATIC_U_PLL_SDORDER    21
+#define M_STATIC_U_PLL_SDORDER    0x3U
+#define V_STATIC_U_PLL_SDORDER(x) ((x) << S_STATIC_U_PLL_SDORDER)
+#define G_STATIC_U_PLL_SDORDER(x) (((x) >> S_STATIC_U_PLL_SDORDER) & M_STATIC_U_PLL_SDORDER)
+
+#define S_STATIC_U_PLL_DIVCHANGE    20
+#define V_STATIC_U_PLL_DIVCHANGE(x) ((x) << S_STATIC_U_PLL_DIVCHANGE)
+#define F_STATIC_U_PLL_DIVCHANGE    V_STATIC_U_PLL_DIVCHANGE(1U)
+
+#define S_STATIC_U_PLL_STOPCLKB    19
+#define V_STATIC_U_PLL_STOPCLKB(x) ((x) << S_STATIC_U_PLL_STOPCLKB)
+#define F_STATIC_U_PLL_STOPCLKB    V_STATIC_U_PLL_STOPCLKB(1U)
+
+#define S_STATIC_U_PLL_STOPCLKA    18
+#define V_STATIC_U_PLL_STOPCLKA(x) ((x) << S_STATIC_U_PLL_STOPCLKA)
+#define F_STATIC_U_PLL_STOPCLKA    V_STATIC_U_PLL_STOPCLKA(1U)
+
+#define S_T6_STATIC_U_PLL_SLEEP    17
+#define V_T6_STATIC_U_PLL_SLEEP(x) ((x) << S_T6_STATIC_U_PLL_SLEEP)
+#define F_T6_STATIC_U_PLL_SLEEP    V_T6_STATIC_U_PLL_SLEEP(1U)
+
+#define S_T6_STATIC_U_PLL_BYPASS    16
+#define V_T6_STATIC_U_PLL_BYPASS(x) ((x) << S_T6_STATIC_U_PLL_BYPASS)
+#define F_T6_STATIC_U_PLL_BYPASS    V_T6_STATIC_U_PLL_BYPASS(1U)
+
+#define S_STATIC_U_PLL_LOCKTUNE    0
+#define M_STATIC_U_PLL_LOCKTUNE    0x1fU
+#define V_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_STATIC_U_PLL_LOCKTUNE)
+#define G_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_STATIC_U_PLL_LOCKTUNE) & M_STATIC_U_PLL_LOCKTUNE)
+
+#define A_DBG_T5_STATIC_U_PLL_CONF3 0x60ec
+
+#define S_T5_STATIC_U_PLL_MULTPRE    30
+#define M_T5_STATIC_U_PLL_MULTPRE    0x3U
+#define V_T5_STATIC_U_PLL_MULTPRE(x) ((x) << S_T5_STATIC_U_PLL_MULTPRE)
+#define G_T5_STATIC_U_PLL_MULTPRE(x) (((x) >> S_T5_STATIC_U_PLL_MULTPRE) & M_T5_STATIC_U_PLL_MULTPRE)
+
+#define S_T5_STATIC_U_PLL_LOCKSEL    28
+#define M_T5_STATIC_U_PLL_LOCKSEL    0x3U
+#define V_T5_STATIC_U_PLL_LOCKSEL(x) ((x) << S_T5_STATIC_U_PLL_LOCKSEL)
+#define G_T5_STATIC_U_PLL_LOCKSEL(x) (((x) >> S_T5_STATIC_U_PLL_LOCKSEL) & M_T5_STATIC_U_PLL_LOCKSEL)
+
+#define S_T5_STATIC_U_PLL_FFTUNE    12
+#define M_T5_STATIC_U_PLL_FFTUNE    0xffffU
+#define V_T5_STATIC_U_PLL_FFTUNE(x) ((x) << S_T5_STATIC_U_PLL_FFTUNE)
+#define G_T5_STATIC_U_PLL_FFTUNE(x) (((x) >> S_T5_STATIC_U_PLL_FFTUNE) & M_T5_STATIC_U_PLL_FFTUNE)
+
+#define S_T5_STATIC_U_PLL_RANGEPRE    10
+#define M_T5_STATIC_U_PLL_RANGEPRE    0x3U
+#define V_T5_STATIC_U_PLL_RANGEPRE(x) ((x) << S_T5_STATIC_U_PLL_RANGEPRE)
+#define G_T5_STATIC_U_PLL_RANGEPRE(x) (((x) >> S_T5_STATIC_U_PLL_RANGEPRE) & M_T5_STATIC_U_PLL_RANGEPRE)
+
+#define S_T5_STATIC_U_PLL_RANGEB    5
+#define M_T5_STATIC_U_PLL_RANGEB    0x1fU
+#define V_T5_STATIC_U_PLL_RANGEB(x) ((x) << S_T5_STATIC_U_PLL_RANGEB)
+#define G_T5_STATIC_U_PLL_RANGEB(x) (((x) >> S_T5_STATIC_U_PLL_RANGEB) & M_T5_STATIC_U_PLL_RANGEB)
+
+#define S_T5_STATIC_U_PLL_RANGEA    0
+#define M_T5_STATIC_U_PLL_RANGEA    0x1fU
+#define V_T5_STATIC_U_PLL_RANGEA(x) ((x) << S_T5_STATIC_U_PLL_RANGEA)
+#define G_T5_STATIC_U_PLL_RANGEA(x) (((x) >> S_T5_STATIC_U_PLL_RANGEA) & M_T5_STATIC_U_PLL_RANGEA)
+
+#define A_DBG_STATIC_U_PLL_CONF3 0x60ec
+
+#define S_STATIC_U_PLL_MULTPRE    30
+#define M_STATIC_U_PLL_MULTPRE    0x3U
+#define V_STATIC_U_PLL_MULTPRE(x) ((x) << S_STATIC_U_PLL_MULTPRE)
+#define G_STATIC_U_PLL_MULTPRE(x) (((x) >> S_STATIC_U_PLL_MULTPRE) & M_STATIC_U_PLL_MULTPRE)
+
+#define S_STATIC_U_PLL_LOCKSEL    28
+#define V_STATIC_U_PLL_LOCKSEL(x) ((x) << S_STATIC_U_PLL_LOCKSEL)
+#define F_STATIC_U_PLL_LOCKSEL    V_STATIC_U_PLL_LOCKSEL(1U)
+
+#define S_STATIC_U_PLL_FFTUNE    12
+#define M_STATIC_U_PLL_FFTUNE    0xffffU
+#define V_STATIC_U_PLL_FFTUNE(x) ((x) << S_STATIC_U_PLL_FFTUNE)
+#define G_STATIC_U_PLL_FFTUNE(x) (((x) >> S_STATIC_U_PLL_FFTUNE) & M_STATIC_U_PLL_FFTUNE)
+
+#define S_STATIC_U_PLL_RANGEPRE    10
+#define M_STATIC_U_PLL_RANGEPRE    0x3U
+#define V_STATIC_U_PLL_RANGEPRE(x) ((x) << S_STATIC_U_PLL_RANGEPRE)
+#define G_STATIC_U_PLL_RANGEPRE(x) (((x) >> S_STATIC_U_PLL_RANGEPRE) & M_STATIC_U_PLL_RANGEPRE)
+
+#define S_T6_STATIC_U_PLL_RANGEB    5
+#define M_T6_STATIC_U_PLL_RANGEB    0x1fU
+#define V_T6_STATIC_U_PLL_RANGEB(x) ((x) << S_T6_STATIC_U_PLL_RANGEB)
+#define G_T6_STATIC_U_PLL_RANGEB(x) (((x) >> S_T6_STATIC_U_PLL_RANGEB) & M_T6_STATIC_U_PLL_RANGEB)
+
+#define S_T6_STATIC_U_PLL_RANGEA    0
+#define M_T6_STATIC_U_PLL_RANGEA    0x1fU
+#define V_T6_STATIC_U_PLL_RANGEA(x) ((x) << S_T6_STATIC_U_PLL_RANGEA)
+#define G_T6_STATIC_U_PLL_RANGEA(x) (((x) >> S_T6_STATIC_U_PLL_RANGEA) & M_T6_STATIC_U_PLL_RANGEA)
+
+#define A_DBG_T5_STATIC_U_PLL_CONF4 0x60f0
+#define A_DBG_STATIC_U_PLL_CONF4 0x60f0
+#define A_DBG_T5_STATIC_U_PLL_CONF5 0x60f4
+
+#define S_T5_STATIC_U_PLL_VCVTUNE    22
+#define M_T5_STATIC_U_PLL_VCVTUNE    0x7U
+#define V_T5_STATIC_U_PLL_VCVTUNE(x) ((x) << S_T5_STATIC_U_PLL_VCVTUNE)
+#define G_T5_STATIC_U_PLL_VCVTUNE(x) (((x) >> S_T5_STATIC_U_PLL_VCVTUNE) & M_T5_STATIC_U_PLL_VCVTUNE)
+
+#define S_T5_STATIC_U_PLL_LFTUNE_32_40    13
+#define M_T5_STATIC_U_PLL_LFTUNE_32_40    0x1ffU
+#define V_T5_STATIC_U_PLL_LFTUNE_32_40(x) ((x) << S_T5_STATIC_U_PLL_LFTUNE_32_40)
+#define G_T5_STATIC_U_PLL_LFTUNE_32_40(x) (((x) >> S_T5_STATIC_U_PLL_LFTUNE_32_40) & M_T5_STATIC_U_PLL_LFTUNE_32_40)
+
+#define S_T5_STATIC_U_PLL_PREDIV    8
+#define M_T5_STATIC_U_PLL_PREDIV    0x1fU
+#define V_T5_STATIC_U_PLL_PREDIV(x) ((x) << S_T5_STATIC_U_PLL_PREDIV)
+#define G_T5_STATIC_U_PLL_PREDIV(x) (((x) >> S_T5_STATIC_U_PLL_PREDIV) & M_T5_STATIC_U_PLL_PREDIV)
+
+#define S_T5_STATIC_U_PLL_MULT    0
+#define M_T5_STATIC_U_PLL_MULT    0xffU
+#define V_T5_STATIC_U_PLL_MULT(x) ((x) << S_T5_STATIC_U_PLL_MULT)
+#define G_T5_STATIC_U_PLL_MULT(x) (((x) >> S_T5_STATIC_U_PLL_MULT) & M_T5_STATIC_U_PLL_MULT)
+
+#define A_DBG_STATIC_U_PLL_CONF5 0x60f4
+
+#define S_STATIC_U_PLL_FFBYPASS    27
+#define V_STATIC_U_PLL_FFBYPASS(x) ((x) << S_STATIC_U_PLL_FFBYPASS)
+#define F_STATIC_U_PLL_FFBYPASS    V_STATIC_U_PLL_FFBYPASS(1U)
+
+#define S_STATIC_U_PLL_FASTSTOP    26
+#define V_STATIC_U_PLL_FASTSTOP(x) ((x) << S_STATIC_U_PLL_FASTSTOP)
+#define F_STATIC_U_PLL_FASTSTOP    V_STATIC_U_PLL_FASTSTOP(1U)
+
+#define S_STATIC_U_PLL_FRAMESTOP    25
+#define V_STATIC_U_PLL_FRAMESTOP(x) ((x) << S_STATIC_U_PLL_FRAMESTOP)
+#define F_STATIC_U_PLL_FRAMESTOP    V_STATIC_U_PLL_FRAMESTOP(1U)
+
+#define S_STATIC_U_PLL_VCVTUNE    22
+#define M_STATIC_U_PLL_VCVTUNE    0x7U
+#define V_STATIC_U_PLL_VCVTUNE(x) ((x) << S_STATIC_U_PLL_VCVTUNE)
+#define G_STATIC_U_PLL_VCVTUNE(x) (((x) >> S_STATIC_U_PLL_VCVTUNE) & M_STATIC_U_PLL_VCVTUNE)
+
+#define S_STATIC_U_PLL_LFTUNE_32_40    13
+#define M_STATIC_U_PLL_LFTUNE_32_40    0x1ffU
+#define V_STATIC_U_PLL_LFTUNE_32_40(x) ((x) << S_STATIC_U_PLL_LFTUNE_32_40)
+#define G_STATIC_U_PLL_LFTUNE_32_40(x) (((x) >> S_STATIC_U_PLL_LFTUNE_32_40) & M_STATIC_U_PLL_LFTUNE_32_40)
+
+#define S_STATIC_U_PLL_PREDIV_CNF5    8
+#define M_STATIC_U_PLL_PREDIV_CNF5    0x1fU
+#define V_STATIC_U_PLL_PREDIV_CNF5(x) ((x) << S_STATIC_U_PLL_PREDIV_CNF5)
+#define G_STATIC_U_PLL_PREDIV_CNF5(x) (((x) >> S_STATIC_U_PLL_PREDIV_CNF5) & M_STATIC_U_PLL_PREDIV_CNF5)
+
+#define S_T6_STATIC_U_PLL_MULT    0
+#define M_T6_STATIC_U_PLL_MULT    0xffU
+#define V_T6_STATIC_U_PLL_MULT(x) ((x) << S_T6_STATIC_U_PLL_MULT)
+#define G_T6_STATIC_U_PLL_MULT(x) (((x) >> S_T6_STATIC_U_PLL_MULT) & M_T6_STATIC_U_PLL_MULT)
+
+#define A_DBG_T5_STATIC_KR_PLL_CONF1 0x60f8
+
+#define S_T5_STATIC_KR_PLL_BYPASS    30
+#define V_T5_STATIC_KR_PLL_BYPASS(x) ((x) << S_T5_STATIC_KR_PLL_BYPASS)
+#define F_T5_STATIC_KR_PLL_BYPASS    V_T5_STATIC_KR_PLL_BYPASS(1U)
+
+#define S_T5_STATIC_KR_PLL_VBOOSTDIV    27
+#define M_T5_STATIC_KR_PLL_VBOOSTDIV    0x7U
+#define V_T5_STATIC_KR_PLL_VBOOSTDIV(x) ((x) << S_T5_STATIC_KR_PLL_VBOOSTDIV)
+#define G_T5_STATIC_KR_PLL_VBOOSTDIV(x) (((x) >> S_T5_STATIC_KR_PLL_VBOOSTDIV) & M_T5_STATIC_KR_PLL_VBOOSTDIV)
+
+#define S_T5_STATIC_KR_PLL_CPISEL    24
+#define M_T5_STATIC_KR_PLL_CPISEL    0x7U
+#define V_T5_STATIC_KR_PLL_CPISEL(x) ((x) << S_T5_STATIC_KR_PLL_CPISEL)
+#define G_T5_STATIC_KR_PLL_CPISEL(x) (((x) >> S_T5_STATIC_KR_PLL_CPISEL) & M_T5_STATIC_KR_PLL_CPISEL)
+
+#define S_T5_STATIC_KR_PLL_CCALMETHOD    23
+#define V_T5_STATIC_KR_PLL_CCALMETHOD(x) ((x) << S_T5_STATIC_KR_PLL_CCALMETHOD)
+#define F_T5_STATIC_KR_PLL_CCALMETHOD    V_T5_STATIC_KR_PLL_CCALMETHOD(1U)
+
+#define S_T5_STATIC_KR_PLL_CCALLOAD    22
+#define V_T5_STATIC_KR_PLL_CCALLOAD(x) ((x) << S_T5_STATIC_KR_PLL_CCALLOAD)
+#define F_T5_STATIC_KR_PLL_CCALLOAD    V_T5_STATIC_KR_PLL_CCALLOAD(1U)
+
+#define S_T5_STATIC_KR_PLL_CCALFMIN    21
+#define V_T5_STATIC_KR_PLL_CCALFMIN(x) ((x) << S_T5_STATIC_KR_PLL_CCALFMIN)
+#define F_T5_STATIC_KR_PLL_CCALFMIN    V_T5_STATIC_KR_PLL_CCALFMIN(1U)
+
+#define S_T5_STATIC_KR_PLL_CCALFMAX    20
+#define V_T5_STATIC_KR_PLL_CCALFMAX(x) ((x) << S_T5_STATIC_KR_PLL_CCALFMAX)
+#define F_T5_STATIC_KR_PLL_CCALFMAX    V_T5_STATIC_KR_PLL_CCALFMAX(1U)
+
+#define S_T5_STATIC_KR_PLL_CCALCVHOLD    19
+#define V_T5_STATIC_KR_PLL_CCALCVHOLD(x) ((x) << S_T5_STATIC_KR_PLL_CCALCVHOLD)
+#define F_T5_STATIC_KR_PLL_CCALCVHOLD    V_T5_STATIC_KR_PLL_CCALCVHOLD(1U)
+
+#define S_T5_STATIC_KR_PLL_CCALBANDSEL    15
+#define M_T5_STATIC_KR_PLL_CCALBANDSEL    0xfU
+#define V_T5_STATIC_KR_PLL_CCALBANDSEL(x) ((x) << S_T5_STATIC_KR_PLL_CCALBANDSEL)
+#define G_T5_STATIC_KR_PLL_CCALBANDSEL(x) (((x) >> S_T5_STATIC_KR_PLL_CCALBANDSEL) & M_T5_STATIC_KR_PLL_CCALBANDSEL)
+
+#define S_T5_STATIC_KR_PLL_BGOFFSET    11
+#define M_T5_STATIC_KR_PLL_BGOFFSET    0xfU
+#define V_T5_STATIC_KR_PLL_BGOFFSET(x) ((x) << S_T5_STATIC_KR_PLL_BGOFFSET)
+#define G_T5_STATIC_KR_PLL_BGOFFSET(x) (((x) >> S_T5_STATIC_KR_PLL_BGOFFSET) & M_T5_STATIC_KR_PLL_BGOFFSET)
+
+#define S_T5_STATIC_KR_PLL_P    8
+#define M_T5_STATIC_KR_PLL_P    0x7U
+#define V_T5_STATIC_KR_PLL_P(x) ((x) << S_T5_STATIC_KR_PLL_P)
+#define G_T5_STATIC_KR_PLL_P(x) (((x) >> S_T5_STATIC_KR_PLL_P) & M_T5_STATIC_KR_PLL_P)
+
+#define S_T5_STATIC_KR_PLL_N2    4
+#define M_T5_STATIC_KR_PLL_N2    0xfU
+#define V_T5_STATIC_KR_PLL_N2(x) ((x) << S_T5_STATIC_KR_PLL_N2)
+#define G_T5_STATIC_KR_PLL_N2(x) (((x) >> S_T5_STATIC_KR_PLL_N2) & M_T5_STATIC_KR_PLL_N2)
+
+#define S_T5_STATIC_KR_PLL_N1    0
+#define M_T5_STATIC_KR_PLL_N1    0xfU
+#define V_T5_STATIC_KR_PLL_N1(x) ((x) << S_T5_STATIC_KR_PLL_N1)
+#define G_T5_STATIC_KR_PLL_N1(x) (((x) >> S_T5_STATIC_KR_PLL_N1) & M_T5_STATIC_KR_PLL_N1)
+
+#define A_DBG_STATIC_KR_PLL_CONF1 0x60f8
+
+#define S_T6_STATIC_KR_PLL_BYPASS    30
+#define V_T6_STATIC_KR_PLL_BYPASS(x) ((x) << S_T6_STATIC_KR_PLL_BYPASS)
+#define F_T6_STATIC_KR_PLL_BYPASS    V_T6_STATIC_KR_PLL_BYPASS(1U)
+
+#define S_STATIC_KR_PLL_VBOOSTDIV    27
+#define M_STATIC_KR_PLL_VBOOSTDIV    0x7U
+#define V_STATIC_KR_PLL_VBOOSTDIV(x) ((x) << S_STATIC_KR_PLL_VBOOSTDIV)
+#define G_STATIC_KR_PLL_VBOOSTDIV(x) (((x) >> S_STATIC_KR_PLL_VBOOSTDIV) & M_STATIC_KR_PLL_VBOOSTDIV)
+
+#define S_STATIC_KR_PLL_CPISEL    24
+#define M_STATIC_KR_PLL_CPISEL    0x7U
+#define V_STATIC_KR_PLL_CPISEL(x) ((x) << S_STATIC_KR_PLL_CPISEL)
+#define G_STATIC_KR_PLL_CPISEL(x) (((x) >> S_STATIC_KR_PLL_CPISEL) & M_STATIC_KR_PLL_CPISEL)
+
+#define S_STATIC_KR_PLL_CCALMETHOD    23
+#define V_STATIC_KR_PLL_CCALMETHOD(x) ((x) << S_STATIC_KR_PLL_CCALMETHOD)
+#define F_STATIC_KR_PLL_CCALMETHOD    V_STATIC_KR_PLL_CCALMETHOD(1U)
+
+#define S_STATIC_KR_PLL_CCALLOAD    22
+#define V_STATIC_KR_PLL_CCALLOAD(x) ((x) << S_STATIC_KR_PLL_CCALLOAD)
+#define F_STATIC_KR_PLL_CCALLOAD    V_STATIC_KR_PLL_CCALLOAD(1U)
+
+#define S_STATIC_KR_PLL_CCALFMIN    21
+#define V_STATIC_KR_PLL_CCALFMIN(x) ((x) << S_STATIC_KR_PLL_CCALFMIN)
+#define F_STATIC_KR_PLL_CCALFMIN    V_STATIC_KR_PLL_CCALFMIN(1U)
+
+#define S_STATIC_KR_PLL_CCALFMAX    20
+#define V_STATIC_KR_PLL_CCALFMAX(x) ((x) << S_STATIC_KR_PLL_CCALFMAX)
+#define F_STATIC_KR_PLL_CCALFMAX    V_STATIC_KR_PLL_CCALFMAX(1U)
+
+#define S_STATIC_KR_PLL_CCALCVHOLD    19
+#define V_STATIC_KR_PLL_CCALCVHOLD(x) ((x) << S_STATIC_KR_PLL_CCALCVHOLD)
+#define F_STATIC_KR_PLL_CCALCVHOLD    V_STATIC_KR_PLL_CCALCVHOLD(1U)
+
+#define S_STATIC_KR_PLL_CCALBANDSEL    15
+#define M_STATIC_KR_PLL_CCALBANDSEL    0xfU
+#define V_STATIC_KR_PLL_CCALBANDSEL(x) ((x) << S_STATIC_KR_PLL_CCALBANDSEL)
+#define G_STATIC_KR_PLL_CCALBANDSEL(x) (((x) >> S_STATIC_KR_PLL_CCALBANDSEL) & M_STATIC_KR_PLL_CCALBANDSEL)
+
+#define S_STATIC_KR_PLL_BGOFFSET    11
+#define M_STATIC_KR_PLL_BGOFFSET    0xfU
+#define V_STATIC_KR_PLL_BGOFFSET(x) ((x) << S_STATIC_KR_PLL_BGOFFSET)
+#define G_STATIC_KR_PLL_BGOFFSET(x) (((x) >> S_STATIC_KR_PLL_BGOFFSET) & M_STATIC_KR_PLL_BGOFFSET)
+
+#define S_T6_STATIC_KR_PLL_P    8
+#define M_T6_STATIC_KR_PLL_P    0x7U
+#define V_T6_STATIC_KR_PLL_P(x) ((x) << S_T6_STATIC_KR_PLL_P)
+#define G_T6_STATIC_KR_PLL_P(x) (((x) >> S_T6_STATIC_KR_PLL_P) & M_T6_STATIC_KR_PLL_P)
+
+#define S_T6_STATIC_KR_PLL_N2    4
+#define M_T6_STATIC_KR_PLL_N2    0xfU
+#define V_T6_STATIC_KR_PLL_N2(x) ((x) << S_T6_STATIC_KR_PLL_N2)
+#define G_T6_STATIC_KR_PLL_N2(x) (((x) >> S_T6_STATIC_KR_PLL_N2) & M_T6_STATIC_KR_PLL_N2)
+
+#define S_T6_STATIC_KR_PLL_N1    0
+#define M_T6_STATIC_KR_PLL_N1    0xfU
+#define V_T6_STATIC_KR_PLL_N1(x) ((x) << S_T6_STATIC_KR_PLL_N1)
+#define G_T6_STATIC_KR_PLL_N1(x) (((x) >> S_T6_STATIC_KR_PLL_N1) & M_T6_STATIC_KR_PLL_N1)
+
+#define A_DBG_T5_STATIC_KR_PLL_CONF2 0x60fc
+
+#define S_T5_STATIC_KR_PLL_M    11
+#define M_T5_STATIC_KR_PLL_M    0x1ffU
+#define V_T5_STATIC_KR_PLL_M(x) ((x) << S_T5_STATIC_KR_PLL_M)
+#define G_T5_STATIC_KR_PLL_M(x) (((x) >> S_T5_STATIC_KR_PLL_M) & M_T5_STATIC_KR_PLL_M)
+
+#define S_T5_STATIC_KR_PLL_ANALOGTUNE    0
+#define M_T5_STATIC_KR_PLL_ANALOGTUNE    0x7ffU
+#define V_T5_STATIC_KR_PLL_ANALOGTUNE(x) ((x) << S_T5_STATIC_KR_PLL_ANALOGTUNE)
+#define G_T5_STATIC_KR_PLL_ANALOGTUNE(x) (((x) >> S_T5_STATIC_KR_PLL_ANALOGTUNE) & M_T5_STATIC_KR_PLL_ANALOGTUNE)
+
+#define A_DBG_STATIC_KR_PLL_CONF2 0x60fc
+
+#define S_T6_STATIC_KR_PLL_M    11
+#define M_T6_STATIC_KR_PLL_M    0x1ffU
+#define V_T6_STATIC_KR_PLL_M(x) ((x) << S_T6_STATIC_KR_PLL_M)
+#define G_T6_STATIC_KR_PLL_M(x) (((x) >> S_T6_STATIC_KR_PLL_M) & M_T6_STATIC_KR_PLL_M)
+
+#define S_STATIC_KR_PLL_ANALOGTUNE    0
+#define M_STATIC_KR_PLL_ANALOGTUNE    0x7ffU
+#define V_STATIC_KR_PLL_ANALOGTUNE(x) ((x) << S_STATIC_KR_PLL_ANALOGTUNE)
+#define G_STATIC_KR_PLL_ANALOGTUNE(x) (((x) >> S_STATIC_KR_PLL_ANALOGTUNE) & M_STATIC_KR_PLL_ANALOGTUNE)
+
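
A pattern worth noting in this hunk: several offsets are defined twice (0x60bc is both A_DBG_T5_STATIC_M_PLL_CONF2 and A_DBG_STATIC_M_PLL_CONF2) because the field layouts moved between chip revisions; the T5_/T6_ prefixes select a layout, not a different register. A sketch of revision-dependent decoding, assuming the driver's chip_id() helper and CHELSIO_T5/T6 constants, using the M-PLL predivider, which changed both width and location:

    uint32_t prediv;

    if (chip_id(sc) <= CHELSIO_T5) {
            /* T5: 5-bit PREDIV at bit 8 of CONF5 (0x60c8) */
            prediv = G_T5_STATIC_M_PLL_PREDIV(
                t4_read_reg(sc, A_DBG_T5_STATIC_M_PLL_CONF5));
    } else {
            /* T6: 6-bit PREDIV at bit 24 of CONF2 (0x60bc) */
            prediv = G_T6_STATIC_M_PLL_PREDIV(
                t4_read_reg(sc, A_DBG_STATIC_M_PLL_CONF2));
    }
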
 #define A_DBG_PVT_REG_CALIBRATE_CTL 0x6100
 
 #define S_HALT_CALIBRATE    1
@@ -4391,6 +13524,40 @@
 #define V_RESET_CALIBRATE(x) ((x) << S_RESET_CALIBRATE)
 #define F_RESET_CALIBRATE    V_RESET_CALIBRATE(1U)
 
+#define A_DBG_GPIO_EN_NEW 0x6100
+
+#define S_GPIO16_OEN    7
+#define V_GPIO16_OEN(x) ((x) << S_GPIO16_OEN)
+#define F_GPIO16_OEN    V_GPIO16_OEN(1U)
+
+#define S_GPIO17_OEN    6
+#define V_GPIO17_OEN(x) ((x) << S_GPIO17_OEN)
+#define F_GPIO17_OEN    V_GPIO17_OEN(1U)
+
+#define S_GPIO18_OEN    5
+#define V_GPIO18_OEN(x) ((x) << S_GPIO18_OEN)
+#define F_GPIO18_OEN    V_GPIO18_OEN(1U)
+
+#define S_GPIO19_OEN    4
+#define V_GPIO19_OEN(x) ((x) << S_GPIO19_OEN)
+#define F_GPIO19_OEN    V_GPIO19_OEN(1U)
+
+#define S_GPIO16_OUT_VAL    3
+#define V_GPIO16_OUT_VAL(x) ((x) << S_GPIO16_OUT_VAL)
+#define F_GPIO16_OUT_VAL    V_GPIO16_OUT_VAL(1U)
+
+#define S_GPIO17_OUT_VAL    2
+#define V_GPIO17_OUT_VAL(x) ((x) << S_GPIO17_OUT_VAL)
+#define F_GPIO17_OUT_VAL    V_GPIO17_OUT_VAL(1U)
+
+#define S_GPIO18_OUT_VAL    1
+#define V_GPIO18_OUT_VAL(x) ((x) << S_GPIO18_OUT_VAL)
+#define F_GPIO18_OUT_VAL    V_GPIO18_OUT_VAL(1U)
+
+#define S_GPIO19_OUT_VAL    0
+#define V_GPIO19_OUT_VAL(x) ((x) << S_GPIO19_OUT_VAL)
+#define F_GPIO19_OUT_VAL    V_GPIO19_OUT_VAL(1U)
+
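
The new A_DBG_GPIO_EN_NEW register packs the GPIO16-19 output enables and output values into its low byte. A write sketch, assuming the driver's t4_set_reg_field() read-modify-write accessor, that drives GPIO17 high without disturbing the other pins:

    /* Touch only GPIO17's enable and value bits; leave 16/18/19 alone. */
    t4_set_reg_field(sc, A_DBG_GPIO_EN_NEW,
        F_GPIO17_OEN | F_GPIO17_OUT_VAL,
        F_GPIO17_OEN | V_GPIO17_OUT_VAL(1U));
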
 #define A_DBG_PVT_REG_UPDATE_CTL 0x6104
 
 #define S_FAST_UPDATE    8
@@ -4405,6 +13572,40 @@
 #define V_HALT_UPDATE(x) ((x) << S_HALT_UPDATE)
 #define F_HALT_UPDATE    V_HALT_UPDATE(1U)
 
+#define A_DBG_GPIO_IN_NEW 0x6104
+
+#define S_GPIO16_CHG_DET    7
+#define V_GPIO16_CHG_DET(x) ((x) << S_GPIO16_CHG_DET)
+#define F_GPIO16_CHG_DET    V_GPIO16_CHG_DET(1U)
+
+#define S_GPIO17_CHG_DET    6
+#define V_GPIO17_CHG_DET(x) ((x) << S_GPIO17_CHG_DET)
+#define F_GPIO17_CHG_DET    V_GPIO17_CHG_DET(1U)
+
+#define S_GPIO18_CHG_DET    5
+#define V_GPIO18_CHG_DET(x) ((x) << S_GPIO18_CHG_DET)
+#define F_GPIO18_CHG_DET    V_GPIO18_CHG_DET(1U)
+
+#define S_GPIO19_CHG_DET    4
+#define V_GPIO19_CHG_DET(x) ((x) << S_GPIO19_CHG_DET)
+#define F_GPIO19_CHG_DET    V_GPIO19_CHG_DET(1U)
+
+#define S_GPIO19_IN    3
+#define V_GPIO19_IN(x) ((x) << S_GPIO19_IN)
+#define F_GPIO19_IN    V_GPIO19_IN(1U)
+
+#define S_GPIO18_IN    2
+#define V_GPIO18_IN(x) ((x) << S_GPIO18_IN)
+#define F_GPIO18_IN    V_GPIO18_IN(1U)
+
+#define S_GPIO17_IN    1
+#define V_GPIO17_IN(x) ((x) << S_GPIO17_IN)
+#define F_GPIO17_IN    V_GPIO17_IN(1U)
+
+#define S_GPIO16_IN    0
+#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN)
+#define F_GPIO16_IN    V_GPIO16_IN(1U)
+
 #define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108
 
 #define S_LAST_MEASUREMENT_SELECT    8
@@ -4422,6 +13623,128 @@
 #define V_LAST_MEASUREMENT_RESULT_BANK_A(x) ((x) << S_LAST_MEASUREMENT_RESULT_BANK_A)
 #define G_LAST_MEASUREMENT_RESULT_BANK_A(x) (((x) >> S_LAST_MEASUREMENT_RESULT_BANK_A) & M_LAST_MEASUREMENT_RESULT_BANK_A)
 
+#define A_DBG_T5_STATIC_KX_PLL_CONF1 0x6108
+
+#define S_T5_STATIC_KX_PLL_BYPASS    30
+#define V_T5_STATIC_KX_PLL_BYPASS(x) ((x) << S_T5_STATIC_KX_PLL_BYPASS)
+#define F_T5_STATIC_KX_PLL_BYPASS    V_T5_STATIC_KX_PLL_BYPASS(1U)
+
+#define S_T5_STATIC_KX_PLL_VBOOSTDIV    27
+#define M_T5_STATIC_KX_PLL_VBOOSTDIV    0x7U
+#define V_T5_STATIC_KX_PLL_VBOOSTDIV(x) ((x) << S_T5_STATIC_KX_PLL_VBOOSTDIV)
+#define G_T5_STATIC_KX_PLL_VBOOSTDIV(x) (((x) >> S_T5_STATIC_KX_PLL_VBOOSTDIV) & M_T5_STATIC_KX_PLL_VBOOSTDIV)
+
+#define S_T5_STATIC_KX_PLL_CPISEL    24
+#define M_T5_STATIC_KX_PLL_CPISEL    0x7U
+#define V_T5_STATIC_KX_PLL_CPISEL(x) ((x) << S_T5_STATIC_KX_PLL_CPISEL)
+#define G_T5_STATIC_KX_PLL_CPISEL(x) (((x) >> S_T5_STATIC_KX_PLL_CPISEL) & M_T5_STATIC_KX_PLL_CPISEL)
+
+#define S_T5_STATIC_KX_PLL_CCALMETHOD    23
+#define V_T5_STATIC_KX_PLL_CCALMETHOD(x) ((x) << S_T5_STATIC_KX_PLL_CCALMETHOD)
+#define F_T5_STATIC_KX_PLL_CCALMETHOD    V_T5_STATIC_KX_PLL_CCALMETHOD(1U)
+
+#define S_T5_STATIC_KX_PLL_CCALLOAD    22
+#define V_T5_STATIC_KX_PLL_CCALLOAD(x) ((x) << S_T5_STATIC_KX_PLL_CCALLOAD)
+#define F_T5_STATIC_KX_PLL_CCALLOAD    V_T5_STATIC_KX_PLL_CCALLOAD(1U)
+
+#define S_T5_STATIC_KX_PLL_CCALFMIN    21
+#define V_T5_STATIC_KX_PLL_CCALFMIN(x) ((x) << S_T5_STATIC_KX_PLL_CCALFMIN)
+#define F_T5_STATIC_KX_PLL_CCALFMIN    V_T5_STATIC_KX_PLL_CCALFMIN(1U)
+
+#define S_T5_STATIC_KX_PLL_CCALFMAX    20
+#define V_T5_STATIC_KX_PLL_CCALFMAX(x) ((x) << S_T5_STATIC_KX_PLL_CCALFMAX)
+#define F_T5_STATIC_KX_PLL_CCALFMAX    V_T5_STATIC_KX_PLL_CCALFMAX(1U)
+
+#define S_T5_STATIC_KX_PLL_CCALCVHOLD    19
+#define V_T5_STATIC_KX_PLL_CCALCVHOLD(x) ((x) << S_T5_STATIC_KX_PLL_CCALCVHOLD)
+#define F_T5_STATIC_KX_PLL_CCALCVHOLD    V_T5_STATIC_KX_PLL_CCALCVHOLD(1U)
+
+#define S_T5_STATIC_KX_PLL_CCALBANDSEL    15
+#define M_T5_STATIC_KX_PLL_CCALBANDSEL    0xfU
+#define V_T5_STATIC_KX_PLL_CCALBANDSEL(x) ((x) << S_T5_STATIC_KX_PLL_CCALBANDSEL)
+#define G_T5_STATIC_KX_PLL_CCALBANDSEL(x) (((x) >> S_T5_STATIC_KX_PLL_CCALBANDSEL) & M_T5_STATIC_KX_PLL_CCALBANDSEL)
+
+#define S_T5_STATIC_KX_PLL_BGOFFSET    11
+#define M_T5_STATIC_KX_PLL_BGOFFSET    0xfU
+#define V_T5_STATIC_KX_PLL_BGOFFSET(x) ((x) << S_T5_STATIC_KX_PLL_BGOFFSET)
+#define G_T5_STATIC_KX_PLL_BGOFFSET(x) (((x) >> S_T5_STATIC_KX_PLL_BGOFFSET) & M_T5_STATIC_KX_PLL_BGOFFSET)
+
+#define S_T5_STATIC_KX_PLL_P    8
+#define M_T5_STATIC_KX_PLL_P    0x7U
+#define V_T5_STATIC_KX_PLL_P(x) ((x) << S_T5_STATIC_KX_PLL_P)
+#define G_T5_STATIC_KX_PLL_P(x) (((x) >> S_T5_STATIC_KX_PLL_P) & M_T5_STATIC_KX_PLL_P)
+
+#define S_T5_STATIC_KX_PLL_N2    4
+#define M_T5_STATIC_KX_PLL_N2    0xfU
+#define V_T5_STATIC_KX_PLL_N2(x) ((x) << S_T5_STATIC_KX_PLL_N2)
+#define G_T5_STATIC_KX_PLL_N2(x) (((x) >> S_T5_STATIC_KX_PLL_N2) & M_T5_STATIC_KX_PLL_N2)
+
+#define S_T5_STATIC_KX_PLL_N1    0
+#define M_T5_STATIC_KX_PLL_N1    0xfU
+#define V_T5_STATIC_KX_PLL_N1(x) ((x) << S_T5_STATIC_KX_PLL_N1)
+#define G_T5_STATIC_KX_PLL_N1(x) (((x) >> S_T5_STATIC_KX_PLL_N1) & M_T5_STATIC_KX_PLL_N1)
+
+#define A_DBG_STATIC_KX_PLL_CONF1 0x6108
+
+#define S_T6_STATIC_KX_PLL_BYPASS    30
+#define V_T6_STATIC_KX_PLL_BYPASS(x) ((x) << S_T6_STATIC_KX_PLL_BYPASS)
+#define F_T6_STATIC_KX_PLL_BYPASS    V_T6_STATIC_KX_PLL_BYPASS(1U)
+
+#define S_STATIC_KX_PLL_VBOOSTDIV    27
+#define M_STATIC_KX_PLL_VBOOSTDIV    0x7U
+#define V_STATIC_KX_PLL_VBOOSTDIV(x) ((x) << S_STATIC_KX_PLL_VBOOSTDIV)
+#define G_STATIC_KX_PLL_VBOOSTDIV(x) (((x) >> S_STATIC_KX_PLL_VBOOSTDIV) & M_STATIC_KX_PLL_VBOOSTDIV)
+
+#define S_STATIC_KX_PLL_CPISEL    24
+#define M_STATIC_KX_PLL_CPISEL    0x7U
+#define V_STATIC_KX_PLL_CPISEL(x) ((x) << S_STATIC_KX_PLL_CPISEL)
+#define G_STATIC_KX_PLL_CPISEL(x) (((x) >> S_STATIC_KX_PLL_CPISEL) & M_STATIC_KX_PLL_CPISEL)
+
+#define S_STATIC_KX_PLL_CCALMETHOD    23
+#define V_STATIC_KX_PLL_CCALMETHOD(x) ((x) << S_STATIC_KX_PLL_CCALMETHOD)
+#define F_STATIC_KX_PLL_CCALMETHOD    V_STATIC_KX_PLL_CCALMETHOD(1U)
+
+#define S_STATIC_KX_PLL_CCALLOAD    22
+#define V_STATIC_KX_PLL_CCALLOAD(x) ((x) << S_STATIC_KX_PLL_CCALLOAD)
+#define F_STATIC_KX_PLL_CCALLOAD    V_STATIC_KX_PLL_CCALLOAD(1U)
+
+#define S_STATIC_KX_PLL_CCALFMIN    21
+#define V_STATIC_KX_PLL_CCALFMIN(x) ((x) << S_STATIC_KX_PLL_CCALFMIN)
+#define F_STATIC_KX_PLL_CCALFMIN    V_STATIC_KX_PLL_CCALFMIN(1U)
+
+#define S_STATIC_KX_PLL_CCALFMAX    20
+#define V_STATIC_KX_PLL_CCALFMAX(x) ((x) << S_STATIC_KX_PLL_CCALFMAX)
+#define F_STATIC_KX_PLL_CCALFMAX    V_STATIC_KX_PLL_CCALFMAX(1U)
+
+#define S_STATIC_KX_PLL_CCALCVHOLD    19
+#define V_STATIC_KX_PLL_CCALCVHOLD(x) ((x) << S_STATIC_KX_PLL_CCALCVHOLD)
+#define F_STATIC_KX_PLL_CCALCVHOLD    V_STATIC_KX_PLL_CCALCVHOLD(1U)
+
+#define S_STATIC_KX_PLL_CCALBANDSEL    15
+#define M_STATIC_KX_PLL_CCALBANDSEL    0xfU
+#define V_STATIC_KX_PLL_CCALBANDSEL(x) ((x) << S_STATIC_KX_PLL_CCALBANDSEL)
+#define G_STATIC_KX_PLL_CCALBANDSEL(x) (((x) >> S_STATIC_KX_PLL_CCALBANDSEL) & M_STATIC_KX_PLL_CCALBANDSEL)
+
+#define S_STATIC_KX_PLL_BGOFFSET    11
+#define M_STATIC_KX_PLL_BGOFFSET    0xfU
+#define V_STATIC_KX_PLL_BGOFFSET(x) ((x) << S_STATIC_KX_PLL_BGOFFSET)
+#define G_STATIC_KX_PLL_BGOFFSET(x) (((x) >> S_STATIC_KX_PLL_BGOFFSET) & M_STATIC_KX_PLL_BGOFFSET)
+
+#define S_T6_STATIC_KX_PLL_P    8
+#define M_T6_STATIC_KX_PLL_P    0x7U
+#define V_T6_STATIC_KX_PLL_P(x) ((x) << S_T6_STATIC_KX_PLL_P)
+#define G_T6_STATIC_KX_PLL_P(x) (((x) >> S_T6_STATIC_KX_PLL_P) & M_T6_STATIC_KX_PLL_P)
+
+#define S_T6_STATIC_KX_PLL_N2    4
+#define M_T6_STATIC_KX_PLL_N2    0xfU
+#define V_T6_STATIC_KX_PLL_N2(x) ((x) << S_T6_STATIC_KX_PLL_N2)
+#define G_T6_STATIC_KX_PLL_N2(x) (((x) >> S_T6_STATIC_KX_PLL_N2) & M_T6_STATIC_KX_PLL_N2)
+
+#define S_T6_STATIC_KX_PLL_N1    0
+#define M_T6_STATIC_KX_PLL_N1    0xfU
+#define V_T6_STATIC_KX_PLL_N1(x) ((x) << S_T6_STATIC_KX_PLL_N1)
+#define G_T6_STATIC_KX_PLL_N1(x) (((x) >> S_T6_STATIC_KX_PLL_N1) & M_T6_STATIC_KX_PLL_N1)
+
 #define A_DBG_PVT_REG_DRVN 0x610c
 
 #define S_PVT_REG_DRVN_EN    8
@@ -4438,6 +13761,30 @@
 #define V_PVT_REG_DRVN_A(x) ((x) << S_PVT_REG_DRVN_A)
 #define G_PVT_REG_DRVN_A(x) (((x) >> S_PVT_REG_DRVN_A) & M_PVT_REG_DRVN_A)
 
+#define A_DBG_T5_STATIC_KX_PLL_CONF2 0x610c
+
+#define S_T5_STATIC_KX_PLL_M    11
+#define M_T5_STATIC_KX_PLL_M    0x1ffU
+#define V_T5_STATIC_KX_PLL_M(x) ((x) << S_T5_STATIC_KX_PLL_M)
+#define G_T5_STATIC_KX_PLL_M(x) (((x) >> S_T5_STATIC_KX_PLL_M) & M_T5_STATIC_KX_PLL_M)
+
+#define S_T5_STATIC_KX_PLL_ANALOGTUNE    0
+#define M_T5_STATIC_KX_PLL_ANALOGTUNE    0x7ffU
+#define V_T5_STATIC_KX_PLL_ANALOGTUNE(x) ((x) << S_T5_STATIC_KX_PLL_ANALOGTUNE)
+#define G_T5_STATIC_KX_PLL_ANALOGTUNE(x) (((x) >> S_T5_STATIC_KX_PLL_ANALOGTUNE) & M_T5_STATIC_KX_PLL_ANALOGTUNE)
+
+#define A_DBG_STATIC_KX_PLL_CONF2 0x610c
+
+#define S_T6_STATIC_KX_PLL_M    11
+#define M_T6_STATIC_KX_PLL_M    0x1ffU
+#define V_T6_STATIC_KX_PLL_M(x) ((x) << S_T6_STATIC_KX_PLL_M)
+#define G_T6_STATIC_KX_PLL_M(x) (((x) >> S_T6_STATIC_KX_PLL_M) & M_T6_STATIC_KX_PLL_M)
+
+#define S_STATIC_KX_PLL_ANALOGTUNE    0
+#define M_STATIC_KX_PLL_ANALOGTUNE    0x7ffU
+#define V_STATIC_KX_PLL_ANALOGTUNE(x) ((x) << S_STATIC_KX_PLL_ANALOGTUNE)
+#define G_STATIC_KX_PLL_ANALOGTUNE(x) (((x) >> S_STATIC_KX_PLL_ANALOGTUNE) & M_STATIC_KX_PLL_ANALOGTUNE)
+
 #define A_DBG_PVT_REG_DRVP 0x6110
 
 #define S_PVT_REG_DRVP_EN    8
@@ -4454,6 +13801,31 @@
 #define V_PVT_REG_DRVP_A(x) ((x) << S_PVT_REG_DRVP_A)
 #define G_PVT_REG_DRVP_A(x) (((x) >> S_PVT_REG_DRVP_A) & M_PVT_REG_DRVP_A)
 
+#define A_DBG_T5_STATIC_C_DFS_CONF 0x6110
+
+#define S_STATIC_C_DFS_RANGEA    8
+#define M_STATIC_C_DFS_RANGEA    0x1fU
+#define V_STATIC_C_DFS_RANGEA(x) ((x) << S_STATIC_C_DFS_RANGEA)
+#define G_STATIC_C_DFS_RANGEA(x) (((x) >> S_STATIC_C_DFS_RANGEA) & M_STATIC_C_DFS_RANGEA)
+
+#define S_STATIC_C_DFS_RANGEB    3
+#define M_STATIC_C_DFS_RANGEB    0x1fU
+#define V_STATIC_C_DFS_RANGEB(x) ((x) << S_STATIC_C_DFS_RANGEB)
+#define G_STATIC_C_DFS_RANGEB(x) (((x) >> S_STATIC_C_DFS_RANGEB) & M_STATIC_C_DFS_RANGEB)
+
+#define S_STATIC_C_DFS_FFTUNE4    2
+#define V_STATIC_C_DFS_FFTUNE4(x) ((x) << S_STATIC_C_DFS_FFTUNE4)
+#define F_STATIC_C_DFS_FFTUNE4    V_STATIC_C_DFS_FFTUNE4(1U)
+
+#define S_STATIC_C_DFS_FFTUNE5    1
+#define V_STATIC_C_DFS_FFTUNE5(x) ((x) << S_STATIC_C_DFS_FFTUNE5)
+#define F_STATIC_C_DFS_FFTUNE5    V_STATIC_C_DFS_FFTUNE5(1U)
+
+#define S_STATIC_C_DFS_ENABLE    0
+#define V_STATIC_C_DFS_ENABLE(x) ((x) << S_STATIC_C_DFS_ENABLE)
+#define F_STATIC_C_DFS_ENABLE    V_STATIC_C_DFS_ENABLE(1U)
+
+#define A_DBG_STATIC_C_DFS_CONF 0x6110
 #define A_DBG_PVT_REG_TERMN 0x6114
 
 #define S_PVT_REG_TERMN_EN    8
@@ -4470,6 +13842,31 @@
 #define V_PVT_REG_TERMN_A(x) ((x) << S_PVT_REG_TERMN_A)
 #define G_PVT_REG_TERMN_A(x) (((x) >> S_PVT_REG_TERMN_A) & M_PVT_REG_TERMN_A)
 
+#define A_DBG_T5_STATIC_U_DFS_CONF 0x6114
+
+#define S_STATIC_U_DFS_RANGEA    8
+#define M_STATIC_U_DFS_RANGEA    0x1fU
+#define V_STATIC_U_DFS_RANGEA(x) ((x) << S_STATIC_U_DFS_RANGEA)
+#define G_STATIC_U_DFS_RANGEA(x) (((x) >> S_STATIC_U_DFS_RANGEA) & M_STATIC_U_DFS_RANGEA)
+
+#define S_STATIC_U_DFS_RANGEB    3
+#define M_STATIC_U_DFS_RANGEB    0x1fU
+#define V_STATIC_U_DFS_RANGEB(x) ((x) << S_STATIC_U_DFS_RANGEB)
+#define G_STATIC_U_DFS_RANGEB(x) (((x) >> S_STATIC_U_DFS_RANGEB) & M_STATIC_U_DFS_RANGEB)
+
+#define S_STATIC_U_DFS_FFTUNE4    2
+#define V_STATIC_U_DFS_FFTUNE4(x) ((x) << S_STATIC_U_DFS_FFTUNE4)
+#define F_STATIC_U_DFS_FFTUNE4    V_STATIC_U_DFS_FFTUNE4(1U)
+
+#define S_STATIC_U_DFS_FFTUNE5    1
+#define V_STATIC_U_DFS_FFTUNE5(x) ((x) << S_STATIC_U_DFS_FFTUNE5)
+#define F_STATIC_U_DFS_FFTUNE5    V_STATIC_U_DFS_FFTUNE5(1U)
+
+#define S_STATIC_U_DFS_ENABLE    0
+#define V_STATIC_U_DFS_ENABLE(x) ((x) << S_STATIC_U_DFS_ENABLE)
+#define F_STATIC_U_DFS_ENABLE    V_STATIC_U_DFS_ENABLE(1U)
+
+#define A_DBG_STATIC_U_DFS_CONF 0x6114
 #define A_DBG_PVT_REG_TERMP 0x6118
 
 #define S_PVT_REG_TERMP_EN    8
@@ -4486,6 +13883,88 @@
 #define V_PVT_REG_TERMP_A(x) ((x) << S_PVT_REG_TERMP_A)
 #define G_PVT_REG_TERMP_A(x) (((x) >> S_PVT_REG_TERMP_A) & M_PVT_REG_TERMP_A)
 
+#define A_DBG_GPIO_PE_EN 0x6118
+
+#define S_GPIO19_PE_EN    19
+#define V_GPIO19_PE_EN(x) ((x) << S_GPIO19_PE_EN)
+#define F_GPIO19_PE_EN    V_GPIO19_PE_EN(1U)
+
+#define S_GPIO18_PE_EN    18
+#define V_GPIO18_PE_EN(x) ((x) << S_GPIO18_PE_EN)
+#define F_GPIO18_PE_EN    V_GPIO18_PE_EN(1U)
+
+#define S_GPIO17_PE_EN    17
+#define V_GPIO17_PE_EN(x) ((x) << S_GPIO17_PE_EN)
+#define F_GPIO17_PE_EN    V_GPIO17_PE_EN(1U)
+
+#define S_GPIO16_PE_EN    16
+#define V_GPIO16_PE_EN(x) ((x) << S_GPIO16_PE_EN)
+#define F_GPIO16_PE_EN    V_GPIO16_PE_EN(1U)
+
+#define S_GPIO15_PE_EN    15
+#define V_GPIO15_PE_EN(x) ((x) << S_GPIO15_PE_EN)
+#define F_GPIO15_PE_EN    V_GPIO15_PE_EN(1U)
+
+#define S_GPIO14_PE_EN    14
+#define V_GPIO14_PE_EN(x) ((x) << S_GPIO14_PE_EN)
+#define F_GPIO14_PE_EN    V_GPIO14_PE_EN(1U)
+
+#define S_GPIO13_PE_EN    13
+#define V_GPIO13_PE_EN(x) ((x) << S_GPIO13_PE_EN)
+#define F_GPIO13_PE_EN    V_GPIO13_PE_EN(1U)
+
+#define S_GPIO12_PE_EN    12
+#define V_GPIO12_PE_EN(x) ((x) << S_GPIO12_PE_EN)
+#define F_GPIO12_PE_EN    V_GPIO12_PE_EN(1U)
+
+#define S_GPIO11_PE_EN    11
+#define V_GPIO11_PE_EN(x) ((x) << S_GPIO11_PE_EN)
+#define F_GPIO11_PE_EN    V_GPIO11_PE_EN(1U)
+
+#define S_GPIO10_PE_EN    10
+#define V_GPIO10_PE_EN(x) ((x) << S_GPIO10_PE_EN)
+#define F_GPIO10_PE_EN    V_GPIO10_PE_EN(1U)
+
+#define S_GPIO9_PE_EN    9
+#define V_GPIO9_PE_EN(x) ((x) << S_GPIO9_PE_EN)
+#define F_GPIO9_PE_EN    V_GPIO9_PE_EN(1U)
+
+#define S_GPIO8_PE_EN    8
+#define V_GPIO8_PE_EN(x) ((x) << S_GPIO8_PE_EN)
+#define F_GPIO8_PE_EN    V_GPIO8_PE_EN(1U)
+
+#define S_GPIO7_PE_EN    7
+#define V_GPIO7_PE_EN(x) ((x) << S_GPIO7_PE_EN)
+#define F_GPIO7_PE_EN    V_GPIO7_PE_EN(1U)
+
+#define S_GPIO6_PE_EN    6
+#define V_GPIO6_PE_EN(x) ((x) << S_GPIO6_PE_EN)
+#define F_GPIO6_PE_EN    V_GPIO6_PE_EN(1U)
+
+#define S_GPIO5_PE_EN    5
+#define V_GPIO5_PE_EN(x) ((x) << S_GPIO5_PE_EN)
+#define F_GPIO5_PE_EN    V_GPIO5_PE_EN(1U)
+
+#define S_GPIO4_PE_EN    4
+#define V_GPIO4_PE_EN(x) ((x) << S_GPIO4_PE_EN)
+#define F_GPIO4_PE_EN    V_GPIO4_PE_EN(1U)
+
+#define S_GPIO3_PE_EN    3
+#define V_GPIO3_PE_EN(x) ((x) << S_GPIO3_PE_EN)
+#define F_GPIO3_PE_EN    V_GPIO3_PE_EN(1U)
+
+#define S_GPIO2_PE_EN    2
+#define V_GPIO2_PE_EN(x) ((x) << S_GPIO2_PE_EN)
+#define F_GPIO2_PE_EN    V_GPIO2_PE_EN(1U)
+
+#define S_GPIO1_PE_EN    1
+#define V_GPIO1_PE_EN(x) ((x) << S_GPIO1_PE_EN)
+#define F_GPIO1_PE_EN    V_GPIO1_PE_EN(1U)
+
+#define S_GPIO0_PE_EN    0
+#define V_GPIO0_PE_EN(x) ((x) << S_GPIO0_PE_EN)
+#define F_GPIO0_PE_EN    V_GPIO0_PE_EN(1U)
+
 #define A_DBG_PVT_REG_THRESHOLD 0x611c
 
 #define S_PVT_CALIBRATION_DONE    8
@@ -4524,6 +14003,88 @@
 #define V_THRESHOLD_DRVN_MIN_SYNC(x) ((x) << S_THRESHOLD_DRVN_MIN_SYNC)
 #define F_THRESHOLD_DRVN_MIN_SYNC    V_THRESHOLD_DRVN_MIN_SYNC(1U)
 
+#define A_DBG_GPIO_PS_EN 0x611c
+
+#define S_GPIO19_PS_EN    19
+#define V_GPIO19_PS_EN(x) ((x) << S_GPIO19_PS_EN)
+#define F_GPIO19_PS_EN    V_GPIO19_PS_EN(1U)
+
+#define S_GPIO18_PS_EN    18
+#define V_GPIO18_PS_EN(x) ((x) << S_GPIO18_PS_EN)
+#define F_GPIO18_PS_EN    V_GPIO18_PS_EN(1U)
+
+#define S_GPIO17_PS_EN    17
+#define V_GPIO17_PS_EN(x) ((x) << S_GPIO17_PS_EN)
+#define F_GPIO17_PS_EN    V_GPIO17_PS_EN(1U)
+
+#define S_GPIO16_PS_EN    16
+#define V_GPIO16_PS_EN(x) ((x) << S_GPIO16_PS_EN)
+#define F_GPIO16_PS_EN    V_GPIO16_PS_EN(1U)
+
+#define S_GPIO15_PS_EN    15
+#define V_GPIO15_PS_EN(x) ((x) << S_GPIO15_PS_EN)
+#define F_GPIO15_PS_EN    V_GPIO15_PS_EN(1U)
+
+#define S_GPIO14_PS_EN    14
+#define V_GPIO14_PS_EN(x) ((x) << S_GPIO14_PS_EN)
+#define F_GPIO14_PS_EN    V_GPIO14_PS_EN(1U)
+
+#define S_GPIO13_PS_EN    13
+#define V_GPIO13_PS_EN(x) ((x) << S_GPIO13_PS_EN)
+#define F_GPIO13_PS_EN    V_GPIO13_PS_EN(1U)
+
+#define S_GPIO12_PS_EN    12
+#define V_GPIO12_PS_EN(x) ((x) << S_GPIO12_PS_EN)
+#define F_GPIO12_PS_EN    V_GPIO12_PS_EN(1U)
+
+#define S_GPIO11_PS_EN    11
+#define V_GPIO11_PS_EN(x) ((x) << S_GPIO11_PS_EN)
+#define F_GPIO11_PS_EN    V_GPIO11_PS_EN(1U)
+
+#define S_GPIO10_PS_EN    10
+#define V_GPIO10_PS_EN(x) ((x) << S_GPIO10_PS_EN)
+#define F_GPIO10_PS_EN    V_GPIO10_PS_EN(1U)
+
+#define S_GPIO9_PS_EN    9
+#define V_GPIO9_PS_EN(x) ((x) << S_GPIO9_PS_EN)
+#define F_GPIO9_PS_EN    V_GPIO9_PS_EN(1U)
+
+#define S_GPIO8_PS_EN    8
+#define V_GPIO8_PS_EN(x) ((x) << S_GPIO8_PS_EN)
+#define F_GPIO8_PS_EN    V_GPIO8_PS_EN(1U)
+
+#define S_GPIO7_PS_EN    7
+#define V_GPIO7_PS_EN(x) ((x) << S_GPIO7_PS_EN)
+#define F_GPIO7_PS_EN    V_GPIO7_PS_EN(1U)
+
+#define S_GPIO6_PS_EN    6
+#define V_GPIO6_PS_EN(x) ((x) << S_GPIO6_PS_EN)
+#define F_GPIO6_PS_EN    V_GPIO6_PS_EN(1U)
+
+#define S_GPIO5_PS_EN    5
+#define V_GPIO5_PS_EN(x) ((x) << S_GPIO5_PS_EN)
+#define F_GPIO5_PS_EN    V_GPIO5_PS_EN(1U)
+
+#define S_GPIO4_PS_EN    4
+#define V_GPIO4_PS_EN(x) ((x) << S_GPIO4_PS_EN)
+#define F_GPIO4_PS_EN    V_GPIO4_PS_EN(1U)
+
+#define S_GPIO3_PS_EN    3
+#define V_GPIO3_PS_EN(x) ((x) << S_GPIO3_PS_EN)
+#define F_GPIO3_PS_EN    V_GPIO3_PS_EN(1U)
+
+#define S_GPIO2_PS_EN    2
+#define V_GPIO2_PS_EN(x) ((x) << S_GPIO2_PS_EN)
+#define F_GPIO2_PS_EN    V_GPIO2_PS_EN(1U)
+
+#define S_GPIO1_PS_EN    1
+#define V_GPIO1_PS_EN(x) ((x) << S_GPIO1_PS_EN)
+#define F_GPIO1_PS_EN    V_GPIO1_PS_EN(1U)
+
+#define S_GPIO0_PS_EN    0
+#define V_GPIO0_PS_EN(x) ((x) << S_GPIO0_PS_EN)
+#define F_GPIO0_PS_EN    V_GPIO0_PS_EN(1U)
+
 #define A_DBG_PVT_REG_IN_TERMP 0x6120
 
 #define S_REG_IN_TERMP_B    4
@@ -4536,6 +14097,7 @@
 #define V_REG_IN_TERMP_A(x) ((x) << S_REG_IN_TERMP_A)
 #define G_REG_IN_TERMP_A(x) (((x) >> S_REG_IN_TERMP_A) & M_REG_IN_TERMP_A)
 
+#define A_DBG_EFUSE_BYTE16_19 0x6120
 #define A_DBG_PVT_REG_IN_TERMN 0x6124
 
 #define S_REG_IN_TERMN_B    4
@@ -4548,6 +14110,7 @@
 #define V_REG_IN_TERMN_A(x) ((x) << S_REG_IN_TERMN_A)
 #define G_REG_IN_TERMN_A(x) (((x) >> S_REG_IN_TERMN_A) & M_REG_IN_TERMN_A)
 
+#define A_DBG_EFUSE_BYTE20_23 0x6124
 #define A_DBG_PVT_REG_IN_DRVP 0x6128
 
 #define S_REG_IN_DRVP_B    4
@@ -4560,6 +14123,7 @@
 #define V_REG_IN_DRVP_A(x) ((x) << S_REG_IN_DRVP_A)
 #define G_REG_IN_DRVP_A(x) (((x) >> S_REG_IN_DRVP_A) & M_REG_IN_DRVP_A)
 
+#define A_DBG_EFUSE_BYTE24_27 0x6128
 #define A_DBG_PVT_REG_IN_DRVN 0x612c
 
 #define S_REG_IN_DRVN_B    4
@@ -4572,6 +14136,7 @@
 #define V_REG_IN_DRVN_A(x) ((x) << S_REG_IN_DRVN_A)
 #define G_REG_IN_DRVN_A(x) (((x) >> S_REG_IN_DRVN_A) & M_REG_IN_DRVN_A)
 
+#define A_DBG_EFUSE_BYTE28_31 0x612c
 #define A_DBG_PVT_REG_OUT_TERMP 0x6130
 
 #define S_REG_OUT_TERMP_B    4
@@ -4584,6 +14149,7 @@
 #define V_REG_OUT_TERMP_A(x) ((x) << S_REG_OUT_TERMP_A)
 #define G_REG_OUT_TERMP_A(x) (((x) >> S_REG_OUT_TERMP_A) & M_REG_OUT_TERMP_A)
 
+#define A_DBG_EFUSE_BYTE32_35 0x6130
 #define A_DBG_PVT_REG_OUT_TERMN 0x6134
 
 #define S_REG_OUT_TERMN_B    4
@@ -4596,6 +14162,7 @@
 #define V_REG_OUT_TERMN_A(x) ((x) << S_REG_OUT_TERMN_A)
 #define G_REG_OUT_TERMN_A(x) (((x) >> S_REG_OUT_TERMN_A) & M_REG_OUT_TERMN_A)
 
+#define A_DBG_EFUSE_BYTE36_39 0x6134
 #define A_DBG_PVT_REG_OUT_DRVP 0x6138
 
 #define S_REG_OUT_DRVP_B    4
@@ -4608,6 +14175,7 @@
 #define V_REG_OUT_DRVP_A(x) ((x) << S_REG_OUT_DRVP_A)
 #define G_REG_OUT_DRVP_A(x) (((x) >> S_REG_OUT_DRVP_A) & M_REG_OUT_DRVP_A)
 
+#define A_DBG_EFUSE_BYTE40_43 0x6138
 #define A_DBG_PVT_REG_OUT_DRVN 0x613c
 
 #define S_REG_OUT_DRVN_B    4
@@ -4620,6 +14188,7 @@
 #define V_REG_OUT_DRVN_A(x) ((x) << S_REG_OUT_DRVN_A)
 #define G_REG_OUT_DRVN_A(x) (((x) >> S_REG_OUT_DRVN_A) & M_REG_OUT_DRVN_A)
 
+#define A_DBG_EFUSE_BYTE44_47 0x613c
 #define A_DBG_PVT_REG_HISTORY_TERMP 0x6140
 
 #define S_TERMP_B_HISTORY    4
@@ -4632,6 +14201,7 @@
 #define V_TERMP_A_HISTORY(x) ((x) << S_TERMP_A_HISTORY)
 #define G_TERMP_A_HISTORY(x) (((x) >> S_TERMP_A_HISTORY) & M_TERMP_A_HISTORY)
 
+#define A_DBG_EFUSE_BYTE48_51 0x6140
 #define A_DBG_PVT_REG_HISTORY_TERMN 0x6144
 
 #define S_TERMN_B_HISTORY    4
@@ -4644,6 +14214,7 @@
 #define V_TERMN_A_HISTORY(x) ((x) << S_TERMN_A_HISTORY)
 #define G_TERMN_A_HISTORY(x) (((x) >> S_TERMN_A_HISTORY) & M_TERMN_A_HISTORY)
 
+#define A_DBG_EFUSE_BYTE52_55 0x6144
 #define A_DBG_PVT_REG_HISTORY_DRVP 0x6148
 
 #define S_DRVP_B_HISTORY    4
@@ -4656,6 +14227,7 @@
 #define V_DRVP_A_HISTORY(x) ((x) << S_DRVP_A_HISTORY)
 #define G_DRVP_A_HISTORY(x) (((x) >> S_DRVP_A_HISTORY) & M_DRVP_A_HISTORY)
 
+#define A_DBG_EFUSE_BYTE56_59 0x6148
 #define A_DBG_PVT_REG_HISTORY_DRVN 0x614c
 
 #define S_DRVN_B_HISTORY    4
@@ -4668,6 +14240,7 @@
 #define V_DRVN_A_HISTORY(x) ((x) << S_DRVN_A_HISTORY)
 #define G_DRVN_A_HISTORY(x) (((x) >> S_DRVN_A_HISTORY) & M_DRVN_A_HISTORY)
 
+#define A_DBG_EFUSE_BYTE60_63 0x614c
 #define A_DBG_PVT_REG_SAMPLE_WAIT_CLKS 0x6150
 
 #define S_SAMPLE_WAIT_CLKS    0
@@ -4675,6 +14248,70 @@
 #define V_SAMPLE_WAIT_CLKS(x) ((x) << S_SAMPLE_WAIT_CLKS)
 #define G_SAMPLE_WAIT_CLKS(x) (((x) >> S_SAMPLE_WAIT_CLKS) & M_SAMPLE_WAIT_CLKS)
 
+#define A_DBG_STATIC_U_PLL_CONF6 0x6150
+
+#define S_STATIC_U_PLL_VREGTUNE    0
+#define M_STATIC_U_PLL_VREGTUNE    0x7ffffU
+#define V_STATIC_U_PLL_VREGTUNE(x) ((x) << S_STATIC_U_PLL_VREGTUNE)
+#define G_STATIC_U_PLL_VREGTUNE(x) (((x) >> S_STATIC_U_PLL_VREGTUNE) & M_STATIC_U_PLL_VREGTUNE)
+
+#define A_DBG_STATIC_C_PLL_CONF6 0x6154
+
+#define S_STATIC_C_PLL_VREGTUNE    0
+#define M_STATIC_C_PLL_VREGTUNE    0x7ffffU
+#define V_STATIC_C_PLL_VREGTUNE(x) ((x) << S_STATIC_C_PLL_VREGTUNE)
+#define G_STATIC_C_PLL_VREGTUNE(x) (((x) >> S_STATIC_C_PLL_VREGTUNE) & M_STATIC_C_PLL_VREGTUNE)
+
+#define A_DBG_CUST_EFUSE_PROGRAM 0x6158
+
+#define S_EFUSE_PROG_PERIOD    16
+#define M_EFUSE_PROG_PERIOD    0xffffU
+#define V_EFUSE_PROG_PERIOD(x) ((x) << S_EFUSE_PROG_PERIOD)
+#define G_EFUSE_PROG_PERIOD(x) (((x) >> S_EFUSE_PROG_PERIOD) & M_EFUSE_PROG_PERIOD)
+
+#define S_EFUSE_OPER_TYP    14
+#define M_EFUSE_OPER_TYP    0x3U
+#define V_EFUSE_OPER_TYP(x) ((x) << S_EFUSE_OPER_TYP)
+#define G_EFUSE_OPER_TYP(x) (((x) >> S_EFUSE_OPER_TYP) & M_EFUSE_OPER_TYP)
+
+#define S_EFUSE_ADDR    8
+#define M_EFUSE_ADDR    0x3fU
+#define V_EFUSE_ADDR(x) ((x) << S_EFUSE_ADDR)
+#define G_EFUSE_ADDR(x) (((x) >> S_EFUSE_ADDR) & M_EFUSE_ADDR)
+
+#define S_EFUSE_DIN    0
+#define M_EFUSE_DIN    0xffU
+#define V_EFUSE_DIN(x) ((x) << S_EFUSE_DIN)
+#define G_EFUSE_DIN(x) (((x) >> S_EFUSE_DIN) & M_EFUSE_DIN)
+
+#define A_DBG_CUST_EFUSE_OUT 0x615c
+
+#define S_EFUSE_OPER_DONE    8
+#define V_EFUSE_OPER_DONE(x) ((x) << S_EFUSE_OPER_DONE)
+#define F_EFUSE_OPER_DONE    V_EFUSE_OPER_DONE(1U)
+
+#define S_EFUSE_DOUT    0
+#define M_EFUSE_DOUT    0xffU
+#define V_EFUSE_DOUT(x) ((x) << S_EFUSE_DOUT)
+#define G_EFUSE_DOUT(x) (((x) >> S_EFUSE_DOUT) & M_EFUSE_DOUT)
+
+#define A_DBG_CUST_EFUSE_BYTE0_3 0x6160
+#define A_DBG_CUST_EFUSE_BYTE4_7 0x6164
+#define A_DBG_CUST_EFUSE_BYTE8_11 0x6168
+#define A_DBG_CUST_EFUSE_BYTE12_15 0x616c
+#define A_DBG_CUST_EFUSE_BYTE16_19 0x6170
+#define A_DBG_CUST_EFUSE_BYTE20_23 0x6174
+#define A_DBG_CUST_EFUSE_BYTE24_27 0x6178
+#define A_DBG_CUST_EFUSE_BYTE28_31 0x617c
+#define A_DBG_CUST_EFUSE_BYTE32_35 0x6180
+#define A_DBG_CUST_EFUSE_BYTE36_39 0x6184
+#define A_DBG_CUST_EFUSE_BYTE40_43 0x6188
+#define A_DBG_CUST_EFUSE_BYTE44_47 0x618c
+#define A_DBG_CUST_EFUSE_BYTE48_51 0x6190
+#define A_DBG_CUST_EFUSE_BYTE52_55 0x6194
+#define A_DBG_CUST_EFUSE_BYTE56_59 0x6198
+#define A_DBG_CUST_EFUSE_BYTE60_63 0x619c
+
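[The A_DBG_CUST_EFUSE_PROGRAM / A_DBG_CUST_EFUSE_OUT pair above implies a simple command-and-poll interface: load the programming period, operation type, address, and data byte, then poll F_EFUSE_OPER_DONE and collect G_EFUSE_DOUT. A hedged sketch, assuming the driver's struct adapter and its t4_read_reg()/t4_write_reg() accessors; the operation encoding, period value, and polling budget are assumptions, not documented behavior:]

/* Sketch: run one customer-efuse operation and wait for completion. */
static int
cust_efuse_op(struct adapter *sc, u_int oper, u_int addr, u_int din,
    uint8_t *dout)
{
	uint32_t v;
	int i;

	t4_write_reg(sc, A_DBG_CUST_EFUSE_PROGRAM,
	    V_EFUSE_PROG_PERIOD(0x1000) |	/* period: assumed value */
	    V_EFUSE_OPER_TYP(oper) | V_EFUSE_ADDR(addr) | V_EFUSE_DIN(din));
	for (i = 0; i < 1000; i++) {		/* poll budget: assumed */
		v = t4_read_reg(sc, A_DBG_CUST_EFUSE_OUT);
		if (v & F_EFUSE_OPER_DONE) {
			*dout = G_EFUSE_DOUT(v);
			return (0);
		}
	}
	return (-1);				/* timed out */
}
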
 /* registers for module MC */
 #define MC_BASE_ADDR 0x6200
 
@@ -6507,6 +16144,18 @@
 #define V_EXT_MEM_SIZE(x) ((x) << S_EXT_MEM_SIZE)
 #define G_EXT_MEM_SIZE(x) (((x) >> S_EXT_MEM_SIZE) & M_EXT_MEM_SIZE)
 
+#define A_MA_EXT_MEMORY0_BAR 0x77c8
+
+#define S_EXT_MEM0_BASE    16
+#define M_EXT_MEM0_BASE    0xfffU
+#define V_EXT_MEM0_BASE(x) ((x) << S_EXT_MEM0_BASE)
+#define G_EXT_MEM0_BASE(x) (((x) >> S_EXT_MEM0_BASE) & M_EXT_MEM0_BASE)
+
+#define S_EXT_MEM0_SIZE    0
+#define M_EXT_MEM0_SIZE    0xfffU
+#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
+#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+
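[EXT_MEM0_BASE and EXT_MEM0_SIZE are 12-bit fields packed into one BAR word. A short decoding sketch; the 1 MB (<< 20) scaling mirrors how the driver decodes the pre-existing EXT_MEM BAR fields and is carried over here as an assumption:]

/* Sketch: recover the external-memory-0 window in bytes. */
static void
ext_mem0_window(struct adapter *sc, uint64_t *base, uint64_t *size)
{
	uint32_t bar = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);

	*base = (uint64_t)G_EXT_MEM0_BASE(bar) << 20;
	*size = (uint64_t)G_EXT_MEM0_SIZE(bar) << 20;
}
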
 #define A_MA_HOST_MEMORY_BAR 0x77cc
 
 #define S_HMA_BASE    16
@@ -6530,6 +16179,28 @@
 #define V_EXT_MEM_PAGE_SIZE(x) ((x) << S_EXT_MEM_PAGE_SIZE)
 #define G_EXT_MEM_PAGE_SIZE(x) (((x) >> S_EXT_MEM_PAGE_SIZE) & M_EXT_MEM_PAGE_SIZE)
 
+#define S_BRC_MODE1    6
+#define V_BRC_MODE1(x) ((x) << S_BRC_MODE1)
+#define F_BRC_MODE1    V_BRC_MODE1(1U)
+
+#define S_EXT_MEM_PAGE_SIZE1    4
+#define M_EXT_MEM_PAGE_SIZE1    0x3U
+#define V_EXT_MEM_PAGE_SIZE1(x) ((x) << S_EXT_MEM_PAGE_SIZE1)
+#define G_EXT_MEM_PAGE_SIZE1(x) (((x) >> S_EXT_MEM_PAGE_SIZE1) & M_EXT_MEM_PAGE_SIZE1)
+
+#define S_BRBC_MODE    4
+#define V_BRBC_MODE(x) ((x) << S_BRBC_MODE)
+#define F_BRBC_MODE    V_BRBC_MODE(1U)
+
+#define S_T6_BRC_MODE    3
+#define V_T6_BRC_MODE(x) ((x) << S_T6_BRC_MODE)
+#define F_T6_BRC_MODE    V_T6_BRC_MODE(1U)
+
+#define S_T6_EXT_MEM_PAGE_SIZE    0
+#define M_T6_EXT_MEM_PAGE_SIZE    0x7U
+#define V_T6_EXT_MEM_PAGE_SIZE(x) ((x) << S_T6_EXT_MEM_PAGE_SIZE)
+#define G_T6_EXT_MEM_PAGE_SIZE(x) (((x) >> S_T6_EXT_MEM_PAGE_SIZE) & M_T6_EXT_MEM_PAGE_SIZE)
+
 #define A_MA_ARB_CTRL 0x77d4
 
 #define S_DIS_PAGE_HINT    1
@@ -6540,6 +16211,52 @@
 #define V_DIS_ADV_ARB(x) ((x) << S_DIS_ADV_ARB)
 #define F_DIS_ADV_ARB    V_DIS_ADV_ARB(1U)
 
+#define S_DIS_BANK_FAIR    2
+#define V_DIS_BANK_FAIR(x) ((x) << S_DIS_BANK_FAIR)
+#define F_DIS_BANK_FAIR    V_DIS_BANK_FAIR(1U)
+
+#define S_HMA_WRT_EN    26
+#define V_HMA_WRT_EN(x) ((x) << S_HMA_WRT_EN)
+#define F_HMA_WRT_EN    V_HMA_WRT_EN(1U)
+
+#define S_HMA_NUM_PG_128B_FDBK    21
+#define M_HMA_NUM_PG_128B_FDBK    0x1fU
+#define V_HMA_NUM_PG_128B_FDBK(x) ((x) << S_HMA_NUM_PG_128B_FDBK)
+#define G_HMA_NUM_PG_128B_FDBK(x) (((x) >> S_HMA_NUM_PG_128B_FDBK) & M_HMA_NUM_PG_128B_FDBK)
+
+#define S_HMA_DIS_128B_PG_CNT_FDBK    20
+#define V_HMA_DIS_128B_PG_CNT_FDBK(x) ((x) << S_HMA_DIS_128B_PG_CNT_FDBK)
+#define F_HMA_DIS_128B_PG_CNT_FDBK    V_HMA_DIS_128B_PG_CNT_FDBK(1U)
+
+#define S_HMA_DIS_BG_ARB    19
+#define V_HMA_DIS_BG_ARB(x) ((x) << S_HMA_DIS_BG_ARB)
+#define F_HMA_DIS_BG_ARB    V_HMA_DIS_BG_ARB(1U)
+
+#define S_HMA_DIS_BANK_FAIR    18
+#define V_HMA_DIS_BANK_FAIR(x) ((x) << S_HMA_DIS_BANK_FAIR)
+#define F_HMA_DIS_BANK_FAIR    V_HMA_DIS_BANK_FAIR(1U)
+
+#define S_HMA_DIS_PAGE_HINT    17
+#define V_HMA_DIS_PAGE_HINT(x) ((x) << S_HMA_DIS_PAGE_HINT)
+#define F_HMA_DIS_PAGE_HINT    V_HMA_DIS_PAGE_HINT(1U)
+
+#define S_HMA_DIS_ADV_ARB    16
+#define V_HMA_DIS_ADV_ARB(x) ((x) << S_HMA_DIS_ADV_ARB)
+#define F_HMA_DIS_ADV_ARB    V_HMA_DIS_ADV_ARB(1U)
+
+#define S_NUM_PG_128B_FDBK    5
+#define M_NUM_PG_128B_FDBK    0x1fU
+#define V_NUM_PG_128B_FDBK(x) ((x) << S_NUM_PG_128B_FDBK)
+#define G_NUM_PG_128B_FDBK(x) (((x) >> S_NUM_PG_128B_FDBK) & M_NUM_PG_128B_FDBK)
+
+#define S_DIS_128B_PG_CNT_FDBK    4
+#define V_DIS_128B_PG_CNT_FDBK(x) ((x) << S_DIS_128B_PG_CNT_FDBK)
+#define F_DIS_128B_PG_CNT_FDBK    V_DIS_128B_PG_CNT_FDBK(1U)
+
+#define S_DIS_BG_ARB    3
+#define V_DIS_BG_ARB(x) ((x) << S_DIS_BG_ARB)
+#define F_DIS_BG_ARB    V_DIS_BG_ARB(1U)
+
 #define A_MA_TARGET_MEM_ENABLE 0x77d8
 
 #define S_HMA_ENABLE    3
@@ -6558,6 +16275,22 @@
 #define V_EDRAM0_ENABLE(x) ((x) << S_EDRAM0_ENABLE)
 #define F_EDRAM0_ENABLE    V_EDRAM0_ENABLE(1U)
 
+#define S_HMA_MUX    5
+#define V_HMA_MUX(x) ((x) << S_HMA_MUX)
+#define F_HMA_MUX    V_HMA_MUX(1U)
+
+#define S_EXT_MEM1_ENABLE    4
+#define V_EXT_MEM1_ENABLE(x) ((x) << S_EXT_MEM1_ENABLE)
+#define F_EXT_MEM1_ENABLE    V_EXT_MEM1_ENABLE(1U)
+
+#define S_EXT_MEM0_ENABLE    2
+#define V_EXT_MEM0_ENABLE(x) ((x) << S_EXT_MEM0_ENABLE)
+#define F_EXT_MEM0_ENABLE    V_EXT_MEM0_ENABLE(1U)
+
+#define S_MC_SPLIT    6
+#define V_MC_SPLIT(x) ((x) << S_MC_SPLIT)
+#define F_MC_SPLIT    V_MC_SPLIT(1U)
+
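[With the split memory controller, the per-target enables become individual flag bits in A_MA_TARGET_MEM_ENABLE. A sketch of turning on both external memory channels with a read-modify-write; whether F_MC_SPLIT must also be set is hardware-dependent and not assumed here:]

/* Sketch: enable both external memory targets. */
static void
enable_ext_mems(struct adapter *sc)
{
	uint32_t v = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);

	t4_write_reg(sc, A_MA_TARGET_MEM_ENABLE,
	    v | F_EXT_MEM0_ENABLE | F_EXT_MEM1_ENABLE);
}
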
 #define A_MA_INT_ENABLE 0x77dc
 
 #define S_MEM_PERR_INT_ENABLE    1
@@ -6568,6 +16301,10 @@
 #define V_MEM_WRAP_INT_ENABLE(x) ((x) << S_MEM_WRAP_INT_ENABLE)
 #define F_MEM_WRAP_INT_ENABLE    V_MEM_WRAP_INT_ENABLE(1U)
 
+#define S_MEM_TO_INT_ENABLE    2
+#define V_MEM_TO_INT_ENABLE(x) ((x) << S_MEM_TO_INT_ENABLE)
+#define F_MEM_TO_INT_ENABLE    V_MEM_TO_INT_ENABLE(1U)
+
 #define A_MA_INT_CAUSE 0x77e0
 
 #define S_MEM_PERR_INT_CAUSE    1
@@ -6578,6 +16315,10 @@
 #define V_MEM_WRAP_INT_CAUSE(x) ((x) << S_MEM_WRAP_INT_CAUSE)
 #define F_MEM_WRAP_INT_CAUSE    V_MEM_WRAP_INT_CAUSE(1U)
 
+#define S_MEM_TO_INT_CAUSE    2
+#define V_MEM_TO_INT_CAUSE(x) ((x) << S_MEM_TO_INT_CAUSE)
+#define F_MEM_TO_INT_CAUSE    V_MEM_TO_INT_CAUSE(1U)
+
 #define A_MA_INT_WRAP_STATUS 0x77e4
 
 #define S_MEM_WRAP_ADDRESS    4
@@ -6734,6 +16475,7 @@
 #define V_CL0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL0_PAR_RDQUEUE_ERROR_EN)
 #define F_CL0_PAR_RDQUEUE_ERROR_EN    V_CL0_PAR_RDQUEUE_ERROR_EN(1U)
 
+#define A_MA_PARITY_ERROR_ENABLE1 0x77f0
 #define A_MA_PARITY_ERROR_STATUS 0x77f4
 
 #define S_TP_DMARBT_PAR_ERROR    31
@@ -6864,6 +16606,7 @@
 #define V_CL0_PAR_RDQUEUE_ERROR(x) ((x) << S_CL0_PAR_RDQUEUE_ERROR)
 #define F_CL0_PAR_RDQUEUE_ERROR    V_CL0_PAR_RDQUEUE_ERROR(1U)
 
+#define A_MA_PARITY_ERROR_STATUS1 0x77f4
 #define A_MA_SGE_PCIE_COHERANCY_CTRL 0x77f8
 
 #define S_BONUS_REG    6
@@ -6891,6 +16634,3636 @@
 #define V_UE_ENABLE(x) ((x) << S_UE_ENABLE)
 #define F_UE_ENABLE    V_UE_ENABLE(1U)
 
+#define S_FUTURE_EXPANSION    1
+#define M_FUTURE_EXPANSION    0x7fffffffU
+#define V_FUTURE_EXPANSION(x) ((x) << S_FUTURE_EXPANSION)
+#define G_FUTURE_EXPANSION(x) (((x) >> S_FUTURE_EXPANSION) & M_FUTURE_EXPANSION)
+
+#define S_FUTURE_EXPANSION_EE    1
+#define M_FUTURE_EXPANSION_EE    0x7fffffffU
+#define V_FUTURE_EXPANSION_EE(x) ((x) << S_FUTURE_EXPANSION_EE)
+#define G_FUTURE_EXPANSION_EE(x) (((x) >> S_FUTURE_EXPANSION_EE) & M_FUTURE_EXPANSION_EE)
+
+#define A_MA_PARITY_ERROR_ENABLE2 0x7800
+
+#define S_ARB4_PAR_WRQUEUE_ERROR_EN    1
+#define V_ARB4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB4_PAR_WRQUEUE_ERROR_EN)
+#define F_ARB4_PAR_WRQUEUE_ERROR_EN    V_ARB4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB4_PAR_RDQUEUE_ERROR_EN    0
+#define V_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR_EN)
+#define F_ARB4_PAR_RDQUEUE_ERROR_EN    V_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_PARITY_ERROR_STATUS2 0x7804
+
+#define S_ARB4_PAR_WRQUEUE_ERROR    1
+#define V_ARB4_PAR_WRQUEUE_ERROR(x) ((x) << S_ARB4_PAR_WRQUEUE_ERROR)
+#define F_ARB4_PAR_WRQUEUE_ERROR    V_ARB4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_ARB4_PAR_RDQUEUE_ERROR    0
+#define V_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR)
+#define F_ARB4_PAR_RDQUEUE_ERROR    V_ARB4_PAR_RDQUEUE_ERROR(1U)
+
+#define A_MA_EXT_MEMORY1_BAR 0x7808
+
+#define S_EXT_MEM1_BASE    16
+#define M_EXT_MEM1_BASE    0xfffU
+#define V_EXT_MEM1_BASE(x) ((x) << S_EXT_MEM1_BASE)
+#define G_EXT_MEM1_BASE(x) (((x) >> S_EXT_MEM1_BASE) & M_EXT_MEM1_BASE)
+
+#define S_EXT_MEM1_SIZE    0
+#define M_EXT_MEM1_SIZE    0xfffU
+#define V_EXT_MEM1_SIZE(x) ((x) << S_EXT_MEM1_SIZE)
+#define G_EXT_MEM1_SIZE(x) (((x) >> S_EXT_MEM1_SIZE) & M_EXT_MEM1_SIZE)
+
+#define A_MA_PMTX_THROTTLE 0x780c
+
+#define S_FL_ENABLE    31
+#define V_FL_ENABLE(x) ((x) << S_FL_ENABLE)
+#define F_FL_ENABLE    V_FL_ENABLE(1U)
+
+#define S_FL_LIMIT    0
+#define M_FL_LIMIT    0xffU
+#define V_FL_LIMIT(x) ((x) << S_FL_LIMIT)
+#define G_FL_LIMIT(x) (((x) >> S_FL_LIMIT) & M_FL_LIMIT)
+
+#define A_MA_PMRX_THROTTLE 0x7810
+#define A_MA_SGE_TH0_WRDATA_CNT 0x7814
+#define A_MA_SGE_TH1_WRDATA_CNT 0x7818
+#define A_MA_ULPTX_WRDATA_CNT 0x781c
+#define A_MA_ULPRX_WRDATA_CNT 0x7820
+#define A_MA_ULPTXRX_WRDATA_CNT 0x7824
+#define A_MA_TP_TH0_WRDATA_CNT 0x7828
+#define A_MA_TP_TH1_WRDATA_CNT 0x782c
+#define A_MA_LE_WRDATA_CNT 0x7830
+#define A_MA_CIM_WRDATA_CNT 0x7834
+#define A_MA_PCIE_WRDATA_CNT 0x7838
+#define A_MA_PMTX_WRDATA_CNT 0x783c
+#define A_MA_PMRX_WRDATA_CNT 0x7840
+#define A_MA_HMA_WRDATA_CNT 0x7844
+#define A_MA_SGE_TH0_RDDATA_CNT 0x7848
+#define A_MA_SGE_TH1_RDDATA_CNT 0x784c
+#define A_MA_ULPTX_RDDATA_CNT 0x7850
+#define A_MA_ULPRX_RDDATA_CNT 0x7854
+#define A_MA_ULPTXRX_RDDATA_CNT 0x7858
+#define A_MA_TP_TH0_RDDATA_CNT 0x785c
+#define A_MA_TP_TH1_RDDATA_CNT 0x7860
+#define A_MA_LE_RDDATA_CNT 0x7864
+#define A_MA_CIM_RDDATA_CNT 0x7868
+#define A_MA_PCIE_RDDATA_CNT 0x786c
+#define A_MA_PMTX_RDDATA_CNT 0x7870
+#define A_MA_PMRX_RDDATA_CNT 0x7874
+#define A_MA_HMA_RDDATA_CNT 0x7878
+#define A_MA_EDRAM0_WRDATA_CNT1 0x787c
+#define A_MA_EXIT_ADDR_FAULT 0x787c
+
+#define S_EXIT_ADDR_FAULT    0
+#define V_EXIT_ADDR_FAULT(x) ((x) << S_EXIT_ADDR_FAULT)
+#define F_EXIT_ADDR_FAULT    V_EXIT_ADDR_FAULT(1U)
+
+#define A_MA_EDRAM0_WRDATA_CNT0 0x7880
+#define A_MA_DDR_DEVICE_CFG 0x7880
+
+#define S_MEM_WIDTH    1
+#define M_MEM_WIDTH    0x7U
+#define V_MEM_WIDTH(x) ((x) << S_MEM_WIDTH)
+#define G_MEM_WIDTH(x) (((x) >> S_MEM_WIDTH) & M_MEM_WIDTH)
+
+#define S_DDR_MODE    0
+#define V_DDR_MODE(x) ((x) << S_DDR_MODE)
+#define F_DDR_MODE    V_DDR_MODE(1U)
+
+#define A_MA_EDRAM1_WRDATA_CNT1 0x7884
+#define A_MA_EDRAM1_WRDATA_CNT0 0x7888
+#define A_MA_EXT_MEMORY0_WRDATA_CNT1 0x788c
+#define A_MA_EXT_MEMORY0_WRDATA_CNT0 0x7890
+#define A_MA_HOST_MEMORY_WRDATA_CNT1 0x7894
+#define A_MA_HOST_MEMORY_WRDATA_CNT0 0x7898
+#define A_MA_EXT_MEMORY1_WRDATA_CNT1 0x789c
+#define A_MA_EXT_MEMORY1_WRDATA_CNT0 0x78a0
+#define A_MA_EDRAM0_RDDATA_CNT1 0x78a4
+#define A_MA_EDRAM0_RDDATA_CNT0 0x78a8
+#define A_MA_EDRAM1_RDDATA_CNT1 0x78ac
+#define A_MA_EDRAM1_RDDATA_CNT0 0x78b0
+#define A_MA_EXT_MEMORY0_RDDATA_CNT1 0x78b4
+#define A_MA_EXT_MEMORY0_RDDATA_CNT0 0x78b8
+#define A_MA_HOST_MEMORY_RDDATA_CNT1 0x78bc
+#define A_MA_HOST_MEMORY_RDDATA_CNT0 0x78c0
+#define A_MA_EXT_MEMORY1_RDDATA_CNT1 0x78c4
+#define A_MA_EXT_MEMORY1_RDDATA_CNT0 0x78c8
+#define A_MA_TIMEOUT_CFG 0x78cc
+
+#define S_CLR    31
+#define V_CLR(x) ((x) << S_CLR)
+#define F_CLR    V_CLR(1U)
+
+#define S_CNT_LOCK    30
+#define V_CNT_LOCK(x) ((x) << S_CNT_LOCK)
+#define F_CNT_LOCK    V_CNT_LOCK(1U)
+
+#define S_WRN    24
+#define V_WRN(x) ((x) << S_WRN)
+#define F_WRN    V_WRN(1U)
+
+#define S_DIR    23
+#define V_DIR(x) ((x) << S_DIR)
+#define F_DIR    V_DIR(1U)
+
+#define S_TO_BUS    22
+#define V_TO_BUS(x) ((x) << S_TO_BUS)
+#define F_TO_BUS    V_TO_BUS(1U)
+
+#define S_CLIENT    16
+#define M_CLIENT    0xfU
+#define V_CLIENT(x) ((x) << S_CLIENT)
+#define G_CLIENT(x) (((x) >> S_CLIENT) & M_CLIENT)
+
+#define S_DELAY    0
+#define M_DELAY    0xffffU
+#define V_DELAY(x) ((x) << S_DELAY)
+#define G_DELAY(x) (((x) >> S_DELAY) & M_DELAY)
+
+#define A_MA_TIMEOUT_CNT 0x78d0
+
+#define S_CNT_VAL    0
+#define M_CNT_VAL    0xffffU
+#define V_CNT_VAL(x) ((x) << S_CNT_VAL)
+#define G_CNT_VAL(x) (((x) >> S_CNT_VAL) & M_CNT_VAL)
+
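[A_MA_TIMEOUT_CFG packs a clear strobe (F_CLR), a counter lock (F_CNT_LOCK), warn/direction/bus selectors, a 4-bit client index, and a 16-bit delay into a single word, with the resulting count readable via G_CNT_VAL from A_MA_TIMEOUT_CNT. One plausible read sequence is sketched below; the chosen field values are purely illustrative and the DIR/WRN semantics are assumptions:]

/* Sketch: snapshot the timeout counter for one MA client. */
static u_int
ma_timeout_count(struct adapter *sc, u_int client)
{
	t4_write_reg(sc, A_MA_TIMEOUT_CFG,
	    V_CLIENT(client) | V_DELAY(0x100) | F_DIR);
	return (G_CNT_VAL(t4_read_reg(sc, A_MA_TIMEOUT_CNT)));
}
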
+#define A_MA_WRITE_TIMEOUT_ERROR_ENABLE 0x78d4
+
+#define S_FUTURE_CEXPANSION    29
+#define M_FUTURE_CEXPANSION    0x7U
+#define V_FUTURE_CEXPANSION(x) ((x) << S_FUTURE_CEXPANSION)
+#define G_FUTURE_CEXPANSION(x) (((x) >> S_FUTURE_CEXPANSION) & M_FUTURE_CEXPANSION)
+
+#define S_CL12_WR_CMD_TO_EN    28
+#define V_CL12_WR_CMD_TO_EN(x) ((x) << S_CL12_WR_CMD_TO_EN)
+#define F_CL12_WR_CMD_TO_EN    V_CL12_WR_CMD_TO_EN(1U)
+
+#define S_CL11_WR_CMD_TO_EN    27
+#define V_CL11_WR_CMD_TO_EN(x) ((x) << S_CL11_WR_CMD_TO_EN)
+#define F_CL11_WR_CMD_TO_EN    V_CL11_WR_CMD_TO_EN(1U)
+
+#define S_CL10_WR_CMD_TO_EN    26
+#define V_CL10_WR_CMD_TO_EN(x) ((x) << S_CL10_WR_CMD_TO_EN)
+#define F_CL10_WR_CMD_TO_EN    V_CL10_WR_CMD_TO_EN(1U)
+
+#define S_CL9_WR_CMD_TO_EN    25
+#define V_CL9_WR_CMD_TO_EN(x) ((x) << S_CL9_WR_CMD_TO_EN)
+#define F_CL9_WR_CMD_TO_EN    V_CL9_WR_CMD_TO_EN(1U)
+
+#define S_CL8_WR_CMD_TO_EN    24
+#define V_CL8_WR_CMD_TO_EN(x) ((x) << S_CL8_WR_CMD_TO_EN)
+#define F_CL8_WR_CMD_TO_EN    V_CL8_WR_CMD_TO_EN(1U)
+
+#define S_CL7_WR_CMD_TO_EN    23
+#define V_CL7_WR_CMD_TO_EN(x) ((x) << S_CL7_WR_CMD_TO_EN)
+#define F_CL7_WR_CMD_TO_EN    V_CL7_WR_CMD_TO_EN(1U)
+
+#define S_CL6_WR_CMD_TO_EN    22
+#define V_CL6_WR_CMD_TO_EN(x) ((x) << S_CL6_WR_CMD_TO_EN)
+#define F_CL6_WR_CMD_TO_EN    V_CL6_WR_CMD_TO_EN(1U)
+
+#define S_CL5_WR_CMD_TO_EN    21
+#define V_CL5_WR_CMD_TO_EN(x) ((x) << S_CL5_WR_CMD_TO_EN)
+#define F_CL5_WR_CMD_TO_EN    V_CL5_WR_CMD_TO_EN(1U)
+
+#define S_CL4_WR_CMD_TO_EN    20
+#define V_CL4_WR_CMD_TO_EN(x) ((x) << S_CL4_WR_CMD_TO_EN)
+#define F_CL4_WR_CMD_TO_EN    V_CL4_WR_CMD_TO_EN(1U)
+
+#define S_CL3_WR_CMD_TO_EN    19
+#define V_CL3_WR_CMD_TO_EN(x) ((x) << S_CL3_WR_CMD_TO_EN)
+#define F_CL3_WR_CMD_TO_EN    V_CL3_WR_CMD_TO_EN(1U)
+
+#define S_CL2_WR_CMD_TO_EN    18
+#define V_CL2_WR_CMD_TO_EN(x) ((x) << S_CL2_WR_CMD_TO_EN)
+#define F_CL2_WR_CMD_TO_EN    V_CL2_WR_CMD_TO_EN(1U)
+
+#define S_CL1_WR_CMD_TO_EN    17
+#define V_CL1_WR_CMD_TO_EN(x) ((x) << S_CL1_WR_CMD_TO_EN)
+#define F_CL1_WR_CMD_TO_EN    V_CL1_WR_CMD_TO_EN(1U)
+
+#define S_CL0_WR_CMD_TO_EN    16
+#define V_CL0_WR_CMD_TO_EN(x) ((x) << S_CL0_WR_CMD_TO_EN)
+#define F_CL0_WR_CMD_TO_EN    V_CL0_WR_CMD_TO_EN(1U)
+
+#define S_FUTURE_DEXPANSION    13
+#define M_FUTURE_DEXPANSION    0x7U
+#define V_FUTURE_DEXPANSION(x) ((x) << S_FUTURE_DEXPANSION)
+#define G_FUTURE_DEXPANSION(x) (((x) >> S_FUTURE_DEXPANSION) & M_FUTURE_DEXPANSION)
+
+#define S_CL12_WR_DATA_TO_EN    12
+#define V_CL12_WR_DATA_TO_EN(x) ((x) << S_CL12_WR_DATA_TO_EN)
+#define F_CL12_WR_DATA_TO_EN    V_CL12_WR_DATA_TO_EN(1U)
+
+#define S_CL11_WR_DATA_TO_EN    11
+#define V_CL11_WR_DATA_TO_EN(x) ((x) << S_CL11_WR_DATA_TO_EN)
+#define F_CL11_WR_DATA_TO_EN    V_CL11_WR_DATA_TO_EN(1U)
+
+#define S_CL10_WR_DATA_TO_EN    10
+#define V_CL10_WR_DATA_TO_EN(x) ((x) << S_CL10_WR_DATA_TO_EN)
+#define F_CL10_WR_DATA_TO_EN    V_CL10_WR_DATA_TO_EN(1U)
+
+#define S_CL9_WR_DATA_TO_EN    9
+#define V_CL9_WR_DATA_TO_EN(x) ((x) << S_CL9_WR_DATA_TO_EN)
+#define F_CL9_WR_DATA_TO_EN    V_CL9_WR_DATA_TO_EN(1U)
+
+#define S_CL8_WR_DATA_TO_EN    8
+#define V_CL8_WR_DATA_TO_EN(x) ((x) << S_CL8_WR_DATA_TO_EN)
+#define F_CL8_WR_DATA_TO_EN    V_CL8_WR_DATA_TO_EN(1U)
+
+#define S_CL7_WR_DATA_TO_EN    7
+#define V_CL7_WR_DATA_TO_EN(x) ((x) << S_CL7_WR_DATA_TO_EN)
+#define F_CL7_WR_DATA_TO_EN    V_CL7_WR_DATA_TO_EN(1U)
+
+#define S_CL6_WR_DATA_TO_EN    6
+#define V_CL6_WR_DATA_TO_EN(x) ((x) << S_CL6_WR_DATA_TO_EN)
+#define F_CL6_WR_DATA_TO_EN    V_CL6_WR_DATA_TO_EN(1U)
+
+#define S_CL5_WR_DATA_TO_EN    5
+#define V_CL5_WR_DATA_TO_EN(x) ((x) << S_CL5_WR_DATA_TO_EN)
+#define F_CL5_WR_DATA_TO_EN    V_CL5_WR_DATA_TO_EN(1U)
+
+#define S_CL4_WR_DATA_TO_EN    4
+#define V_CL4_WR_DATA_TO_EN(x) ((x) << S_CL4_WR_DATA_TO_EN)
+#define F_CL4_WR_DATA_TO_EN    V_CL4_WR_DATA_TO_EN(1U)
+
+#define S_CL3_WR_DATA_TO_EN    3
+#define V_CL3_WR_DATA_TO_EN(x) ((x) << S_CL3_WR_DATA_TO_EN)
+#define F_CL3_WR_DATA_TO_EN    V_CL3_WR_DATA_TO_EN(1U)
+
+#define S_CL2_WR_DATA_TO_EN    2
+#define V_CL2_WR_DATA_TO_EN(x) ((x) << S_CL2_WR_DATA_TO_EN)
+#define F_CL2_WR_DATA_TO_EN    V_CL2_WR_DATA_TO_EN(1U)
+
+#define S_CL1_WR_DATA_TO_EN    1
+#define V_CL1_WR_DATA_TO_EN(x) ((x) << S_CL1_WR_DATA_TO_EN)
+#define F_CL1_WR_DATA_TO_EN    V_CL1_WR_DATA_TO_EN(1U)
+
+#define S_CL0_WR_DATA_TO_EN    0
+#define V_CL0_WR_DATA_TO_EN(x) ((x) << S_CL0_WR_DATA_TO_EN)
+#define F_CL0_WR_DATA_TO_EN    V_CL0_WR_DATA_TO_EN(1U)
+
+#define S_FUTURE_CEXPANSION_WTE    29
+#define M_FUTURE_CEXPANSION_WTE    0x7U
+#define V_FUTURE_CEXPANSION_WTE(x) ((x) << S_FUTURE_CEXPANSION_WTE)
+#define G_FUTURE_CEXPANSION_WTE(x) (((x) >> S_FUTURE_CEXPANSION_WTE) & M_FUTURE_CEXPANSION_WTE)
+
+#define S_FUTURE_DEXPANSION_WTE    13
+#define M_FUTURE_DEXPANSION_WTE    0x7U
+#define V_FUTURE_DEXPANSION_WTE(x) ((x) << S_FUTURE_DEXPANSION_WTE)
+#define G_FUTURE_DEXPANSION_WTE(x) (((x) >> S_FUTURE_DEXPANSION_WTE) & M_FUTURE_DEXPANSION_WTE)
+
+#define A_MA_WRITE_TIMEOUT_ERROR_STATUS 0x78d8
+
+#define S_CL12_WR_CMD_TO_ERROR    28
+#define V_CL12_WR_CMD_TO_ERROR(x) ((x) << S_CL12_WR_CMD_TO_ERROR)
+#define F_CL12_WR_CMD_TO_ERROR    V_CL12_WR_CMD_TO_ERROR(1U)
+
+#define S_CL11_WR_CMD_TO_ERROR    27
+#define V_CL11_WR_CMD_TO_ERROR(x) ((x) << S_CL11_WR_CMD_TO_ERROR)
+#define F_CL11_WR_CMD_TO_ERROR    V_CL11_WR_CMD_TO_ERROR(1U)
+
+#define S_CL10_WR_CMD_TO_ERROR    26
+#define V_CL10_WR_CMD_TO_ERROR(x) ((x) << S_CL10_WR_CMD_TO_ERROR)
+#define F_CL10_WR_CMD_TO_ERROR    V_CL10_WR_CMD_TO_ERROR(1U)
+
+#define S_CL9_WR_CMD_TO_ERROR    25
+#define V_CL9_WR_CMD_TO_ERROR(x) ((x) << S_CL9_WR_CMD_TO_ERROR)
+#define F_CL9_WR_CMD_TO_ERROR    V_CL9_WR_CMD_TO_ERROR(1U)
+
+#define S_CL8_WR_CMD_TO_ERROR    24
+#define V_CL8_WR_CMD_TO_ERROR(x) ((x) << S_CL8_WR_CMD_TO_ERROR)
+#define F_CL8_WR_CMD_TO_ERROR    V_CL8_WR_CMD_TO_ERROR(1U)
+
+#define S_CL7_WR_CMD_TO_ERROR    23
+#define V_CL7_WR_CMD_TO_ERROR(x) ((x) << S_CL7_WR_CMD_TO_ERROR)
+#define F_CL7_WR_CMD_TO_ERROR    V_CL7_WR_CMD_TO_ERROR(1U)
+
+#define S_CL6_WR_CMD_TO_ERROR    22
+#define V_CL6_WR_CMD_TO_ERROR(x) ((x) << S_CL6_WR_CMD_TO_ERROR)
+#define F_CL6_WR_CMD_TO_ERROR    V_CL6_WR_CMD_TO_ERROR(1U)
+
+#define S_CL5_WR_CMD_TO_ERROR    21
+#define V_CL5_WR_CMD_TO_ERROR(x) ((x) << S_CL5_WR_CMD_TO_ERROR)
+#define F_CL5_WR_CMD_TO_ERROR    V_CL5_WR_CMD_TO_ERROR(1U)
+
+#define S_CL4_WR_CMD_TO_ERROR    20
+#define V_CL4_WR_CMD_TO_ERROR(x) ((x) << S_CL4_WR_CMD_TO_ERROR)
+#define F_CL4_WR_CMD_TO_ERROR    V_CL4_WR_CMD_TO_ERROR(1U)
+
+#define S_CL3_WR_CMD_TO_ERROR    19
+#define V_CL3_WR_CMD_TO_ERROR(x) ((x) << S_CL3_WR_CMD_TO_ERROR)
+#define F_CL3_WR_CMD_TO_ERROR    V_CL3_WR_CMD_TO_ERROR(1U)
+
+#define S_CL2_WR_CMD_TO_ERROR    18
+#define V_CL2_WR_CMD_TO_ERROR(x) ((x) << S_CL2_WR_CMD_TO_ERROR)
+#define F_CL2_WR_CMD_TO_ERROR    V_CL2_WR_CMD_TO_ERROR(1U)
+
+#define S_CL1_WR_CMD_TO_ERROR    17
+#define V_CL1_WR_CMD_TO_ERROR(x) ((x) << S_CL1_WR_CMD_TO_ERROR)
+#define F_CL1_WR_CMD_TO_ERROR    V_CL1_WR_CMD_TO_ERROR(1U)
+
+#define S_CL0_WR_CMD_TO_ERROR    16
+#define V_CL0_WR_CMD_TO_ERROR(x) ((x) << S_CL0_WR_CMD_TO_ERROR)
+#define F_CL0_WR_CMD_TO_ERROR    V_CL0_WR_CMD_TO_ERROR(1U)
+
+#define S_CL12_WR_DATA_TO_ERROR    12
+#define V_CL12_WR_DATA_TO_ERROR(x) ((x) << S_CL12_WR_DATA_TO_ERROR)
+#define F_CL12_WR_DATA_TO_ERROR    V_CL12_WR_DATA_TO_ERROR(1U)
+
+#define S_CL11_WR_DATA_TO_ERROR    11
+#define V_CL11_WR_DATA_TO_ERROR(x) ((x) << S_CL11_WR_DATA_TO_ERROR)
+#define F_CL11_WR_DATA_TO_ERROR    V_CL11_WR_DATA_TO_ERROR(1U)
+
+#define S_CL10_WR_DATA_TO_ERROR    10
+#define V_CL10_WR_DATA_TO_ERROR(x) ((x) << S_CL10_WR_DATA_TO_ERROR)
+#define F_CL10_WR_DATA_TO_ERROR    V_CL10_WR_DATA_TO_ERROR(1U)
+
+#define S_CL9_WR_DATA_TO_ERROR    9
+#define V_CL9_WR_DATA_TO_ERROR(x) ((x) << S_CL9_WR_DATA_TO_ERROR)
+#define F_CL9_WR_DATA_TO_ERROR    V_CL9_WR_DATA_TO_ERROR(1U)
+
+#define S_CL8_WR_DATA_TO_ERROR    8
+#define V_CL8_WR_DATA_TO_ERROR(x) ((x) << S_CL8_WR_DATA_TO_ERROR)
+#define F_CL8_WR_DATA_TO_ERROR    V_CL8_WR_DATA_TO_ERROR(1U)
+
+#define S_CL7_WR_DATA_TO_ERROR    7
+#define V_CL7_WR_DATA_TO_ERROR(x) ((x) << S_CL7_WR_DATA_TO_ERROR)
+#define F_CL7_WR_DATA_TO_ERROR    V_CL7_WR_DATA_TO_ERROR(1U)
+
+#define S_CL6_WR_DATA_TO_ERROR    6
+#define V_CL6_WR_DATA_TO_ERROR(x) ((x) << S_CL6_WR_DATA_TO_ERROR)
+#define F_CL6_WR_DATA_TO_ERROR    V_CL6_WR_DATA_TO_ERROR(1U)
+
+#define S_CL5_WR_DATA_TO_ERROR    5
+#define V_CL5_WR_DATA_TO_ERROR(x) ((x) << S_CL5_WR_DATA_TO_ERROR)
+#define F_CL5_WR_DATA_TO_ERROR    V_CL5_WR_DATA_TO_ERROR(1U)
+
+#define S_CL4_WR_DATA_TO_ERROR    4
+#define V_CL4_WR_DATA_TO_ERROR(x) ((x) << S_CL4_WR_DATA_TO_ERROR)
+#define F_CL4_WR_DATA_TO_ERROR    V_CL4_WR_DATA_TO_ERROR(1U)
+
+#define S_CL3_WR_DATA_TO_ERROR    3
+#define V_CL3_WR_DATA_TO_ERROR(x) ((x) << S_CL3_WR_DATA_TO_ERROR)
+#define F_CL3_WR_DATA_TO_ERROR    V_CL3_WR_DATA_TO_ERROR(1U)
+
+#define S_CL2_WR_DATA_TO_ERROR    2
+#define V_CL2_WR_DATA_TO_ERROR(x) ((x) << S_CL2_WR_DATA_TO_ERROR)
+#define F_CL2_WR_DATA_TO_ERROR    V_CL2_WR_DATA_TO_ERROR(1U)
+
+#define S_CL1_WR_DATA_TO_ERROR    1
+#define V_CL1_WR_DATA_TO_ERROR(x) ((x) << S_CL1_WR_DATA_TO_ERROR)
+#define F_CL1_WR_DATA_TO_ERROR    V_CL1_WR_DATA_TO_ERROR(1U)
+
+#define S_CL0_WR_DATA_TO_ERROR    0
+#define V_CL0_WR_DATA_TO_ERROR(x) ((x) << S_CL0_WR_DATA_TO_ERROR)
+#define F_CL0_WR_DATA_TO_ERROR    V_CL0_WR_DATA_TO_ERROR(1U)
+
+#define S_FUTURE_CEXPANSION_WTS    29
+#define M_FUTURE_CEXPANSION_WTS    0x7U
+#define V_FUTURE_CEXPANSION_WTS(x) ((x) << S_FUTURE_CEXPANSION_WTS)
+#define G_FUTURE_CEXPANSION_WTS(x) (((x) >> S_FUTURE_CEXPANSION_WTS) & M_FUTURE_CEXPANSION_WTS)
+
+#define S_FUTURE_DEXPANSION_WTS    13
+#define M_FUTURE_DEXPANSION_WTS    0x7U
+#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS)
+#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS)
+
+#define A_MA_READ_TIMEOUT_ERROR_ENABLE 0x78dc
+
+#define S_CL12_RD_CMD_TO_EN    28
+#define V_CL12_RD_CMD_TO_EN(x) ((x) << S_CL12_RD_CMD_TO_EN)
+#define F_CL12_RD_CMD_TO_EN    V_CL12_RD_CMD_TO_EN(1U)
+
+#define S_CL11_RD_CMD_TO_EN    27
+#define V_CL11_RD_CMD_TO_EN(x) ((x) << S_CL11_RD_CMD_TO_EN)
+#define F_CL11_RD_CMD_TO_EN    V_CL11_RD_CMD_TO_EN(1U)
+
+#define S_CL10_RD_CMD_TO_EN    26
+#define V_CL10_RD_CMD_TO_EN(x) ((x) << S_CL10_RD_CMD_TO_EN)
+#define F_CL10_RD_CMD_TO_EN    V_CL10_RD_CMD_TO_EN(1U)
+
+#define S_CL9_RD_CMD_TO_EN    25
+#define V_CL9_RD_CMD_TO_EN(x) ((x) << S_CL9_RD_CMD_TO_EN)
+#define F_CL9_RD_CMD_TO_EN    V_CL9_RD_CMD_TO_EN(1U)
+
+#define S_CL8_RD_CMD_TO_EN    24
+#define V_CL8_RD_CMD_TO_EN(x) ((x) << S_CL8_RD_CMD_TO_EN)
+#define F_CL8_RD_CMD_TO_EN    V_CL8_RD_CMD_TO_EN(1U)
+
+#define S_CL7_RD_CMD_TO_EN    23
+#define V_CL7_RD_CMD_TO_EN(x) ((x) << S_CL7_RD_CMD_TO_EN)
+#define F_CL7_RD_CMD_TO_EN    V_CL7_RD_CMD_TO_EN(1U)
+
+#define S_CL6_RD_CMD_TO_EN    22
+#define V_CL6_RD_CMD_TO_EN(x) ((x) << S_CL6_RD_CMD_TO_EN)
+#define F_CL6_RD_CMD_TO_EN    V_CL6_RD_CMD_TO_EN(1U)
+
+#define S_CL5_RD_CMD_TO_EN    21
+#define V_CL5_RD_CMD_TO_EN(x) ((x) << S_CL5_RD_CMD_TO_EN)
+#define F_CL5_RD_CMD_TO_EN    V_CL5_RD_CMD_TO_EN(1U)
+
+#define S_CL4_RD_CMD_TO_EN    20
+#define V_CL4_RD_CMD_TO_EN(x) ((x) << S_CL4_RD_CMD_TO_EN)
+#define F_CL4_RD_CMD_TO_EN    V_CL4_RD_CMD_TO_EN(1U)
+
+#define S_CL3_RD_CMD_TO_EN    19
+#define V_CL3_RD_CMD_TO_EN(x) ((x) << S_CL3_RD_CMD_TO_EN)
+#define F_CL3_RD_CMD_TO_EN    V_CL3_RD_CMD_TO_EN(1U)
+
+#define S_CL2_RD_CMD_TO_EN    18
+#define V_CL2_RD_CMD_TO_EN(x) ((x) << S_CL2_RD_CMD_TO_EN)
+#define F_CL2_RD_CMD_TO_EN    V_CL2_RD_CMD_TO_EN(1U)
+
+#define S_CL1_RD_CMD_TO_EN    17
+#define V_CL1_RD_CMD_TO_EN(x) ((x) << S_CL1_RD_CMD_TO_EN)
+#define F_CL1_RD_CMD_TO_EN    V_CL1_RD_CMD_TO_EN(1U)
+
+#define S_CL0_RD_CMD_TO_EN    16
+#define V_CL0_RD_CMD_TO_EN(x) ((x) << S_CL0_RD_CMD_TO_EN)
+#define F_CL0_RD_CMD_TO_EN    V_CL0_RD_CMD_TO_EN(1U)
+
+#define S_CL12_RD_DATA_TO_EN    12
+#define V_CL12_RD_DATA_TO_EN(x) ((x) << S_CL12_RD_DATA_TO_EN)
+#define F_CL12_RD_DATA_TO_EN    V_CL12_RD_DATA_TO_EN(1U)
+
+#define S_CL11_RD_DATA_TO_EN    11
+#define V_CL11_RD_DATA_TO_EN(x) ((x) << S_CL11_RD_DATA_TO_EN)
+#define F_CL11_RD_DATA_TO_EN    V_CL11_RD_DATA_TO_EN(1U)
+
+#define S_CL10_RD_DATA_TO_EN    10
+#define V_CL10_RD_DATA_TO_EN(x) ((x) << S_CL10_RD_DATA_TO_EN)
+#define F_CL10_RD_DATA_TO_EN    V_CL10_RD_DATA_TO_EN(1U)
+
+#define S_CL9_RD_DATA_TO_EN    9
+#define V_CL9_RD_DATA_TO_EN(x) ((x) << S_CL9_RD_DATA_TO_EN)
+#define F_CL9_RD_DATA_TO_EN    V_CL9_RD_DATA_TO_EN(1U)
+
+#define S_CL8_RD_DATA_TO_EN    8
+#define V_CL8_RD_DATA_TO_EN(x) ((x) << S_CL8_RD_DATA_TO_EN)
+#define F_CL8_RD_DATA_TO_EN    V_CL8_RD_DATA_TO_EN(1U)
+
+#define S_CL7_RD_DATA_TO_EN    7
+#define V_CL7_RD_DATA_TO_EN(x) ((x) << S_CL7_RD_DATA_TO_EN)
+#define F_CL7_RD_DATA_TO_EN    V_CL7_RD_DATA_TO_EN(1U)
+
+#define S_CL6_RD_DATA_TO_EN    6
+#define V_CL6_RD_DATA_TO_EN(x) ((x) << S_CL6_RD_DATA_TO_EN)
+#define F_CL6_RD_DATA_TO_EN    V_CL6_RD_DATA_TO_EN(1U)
+
+#define S_CL5_RD_DATA_TO_EN    5
+#define V_CL5_RD_DATA_TO_EN(x) ((x) << S_CL5_RD_DATA_TO_EN)
+#define F_CL5_RD_DATA_TO_EN    V_CL5_RD_DATA_TO_EN(1U)
+
+#define S_CL4_RD_DATA_TO_EN    4
+#define V_CL4_RD_DATA_TO_EN(x) ((x) << S_CL4_RD_DATA_TO_EN)
+#define F_CL4_RD_DATA_TO_EN    V_CL4_RD_DATA_TO_EN(1U)
+
+#define S_CL3_RD_DATA_TO_EN    3
+#define V_CL3_RD_DATA_TO_EN(x) ((x) << S_CL3_RD_DATA_TO_EN)
+#define F_CL3_RD_DATA_TO_EN    V_CL3_RD_DATA_TO_EN(1U)
+
+#define S_CL2_RD_DATA_TO_EN    2
+#define V_CL2_RD_DATA_TO_EN(x) ((x) << S_CL2_RD_DATA_TO_EN)
+#define F_CL2_RD_DATA_TO_EN    V_CL2_RD_DATA_TO_EN(1U)
+
+#define S_CL1_RD_DATA_TO_EN    1
+#define V_CL1_RD_DATA_TO_EN(x) ((x) << S_CL1_RD_DATA_TO_EN)
+#define F_CL1_RD_DATA_TO_EN    V_CL1_RD_DATA_TO_EN(1U)
+
+#define S_CL0_RD_DATA_TO_EN    0
+#define V_CL0_RD_DATA_TO_EN(x) ((x) << S_CL0_RD_DATA_TO_EN)
+#define F_CL0_RD_DATA_TO_EN    V_CL0_RD_DATA_TO_EN(1U)
+
+#define S_FUTURE_CEXPANSION_RTE    29
+#define M_FUTURE_CEXPANSION_RTE    0x7U
+#define V_FUTURE_CEXPANSION_RTE(x) ((x) << S_FUTURE_CEXPANSION_RTE)
+#define G_FUTURE_CEXPANSION_RTE(x) (((x) >> S_FUTURE_CEXPANSION_RTE) & M_FUTURE_CEXPANSION_RTE)
+
+#define S_FUTURE_DEXPANSION_RTE    13
+#define M_FUTURE_DEXPANSION_RTE    0x7U
+#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE)
+#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE)
+
+#define A_MA_READ_TIMEOUT_ERROR_STATUS 0x78e0
+
+#define S_CL12_RD_CMD_TO_ERROR    28
+#define V_CL12_RD_CMD_TO_ERROR(x) ((x) << S_CL12_RD_CMD_TO_ERROR)
+#define F_CL12_RD_CMD_TO_ERROR    V_CL12_RD_CMD_TO_ERROR(1U)
+
+#define S_CL11_RD_CMD_TO_ERROR    27
+#define V_CL11_RD_CMD_TO_ERROR(x) ((x) << S_CL11_RD_CMD_TO_ERROR)
+#define F_CL11_RD_CMD_TO_ERROR    V_CL11_RD_CMD_TO_ERROR(1U)
+
+#define S_CL10_RD_CMD_TO_ERROR    26
+#define V_CL10_RD_CMD_TO_ERROR(x) ((x) << S_CL10_RD_CMD_TO_ERROR)
+#define F_CL10_RD_CMD_TO_ERROR    V_CL10_RD_CMD_TO_ERROR(1U)
+
+#define S_CL9_RD_CMD_TO_ERROR    25
+#define V_CL9_RD_CMD_TO_ERROR(x) ((x) << S_CL9_RD_CMD_TO_ERROR)
+#define F_CL9_RD_CMD_TO_ERROR    V_CL9_RD_CMD_TO_ERROR(1U)
+
+#define S_CL8_RD_CMD_TO_ERROR    24
+#define V_CL8_RD_CMD_TO_ERROR(x) ((x) << S_CL8_RD_CMD_TO_ERROR)
+#define F_CL8_RD_CMD_TO_ERROR    V_CL8_RD_CMD_TO_ERROR(1U)
+
+#define S_CL7_RD_CMD_TO_ERROR    23
+#define V_CL7_RD_CMD_TO_ERROR(x) ((x) << S_CL7_RD_CMD_TO_ERROR)
+#define F_CL7_RD_CMD_TO_ERROR    V_CL7_RD_CMD_TO_ERROR(1U)
+
+#define S_CL6_RD_CMD_TO_ERROR    22
+#define V_CL6_RD_CMD_TO_ERROR(x) ((x) << S_CL6_RD_CMD_TO_ERROR)
+#define F_CL6_RD_CMD_TO_ERROR    V_CL6_RD_CMD_TO_ERROR(1U)
+
+#define S_CL5_RD_CMD_TO_ERROR    21
+#define V_CL5_RD_CMD_TO_ERROR(x) ((x) << S_CL5_RD_CMD_TO_ERROR)
+#define F_CL5_RD_CMD_TO_ERROR    V_CL5_RD_CMD_TO_ERROR(1U)
+
+#define S_CL4_RD_CMD_TO_ERROR    20
+#define V_CL4_RD_CMD_TO_ERROR(x) ((x) << S_CL4_RD_CMD_TO_ERROR)
+#define F_CL4_RD_CMD_TO_ERROR    V_CL4_RD_CMD_TO_ERROR(1U)
+
+#define S_CL3_RD_CMD_TO_ERROR    19
+#define V_CL3_RD_CMD_TO_ERROR(x) ((x) << S_CL3_RD_CMD_TO_ERROR)
+#define F_CL3_RD_CMD_TO_ERROR    V_CL3_RD_CMD_TO_ERROR(1U)
+
+#define S_CL2_RD_CMD_TO_ERROR    18
+#define V_CL2_RD_CMD_TO_ERROR(x) ((x) << S_CL2_RD_CMD_TO_ERROR)
+#define F_CL2_RD_CMD_TO_ERROR    V_CL2_RD_CMD_TO_ERROR(1U)
+
+#define S_CL1_RD_CMD_TO_ERROR    17
+#define V_CL1_RD_CMD_TO_ERROR(x) ((x) << S_CL1_RD_CMD_TO_ERROR)
+#define F_CL1_RD_CMD_TO_ERROR    V_CL1_RD_CMD_TO_ERROR(1U)
+
+#define S_CL0_RD_CMD_TO_ERROR    16
+#define V_CL0_RD_CMD_TO_ERROR(x) ((x) << S_CL0_RD_CMD_TO_ERROR)
+#define F_CL0_RD_CMD_TO_ERROR    V_CL0_RD_CMD_TO_ERROR(1U)
+
+#define S_CL12_RD_DATA_TO_ERROR    12
+#define V_CL12_RD_DATA_TO_ERROR(x) ((x) << S_CL12_RD_DATA_TO_ERROR)
+#define F_CL12_RD_DATA_TO_ERROR    V_CL12_RD_DATA_TO_ERROR(1U)
+
+#define S_CL11_RD_DATA_TO_ERROR    11
+#define V_CL11_RD_DATA_TO_ERROR(x) ((x) << S_CL11_RD_DATA_TO_ERROR)
+#define F_CL11_RD_DATA_TO_ERROR    V_CL11_RD_DATA_TO_ERROR(1U)
+
+#define S_CL10_RD_DATA_TO_ERROR    10
+#define V_CL10_RD_DATA_TO_ERROR(x) ((x) << S_CL10_RD_DATA_TO_ERROR)
+#define F_CL10_RD_DATA_TO_ERROR    V_CL10_RD_DATA_TO_ERROR(1U)
+
+#define S_CL9_RD_DATA_TO_ERROR    9
+#define V_CL9_RD_DATA_TO_ERROR(x) ((x) << S_CL9_RD_DATA_TO_ERROR)
+#define F_CL9_RD_DATA_TO_ERROR    V_CL9_RD_DATA_TO_ERROR(1U)
+
+#define S_CL8_RD_DATA_TO_ERROR    8
+#define V_CL8_RD_DATA_TO_ERROR(x) ((x) << S_CL8_RD_DATA_TO_ERROR)
+#define F_CL8_RD_DATA_TO_ERROR    V_CL8_RD_DATA_TO_ERROR(1U)
+
+#define S_CL7_RD_DATA_TO_ERROR    7
+#define V_CL7_RD_DATA_TO_ERROR(x) ((x) << S_CL7_RD_DATA_TO_ERROR)
+#define F_CL7_RD_DATA_TO_ERROR    V_CL7_RD_DATA_TO_ERROR(1U)
+
+#define S_CL6_RD_DATA_TO_ERROR    6
+#define V_CL6_RD_DATA_TO_ERROR(x) ((x) << S_CL6_RD_DATA_TO_ERROR)
+#define F_CL6_RD_DATA_TO_ERROR    V_CL6_RD_DATA_TO_ERROR(1U)
+
+#define S_CL5_RD_DATA_TO_ERROR    5
+#define V_CL5_RD_DATA_TO_ERROR(x) ((x) << S_CL5_RD_DATA_TO_ERROR)
+#define F_CL5_RD_DATA_TO_ERROR    V_CL5_RD_DATA_TO_ERROR(1U)
+
+#define S_CL4_RD_DATA_TO_ERROR    4
+#define V_CL4_RD_DATA_TO_ERROR(x) ((x) << S_CL4_RD_DATA_TO_ERROR)
+#define F_CL4_RD_DATA_TO_ERROR    V_CL4_RD_DATA_TO_ERROR(1U)
+
+#define S_CL3_RD_DATA_TO_ERROR    3
+#define V_CL3_RD_DATA_TO_ERROR(x) ((x) << S_CL3_RD_DATA_TO_ERROR)
+#define F_CL3_RD_DATA_TO_ERROR    V_CL3_RD_DATA_TO_ERROR(1U)
+
+#define S_CL2_RD_DATA_TO_ERROR    2
+#define V_CL2_RD_DATA_TO_ERROR(x) ((x) << S_CL2_RD_DATA_TO_ERROR)
+#define F_CL2_RD_DATA_TO_ERROR    V_CL2_RD_DATA_TO_ERROR(1U)
+
+#define S_CL1_RD_DATA_TO_ERROR    1
+#define V_CL1_RD_DATA_TO_ERROR(x) ((x) << S_CL1_RD_DATA_TO_ERROR)
+#define F_CL1_RD_DATA_TO_ERROR    V_CL1_RD_DATA_TO_ERROR(1U)
+
+#define S_CL0_RD_DATA_TO_ERROR    0
+#define V_CL0_RD_DATA_TO_ERROR(x) ((x) << S_CL0_RD_DATA_TO_ERROR)
+#define F_CL0_RD_DATA_TO_ERROR    V_CL0_RD_DATA_TO_ERROR(1U)
+
+#define S_FUTURE_CEXPANSION_RTS    29
+#define M_FUTURE_CEXPANSION_RTS    0x7U
+#define V_FUTURE_CEXPANSION_RTS(x) ((x) << S_FUTURE_CEXPANSION_RTS)
+#define G_FUTURE_CEXPANSION_RTS(x) (((x) >> S_FUTURE_CEXPANSION_RTS) & M_FUTURE_CEXPANSION_RTS)
+
+#define S_FUTURE_DEXPANSION_RTS    13
+#define M_FUTURE_DEXPANSION_RTS    0x7U
+#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS)
+#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS)
+
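[The read-timeout status register mirrors the write-timeout layout: CL0..CL12 command timeouts occupy bits 16..28 and the corresponding data timeouts bits 0..12, with the FUTURE_*EXPANSION fields reserving the gaps. A sketch of folding the status word into two 13-client bitmaps, using only the bit positions defined above:]

/* Sketch: split A_MA_READ_TIMEOUT_ERROR_STATUS into per-client bitmaps. */
static void
ma_rd_timeout_clients(uint32_t status, uint16_t *cmd_map, uint16_t *data_map)
{
	*cmd_map = (status >> S_CL0_RD_CMD_TO_ERROR) & 0x1fff;	/* CL0..CL12 */
	*data_map = (status >> S_CL0_RD_DATA_TO_ERROR) & 0x1fff;
}
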
+#define A_MA_BKP_CNT_SEL 0x78e4
+
+#define S_BKP_CNT_TYPE    30
+#define M_BKP_CNT_TYPE    0x3U
+#define V_BKP_CNT_TYPE(x) ((x) << S_BKP_CNT_TYPE)
+#define G_BKP_CNT_TYPE(x) (((x) >> S_BKP_CNT_TYPE) & M_BKP_CNT_TYPE)
+
+#define S_BKP_CLIENT    24
+#define M_BKP_CLIENT    0xfU
+#define V_BKP_CLIENT(x) ((x) << S_BKP_CLIENT)
+#define G_BKP_CLIENT(x) (((x) >> S_BKP_CLIENT) & M_BKP_CLIENT)
+
+#define A_MA_BKP_CNT 0x78e8
+#define A_MA_WRT_ARB 0x78ec
+
+#define S_WRT_EN    31
+#define V_WRT_EN(x) ((x) << S_WRT_EN)
+#define F_WRT_EN    V_WRT_EN(1U)
+
+#define S_WR_TIM    16
+#define M_WR_TIM    0xffU
+#define V_WR_TIM(x) ((x) << S_WR_TIM)
+#define G_WR_TIM(x) (((x) >> S_WR_TIM) & M_WR_TIM)
+
+#define S_RD_WIN    8
+#define M_RD_WIN    0xffU
+#define V_RD_WIN(x) ((x) << S_RD_WIN)
+#define G_RD_WIN(x) (((x) >> S_RD_WIN) & M_RD_WIN)
+
+#define S_WR_WIN    0
+#define M_WR_WIN    0xffU
+#define V_WR_WIN(x) ((x) << S_WR_WIN)
+#define G_WR_WIN(x) (((x) >> S_WR_WIN) & M_WR_WIN)
+
+#define A_MA_IF_PARITY_ERROR_ENABLE 0x78f0
+
+#define S_T5_FUTURE_DEXPANSION    13
+#define M_T5_FUTURE_DEXPANSION    0x7ffffU
+#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION)
+#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION)
+
+#define S_CL12_IF_PAR_EN    12
+#define V_CL12_IF_PAR_EN(x) ((x) << S_CL12_IF_PAR_EN)
+#define F_CL12_IF_PAR_EN    V_CL12_IF_PAR_EN(1U)
+
+#define S_CL11_IF_PAR_EN    11
+#define V_CL11_IF_PAR_EN(x) ((x) << S_CL11_IF_PAR_EN)
+#define F_CL11_IF_PAR_EN    V_CL11_IF_PAR_EN(1U)
+
+#define S_CL10_IF_PAR_EN    10
+#define V_CL10_IF_PAR_EN(x) ((x) << S_CL10_IF_PAR_EN)
+#define F_CL10_IF_PAR_EN    V_CL10_IF_PAR_EN(1U)
+
+#define S_CL9_IF_PAR_EN    9
+#define V_CL9_IF_PAR_EN(x) ((x) << S_CL9_IF_PAR_EN)
+#define F_CL9_IF_PAR_EN    V_CL9_IF_PAR_EN(1U)
+
+#define S_CL8_IF_PAR_EN    8
+#define V_CL8_IF_PAR_EN(x) ((x) << S_CL8_IF_PAR_EN)
+#define F_CL8_IF_PAR_EN    V_CL8_IF_PAR_EN(1U)
+
+#define S_CL7_IF_PAR_EN    7
+#define V_CL7_IF_PAR_EN(x) ((x) << S_CL7_IF_PAR_EN)
+#define F_CL7_IF_PAR_EN    V_CL7_IF_PAR_EN(1U)
+
+#define S_CL6_IF_PAR_EN    6
+#define V_CL6_IF_PAR_EN(x) ((x) << S_CL6_IF_PAR_EN)
+#define F_CL6_IF_PAR_EN    V_CL6_IF_PAR_EN(1U)
+
+#define S_CL5_IF_PAR_EN    5
+#define V_CL5_IF_PAR_EN(x) ((x) << S_CL5_IF_PAR_EN)
+#define F_CL5_IF_PAR_EN    V_CL5_IF_PAR_EN(1U)
+
+#define S_CL4_IF_PAR_EN    4
+#define V_CL4_IF_PAR_EN(x) ((x) << S_CL4_IF_PAR_EN)
+#define F_CL4_IF_PAR_EN    V_CL4_IF_PAR_EN(1U)
+
+#define S_CL3_IF_PAR_EN    3
+#define V_CL3_IF_PAR_EN(x) ((x) << S_CL3_IF_PAR_EN)
+#define F_CL3_IF_PAR_EN    V_CL3_IF_PAR_EN(1U)
+
+#define S_CL2_IF_PAR_EN    2
+#define V_CL2_IF_PAR_EN(x) ((x) << S_CL2_IF_PAR_EN)
+#define F_CL2_IF_PAR_EN    V_CL2_IF_PAR_EN(1U)
+
+#define S_CL1_IF_PAR_EN    1
+#define V_CL1_IF_PAR_EN(x) ((x) << S_CL1_IF_PAR_EN)
+#define F_CL1_IF_PAR_EN    V_CL1_IF_PAR_EN(1U)
+
+#define S_CL0_IF_PAR_EN    0
+#define V_CL0_IF_PAR_EN(x) ((x) << S_CL0_IF_PAR_EN)
+#define F_CL0_IF_PAR_EN    V_CL0_IF_PAR_EN(1U)
+
+#define S_FUTURE_DEXPANSION_IPE    13
+#define M_FUTURE_DEXPANSION_IPE    0x7ffffU
+#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE)
+#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE)
+
+#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
+
+#define S_T5_FUTURE_DEXPANSION    13
+#define M_T5_FUTURE_DEXPANSION    0x7ffffU
+#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION)
+#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION)
+
+#define S_CL12_IF_PAR_ERROR    12
+#define V_CL12_IF_PAR_ERROR(x) ((x) << S_CL12_IF_PAR_ERROR)
+#define F_CL12_IF_PAR_ERROR    V_CL12_IF_PAR_ERROR(1U)
+
+#define S_CL11_IF_PAR_ERROR    11
+#define V_CL11_IF_PAR_ERROR(x) ((x) << S_CL11_IF_PAR_ERROR)
+#define F_CL11_IF_PAR_ERROR    V_CL11_IF_PAR_ERROR(1U)
+
+#define S_CL10_IF_PAR_ERROR    10
+#define V_CL10_IF_PAR_ERROR(x) ((x) << S_CL10_IF_PAR_ERROR)
+#define F_CL10_IF_PAR_ERROR    V_CL10_IF_PAR_ERROR(1U)
+
+#define S_CL9_IF_PAR_ERROR    9
+#define V_CL9_IF_PAR_ERROR(x) ((x) << S_CL9_IF_PAR_ERROR)
+#define F_CL9_IF_PAR_ERROR    V_CL9_IF_PAR_ERROR(1U)
+
+#define S_CL8_IF_PAR_ERROR    8
+#define V_CL8_IF_PAR_ERROR(x) ((x) << S_CL8_IF_PAR_ERROR)
+#define F_CL8_IF_PAR_ERROR    V_CL8_IF_PAR_ERROR(1U)
+
+#define S_CL7_IF_PAR_ERROR    7
+#define V_CL7_IF_PAR_ERROR(x) ((x) << S_CL7_IF_PAR_ERROR)
+#define F_CL7_IF_PAR_ERROR    V_CL7_IF_PAR_ERROR(1U)
+
+#define S_CL6_IF_PAR_ERROR    6
+#define V_CL6_IF_PAR_ERROR(x) ((x) << S_CL6_IF_PAR_ERROR)
+#define F_CL6_IF_PAR_ERROR    V_CL6_IF_PAR_ERROR(1U)
+
+#define S_CL5_IF_PAR_ERROR    5
+#define V_CL5_IF_PAR_ERROR(x) ((x) << S_CL5_IF_PAR_ERROR)
+#define F_CL5_IF_PAR_ERROR    V_CL5_IF_PAR_ERROR(1U)
+
+#define S_CL4_IF_PAR_ERROR    4
+#define V_CL4_IF_PAR_ERROR(x) ((x) << S_CL4_IF_PAR_ERROR)
+#define F_CL4_IF_PAR_ERROR    V_CL4_IF_PAR_ERROR(1U)
+
+#define S_CL3_IF_PAR_ERROR    3
+#define V_CL3_IF_PAR_ERROR(x) ((x) << S_CL3_IF_PAR_ERROR)
+#define F_CL3_IF_PAR_ERROR    V_CL3_IF_PAR_ERROR(1U)
+
+#define S_CL2_IF_PAR_ERROR    2
+#define V_CL2_IF_PAR_ERROR(x) ((x) << S_CL2_IF_PAR_ERROR)
+#define F_CL2_IF_PAR_ERROR    V_CL2_IF_PAR_ERROR(1U)
+
+#define S_CL1_IF_PAR_ERROR    1
+#define V_CL1_IF_PAR_ERROR(x) ((x) << S_CL1_IF_PAR_ERROR)
+#define F_CL1_IF_PAR_ERROR    V_CL1_IF_PAR_ERROR(1U)
+
+#define S_CL0_IF_PAR_ERROR    0
+#define V_CL0_IF_PAR_ERROR(x) ((x) << S_CL0_IF_PAR_ERROR)
+#define F_CL0_IF_PAR_ERROR    V_CL0_IF_PAR_ERROR(1U)
+
+#define S_FUTURE_DEXPANSION_IPS    13
+#define M_FUTURE_DEXPANSION_IPS    0x7ffffU
+#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS)
+#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS)
+
+#define A_MA_LOCAL_DEBUG_CFG 0x78f8
+
+#define S_DEBUG_OR    15
+#define V_DEBUG_OR(x) ((x) << S_DEBUG_OR)
+#define F_DEBUG_OR    V_DEBUG_OR(1U)
+
+#define S_DEBUG_HI    14
+#define V_DEBUG_HI(x) ((x) << S_DEBUG_HI)
+#define F_DEBUG_HI    V_DEBUG_HI(1U)
+
+#define S_DEBUG_RPT    13
+#define V_DEBUG_RPT(x) ((x) << S_DEBUG_RPT)
+#define F_DEBUG_RPT    V_DEBUG_RPT(1U)
+
+#define S_DEBUGPAGE    10
+#define M_DEBUGPAGE    0x7U
+#define V_DEBUGPAGE(x) ((x) << S_DEBUGPAGE)
+#define G_DEBUGPAGE(x) (((x) >> S_DEBUGPAGE) & M_DEBUGPAGE)
+
+#define A_MA_LOCAL_DEBUG_RPT 0x78fc
+#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa000
+
+#define S_CMDVLD0    31
+#define V_CMDVLD0(x) ((x) << S_CMDVLD0)
+#define F_CMDVLD0    V_CMDVLD0(1U)
+
+#define S_CMDRDY0    30
+#define V_CMDRDY0(x) ((x) << S_CMDRDY0)
+#define F_CMDRDY0    V_CMDRDY0(1U)
+
+#define S_CMDTYPE0    29
+#define V_CMDTYPE0(x) ((x) << S_CMDTYPE0)
+#define F_CMDTYPE0    V_CMDTYPE0(1U)
+
+#define S_CMDLEN0    21
+#define M_CMDLEN0    0xffU
+#define V_CMDLEN0(x) ((x) << S_CMDLEN0)
+#define G_CMDLEN0(x) (((x) >> S_CMDLEN0) & M_CMDLEN0)
+
+#define S_CMDADDR0    8
+#define M_CMDADDR0    0x1fffU
+#define V_CMDADDR0(x) ((x) << S_CMDADDR0)
+#define G_CMDADDR0(x) (((x) >> S_CMDADDR0) & M_CMDADDR0)
+
+#define S_WRDATAVLD0    7
+#define V_WRDATAVLD0(x) ((x) << S_WRDATAVLD0)
+#define F_WRDATAVLD0    V_WRDATAVLD0(1U)
+
+#define S_WRDATARDY0    6
+#define V_WRDATARDY0(x) ((x) << S_WRDATARDY0)
+#define F_WRDATARDY0    V_WRDATARDY0(1U)
+
+#define S_RDDATARDY0    5
+#define V_RDDATARDY0(x) ((x) << S_RDDATARDY0)
+#define F_RDDATARDY0    V_RDDATARDY0(1U)
+
+#define S_RDDATAVLD0    4
+#define V_RDDATAVLD0(x) ((x) << S_RDDATAVLD0)
+#define F_RDDATAVLD0    V_RDDATAVLD0(1U)
+
+#define S_RDDATA0    0
+#define M_RDDATA0    0xfU
+#define V_RDDATA0(x) ((x) << S_RDDATA0)
+#define G_RDDATA0(x) (((x) >> S_RDDATA0) & M_RDDATA0)
+
+#define A_MA_SGE_THREAD_1_CLIENT_INTERFACE_EXTERNAL 0xa001
+
+#define S_CMDVLD1    31
+#define V_CMDVLD1(x) ((x) << S_CMDVLD1)
+#define F_CMDVLD1    V_CMDVLD1(1U)
+
+#define S_CMDRDY1    30
+#define V_CMDRDY1(x) ((x) << S_CMDRDY1)
+#define F_CMDRDY1    V_CMDRDY1(1U)
+
+#define S_CMDTYPE1    29
+#define V_CMDTYPE1(x) ((x) << S_CMDTYPE1)
+#define F_CMDTYPE1    V_CMDTYPE1(1U)
+
+#define S_CMDLEN1    21
+#define M_CMDLEN1    0xffU
+#define V_CMDLEN1(x) ((x) << S_CMDLEN1)
+#define G_CMDLEN1(x) (((x) >> S_CMDLEN1) & M_CMDLEN1)
+
+#define S_CMDADDR1    8
+#define M_CMDADDR1    0x1fffU
+#define V_CMDADDR1(x) ((x) << S_CMDADDR1)
+#define G_CMDADDR1(x) (((x) >> S_CMDADDR1) & M_CMDADDR1)
+
+#define S_WRDATAVLD1    7
+#define V_WRDATAVLD1(x) ((x) << S_WRDATAVLD1)
+#define F_WRDATAVLD1    V_WRDATAVLD1(1U)
+
+#define S_WRDATARDY1    6
+#define V_WRDATARDY1(x) ((x) << S_WRDATARDY1)
+#define F_WRDATARDY1    V_WRDATARDY1(1U)
+
+#define S_RDDATARDY1    5
+#define V_RDDATARDY1(x) ((x) << S_RDDATARDY1)
+#define F_RDDATARDY1    V_RDDATARDY1(1U)
+
+#define S_RDDATAVLD1    4
+#define V_RDDATAVLD1(x) ((x) << S_RDDATAVLD1)
+#define F_RDDATAVLD1    V_RDDATAVLD1(1U)
+
+#define S_RDDATA1    0
+#define M_RDDATA1    0xfU
+#define V_RDDATA1(x) ((x) << S_RDDATA1)
+#define G_RDDATA1(x) (((x) >> S_RDDATA1) & M_RDDATA1)
+
+#define A_MA_ULP_TX_CLIENT_INTERFACE_EXTERNAL 0xa002
+
+#define S_CMDVLD2    31
+#define V_CMDVLD2(x) ((x) << S_CMDVLD2)
+#define F_CMDVLD2    V_CMDVLD2(1U)
+
+#define S_CMDRDY2    30
+#define V_CMDRDY2(x) ((x) << S_CMDRDY2)
+#define F_CMDRDY2    V_CMDRDY2(1U)
+
+#define S_CMDTYPE2    29
+#define V_CMDTYPE2(x) ((x) << S_CMDTYPE2)
+#define F_CMDTYPE2    V_CMDTYPE2(1U)
+
+#define S_CMDLEN2    21
+#define M_CMDLEN2    0xffU
+#define V_CMDLEN2(x) ((x) << S_CMDLEN2)
+#define G_CMDLEN2(x) (((x) >> S_CMDLEN2) & M_CMDLEN2)
+
+#define S_CMDADDR2    8
+#define M_CMDADDR2    0x1fffU
+#define V_CMDADDR2(x) ((x) << S_CMDADDR2)
+#define G_CMDADDR2(x) (((x) >> S_CMDADDR2) & M_CMDADDR2)
+
+#define S_WRDATAVLD2    7
+#define V_WRDATAVLD2(x) ((x) << S_WRDATAVLD2)
+#define F_WRDATAVLD2    V_WRDATAVLD2(1U)
+
+#define S_WRDATARDY2    6
+#define V_WRDATARDY2(x) ((x) << S_WRDATARDY2)
+#define F_WRDATARDY2    V_WRDATARDY2(1U)
+
+#define S_RDDATARDY2    5
+#define V_RDDATARDY2(x) ((x) << S_RDDATARDY2)
+#define F_RDDATARDY2    V_RDDATARDY2(1U)
+
+#define S_RDDATAVLD2    4
+#define V_RDDATAVLD2(x) ((x) << S_RDDATAVLD2)
+#define F_RDDATAVLD2    V_RDDATAVLD2(1U)
+
+#define S_RDDATA2    0
+#define M_RDDATA2    0xfU
+#define V_RDDATA2(x) ((x) << S_RDDATA2)
+#define G_RDDATA2(x) (((x) >> S_RDDATA2) & M_RDDATA2)
+
+#define A_MA_ULP_RX_CLIENT_INTERFACE_EXTERNAL 0xa003
+
+#define S_CMDVLD3    31
+#define V_CMDVLD3(x) ((x) << S_CMDVLD3)
+#define F_CMDVLD3    V_CMDVLD3(1U)
+
+#define S_CMDRDY3    30
+#define V_CMDRDY3(x) ((x) << S_CMDRDY3)
+#define F_CMDRDY3    V_CMDRDY3(1U)
+
+#define S_CMDTYPE3    29
+#define V_CMDTYPE3(x) ((x) << S_CMDTYPE3)
+#define F_CMDTYPE3    V_CMDTYPE3(1U)
+
+#define S_CMDLEN3    21
+#define M_CMDLEN3    0xffU
+#define V_CMDLEN3(x) ((x) << S_CMDLEN3)
+#define G_CMDLEN3(x) (((x) >> S_CMDLEN3) & M_CMDLEN3)
+
+#define S_CMDADDR3    8
+#define M_CMDADDR3    0x1fffU
+#define V_CMDADDR3(x) ((x) << S_CMDADDR3)
+#define G_CMDADDR3(x) (((x) >> S_CMDADDR3) & M_CMDADDR3)
+
+#define S_WRDATAVLD3    7
+#define V_WRDATAVLD3(x) ((x) << S_WRDATAVLD3)
+#define F_WRDATAVLD3    V_WRDATAVLD3(1U)
+
+#define S_WRDATARDY3    6
+#define V_WRDATARDY3(x) ((x) << S_WRDATARDY3)
+#define F_WRDATARDY3    V_WRDATARDY3(1U)
+
+#define S_RDDATARDY3    5
+#define V_RDDATARDY3(x) ((x) << S_RDDATARDY3)
+#define F_RDDATARDY3    V_RDDATARDY3(1U)
+
+#define S_RDDATAVLD3    4
+#define V_RDDATAVLD3(x) ((x) << S_RDDATAVLD3)
+#define F_RDDATAVLD3    V_RDDATAVLD3(1U)
+
+#define S_RDDATA3    0
+#define M_RDDATA3    0xfU
+#define V_RDDATA3(x) ((x) << S_RDDATA3)
+#define G_RDDATA3(x) (((x) >> S_RDDATA3) & M_RDDATA3)
+
+#define A_MA_ULP_TX_RX_CLIENT_INTERFACE_EXTERNAL 0xa004
+
+#define S_CMDVLD4    31
+#define V_CMDVLD4(x) ((x) << S_CMDVLD4)
+#define F_CMDVLD4    V_CMDVLD4(1U)
+
+#define S_CMDRDY4    30
+#define V_CMDRDY4(x) ((x) << S_CMDRDY4)
+#define F_CMDRDY4    V_CMDRDY4(1U)
+
+#define S_CMDTYPE4    29
+#define V_CMDTYPE4(x) ((x) << S_CMDTYPE4)
+#define F_CMDTYPE4    V_CMDTYPE4(1U)
+
+#define S_CMDLEN4    21
+#define M_CMDLEN4    0xffU
+#define V_CMDLEN4(x) ((x) << S_CMDLEN4)
+#define G_CMDLEN4(x) (((x) >> S_CMDLEN4) & M_CMDLEN4)
+
+#define S_CMDADDR4    8
+#define M_CMDADDR4    0x1fffU
+#define V_CMDADDR4(x) ((x) << S_CMDADDR4)
+#define G_CMDADDR4(x) (((x) >> S_CMDADDR4) & M_CMDADDR4)
+
+#define S_WRDATAVLD4    7
+#define V_WRDATAVLD4(x) ((x) << S_WRDATAVLD4)
+#define F_WRDATAVLD4    V_WRDATAVLD4(1U)
+
+#define S_WRDATARDY4    6
+#define V_WRDATARDY4(x) ((x) << S_WRDATARDY4)
+#define F_WRDATARDY4    V_WRDATARDY4(1U)
+
+#define S_RDDATARDY4    5
+#define V_RDDATARDY4(x) ((x) << S_RDDATARDY4)
+#define F_RDDATARDY4    V_RDDATARDY4(1U)
+
+#define S_RDDATAVLD4    4
+#define V_RDDATAVLD4(x) ((x) << S_RDDATAVLD4)
+#define F_RDDATAVLD4    V_RDDATAVLD4(1U)
+
+#define S_RDDATA4    0
+#define M_RDDATA4    0xfU
+#define V_RDDATA4(x) ((x) << S_RDDATA4)
+#define G_RDDATA4(x) (((x) >> S_RDDATA4) & M_RDDATA4)
+
+#define A_MA_TP_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa005
+
+#define S_CMDVLD5    31
+#define V_CMDVLD5(x) ((x) << S_CMDVLD5)
+#define F_CMDVLD5    V_CMDVLD5(1U)
+
+#define S_CMDRDY5    30
+#define V_CMDRDY5(x) ((x) << S_CMDRDY5)
+#define F_CMDRDY5    V_CMDRDY5(1U)
+
+#define S_CMDTYPE5    29
+#define V_CMDTYPE5(x) ((x) << S_CMDTYPE5)
+#define F_CMDTYPE5    V_CMDTYPE5(1U)
+
+#define S_CMDLEN5    21
+#define M_CMDLEN5    0xffU
+#define V_CMDLEN5(x) ((x) << S_CMDLEN5)
+#define G_CMDLEN5(x) (((x) >> S_CMDLEN5) & M_CMDLEN5)
+
+#define S_CMDADDR5    8
+#define M_CMDADDR5    0x1fffU
+#define V_CMDADDR5(x) ((x) << S_CMDADDR5)
+#define G_CMDADDR5(x) (((x) >> S_CMDADDR5) & M_CMDADDR5)
+
+#define S_WRDATAVLD5    7
+#define V_WRDATAVLD5(x) ((x) << S_WRDATAVLD5)
+#define F_WRDATAVLD5    V_WRDATAVLD5(1U)
+
+#define S_WRDATARDY5    6
+#define V_WRDATARDY5(x) ((x) << S_WRDATARDY5)
+#define F_WRDATARDY5    V_WRDATARDY5(1U)
+
+#define S_RDDATARDY5    5
+#define V_RDDATARDY5(x) ((x) << S_RDDATARDY5)
+#define F_RDDATARDY5    V_RDDATARDY5(1U)
+
+#define S_RDDATAVLD5    4
+#define V_RDDATAVLD5(x) ((x) << S_RDDATAVLD5)
+#define F_RDDATAVLD5    V_RDDATAVLD5(1U)
+
+#define S_RDDATA5    0
+#define M_RDDATA5    0xfU
+#define V_RDDATA5(x) ((x) << S_RDDATA5)
+#define G_RDDATA5(x) (((x) >> S_RDDATA5) & M_RDDATA5)
+
+#define A_MA_TP_THREAD_1_CLIENT_INTERFACE_EXTERNAL 0xa006
+
+#define S_CMDVLD6    31
+#define V_CMDVLD6(x) ((x) << S_CMDVLD6)
+#define F_CMDVLD6    V_CMDVLD6(1U)
+
+#define S_CMDRDY6    30
+#define V_CMDRDY6(x) ((x) << S_CMDRDY6)
+#define F_CMDRDY6    V_CMDRDY6(1U)
+
+#define S_CMDTYPE6    29
+#define V_CMDTYPE6(x) ((x) << S_CMDTYPE6)
+#define F_CMDTYPE6    V_CMDTYPE6(1U)
+
+#define S_CMDLEN6    21
+#define M_CMDLEN6    0xffU
+#define V_CMDLEN6(x) ((x) << S_CMDLEN6)
+#define G_CMDLEN6(x) (((x) >> S_CMDLEN6) & M_CMDLEN6)
+
+#define S_CMDADDR6    8
+#define M_CMDADDR6    0x1fffU
+#define V_CMDADDR6(x) ((x) << S_CMDADDR6)
+#define G_CMDADDR6(x) (((x) >> S_CMDADDR6) & M_CMDADDR6)
+
+#define S_WRDATAVLD6    7
+#define V_WRDATAVLD6(x) ((x) << S_WRDATAVLD6)
+#define F_WRDATAVLD6    V_WRDATAVLD6(1U)
+
+#define S_WRDATARDY6    6
+#define V_WRDATARDY6(x) ((x) << S_WRDATARDY6)
+#define F_WRDATARDY6    V_WRDATARDY6(1U)
+
+#define S_RDDATARDY6    5
+#define V_RDDATARDY6(x) ((x) << S_RDDATARDY6)
+#define F_RDDATARDY6    V_RDDATARDY6(1U)
+
+#define S_RDDATAVLD6    4
+#define V_RDDATAVLD6(x) ((x) << S_RDDATAVLD6)
+#define F_RDDATAVLD6    V_RDDATAVLD6(1U)
+
+#define S_RDDATA6    0
+#define M_RDDATA6    0xfU
+#define V_RDDATA6(x) ((x) << S_RDDATA6)
+#define G_RDDATA6(x) (((x) >> S_RDDATA6) & M_RDDATA6)
+
+#define A_MA_LE_CLIENT_INTERFACE_EXTERNAL 0xa007
+
+#define S_CMDVLD7    31
+#define V_CMDVLD7(x) ((x) << S_CMDVLD7)
+#define F_CMDVLD7    V_CMDVLD7(1U)
+
+#define S_CMDRDY7    30
+#define V_CMDRDY7(x) ((x) << S_CMDRDY7)
+#define F_CMDRDY7    V_CMDRDY7(1U)
+
+#define S_CMDTYPE7    29
+#define V_CMDTYPE7(x) ((x) << S_CMDTYPE7)
+#define F_CMDTYPE7    V_CMDTYPE7(1U)
+
+#define S_CMDLEN7    21
+#define M_CMDLEN7    0xffU
+#define V_CMDLEN7(x) ((x) << S_CMDLEN7)
+#define G_CMDLEN7(x) (((x) >> S_CMDLEN7) & M_CMDLEN7)
+
+#define S_CMDADDR7    8
+#define M_CMDADDR7    0x1fffU
+#define V_CMDADDR7(x) ((x) << S_CMDADDR7)
+#define G_CMDADDR7(x) (((x) >> S_CMDADDR7) & M_CMDADDR7)
+
+#define S_WRDATAVLD7    7
+#define V_WRDATAVLD7(x) ((x) << S_WRDATAVLD7)
+#define F_WRDATAVLD7    V_WRDATAVLD7(1U)
+
+#define S_WRDATARDY7    6
+#define V_WRDATARDY7(x) ((x) << S_WRDATARDY7)
+#define F_WRDATARDY7    V_WRDATARDY7(1U)
+
+#define S_RDDATARDY7    5
+#define V_RDDATARDY7(x) ((x) << S_RDDATARDY7)
+#define F_RDDATARDY7    V_RDDATARDY7(1U)
+
+#define S_RDDATAVLD7    4
+#define V_RDDATAVLD7(x) ((x) << S_RDDATAVLD7)
+#define F_RDDATAVLD7    V_RDDATAVLD7(1U)
+
+#define S_RDDATA7    0
+#define M_RDDATA7    0xfU
+#define V_RDDATA7(x) ((x) << S_RDDATA7)
+#define G_RDDATA7(x) (((x) >> S_RDDATA7) & M_RDDATA7)
+
+#define A_MA_CIM_CLIENT_INTERFACE_EXTERNAL 0xa008
+
+#define S_CMDVLD8    31
+#define V_CMDVLD8(x) ((x) << S_CMDVLD8)
+#define F_CMDVLD8    V_CMDVLD8(1U)
+
+#define S_CMDRDY8    30
+#define V_CMDRDY8(x) ((x) << S_CMDRDY8)
+#define F_CMDRDY8    V_CMDRDY8(1U)
+
+#define S_CMDTYPE8    29
+#define V_CMDTYPE8(x) ((x) << S_CMDTYPE8)
+#define F_CMDTYPE8    V_CMDTYPE8(1U)
+
+#define S_CMDLEN8    21
+#define M_CMDLEN8    0xffU
+#define V_CMDLEN8(x) ((x) << S_CMDLEN8)
+#define G_CMDLEN8(x) (((x) >> S_CMDLEN8) & M_CMDLEN8)
+
+#define S_CMDADDR8    8
+#define M_CMDADDR8    0x1fffU
+#define V_CMDADDR8(x) ((x) << S_CMDADDR8)
+#define G_CMDADDR8(x) (((x) >> S_CMDADDR8) & M_CMDADDR8)
+
+#define S_WRDATAVLD8    7
+#define V_WRDATAVLD8(x) ((x) << S_WRDATAVLD8)
+#define F_WRDATAVLD8    V_WRDATAVLD8(1U)
+
+#define S_WRDATARDY8    6
+#define V_WRDATARDY8(x) ((x) << S_WRDATARDY8)
+#define F_WRDATARDY8    V_WRDATARDY8(1U)
+
+#define S_RDDATARDY8    5
+#define V_RDDATARDY8(x) ((x) << S_RDDATARDY8)
+#define F_RDDATARDY8    V_RDDATARDY8(1U)
+
+#define S_RDDATAVLD8    4
+#define V_RDDATAVLD8(x) ((x) << S_RDDATAVLD8)
+#define F_RDDATAVLD8    V_RDDATAVLD8(1U)
+
+#define S_RDDATA8    0
+#define M_RDDATA8    0xfU
+#define V_RDDATA8(x) ((x) << S_RDDATA8)
+#define G_RDDATA8(x) (((x) >> S_RDDATA8) & M_RDDATA8)
+
+#define A_MA_PCIE_CLIENT_INTERFACE_EXTERNAL 0xa009
+
+#define S_CMDVLD9    31
+#define V_CMDVLD9(x) ((x) << S_CMDVLD9)
+#define F_CMDVLD9    V_CMDVLD9(1U)
+
+#define S_CMDRDY9    30
+#define V_CMDRDY9(x) ((x) << S_CMDRDY9)
+#define F_CMDRDY9    V_CMDRDY9(1U)
+
+#define S_CMDTYPE9    29
+#define V_CMDTYPE9(x) ((x) << S_CMDTYPE9)
+#define F_CMDTYPE9    V_CMDTYPE9(1U)
+
+#define S_CMDLEN9    21
+#define M_CMDLEN9    0xffU
+#define V_CMDLEN9(x) ((x) << S_CMDLEN9)
+#define G_CMDLEN9(x) (((x) >> S_CMDLEN9) & M_CMDLEN9)
+
+#define S_CMDADDR9    8
+#define M_CMDADDR9    0x1fffU
+#define V_CMDADDR9(x) ((x) << S_CMDADDR9)
+#define G_CMDADDR9(x) (((x) >> S_CMDADDR9) & M_CMDADDR9)
+
+#define S_WRDATAVLD9    7
+#define V_WRDATAVLD9(x) ((x) << S_WRDATAVLD9)
+#define F_WRDATAVLD9    V_WRDATAVLD9(1U)
+
+#define S_WRDATARDY9    6
+#define V_WRDATARDY9(x) ((x) << S_WRDATARDY9)
+#define F_WRDATARDY9    V_WRDATARDY9(1U)
+
+#define S_RDDATARDY9    5
+#define V_RDDATARDY9(x) ((x) << S_RDDATARDY9)
+#define F_RDDATARDY9    V_RDDATARDY9(1U)
+
+#define S_RDDATAVLD9    4
+#define V_RDDATAVLD9(x) ((x) << S_RDDATAVLD9)
+#define F_RDDATAVLD9    V_RDDATAVLD9(1U)
+
+#define S_RDDATA9    0
+#define M_RDDATA9    0xfU
+#define V_RDDATA9(x) ((x) << S_RDDATA9)
+#define G_RDDATA9(x) (((x) >> S_RDDATA9) & M_RDDATA9)
+
+#define A_MA_PM_TX_CLIENT_INTERFACE_EXTERNAL 0xa00a
+
+#define S_CMDVLD10    31
+#define V_CMDVLD10(x) ((x) << S_CMDVLD10)
+#define F_CMDVLD10    V_CMDVLD10(1U)
+
+#define S_CMDRDY10    30
+#define V_CMDRDY10(x) ((x) << S_CMDRDY10)
+#define F_CMDRDY10    V_CMDRDY10(1U)
+
+#define S_CMDTYPE10    29
+#define V_CMDTYPE10(x) ((x) << S_CMDTYPE10)
+#define F_CMDTYPE10    V_CMDTYPE10(1U)
+
+#define S_CMDLEN10    21
+#define M_CMDLEN10    0xffU
+#define V_CMDLEN10(x) ((x) << S_CMDLEN10)
+#define G_CMDLEN10(x) (((x) >> S_CMDLEN10) & M_CMDLEN10)
+
+#define S_CMDADDR10    8
+#define M_CMDADDR10    0x1fffU
+#define V_CMDADDR10(x) ((x) << S_CMDADDR10)
+#define G_CMDADDR10(x) (((x) >> S_CMDADDR10) & M_CMDADDR10)
+
+#define S_WRDATAVLD10    7
+#define V_WRDATAVLD10(x) ((x) << S_WRDATAVLD10)
+#define F_WRDATAVLD10    V_WRDATAVLD10(1U)
+
+#define S_WRDATARDY10    6
+#define V_WRDATARDY10(x) ((x) << S_WRDATARDY10)
+#define F_WRDATARDY10    V_WRDATARDY10(1U)
+
+#define S_RDDATARDY10    5
+#define V_RDDATARDY10(x) ((x) << S_RDDATARDY10)
+#define F_RDDATARDY10    V_RDDATARDY10(1U)
+
+#define S_RDDATAVLD10    4
+#define V_RDDATAVLD10(x) ((x) << S_RDDATAVLD10)
+#define F_RDDATAVLD10    V_RDDATAVLD10(1U)
+
+#define S_RDDATA10    0
+#define M_RDDATA10    0xfU
+#define V_RDDATA10(x) ((x) << S_RDDATA10)
+#define G_RDDATA10(x) (((x) >> S_RDDATA10) & M_RDDATA10)
+
+#define A_MA_PM_RX_CLIENT_INTERFACE_EXTERNAL 0xa00b
+
+#define S_CMDVLD11    31
+#define V_CMDVLD11(x) ((x) << S_CMDVLD11)
+#define F_CMDVLD11    V_CMDVLD11(1U)
+
+#define S_CMDRDY11    30
+#define V_CMDRDY11(x) ((x) << S_CMDRDY11)
+#define F_CMDRDY11    V_CMDRDY11(1U)
+
+#define S_CMDTYPE11    29
+#define V_CMDTYPE11(x) ((x) << S_CMDTYPE11)
+#define F_CMDTYPE11    V_CMDTYPE11(1U)
+
+#define S_CMDLEN11    21
+#define M_CMDLEN11    0xffU
+#define V_CMDLEN11(x) ((x) << S_CMDLEN11)
+#define G_CMDLEN11(x) (((x) >> S_CMDLEN11) & M_CMDLEN11)
+
+#define S_CMDADDR11    8
+#define M_CMDADDR11    0x1fffU
+#define V_CMDADDR11(x) ((x) << S_CMDADDR11)
+#define G_CMDADDR11(x) (((x) >> S_CMDADDR11) & M_CMDADDR11)
+
+#define S_WRDATAVLD11    7
+#define V_WRDATAVLD11(x) ((x) << S_WRDATAVLD11)
+#define F_WRDATAVLD11    V_WRDATAVLD11(1U)
+
+#define S_WRDATARDY11    6
+#define V_WRDATARDY11(x) ((x) << S_WRDATARDY11)
+#define F_WRDATARDY11    V_WRDATARDY11(1U)
+
+#define S_RDDATARDY11    5
+#define V_RDDATARDY11(x) ((x) << S_RDDATARDY11)
+#define F_RDDATARDY11    V_RDDATARDY11(1U)
+
+#define S_RDDATAVLD11    4
+#define V_RDDATAVLD11(x) ((x) << S_RDDATAVLD11)
+#define F_RDDATAVLD11    V_RDDATAVLD11(1U)
+
+#define S_RDDATA11    0
+#define M_RDDATA11    0xfU
+#define V_RDDATA11(x) ((x) << S_RDDATA11)
+#define G_RDDATA11(x) (((x) >> S_RDDATA11) & M_RDDATA11)
+
+#define A_MA_HMA_CLIENT_INTERFACE_EXTERNAL 0xa00c
+
+#define S_CMDVLD12    31
+#define V_CMDVLD12(x) ((x) << S_CMDVLD12)
+#define F_CMDVLD12    V_CMDVLD12(1U)
+
+#define S_CMDRDY12    30
+#define V_CMDRDY12(x) ((x) << S_CMDRDY12)
+#define F_CMDRDY12    V_CMDRDY12(1U)
+
+#define S_CMDTYPE12    29
+#define V_CMDTYPE12(x) ((x) << S_CMDTYPE12)
+#define F_CMDTYPE12    V_CMDTYPE12(1U)
+
+#define S_CMDLEN12    21
+#define M_CMDLEN12    0xffU
+#define V_CMDLEN12(x) ((x) << S_CMDLEN12)
+#define G_CMDLEN12(x) (((x) >> S_CMDLEN12) & M_CMDLEN12)
+
+#define S_CMDADDR12    8
+#define M_CMDADDR12    0x1fffU
+#define V_CMDADDR12(x) ((x) << S_CMDADDR12)
+#define G_CMDADDR12(x) (((x) >> S_CMDADDR12) & M_CMDADDR12)
+
+#define S_WRDATAVLD12    7
+#define V_WRDATAVLD12(x) ((x) << S_WRDATAVLD12)
+#define F_WRDATAVLD12    V_WRDATAVLD12(1U)
+
+#define S_WRDATARDY12    6
+#define V_WRDATARDY12(x) ((x) << S_WRDATARDY12)
+#define F_WRDATARDY12    V_WRDATARDY12(1U)
+
+#define S_RDDATARDY12    5
+#define V_RDDATARDY12(x) ((x) << S_RDDATARDY12)
+#define F_RDDATARDY12    V_RDDATARDY12(1U)
+
+#define S_RDDATAVLD12    4
+#define V_RDDATAVLD12(x) ((x) << S_RDDATAVLD12)
+#define F_RDDATAVLD12    V_RDDATAVLD12(1U)
+
+#define S_RDDATA12    0
+#define M_RDDATA12    0xfU
+#define V_RDDATA12(x) ((x) << S_RDDATA12)
+#define G_RDDATA12(x) (((x) >> S_RDDATA12) & M_RDDATA12)
+
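+/*
+ * Arbiter interface status registers (a reading aid, inferred from the
+ * field definitions that follow): each REG0 packs client interfaces
+ * CI0..CI7, four status bits per client from bit 31 downward: request
+ * (CIn_ARBm_REQ), grant (ARBm_CIn_GNT), write-data valid
+ * (CIn_DMm_WDATA_VLD) and read-data valid (DMm_CIn_RDATA_VLD).
+ * The REG1 variants further below continue with CI8..CI12.
+ */
+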
+#define A_MA_TARGET_0_ARBITER_INTERFACE_EXTERNAL_REG0 0xa00d
+
+#define S_CI0_ARB0_REQ    31
+#define V_CI0_ARB0_REQ(x) ((x) << S_CI0_ARB0_REQ)
+#define F_CI0_ARB0_REQ    V_CI0_ARB0_REQ(1U)
+
+#define S_ARB0_CI0_GNT    30
+#define V_ARB0_CI0_GNT(x) ((x) << S_ARB0_CI0_GNT)
+#define F_ARB0_CI0_GNT    V_ARB0_CI0_GNT(1U)
+
+#define S_CI0_DM0_WDATA_VLD    29
+#define V_CI0_DM0_WDATA_VLD(x) ((x) << S_CI0_DM0_WDATA_VLD)
+#define F_CI0_DM0_WDATA_VLD    V_CI0_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI0_RDATA_VLD    28
+#define V_DM0_CI0_RDATA_VLD(x) ((x) << S_DM0_CI0_RDATA_VLD)
+#define F_DM0_CI0_RDATA_VLD    V_DM0_CI0_RDATA_VLD(1U)
+
+#define S_CI1_ARB0_REQ    27
+#define V_CI1_ARB0_REQ(x) ((x) << S_CI1_ARB0_REQ)
+#define F_CI1_ARB0_REQ    V_CI1_ARB0_REQ(1U)
+
+#define S_ARB0_CI1_GNT    26
+#define V_ARB0_CI1_GNT(x) ((x) << S_ARB0_CI1_GNT)
+#define F_ARB0_CI1_GNT    V_ARB0_CI1_GNT(1U)
+
+#define S_CI1_DM0_WDATA_VLD    25
+#define V_CI1_DM0_WDATA_VLD(x) ((x) << S_CI1_DM0_WDATA_VLD)
+#define F_CI1_DM0_WDATA_VLD    V_CI1_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI1_RDATA_VLD    24
+#define V_DM0_CI1_RDATA_VLD(x) ((x) << S_DM0_CI1_RDATA_VLD)
+#define F_DM0_CI1_RDATA_VLD    V_DM0_CI1_RDATA_VLD(1U)
+
+#define S_CI2_ARB0_REQ    23
+#define V_CI2_ARB0_REQ(x) ((x) << S_CI2_ARB0_REQ)
+#define F_CI2_ARB0_REQ    V_CI2_ARB0_REQ(1U)
+
+#define S_ARB0_CI2_GNT    22
+#define V_ARB0_CI2_GNT(x) ((x) << S_ARB0_CI2_GNT)
+#define F_ARB0_CI2_GNT    V_ARB0_CI2_GNT(1U)
+
+#define S_CI2_DM0_WDATA_VLD    21
+#define V_CI2_DM0_WDATA_VLD(x) ((x) << S_CI2_DM0_WDATA_VLD)
+#define F_CI2_DM0_WDATA_VLD    V_CI2_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI2_RDATA_VLD    20
+#define V_DM0_CI2_RDATA_VLD(x) ((x) << S_DM0_CI2_RDATA_VLD)
+#define F_DM0_CI2_RDATA_VLD    V_DM0_CI2_RDATA_VLD(1U)
+
+#define S_CI3_ARB0_REQ    19
+#define V_CI3_ARB0_REQ(x) ((x) << S_CI3_ARB0_REQ)
+#define F_CI3_ARB0_REQ    V_CI3_ARB0_REQ(1U)
+
+#define S_ARB0_CI3_GNT    18
+#define V_ARB0_CI3_GNT(x) ((x) << S_ARB0_CI3_GNT)
+#define F_ARB0_CI3_GNT    V_ARB0_CI3_GNT(1U)
+
+#define S_CI3_DM0_WDATA_VLD    17
+#define V_CI3_DM0_WDATA_VLD(x) ((x) << S_CI3_DM0_WDATA_VLD)
+#define F_CI3_DM0_WDATA_VLD    V_CI3_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI3_RDATA_VLD    16
+#define V_DM0_CI3_RDATA_VLD(x) ((x) << S_DM0_CI3_RDATA_VLD)
+#define F_DM0_CI3_RDATA_VLD    V_DM0_CI3_RDATA_VLD(1U)
+
+#define S_CI4_ARB0_REQ    15
+#define V_CI4_ARB0_REQ(x) ((x) << S_CI4_ARB0_REQ)
+#define F_CI4_ARB0_REQ    V_CI4_ARB0_REQ(1U)
+
+#define S_ARB0_CI4_GNT    14
+#define V_ARB0_CI4_GNT(x) ((x) << S_ARB0_CI4_GNT)
+#define F_ARB0_CI4_GNT    V_ARB0_CI4_GNT(1U)
+
+#define S_CI4_DM0_WDATA_VLD    13
+#define V_CI4_DM0_WDATA_VLD(x) ((x) << S_CI4_DM0_WDATA_VLD)
+#define F_CI4_DM0_WDATA_VLD    V_CI4_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI4_RDATA_VLD    12
+#define V_DM0_CI4_RDATA_VLD(x) ((x) << S_DM0_CI4_RDATA_VLD)
+#define F_DM0_CI4_RDATA_VLD    V_DM0_CI4_RDATA_VLD(1U)
+
+#define S_CI5_ARB0_REQ    11
+#define V_CI5_ARB0_REQ(x) ((x) << S_CI5_ARB0_REQ)
+#define F_CI5_ARB0_REQ    V_CI5_ARB0_REQ(1U)
+
+#define S_ARB0_CI5_GNT    10
+#define V_ARB0_CI5_GNT(x) ((x) << S_ARB0_CI5_GNT)
+#define F_ARB0_CI5_GNT    V_ARB0_CI5_GNT(1U)
+
+#define S_CI5_DM0_WDATA_VLD    9
+#define V_CI5_DM0_WDATA_VLD(x) ((x) << S_CI5_DM0_WDATA_VLD)
+#define F_CI5_DM0_WDATA_VLD    V_CI5_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI5_RDATA_VLD    8
+#define V_DM0_CI5_RDATA_VLD(x) ((x) << S_DM0_CI5_RDATA_VLD)
+#define F_DM0_CI5_RDATA_VLD    V_DM0_CI5_RDATA_VLD(1U)
+
+#define S_CI6_ARB0_REQ    7
+#define V_CI6_ARB0_REQ(x) ((x) << S_CI6_ARB0_REQ)
+#define F_CI6_ARB0_REQ    V_CI6_ARB0_REQ(1U)
+
+#define S_ARB0_CI6_GNT    6
+#define V_ARB0_CI6_GNT(x) ((x) << S_ARB0_CI6_GNT)
+#define F_ARB0_CI6_GNT    V_ARB0_CI6_GNT(1U)
+
+#define S_CI6_DM0_WDATA_VLD    5
+#define V_CI6_DM0_WDATA_VLD(x) ((x) << S_CI6_DM0_WDATA_VLD)
+#define F_CI6_DM0_WDATA_VLD    V_CI6_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI6_RDATA_VLD    4
+#define V_DM0_CI6_RDATA_VLD(x) ((x) << S_DM0_CI6_RDATA_VLD)
+#define F_DM0_CI6_RDATA_VLD    V_DM0_CI6_RDATA_VLD(1U)
+
+#define S_CI7_ARB0_REQ    3
+#define V_CI7_ARB0_REQ(x) ((x) << S_CI7_ARB0_REQ)
+#define F_CI7_ARB0_REQ    V_CI7_ARB0_REQ(1U)
+
+#define S_ARB0_CI7_GNT    2
+#define V_ARB0_CI7_GNT(x) ((x) << S_ARB0_CI7_GNT)
+#define F_ARB0_CI7_GNT    V_ARB0_CI7_GNT(1U)
+
+#define S_CI7_DM0_WDATA_VLD    1
+#define V_CI7_DM0_WDATA_VLD(x) ((x) << S_CI7_DM0_WDATA_VLD)
+#define F_CI7_DM0_WDATA_VLD    V_CI7_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI7_RDATA_VLD    0
+#define V_DM0_CI7_RDATA_VLD(x) ((x) << S_DM0_CI7_RDATA_VLD)
+#define F_DM0_CI7_RDATA_VLD    V_DM0_CI7_RDATA_VLD(1U)
+
+#define A_MA_TARGET_1_ARBITER_INTERFACE_EXTERNAL_REG0 0xa00e
+
+#define S_CI0_ARB1_REQ    31
+#define V_CI0_ARB1_REQ(x) ((x) << S_CI0_ARB1_REQ)
+#define F_CI0_ARB1_REQ    V_CI0_ARB1_REQ(1U)
+
+#define S_ARB1_CI0_GNT    30
+#define V_ARB1_CI0_GNT(x) ((x) << S_ARB1_CI0_GNT)
+#define F_ARB1_CI0_GNT    V_ARB1_CI0_GNT(1U)
+
+#define S_CI0_DM1_WDATA_VLD    29
+#define V_CI0_DM1_WDATA_VLD(x) ((x) << S_CI0_DM1_WDATA_VLD)
+#define F_CI0_DM1_WDATA_VLD    V_CI0_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI0_RDATA_VLD    28
+#define V_DM1_CI0_RDATA_VLD(x) ((x) << S_DM1_CI0_RDATA_VLD)
+#define F_DM1_CI0_RDATA_VLD    V_DM1_CI0_RDATA_VLD(1U)
+
+#define S_CI1_ARB1_REQ    27
+#define V_CI1_ARB1_REQ(x) ((x) << S_CI1_ARB1_REQ)
+#define F_CI1_ARB1_REQ    V_CI1_ARB1_REQ(1U)
+
+#define S_ARB1_CI1_GNT    26
+#define V_ARB1_CI1_GNT(x) ((x) << S_ARB1_CI1_GNT)
+#define F_ARB1_CI1_GNT    V_ARB1_CI1_GNT(1U)
+
+#define S_CI1_DM1_WDATA_VLD    25
+#define V_CI1_DM1_WDATA_VLD(x) ((x) << S_CI1_DM1_WDATA_VLD)
+#define F_CI1_DM1_WDATA_VLD    V_CI1_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI1_RDATA_VLD    24
+#define V_DM1_CI1_RDATA_VLD(x) ((x) << S_DM1_CI1_RDATA_VLD)
+#define F_DM1_CI1_RDATA_VLD    V_DM1_CI1_RDATA_VLD(1U)
+
+#define S_CI2_ARB1_REQ    23
+#define V_CI2_ARB1_REQ(x) ((x) << S_CI2_ARB1_REQ)
+#define F_CI2_ARB1_REQ    V_CI2_ARB1_REQ(1U)
+
+#define S_ARB1_CI2_GNT    22
+#define V_ARB1_CI2_GNT(x) ((x) << S_ARB1_CI2_GNT)
+#define F_ARB1_CI2_GNT    V_ARB1_CI2_GNT(1U)
+
+#define S_CI2_DM1_WDATA_VLD    21
+#define V_CI2_DM1_WDATA_VLD(x) ((x) << S_CI2_DM1_WDATA_VLD)
+#define F_CI2_DM1_WDATA_VLD    V_CI2_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI2_RDATA_VLD    20
+#define V_DM1_CI2_RDATA_VLD(x) ((x) << S_DM1_CI2_RDATA_VLD)
+#define F_DM1_CI2_RDATA_VLD    V_DM1_CI2_RDATA_VLD(1U)
+
+#define S_CI3_ARB1_REQ    19
+#define V_CI3_ARB1_REQ(x) ((x) << S_CI3_ARB1_REQ)
+#define F_CI3_ARB1_REQ    V_CI3_ARB1_REQ(1U)
+
+#define S_ARB1_CI3_GNT    18
+#define V_ARB1_CI3_GNT(x) ((x) << S_ARB1_CI3_GNT)
+#define F_ARB1_CI3_GNT    V_ARB1_CI3_GNT(1U)
+
+#define S_CI3_DM1_WDATA_VLD    17
+#define V_CI3_DM1_WDATA_VLD(x) ((x) << S_CI3_DM1_WDATA_VLD)
+#define F_CI3_DM1_WDATA_VLD    V_CI3_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI3_RDATA_VLD    16
+#define V_DM1_CI3_RDATA_VLD(x) ((x) << S_DM1_CI3_RDATA_VLD)
+#define F_DM1_CI3_RDATA_VLD    V_DM1_CI3_RDATA_VLD(1U)
+
+#define S_CI4_ARB1_REQ    15
+#define V_CI4_ARB1_REQ(x) ((x) << S_CI4_ARB1_REQ)
+#define F_CI4_ARB1_REQ    V_CI4_ARB1_REQ(1U)
+
+#define S_ARB1_CI4_GNT    14
+#define V_ARB1_CI4_GNT(x) ((x) << S_ARB1_CI4_GNT)
+#define F_ARB1_CI4_GNT    V_ARB1_CI4_GNT(1U)
+
+#define S_CI4_DM1_WDATA_VLD    13
+#define V_CI4_DM1_WDATA_VLD(x) ((x) << S_CI4_DM1_WDATA_VLD)
+#define F_CI4_DM1_WDATA_VLD    V_CI4_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI4_RDATA_VLD    12
+#define V_DM1_CI4_RDATA_VLD(x) ((x) << S_DM1_CI4_RDATA_VLD)
+#define F_DM1_CI4_RDATA_VLD    V_DM1_CI4_RDATA_VLD(1U)
+
+#define S_CI5_ARB1_REQ    11
+#define V_CI5_ARB1_REQ(x) ((x) << S_CI5_ARB1_REQ)
+#define F_CI5_ARB1_REQ    V_CI5_ARB1_REQ(1U)
+
+#define S_ARB1_CI5_GNT    10
+#define V_ARB1_CI5_GNT(x) ((x) << S_ARB1_CI5_GNT)
+#define F_ARB1_CI5_GNT    V_ARB1_CI5_GNT(1U)
+
+#define S_CI5_DM1_WDATA_VLD    9
+#define V_CI5_DM1_WDATA_VLD(x) ((x) << S_CI5_DM1_WDATA_VLD)
+#define F_CI5_DM1_WDATA_VLD    V_CI5_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI5_RDATA_VLD    8
+#define V_DM1_CI5_RDATA_VLD(x) ((x) << S_DM1_CI5_RDATA_VLD)
+#define F_DM1_CI5_RDATA_VLD    V_DM1_CI5_RDATA_VLD(1U)
+
+#define S_CI6_ARB1_REQ    7
+#define V_CI6_ARB1_REQ(x) ((x) << S_CI6_ARB1_REQ)
+#define F_CI6_ARB1_REQ    V_CI6_ARB1_REQ(1U)
+
+#define S_ARB1_CI6_GNT    6
+#define V_ARB1_CI6_GNT(x) ((x) << S_ARB1_CI6_GNT)
+#define F_ARB1_CI6_GNT    V_ARB1_CI6_GNT(1U)
+
+#define S_CI6_DM1_WDATA_VLD    5
+#define V_CI6_DM1_WDATA_VLD(x) ((x) << S_CI6_DM1_WDATA_VLD)
+#define F_CI6_DM1_WDATA_VLD    V_CI6_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI6_RDATA_VLD    4
+#define V_DM1_CI6_RDATA_VLD(x) ((x) << S_DM1_CI6_RDATA_VLD)
+#define F_DM1_CI6_RDATA_VLD    V_DM1_CI6_RDATA_VLD(1U)
+
+#define S_CI7_ARB1_REQ    3
+#define V_CI7_ARB1_REQ(x) ((x) << S_CI7_ARB1_REQ)
+#define F_CI7_ARB1_REQ    V_CI7_ARB1_REQ(1U)
+
+#define S_ARB1_CI7_GNT    2
+#define V_ARB1_CI7_GNT(x) ((x) << S_ARB1_CI7_GNT)
+#define F_ARB1_CI7_GNT    V_ARB1_CI7_GNT(1U)
+
+#define S_CI7_DM1_WDATA_VLD    1
+#define V_CI7_DM1_WDATA_VLD(x) ((x) << S_CI7_DM1_WDATA_VLD)
+#define F_CI7_DM1_WDATA_VLD    V_CI7_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI7_RDATA_VLD    0
+#define V_DM1_CI7_RDATA_VLD(x) ((x) << S_DM1_CI7_RDATA_VLD)
+#define F_DM1_CI7_RDATA_VLD    V_DM1_CI7_RDATA_VLD(1U)
+
+#define A_MA_TARGET_2_ARBITER_INTERFACE_EXTERNAL_REG0 0xa00f
+
+#define S_CI0_ARB2_REQ    31
+#define V_CI0_ARB2_REQ(x) ((x) << S_CI0_ARB2_REQ)
+#define F_CI0_ARB2_REQ    V_CI0_ARB2_REQ(1U)
+
+#define S_ARB2_CI0_GNT    30
+#define V_ARB2_CI0_GNT(x) ((x) << S_ARB2_CI0_GNT)
+#define F_ARB2_CI0_GNT    V_ARB2_CI0_GNT(1U)
+
+#define S_CI0_DM2_WDATA_VLD    29
+#define V_CI0_DM2_WDATA_VLD(x) ((x) << S_CI0_DM2_WDATA_VLD)
+#define F_CI0_DM2_WDATA_VLD    V_CI0_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI0_RDATA_VLD    28
+#define V_DM2_CI0_RDATA_VLD(x) ((x) << S_DM2_CI0_RDATA_VLD)
+#define F_DM2_CI0_RDATA_VLD    V_DM2_CI0_RDATA_VLD(1U)
+
+#define S_CI1_ARB2_REQ    27
+#define V_CI1_ARB2_REQ(x) ((x) << S_CI1_ARB2_REQ)
+#define F_CI1_ARB2_REQ    V_CI1_ARB2_REQ(1U)
+
+#define S_ARB2_CI1_GNT    26
+#define V_ARB2_CI1_GNT(x) ((x) << S_ARB2_CI1_GNT)
+#define F_ARB2_CI1_GNT    V_ARB2_CI1_GNT(1U)
+
+#define S_CI1_DM2_WDATA_VLD    25
+#define V_CI1_DM2_WDATA_VLD(x) ((x) << S_CI1_DM2_WDATA_VLD)
+#define F_CI1_DM2_WDATA_VLD    V_CI1_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI1_RDATA_VLD    24
+#define V_DM2_CI1_RDATA_VLD(x) ((x) << S_DM2_CI1_RDATA_VLD)
+#define F_DM2_CI1_RDATA_VLD    V_DM2_CI1_RDATA_VLD(1U)
+
+#define S_CI2_ARB2_REQ    23
+#define V_CI2_ARB2_REQ(x) ((x) << S_CI2_ARB2_REQ)
+#define F_CI2_ARB2_REQ    V_CI2_ARB2_REQ(1U)
+
+#define S_ARB2_CI2_GNT    22
+#define V_ARB2_CI2_GNT(x) ((x) << S_ARB2_CI2_GNT)
+#define F_ARB2_CI2_GNT    V_ARB2_CI2_GNT(1U)
+
+#define S_CI2_DM2_WDATA_VLD    21
+#define V_CI2_DM2_WDATA_VLD(x) ((x) << S_CI2_DM2_WDATA_VLD)
+#define F_CI2_DM2_WDATA_VLD    V_CI2_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI2_RDATA_VLD    20
+#define V_DM2_CI2_RDATA_VLD(x) ((x) << S_DM2_CI2_RDATA_VLD)
+#define F_DM2_CI2_RDATA_VLD    V_DM2_CI2_RDATA_VLD(1U)
+
+#define S_CI3_ARB2_REQ    19
+#define V_CI3_ARB2_REQ(x) ((x) << S_CI3_ARB2_REQ)
+#define F_CI3_ARB2_REQ    V_CI3_ARB2_REQ(1U)
+
+#define S_ARB2_CI3_GNT    18
+#define V_ARB2_CI3_GNT(x) ((x) << S_ARB2_CI3_GNT)
+#define F_ARB2_CI3_GNT    V_ARB2_CI3_GNT(1U)
+
+#define S_CI3_DM2_WDATA_VLD    17
+#define V_CI3_DM2_WDATA_VLD(x) ((x) << S_CI3_DM2_WDATA_VLD)
+#define F_CI3_DM2_WDATA_VLD    V_CI3_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI3_RDATA_VLD    16
+#define V_DM2_CI3_RDATA_VLD(x) ((x) << S_DM2_CI3_RDATA_VLD)
+#define F_DM2_CI3_RDATA_VLD    V_DM2_CI3_RDATA_VLD(1U)
+
+#define S_CI4_ARB2_REQ    15
+#define V_CI4_ARB2_REQ(x) ((x) << S_CI4_ARB2_REQ)
+#define F_CI4_ARB2_REQ    V_CI4_ARB2_REQ(1U)
+
+#define S_ARB2_CI4_GNT    14
+#define V_ARB2_CI4_GNT(x) ((x) << S_ARB2_CI4_GNT)
+#define F_ARB2_CI4_GNT    V_ARB2_CI4_GNT(1U)
+
+#define S_CI4_DM2_WDATA_VLD    13
+#define V_CI4_DM2_WDATA_VLD(x) ((x) << S_CI4_DM2_WDATA_VLD)
+#define F_CI4_DM2_WDATA_VLD    V_CI4_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI4_RDATA_VLD    12
+#define V_DM2_CI4_RDATA_VLD(x) ((x) << S_DM2_CI4_RDATA_VLD)
+#define F_DM2_CI4_RDATA_VLD    V_DM2_CI4_RDATA_VLD(1U)
+
+#define S_CI5_ARB2_REQ    11
+#define V_CI5_ARB2_REQ(x) ((x) << S_CI5_ARB2_REQ)
+#define F_CI5_ARB2_REQ    V_CI5_ARB2_REQ(1U)
+
+#define S_ARB2_CI5_GNT    10
+#define V_ARB2_CI5_GNT(x) ((x) << S_ARB2_CI5_GNT)
+#define F_ARB2_CI5_GNT    V_ARB2_CI5_GNT(1U)
+
+#define S_CI5_DM2_WDATA_VLD    9
+#define V_CI5_DM2_WDATA_VLD(x) ((x) << S_CI5_DM2_WDATA_VLD)
+#define F_CI5_DM2_WDATA_VLD    V_CI5_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI5_RDATA_VLD    8
+#define V_DM2_CI5_RDATA_VLD(x) ((x) << S_DM2_CI5_RDATA_VLD)
+#define F_DM2_CI5_RDATA_VLD    V_DM2_CI5_RDATA_VLD(1U)
+
+#define S_CI6_ARB2_REQ    7
+#define V_CI6_ARB2_REQ(x) ((x) << S_CI6_ARB2_REQ)
+#define F_CI6_ARB2_REQ    V_CI6_ARB2_REQ(1U)
+
+#define S_ARB2_CI6_GNT    6
+#define V_ARB2_CI6_GNT(x) ((x) << S_ARB2_CI6_GNT)
+#define F_ARB2_CI6_GNT    V_ARB2_CI6_GNT(1U)
+
+#define S_CI6_DM2_WDATA_VLD    5
+#define V_CI6_DM2_WDATA_VLD(x) ((x) << S_CI6_DM2_WDATA_VLD)
+#define F_CI6_DM2_WDATA_VLD    V_CI6_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI6_RDATA_VLD    4
+#define V_DM2_CI6_RDATA_VLD(x) ((x) << S_DM2_CI6_RDATA_VLD)
+#define F_DM2_CI6_RDATA_VLD    V_DM2_CI6_RDATA_VLD(1U)
+
+#define S_CI7_ARB2_REQ    3
+#define V_CI7_ARB2_REQ(x) ((x) << S_CI7_ARB2_REQ)
+#define F_CI7_ARB2_REQ    V_CI7_ARB2_REQ(1U)
+
+#define S_ARB2_CI7_GNT    2
+#define V_ARB2_CI7_GNT(x) ((x) << S_ARB2_CI7_GNT)
+#define F_ARB2_CI7_GNT    V_ARB2_CI7_GNT(1U)
+
+#define S_CI7_DM2_WDATA_VLD    1
+#define V_CI7_DM2_WDATA_VLD(x) ((x) << S_CI7_DM2_WDATA_VLD)
+#define F_CI7_DM2_WDATA_VLD    V_CI7_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI7_RDATA_VLD    0
+#define V_DM2_CI7_RDATA_VLD(x) ((x) << S_DM2_CI7_RDATA_VLD)
+#define F_DM2_CI7_RDATA_VLD    V_DM2_CI7_RDATA_VLD(1U)
+
+#define A_MA_TARGET_3_ARBITER_INTERFACE_EXTERNAL_REG0 0xa010
+
+#define S_CI0_ARB3_REQ    31
+#define V_CI0_ARB3_REQ(x) ((x) << S_CI0_ARB3_REQ)
+#define F_CI0_ARB3_REQ    V_CI0_ARB3_REQ(1U)
+
+#define S_ARB3_CI0_GNT    30
+#define V_ARB3_CI0_GNT(x) ((x) << S_ARB3_CI0_GNT)
+#define F_ARB3_CI0_GNT    V_ARB3_CI0_GNT(1U)
+
+#define S_CI0_DM3_WDATA_VLD    29
+#define V_CI0_DM3_WDATA_VLD(x) ((x) << S_CI0_DM3_WDATA_VLD)
+#define F_CI0_DM3_WDATA_VLD    V_CI0_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI0_RDATA_VLD    28
+#define V_DM3_CI0_RDATA_VLD(x) ((x) << S_DM3_CI0_RDATA_VLD)
+#define F_DM3_CI0_RDATA_VLD    V_DM3_CI0_RDATA_VLD(1U)
+
+#define S_CI1_ARB3_REQ    27
+#define V_CI1_ARB3_REQ(x) ((x) << S_CI1_ARB3_REQ)
+#define F_CI1_ARB3_REQ    V_CI1_ARB3_REQ(1U)
+
+#define S_ARB3_CI1_GNT    26
+#define V_ARB3_CI1_GNT(x) ((x) << S_ARB3_CI1_GNT)
+#define F_ARB3_CI1_GNT    V_ARB3_CI1_GNT(1U)
+
+#define S_CI1_DM3_WDATA_VLD    25
+#define V_CI1_DM3_WDATA_VLD(x) ((x) << S_CI1_DM3_WDATA_VLD)
+#define F_CI1_DM3_WDATA_VLD    V_CI1_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI1_RDATA_VLD    24
+#define V_DM3_CI1_RDATA_VLD(x) ((x) << S_DM3_CI1_RDATA_VLD)
+#define F_DM3_CI1_RDATA_VLD    V_DM3_CI1_RDATA_VLD(1U)
+
+#define S_CI2_ARB3_REQ    23
+#define V_CI2_ARB3_REQ(x) ((x) << S_CI2_ARB3_REQ)
+#define F_CI2_ARB3_REQ    V_CI2_ARB3_REQ(1U)
+
+#define S_ARB3_CI2_GNT    22
+#define V_ARB3_CI2_GNT(x) ((x) << S_ARB3_CI2_GNT)
+#define F_ARB3_CI2_GNT    V_ARB3_CI2_GNT(1U)
+
+#define S_CI2_DM3_WDATA_VLD    21
+#define V_CI2_DM3_WDATA_VLD(x) ((x) << S_CI2_DM3_WDATA_VLD)
+#define F_CI2_DM3_WDATA_VLD    V_CI2_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI2_RDATA_VLD    20
+#define V_DM3_CI2_RDATA_VLD(x) ((x) << S_DM3_CI2_RDATA_VLD)
+#define F_DM3_CI2_RDATA_VLD    V_DM3_CI2_RDATA_VLD(1U)
+
+#define S_CI3_ARB3_REQ    19
+#define V_CI3_ARB3_REQ(x) ((x) << S_CI3_ARB3_REQ)
+#define F_CI3_ARB3_REQ    V_CI3_ARB3_REQ(1U)
+
+#define S_ARB3_CI3_GNT    18
+#define V_ARB3_CI3_GNT(x) ((x) << S_ARB3_CI3_GNT)
+#define F_ARB3_CI3_GNT    V_ARB3_CI3_GNT(1U)
+
+#define S_CI3_DM3_WDATA_VLD    17
+#define V_CI3_DM3_WDATA_VLD(x) ((x) << S_CI3_DM3_WDATA_VLD)
+#define F_CI3_DM3_WDATA_VLD    V_CI3_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI3_RDATA_VLD    16
+#define V_DM3_CI3_RDATA_VLD(x) ((x) << S_DM3_CI3_RDATA_VLD)
+#define F_DM3_CI3_RDATA_VLD    V_DM3_CI3_RDATA_VLD(1U)
+
+#define S_CI4_ARB3_REQ    15
+#define V_CI4_ARB3_REQ(x) ((x) << S_CI4_ARB3_REQ)
+#define F_CI4_ARB3_REQ    V_CI4_ARB3_REQ(1U)
+
+#define S_ARB3_CI4_GNT    14
+#define V_ARB3_CI4_GNT(x) ((x) << S_ARB3_CI4_GNT)
+#define F_ARB3_CI4_GNT    V_ARB3_CI4_GNT(1U)
+
+#define S_CI4_DM3_WDATA_VLD    13
+#define V_CI4_DM3_WDATA_VLD(x) ((x) << S_CI4_DM3_WDATA_VLD)
+#define F_CI4_DM3_WDATA_VLD    V_CI4_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI4_RDATA_VLD    12
+#define V_DM3_CI4_RDATA_VLD(x) ((x) << S_DM3_CI4_RDATA_VLD)
+#define F_DM3_CI4_RDATA_VLD    V_DM3_CI4_RDATA_VLD(1U)
+
+#define S_CI5_ARB3_REQ    11
+#define V_CI5_ARB3_REQ(x) ((x) << S_CI5_ARB3_REQ)
+#define F_CI5_ARB3_REQ    V_CI5_ARB3_REQ(1U)
+
+#define S_ARB3_CI5_GNT    10
+#define V_ARB3_CI5_GNT(x) ((x) << S_ARB3_CI5_GNT)
+#define F_ARB3_CI5_GNT    V_ARB3_CI5_GNT(1U)
+
+#define S_CI5_DM3_WDATA_VLD    9
+#define V_CI5_DM3_WDATA_VLD(x) ((x) << S_CI5_DM3_WDATA_VLD)
+#define F_CI5_DM3_WDATA_VLD    V_CI5_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI5_RDATA_VLD    8
+#define V_DM3_CI5_RDATA_VLD(x) ((x) << S_DM3_CI5_RDATA_VLD)
+#define F_DM3_CI5_RDATA_VLD    V_DM3_CI5_RDATA_VLD(1U)
+
+#define S_CI6_ARB3_REQ    7
+#define V_CI6_ARB3_REQ(x) ((x) << S_CI6_ARB3_REQ)
+#define F_CI6_ARB3_REQ    V_CI6_ARB3_REQ(1U)
+
+#define S_ARB3_CI6_GNT    6
+#define V_ARB3_CI6_GNT(x) ((x) << S_ARB3_CI6_GNT)
+#define F_ARB3_CI6_GNT    V_ARB3_CI6_GNT(1U)
+
+#define S_CI6_DM3_WDATA_VLD    5
+#define V_CI6_DM3_WDATA_VLD(x) ((x) << S_CI6_DM3_WDATA_VLD)
+#define F_CI6_DM3_WDATA_VLD    V_CI6_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI6_RDATA_VLD    4
+#define V_DM3_CI6_RDATA_VLD(x) ((x) << S_DM3_CI6_RDATA_VLD)
+#define F_DM3_CI6_RDATA_VLD    V_DM3_CI6_RDATA_VLD(1U)
+
+#define S_CI7_ARB3_REQ    3
+#define V_CI7_ARB3_REQ(x) ((x) << S_CI7_ARB3_REQ)
+#define F_CI7_ARB3_REQ    V_CI7_ARB3_REQ(1U)
+
+#define S_ARB3_CI7_GNT    2
+#define V_ARB3_CI7_GNT(x) ((x) << S_ARB3_CI7_GNT)
+#define F_ARB3_CI7_GNT    V_ARB3_CI7_GNT(1U)
+
+#define S_CI7_DM3_WDATA_VLD    1
+#define V_CI7_DM3_WDATA_VLD(x) ((x) << S_CI7_DM3_WDATA_VLD)
+#define F_CI7_DM3_WDATA_VLD    V_CI7_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI7_RDATA_VLD    0
+#define V_DM3_CI7_RDATA_VLD(x) ((x) << S_DM3_CI7_RDATA_VLD)
+#define F_DM3_CI7_RDATA_VLD    V_DM3_CI7_RDATA_VLD(1U)
+
+#define A_MA_MA_DEBUG_SIGNATURE_LTL_END 0xa011
+#define A_MA_MA_DEBUG_SIGNATURE_BIG_END_INVERSE 0xa012
+#define A_MA_TARGET_0_ARBITER_INTERFACE_EXTERNAL_REG1 0xa013
+
+#define S_CI8_ARB0_REQ    31
+#define V_CI8_ARB0_REQ(x) ((x) << S_CI8_ARB0_REQ)
+#define F_CI8_ARB0_REQ    V_CI8_ARB0_REQ(1U)
+
+#define S_ARB0_CI8_GNT    30
+#define V_ARB0_CI8_GNT(x) ((x) << S_ARB0_CI8_GNT)
+#define F_ARB0_CI8_GNT    V_ARB0_CI8_GNT(1U)
+
+#define S_CI8_DM0_WDATA_VLD    29
+#define V_CI8_DM0_WDATA_VLD(x) ((x) << S_CI8_DM0_WDATA_VLD)
+#define F_CI8_DM0_WDATA_VLD    V_CI8_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI8_RDATA_VLD    28
+#define V_DM0_CI8_RDATA_VLD(x) ((x) << S_DM0_CI8_RDATA_VLD)
+#define F_DM0_CI8_RDATA_VLD    V_DM0_CI8_RDATA_VLD(1U)
+
+#define S_CI9_ARB0_REQ    27
+#define V_CI9_ARB0_REQ(x) ((x) << S_CI9_ARB0_REQ)
+#define F_CI9_ARB0_REQ    V_CI9_ARB0_REQ(1U)
+
+#define S_ARB0_CI9_GNT    26
+#define V_ARB0_CI9_GNT(x) ((x) << S_ARB0_CI9_GNT)
+#define F_ARB0_CI9_GNT    V_ARB0_CI9_GNT(1U)
+
+#define S_CI9_DM0_WDATA_VLD    25
+#define V_CI9_DM0_WDATA_VLD(x) ((x) << S_CI9_DM0_WDATA_VLD)
+#define F_CI9_DM0_WDATA_VLD    V_CI9_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI9_RDATA_VLD    24
+#define V_DM0_CI9_RDATA_VLD(x) ((x) << S_DM0_CI9_RDATA_VLD)
+#define F_DM0_CI9_RDATA_VLD    V_DM0_CI9_RDATA_VLD(1U)
+
+#define S_CI10_ARB0_REQ    23
+#define V_CI10_ARB0_REQ(x) ((x) << S_CI10_ARB0_REQ)
+#define F_CI10_ARB0_REQ    V_CI10_ARB0_REQ(1U)
+
+#define S_ARB0_CI10_GNT    22
+#define V_ARB0_CI10_GNT(x) ((x) << S_ARB0_CI10_GNT)
+#define F_ARB0_CI10_GNT    V_ARB0_CI10_GNT(1U)
+
+#define S_CI10_DM0_WDATA_VLD    21
+#define V_CI10_DM0_WDATA_VLD(x) ((x) << S_CI10_DM0_WDATA_VLD)
+#define F_CI10_DM0_WDATA_VLD    V_CI10_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI10_RDATA_VLD    20
+#define V_DM0_CI10_RDATA_VLD(x) ((x) << S_DM0_CI10_RDATA_VLD)
+#define F_DM0_CI10_RDATA_VLD    V_DM0_CI10_RDATA_VLD(1U)
+
+#define S_CI11_ARB0_REQ    19
+#define V_CI11_ARB0_REQ(x) ((x) << S_CI11_ARB0_REQ)
+#define F_CI11_ARB0_REQ    V_CI11_ARB0_REQ(1U)
+
+#define S_ARB0_CI11_GNT    18
+#define V_ARB0_CI11_GNT(x) ((x) << S_ARB0_CI11_GNT)
+#define F_ARB0_CI11_GNT    V_ARB0_CI11_GNT(1U)
+
+#define S_CI11_DM0_WDATA_VLD    17
+#define V_CI11_DM0_WDATA_VLD(x) ((x) << S_CI11_DM0_WDATA_VLD)
+#define F_CI11_DM0_WDATA_VLD    V_CI11_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI11_RDATA_VLD    16
+#define V_DM0_CI11_RDATA_VLD(x) ((x) << S_DM0_CI11_RDATA_VLD)
+#define F_DM0_CI11_RDATA_VLD    V_DM0_CI11_RDATA_VLD(1U)
+
+#define S_CI12_ARB0_REQ    15
+#define V_CI12_ARB0_REQ(x) ((x) << S_CI12_ARB0_REQ)
+#define F_CI12_ARB0_REQ    V_CI12_ARB0_REQ(1U)
+
+#define S_ARB0_CI12_GNT    14
+#define V_ARB0_CI12_GNT(x) ((x) << S_ARB0_CI12_GNT)
+#define F_ARB0_CI12_GNT    V_ARB0_CI12_GNT(1U)
+
+#define S_CI12_DM0_WDATA_VLD    13
+#define V_CI12_DM0_WDATA_VLD(x) ((x) << S_CI12_DM0_WDATA_VLD)
+#define F_CI12_DM0_WDATA_VLD    V_CI12_DM0_WDATA_VLD(1U)
+
+#define S_DM0_CI12_RDATA_VLD    12
+#define V_DM0_CI12_RDATA_VLD(x) ((x) << S_DM0_CI12_RDATA_VLD)
+#define F_DM0_CI12_RDATA_VLD    V_DM0_CI12_RDATA_VLD(1U)
+
+#define A_MA_TARGET_1_ARBITER_INTERFACE_EXTERNAL_REG1 0xa014
+
+#define S_CI8_ARB1_REQ    31
+#define V_CI8_ARB1_REQ(x) ((x) << S_CI8_ARB1_REQ)
+#define F_CI8_ARB1_REQ    V_CI8_ARB1_REQ(1U)
+
+#define S_ARB1_CI8_GNT    30
+#define V_ARB1_CI8_GNT(x) ((x) << S_ARB1_CI8_GNT)
+#define F_ARB1_CI8_GNT    V_ARB1_CI8_GNT(1U)
+
+#define S_CI8_DM1_WDATA_VLD    29
+#define V_CI8_DM1_WDATA_VLD(x) ((x) << S_CI8_DM1_WDATA_VLD)
+#define F_CI8_DM1_WDATA_VLD    V_CI8_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI8_RDATA_VLD    28
+#define V_DM1_CI8_RDATA_VLD(x) ((x) << S_DM1_CI8_RDATA_VLD)
+#define F_DM1_CI8_RDATA_VLD    V_DM1_CI8_RDATA_VLD(1U)
+
+#define S_CI9_ARB1_REQ    27
+#define V_CI9_ARB1_REQ(x) ((x) << S_CI9_ARB1_REQ)
+#define F_CI9_ARB1_REQ    V_CI9_ARB1_REQ(1U)
+
+#define S_ARB1_CI9_GNT    26
+#define V_ARB1_CI9_GNT(x) ((x) << S_ARB1_CI9_GNT)
+#define F_ARB1_CI9_GNT    V_ARB1_CI9_GNT(1U)
+
+#define S_CI9_DM1_WDATA_VLD    25
+#define V_CI9_DM1_WDATA_VLD(x) ((x) << S_CI9_DM1_WDATA_VLD)
+#define F_CI9_DM1_WDATA_VLD    V_CI9_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI9_RDATA_VLD    24
+#define V_DM1_CI9_RDATA_VLD(x) ((x) << S_DM1_CI9_RDATA_VLD)
+#define F_DM1_CI9_RDATA_VLD    V_DM1_CI9_RDATA_VLD(1U)
+
+#define S_CI10_ARB1_REQ    23
+#define V_CI10_ARB1_REQ(x) ((x) << S_CI10_ARB1_REQ)
+#define F_CI10_ARB1_REQ    V_CI10_ARB1_REQ(1U)
+
+#define S_ARB1_CI10_GNT    22
+#define V_ARB1_CI10_GNT(x) ((x) << S_ARB1_CI10_GNT)
+#define F_ARB1_CI10_GNT    V_ARB1_CI10_GNT(1U)
+
+#define S_CI10_DM1_WDATA_VLD    21
+#define V_CI10_DM1_WDATA_VLD(x) ((x) << S_CI10_DM1_WDATA_VLD)
+#define F_CI10_DM1_WDATA_VLD    V_CI10_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI10_RDATA_VLD    20
+#define V_DM1_CI10_RDATA_VLD(x) ((x) << S_DM1_CI10_RDATA_VLD)
+#define F_DM1_CI10_RDATA_VLD    V_DM1_CI10_RDATA_VLD(1U)
+
+#define S_CI11_ARB1_REQ    19
+#define V_CI11_ARB1_REQ(x) ((x) << S_CI11_ARB1_REQ)
+#define F_CI11_ARB1_REQ    V_CI11_ARB1_REQ(1U)
+
+#define S_ARB1_CI11_GNT    18
+#define V_ARB1_CI11_GNT(x) ((x) << S_ARB1_CI11_GNT)
+#define F_ARB1_CI11_GNT    V_ARB1_CI11_GNT(1U)
+
+#define S_CI11_DM1_WDATA_VLD    17
+#define V_CI11_DM1_WDATA_VLD(x) ((x) << S_CI11_DM1_WDATA_VLD)
+#define F_CI11_DM1_WDATA_VLD    V_CI11_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI11_RDATA_VLD    16
+#define V_DM1_CI11_RDATA_VLD(x) ((x) << S_DM1_CI11_RDATA_VLD)
+#define F_DM1_CI11_RDATA_VLD    V_DM1_CI11_RDATA_VLD(1U)
+
+#define S_CI12_ARB1_REQ    15
+#define V_CI12_ARB1_REQ(x) ((x) << S_CI12_ARB1_REQ)
+#define F_CI12_ARB1_REQ    V_CI12_ARB1_REQ(1U)
+
+#define S_ARB1_CI12_GNT    14
+#define V_ARB1_CI12_GNT(x) ((x) << S_ARB1_CI12_GNT)
+#define F_ARB1_CI12_GNT    V_ARB1_CI12_GNT(1U)
+
+#define S_CI12_DM1_WDATA_VLD    13
+#define V_CI12_DM1_WDATA_VLD(x) ((x) << S_CI12_DM1_WDATA_VLD)
+#define F_CI12_DM1_WDATA_VLD    V_CI12_DM1_WDATA_VLD(1U)
+
+#define S_DM1_CI12_RDATA_VLD    12
+#define V_DM1_CI12_RDATA_VLD(x) ((x) << S_DM1_CI12_RDATA_VLD)
+#define F_DM1_CI12_RDATA_VLD    V_DM1_CI12_RDATA_VLD(1U)
+
+#define A_MA_TARGET_2_ARBITER_INTERFACE_EXTERNAL_REG1 0xa015
+
+#define S_CI8_ARB2_REQ    31
+#define V_CI8_ARB2_REQ(x) ((x) << S_CI8_ARB2_REQ)
+#define F_CI8_ARB2_REQ    V_CI8_ARB2_REQ(1U)
+
+#define S_ARB2_CI8_GNT    30
+#define V_ARB2_CI8_GNT(x) ((x) << S_ARB2_CI8_GNT)
+#define F_ARB2_CI8_GNT    V_ARB2_CI8_GNT(1U)
+
+#define S_CI8_DM2_WDATA_VLD    29
+#define V_CI8_DM2_WDATA_VLD(x) ((x) << S_CI8_DM2_WDATA_VLD)
+#define F_CI8_DM2_WDATA_VLD    V_CI8_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI8_RDATA_VLD    28
+#define V_DM2_CI8_RDATA_VLD(x) ((x) << S_DM2_CI8_RDATA_VLD)
+#define F_DM2_CI8_RDATA_VLD    V_DM2_CI8_RDATA_VLD(1U)
+
+#define S_CI9_ARB2_REQ    27
+#define V_CI9_ARB2_REQ(x) ((x) << S_CI9_ARB2_REQ)
+#define F_CI9_ARB2_REQ    V_CI9_ARB2_REQ(1U)
+
+#define S_ARB2_CI9_GNT    26
+#define V_ARB2_CI9_GNT(x) ((x) << S_ARB2_CI9_GNT)
+#define F_ARB2_CI9_GNT    V_ARB2_CI9_GNT(1U)
+
+#define S_CI9_DM2_WDATA_VLD    25
+#define V_CI9_DM2_WDATA_VLD(x) ((x) << S_CI9_DM2_WDATA_VLD)
+#define F_CI9_DM2_WDATA_VLD    V_CI9_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI9_RDATA_VLD    24
+#define V_DM2_CI9_RDATA_VLD(x) ((x) << S_DM2_CI9_RDATA_VLD)
+#define F_DM2_CI9_RDATA_VLD    V_DM2_CI9_RDATA_VLD(1U)
+
+#define S_CI10_ARB2_REQ    23
+#define V_CI10_ARB2_REQ(x) ((x) << S_CI10_ARB2_REQ)
+#define F_CI10_ARB2_REQ    V_CI10_ARB2_REQ(1U)
+
+#define S_ARB2_CI10_GNT    22
+#define V_ARB2_CI10_GNT(x) ((x) << S_ARB2_CI10_GNT)
+#define F_ARB2_CI10_GNT    V_ARB2_CI10_GNT(1U)
+
+#define S_CI10_DM2_WDATA_VLD    21
+#define V_CI10_DM2_WDATA_VLD(x) ((x) << S_CI10_DM2_WDATA_VLD)
+#define F_CI10_DM2_WDATA_VLD    V_CI10_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI10_RDATA_VLD    20
+#define V_DM2_CI10_RDATA_VLD(x) ((x) << S_DM2_CI10_RDATA_VLD)
+#define F_DM2_CI10_RDATA_VLD    V_DM2_CI10_RDATA_VLD(1U)
+
+#define S_CI11_ARB2_REQ    19
+#define V_CI11_ARB2_REQ(x) ((x) << S_CI11_ARB2_REQ)
+#define F_CI11_ARB2_REQ    V_CI11_ARB2_REQ(1U)
+
+#define S_ARB2_CI11_GNT    18
+#define V_ARB2_CI11_GNT(x) ((x) << S_ARB2_CI11_GNT)
+#define F_ARB2_CI11_GNT    V_ARB2_CI11_GNT(1U)
+
+#define S_CI11_DM2_WDATA_VLD    17
+#define V_CI11_DM2_WDATA_VLD(x) ((x) << S_CI11_DM2_WDATA_VLD)
+#define F_CI11_DM2_WDATA_VLD    V_CI11_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI11_RDATA_VLD    16
+#define V_DM2_CI11_RDATA_VLD(x) ((x) << S_DM2_CI11_RDATA_VLD)
+#define F_DM2_CI11_RDATA_VLD    V_DM2_CI11_RDATA_VLD(1U)
+
+#define S_CI12_ARB2_REQ    15
+#define V_CI12_ARB2_REQ(x) ((x) << S_CI12_ARB2_REQ)
+#define F_CI12_ARB2_REQ    V_CI12_ARB2_REQ(1U)
+
+#define S_ARB2_CI12_GNT    14
+#define V_ARB2_CI12_GNT(x) ((x) << S_ARB2_CI12_GNT)
+#define F_ARB2_CI12_GNT    V_ARB2_CI12_GNT(1U)
+
+#define S_CI12_DM2_WDATA_VLD    13
+#define V_CI12_DM2_WDATA_VLD(x) ((x) << S_CI12_DM2_WDATA_VLD)
+#define F_CI12_DM2_WDATA_VLD    V_CI12_DM2_WDATA_VLD(1U)
+
+#define S_DM2_CI12_RDATA_VLD    12
+#define V_DM2_CI12_RDATA_VLD(x) ((x) << S_DM2_CI12_RDATA_VLD)
+#define F_DM2_CI12_RDATA_VLD    V_DM2_CI12_RDATA_VLD(1U)
+
+#define A_MA_TARGET_3_ARBITER_INTERFACE_EXTERNAL_REG1 0xa016
+
+#define S_CI8_ARB3_REQ    31
+#define V_CI8_ARB3_REQ(x) ((x) << S_CI8_ARB3_REQ)
+#define F_CI8_ARB3_REQ    V_CI8_ARB3_REQ(1U)
+
+#define S_ARB3_CI8_GNT    30
+#define V_ARB3_CI8_GNT(x) ((x) << S_ARB3_CI8_GNT)
+#define F_ARB3_CI8_GNT    V_ARB3_CI8_GNT(1U)
+
+#define S_CI8_DM3_WDATA_VLD    29
+#define V_CI8_DM3_WDATA_VLD(x) ((x) << S_CI8_DM3_WDATA_VLD)
+#define F_CI8_DM3_WDATA_VLD    V_CI8_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI8_RDATA_VLD    28
+#define V_DM3_CI8_RDATA_VLD(x) ((x) << S_DM3_CI8_RDATA_VLD)
+#define F_DM3_CI8_RDATA_VLD    V_DM3_CI8_RDATA_VLD(1U)
+
+#define S_CI9_ARB3_REQ    27
+#define V_CI9_ARB3_REQ(x) ((x) << S_CI9_ARB3_REQ)
+#define F_CI9_ARB3_REQ    V_CI9_ARB3_REQ(1U)
+
+#define S_ARB3_CI9_GNT    26
+#define V_ARB3_CI9_GNT(x) ((x) << S_ARB3_CI9_GNT)
+#define F_ARB3_CI9_GNT    V_ARB3_CI9_GNT(1U)
+
+#define S_CI9_DM3_WDATA_VLD    25
+#define V_CI9_DM3_WDATA_VLD(x) ((x) << S_CI9_DM3_WDATA_VLD)
+#define F_CI9_DM3_WDATA_VLD    V_CI9_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI9_RDATA_VLD    24
+#define V_DM3_CI9_RDATA_VLD(x) ((x) << S_DM3_CI9_RDATA_VLD)
+#define F_DM3_CI9_RDATA_VLD    V_DM3_CI9_RDATA_VLD(1U)
+
+#define S_CI10_ARB3_REQ    23
+#define V_CI10_ARB3_REQ(x) ((x) << S_CI10_ARB3_REQ)
+#define F_CI10_ARB3_REQ    V_CI10_ARB3_REQ(1U)
+
+#define S_ARB3_CI10_GNT    22
+#define V_ARB3_CI10_GNT(x) ((x) << S_ARB3_CI10_GNT)
+#define F_ARB3_CI10_GNT    V_ARB3_CI10_GNT(1U)
+
+#define S_CI10_DM3_WDATA_VLD    21
+#define V_CI10_DM3_WDATA_VLD(x) ((x) << S_CI10_DM3_WDATA_VLD)
+#define F_CI10_DM3_WDATA_VLD    V_CI10_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI10_RDATA_VLD    20
+#define V_DM3_CI10_RDATA_VLD(x) ((x) << S_DM3_CI10_RDATA_VLD)
+#define F_DM3_CI10_RDATA_VLD    V_DM3_CI10_RDATA_VLD(1U)
+
+#define S_CI11_ARB3_REQ    19
+#define V_CI11_ARB3_REQ(x) ((x) << S_CI11_ARB3_REQ)
+#define F_CI11_ARB3_REQ    V_CI11_ARB3_REQ(1U)
+
+#define S_ARB3_CI11_GNT    18
+#define V_ARB3_CI11_GNT(x) ((x) << S_ARB3_CI11_GNT)
+#define F_ARB3_CI11_GNT    V_ARB3_CI11_GNT(1U)
+
+#define S_CI11_DM3_WDATA_VLD    17
+#define V_CI11_DM3_WDATA_VLD(x) ((x) << S_CI11_DM3_WDATA_VLD)
+#define F_CI11_DM3_WDATA_VLD    V_CI11_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI11_RDATA_VLD    16
+#define V_DM3_CI11_RDATA_VLD(x) ((x) << S_DM3_CI11_RDATA_VLD)
+#define F_DM3_CI11_RDATA_VLD    V_DM3_CI11_RDATA_VLD(1U)
+
+#define S_CI12_ARB3_REQ    15
+#define V_CI12_ARB3_REQ(x) ((x) << S_CI12_ARB3_REQ)
+#define F_CI12_ARB3_REQ    V_CI12_ARB3_REQ(1U)
+
+#define S_ARB3_CI12_GNT    14
+#define V_ARB3_CI12_GNT(x) ((x) << S_ARB3_CI12_GNT)
+#define F_ARB3_CI12_GNT    V_ARB3_CI12_GNT(1U)
+
+#define S_CI12_DM3_WDATA_VLD    13
+#define V_CI12_DM3_WDATA_VLD(x) ((x) << S_CI12_DM3_WDATA_VLD)
+#define F_CI12_DM3_WDATA_VLD    V_CI12_DM3_WDATA_VLD(1U)
+
+#define S_DM3_CI12_RDATA_VLD    12
+#define V_DM3_CI12_RDATA_VLD(x) ((x) << S_DM3_CI12_RDATA_VLD)
+#define F_DM3_CI12_RDATA_VLD    V_DM3_CI12_RDATA_VLD(1U)
+
+#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG0 0xa400
+
+#define S_CMD_IN_FIFO_CNT0    30
+#define M_CMD_IN_FIFO_CNT0    0x3U
+#define V_CMD_IN_FIFO_CNT0(x) ((x) << S_CMD_IN_FIFO_CNT0)
+#define G_CMD_IN_FIFO_CNT0(x) (((x) >> S_CMD_IN_FIFO_CNT0) & M_CMD_IN_FIFO_CNT0)
+
+#define S_CMD_SPLIT_FIFO_CNT0    28
+#define M_CMD_SPLIT_FIFO_CNT0    0x3U
+#define V_CMD_SPLIT_FIFO_CNT0(x) ((x) << S_CMD_SPLIT_FIFO_CNT0)
+#define G_CMD_SPLIT_FIFO_CNT0(x) (((x) >> S_CMD_SPLIT_FIFO_CNT0) & M_CMD_SPLIT_FIFO_CNT0)
+
+#define S_CMD_THROTTLE_FIFO_CNT0    22
+#define M_CMD_THROTTLE_FIFO_CNT0    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT0(x) ((x) << S_CMD_THROTTLE_FIFO_CNT0)
+#define G_CMD_THROTTLE_FIFO_CNT0(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT0) & M_CMD_THROTTLE_FIFO_CNT0)
+
+#define S_RD_CHNL_FIFO_CNT0    15
+#define M_RD_CHNL_FIFO_CNT0    0x7fU
+#define V_RD_CHNL_FIFO_CNT0(x) ((x) << S_RD_CHNL_FIFO_CNT0)
+#define G_RD_CHNL_FIFO_CNT0(x) (((x) >> S_RD_CHNL_FIFO_CNT0) & M_RD_CHNL_FIFO_CNT0)
+
+#define S_RD_DATA_EXT_FIFO_CNT0    13
+#define M_RD_DATA_EXT_FIFO_CNT0    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT0(x) ((x) << S_RD_DATA_EXT_FIFO_CNT0)
+#define G_RD_DATA_EXT_FIFO_CNT0(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT0) & M_RD_DATA_EXT_FIFO_CNT0)
+
+#define S_RD_DATA_512B_FIFO_CNT0    5
+#define M_RD_DATA_512B_FIFO_CNT0    0xffU
+#define V_RD_DATA_512B_FIFO_CNT0(x) ((x) << S_RD_DATA_512B_FIFO_CNT0)
+#define G_RD_DATA_512B_FIFO_CNT0(x) (((x) >> S_RD_DATA_512B_FIFO_CNT0) & M_RD_DATA_512B_FIFO_CNT0)
+
+#define S_RD_REQ_TAG_FIFO_CNT0    1
+#define M_RD_REQ_TAG_FIFO_CNT0    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT0(x) ((x) << S_RD_REQ_TAG_FIFO_CNT0)
+#define G_RD_REQ_TAG_FIFO_CNT0(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT0) & M_RD_REQ_TAG_FIFO_CNT0)
+
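+/*
+ * Illustrative decode of the per-client FIFO occupancy fields above
+ * (not part of the generated header; `v' is a hypothetical raw 32-bit
+ * value read from A_MA_SGE_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG0):
+ *
+ *	uint32_t v = ...;	/* raw register contents, however obtained *\/
+ *	u_int cmd_in   = G_CMD_IN_FIFO_CNT0(v);		/* bits 31:30 *\/
+ *	u_int throttle = G_CMD_THROTTLE_FIFO_CNT0(v);	/* bits 27:22 *\/
+ *	u_int rd_512b  = G_RD_DATA_512B_FIFO_CNT0(v);	/* bits 12:5 *\/
+ */
+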
+#define A_MA_SGE_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG0 0xa401
+
+#define S_CMD_IN_FIFO_CNT1    30
+#define M_CMD_IN_FIFO_CNT1    0x3U
+#define V_CMD_IN_FIFO_CNT1(x) ((x) << S_CMD_IN_FIFO_CNT1)
+#define G_CMD_IN_FIFO_CNT1(x) (((x) >> S_CMD_IN_FIFO_CNT1) & M_CMD_IN_FIFO_CNT1)
+
+#define S_CMD_SPLIT_FIFO_CNT1    28
+#define M_CMD_SPLIT_FIFO_CNT1    0x3U
+#define V_CMD_SPLIT_FIFO_CNT1(x) ((x) << S_CMD_SPLIT_FIFO_CNT1)
+#define G_CMD_SPLIT_FIFO_CNT1(x) (((x) >> S_CMD_SPLIT_FIFO_CNT1) & M_CMD_SPLIT_FIFO_CNT1)
+
+#define S_CMD_THROTTLE_FIFO_CNT1    22
+#define M_CMD_THROTTLE_FIFO_CNT1    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT1(x) ((x) << S_CMD_THROTTLE_FIFO_CNT1)
+#define G_CMD_THROTTLE_FIFO_CNT1(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT1) & M_CMD_THROTTLE_FIFO_CNT1)
+
+#define S_RD_CHNL_FIFO_CNT1    15
+#define M_RD_CHNL_FIFO_CNT1    0x7fU
+#define V_RD_CHNL_FIFO_CNT1(x) ((x) << S_RD_CHNL_FIFO_CNT1)
+#define G_RD_CHNL_FIFO_CNT1(x) (((x) >> S_RD_CHNL_FIFO_CNT1) & M_RD_CHNL_FIFO_CNT1)
+
+#define S_RD_DATA_EXT_FIFO_CNT1    13
+#define M_RD_DATA_EXT_FIFO_CNT1    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT1(x) ((x) << S_RD_DATA_EXT_FIFO_CNT1)
+#define G_RD_DATA_EXT_FIFO_CNT1(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT1) & M_RD_DATA_EXT_FIFO_CNT1)
+
+#define S_RD_DATA_512B_FIFO_CNT1    5
+#define M_RD_DATA_512B_FIFO_CNT1    0xffU
+#define V_RD_DATA_512B_FIFO_CNT1(x) ((x) << S_RD_DATA_512B_FIFO_CNT1)
+#define G_RD_DATA_512B_FIFO_CNT1(x) (((x) >> S_RD_DATA_512B_FIFO_CNT1) & M_RD_DATA_512B_FIFO_CNT1)
+
+#define S_RD_REQ_TAG_FIFO_CNT1    1
+#define M_RD_REQ_TAG_FIFO_CNT1    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT1(x) ((x) << S_RD_REQ_TAG_FIFO_CNT1)
+#define G_RD_REQ_TAG_FIFO_CNT1(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT1) & M_RD_REQ_TAG_FIFO_CNT1)
+
+#define A_MA_ULP_TX_CLIENT_INTERFACE_INTERNAL_REG0 0xa402
+
+#define S_CMD_IN_FIFO_CNT2    30
+#define M_CMD_IN_FIFO_CNT2    0x3U
+#define V_CMD_IN_FIFO_CNT2(x) ((x) << S_CMD_IN_FIFO_CNT2)
+#define G_CMD_IN_FIFO_CNT2(x) (((x) >> S_CMD_IN_FIFO_CNT2) & M_CMD_IN_FIFO_CNT2)
+
+#define S_CMD_SPLIT_FIFO_CNT2    28
+#define M_CMD_SPLIT_FIFO_CNT2    0x3U
+#define V_CMD_SPLIT_FIFO_CNT2(x) ((x) << S_CMD_SPLIT_FIFO_CNT2)
+#define G_CMD_SPLIT_FIFO_CNT2(x) (((x) >> S_CMD_SPLIT_FIFO_CNT2) & M_CMD_SPLIT_FIFO_CNT2)
+
+#define S_CMD_THROTTLE_FIFO_CNT2    22
+#define M_CMD_THROTTLE_FIFO_CNT2    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT2(x) ((x) << S_CMD_THROTTLE_FIFO_CNT2)
+#define G_CMD_THROTTLE_FIFO_CNT2(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT2) & M_CMD_THROTTLE_FIFO_CNT2)
+
+#define S_RD_CHNL_FIFO_CNT2    15
+#define M_RD_CHNL_FIFO_CNT2    0x7fU
+#define V_RD_CHNL_FIFO_CNT2(x) ((x) << S_RD_CHNL_FIFO_CNT2)
+#define G_RD_CHNL_FIFO_CNT2(x) (((x) >> S_RD_CHNL_FIFO_CNT2) & M_RD_CHNL_FIFO_CNT2)
+
+#define S_RD_DATA_EXT_FIFO_CNT2    13
+#define M_RD_DATA_EXT_FIFO_CNT2    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT2(x) ((x) << S_RD_DATA_EXT_FIFO_CNT2)
+#define G_RD_DATA_EXT_FIFO_CNT2(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT2) & M_RD_DATA_EXT_FIFO_CNT2)
+
+#define S_RD_DATA_512B_FIFO_CNT2    5
+#define M_RD_DATA_512B_FIFO_CNT2    0xffU
+#define V_RD_DATA_512B_FIFO_CNT2(x) ((x) << S_RD_DATA_512B_FIFO_CNT2)
+#define G_RD_DATA_512B_FIFO_CNT2(x) (((x) >> S_RD_DATA_512B_FIFO_CNT2) & M_RD_DATA_512B_FIFO_CNT2)
+
+#define S_RD_REQ_TAG_FIFO_CNT2    1
+#define M_RD_REQ_TAG_FIFO_CNT2    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT2(x) ((x) << S_RD_REQ_TAG_FIFO_CNT2)
+#define G_RD_REQ_TAG_FIFO_CNT2(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT2) & M_RD_REQ_TAG_FIFO_CNT2)
+
+#define A_MA_ULP_RX_CLIENT_INTERFACE_INTERNAL_REG0 0xa403
+
+#define S_CMD_IN_FIFO_CNT3    30
+#define M_CMD_IN_FIFO_CNT3    0x3U
+#define V_CMD_IN_FIFO_CNT3(x) ((x) << S_CMD_IN_FIFO_CNT3)
+#define G_CMD_IN_FIFO_CNT3(x) (((x) >> S_CMD_IN_FIFO_CNT3) & M_CMD_IN_FIFO_CNT3)
+
+#define S_CMD_SPLIT_FIFO_CNT3    28
+#define M_CMD_SPLIT_FIFO_CNT3    0x3U
+#define V_CMD_SPLIT_FIFO_CNT3(x) ((x) << S_CMD_SPLIT_FIFO_CNT3)
+#define G_CMD_SPLIT_FIFO_CNT3(x) (((x) >> S_CMD_SPLIT_FIFO_CNT3) & M_CMD_SPLIT_FIFO_CNT3)
+
+#define S_CMD_THROTTLE_FIFO_CNT3    22
+#define M_CMD_THROTTLE_FIFO_CNT3    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT3(x) ((x) << S_CMD_THROTTLE_FIFO_CNT3)
+#define G_CMD_THROTTLE_FIFO_CNT3(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT3) & M_CMD_THROTTLE_FIFO_CNT3)
+
+#define S_RD_CHNL_FIFO_CNT3    15
+#define M_RD_CHNL_FIFO_CNT3    0x7fU
+#define V_RD_CHNL_FIFO_CNT3(x) ((x) << S_RD_CHNL_FIFO_CNT3)
+#define G_RD_CHNL_FIFO_CNT3(x) (((x) >> S_RD_CHNL_FIFO_CNT3) & M_RD_CHNL_FIFO_CNT3)
+
+#define S_RD_DATA_EXT_FIFO_CNT3    13
+#define M_RD_DATA_EXT_FIFO_CNT3    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT3(x) ((x) << S_RD_DATA_EXT_FIFO_CNT3)
+#define G_RD_DATA_EXT_FIFO_CNT3(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT3) & M_RD_DATA_EXT_FIFO_CNT3)
+
+#define S_RD_DATA_512B_FIFO_CNT3    5
+#define M_RD_DATA_512B_FIFO_CNT3    0xffU
+#define V_RD_DATA_512B_FIFO_CNT3(x) ((x) << S_RD_DATA_512B_FIFO_CNT3)
+#define G_RD_DATA_512B_FIFO_CNT3(x) (((x) >> S_RD_DATA_512B_FIFO_CNT3) & M_RD_DATA_512B_FIFO_CNT3)
+
+#define S_RD_REQ_TAG_FIFO_CNT3    1
+#define M_RD_REQ_TAG_FIFO_CNT3    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT3(x) ((x) << S_RD_REQ_TAG_FIFO_CNT3)
+#define G_RD_REQ_TAG_FIFO_CNT3(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT3) & M_RD_REQ_TAG_FIFO_CNT3)
+
+#define A_MA_ULP_TX_RX_CLIENT_INTERFACE_INTERNAL_REG0 0xa404
+
+#define S_CMD_IN_FIFO_CNT4    30
+#define M_CMD_IN_FIFO_CNT4    0x3U
+#define V_CMD_IN_FIFO_CNT4(x) ((x) << S_CMD_IN_FIFO_CNT4)
+#define G_CMD_IN_FIFO_CNT4(x) (((x) >> S_CMD_IN_FIFO_CNT4) & M_CMD_IN_FIFO_CNT4)
+
+#define S_CMD_SPLIT_FIFO_CNT4    28
+#define M_CMD_SPLIT_FIFO_CNT4    0x3U
+#define V_CMD_SPLIT_FIFO_CNT4(x) ((x) << S_CMD_SPLIT_FIFO_CNT4)
+#define G_CMD_SPLIT_FIFO_CNT4(x) (((x) >> S_CMD_SPLIT_FIFO_CNT4) & M_CMD_SPLIT_FIFO_CNT4)
+
+#define S_CMD_THROTTLE_FIFO_CNT4    22
+#define M_CMD_THROTTLE_FIFO_CNT4    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT4(x) ((x) << S_CMD_THROTTLE_FIFO_CNT4)
+#define G_CMD_THROTTLE_FIFO_CNT4(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT4) & M_CMD_THROTTLE_FIFO_CNT4)
+
+#define S_RD_CHNL_FIFO_CNT4    15
+#define M_RD_CHNL_FIFO_CNT4    0x7fU
+#define V_RD_CHNL_FIFO_CNT4(x) ((x) << S_RD_CHNL_FIFO_CNT4)
+#define G_RD_CHNL_FIFO_CNT4(x) (((x) >> S_RD_CHNL_FIFO_CNT4) & M_RD_CHNL_FIFO_CNT4)
+
+#define S_RD_DATA_EXT_FIFO_CNT4    13
+#define M_RD_DATA_EXT_FIFO_CNT4    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT4(x) ((x) << S_RD_DATA_EXT_FIFO_CNT4)
+#define G_RD_DATA_EXT_FIFO_CNT4(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT4) & M_RD_DATA_EXT_FIFO_CNT4)
+
+#define S_RD_DATA_512B_FIFO_CNT4    5
+#define M_RD_DATA_512B_FIFO_CNT4    0xffU
+#define V_RD_DATA_512B_FIFO_CNT4(x) ((x) << S_RD_DATA_512B_FIFO_CNT4)
+#define G_RD_DATA_512B_FIFO_CNT4(x) (((x) >> S_RD_DATA_512B_FIFO_CNT4) & M_RD_DATA_512B_FIFO_CNT4)
+
+#define S_RD_REQ_TAG_FIFO_CNT4    1
+#define M_RD_REQ_TAG_FIFO_CNT4    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT4(x) ((x) << S_RD_REQ_TAG_FIFO_CNT4)
+#define G_RD_REQ_TAG_FIFO_CNT4(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT4) & M_RD_REQ_TAG_FIFO_CNT4)
+
+#define A_MA_TP_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG0 0xa405
+
+#define S_CMD_IN_FIFO_CNT5    30
+#define M_CMD_IN_FIFO_CNT5    0x3U
+#define V_CMD_IN_FIFO_CNT5(x) ((x) << S_CMD_IN_FIFO_CNT5)
+#define G_CMD_IN_FIFO_CNT5(x) (((x) >> S_CMD_IN_FIFO_CNT5) & M_CMD_IN_FIFO_CNT5)
+
+#define S_CMD_SPLIT_FIFO_CNT5    28
+#define M_CMD_SPLIT_FIFO_CNT5    0x3U
+#define V_CMD_SPLIT_FIFO_CNT5(x) ((x) << S_CMD_SPLIT_FIFO_CNT5)
+#define G_CMD_SPLIT_FIFO_CNT5(x) (((x) >> S_CMD_SPLIT_FIFO_CNT5) & M_CMD_SPLIT_FIFO_CNT5)
+
+#define S_CMD_THROTTLE_FIFO_CNT5    22
+#define M_CMD_THROTTLE_FIFO_CNT5    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT5(x) ((x) << S_CMD_THROTTLE_FIFO_CNT5)
+#define G_CMD_THROTTLE_FIFO_CNT5(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT5) & M_CMD_THROTTLE_FIFO_CNT5)
+
+#define S_RD_CHNL_FIFO_CNT5    15
+#define M_RD_CHNL_FIFO_CNT5    0x7fU
+#define V_RD_CHNL_FIFO_CNT5(x) ((x) << S_RD_CHNL_FIFO_CNT5)
+#define G_RD_CHNL_FIFO_CNT5(x) (((x) >> S_RD_CHNL_FIFO_CNT5) & M_RD_CHNL_FIFO_CNT5)
+
+#define S_RD_DATA_EXT_FIFO_CNT5    13
+#define M_RD_DATA_EXT_FIFO_CNT5    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT5(x) ((x) << S_RD_DATA_EXT_FIFO_CNT5)
+#define G_RD_DATA_EXT_FIFO_CNT5(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT5) & M_RD_DATA_EXT_FIFO_CNT5)
+
+#define S_RD_DATA_512B_FIFO_CNT5    5
+#define M_RD_DATA_512B_FIFO_CNT5    0xffU
+#define V_RD_DATA_512B_FIFO_CNT5(x) ((x) << S_RD_DATA_512B_FIFO_CNT5)
+#define G_RD_DATA_512B_FIFO_CNT5(x) (((x) >> S_RD_DATA_512B_FIFO_CNT5) & M_RD_DATA_512B_FIFO_CNT5)
+
+#define S_RD_REQ_TAG_FIFO_CNT5    1
+#define M_RD_REQ_TAG_FIFO_CNT5    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT5(x) ((x) << S_RD_REQ_TAG_FIFO_CNT5)
+#define G_RD_REQ_TAG_FIFO_CNT5(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT5) & M_RD_REQ_TAG_FIFO_CNT5)
+
+#define A_MA_TP_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG0 0xa406
+
+#define S_CMD_IN_FIFO_CNT6    30
+#define M_CMD_IN_FIFO_CNT6    0x3U
+#define V_CMD_IN_FIFO_CNT6(x) ((x) << S_CMD_IN_FIFO_CNT6)
+#define G_CMD_IN_FIFO_CNT6(x) (((x) >> S_CMD_IN_FIFO_CNT6) & M_CMD_IN_FIFO_CNT6)
+
+#define S_CMD_SPLIT_FIFO_CNT6    28
+#define M_CMD_SPLIT_FIFO_CNT6    0x3U
+#define V_CMD_SPLIT_FIFO_CNT6(x) ((x) << S_CMD_SPLIT_FIFO_CNT6)
+#define G_CMD_SPLIT_FIFO_CNT6(x) (((x) >> S_CMD_SPLIT_FIFO_CNT6) & M_CMD_SPLIT_FIFO_CNT6)
+
+#define S_CMD_THROTTLE_FIFO_CNT6    22
+#define M_CMD_THROTTLE_FIFO_CNT6    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT6(x) ((x) << S_CMD_THROTTLE_FIFO_CNT6)
+#define G_CMD_THROTTLE_FIFO_CNT6(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT6) & M_CMD_THROTTLE_FIFO_CNT6)
+
+#define S_RD_CHNL_FIFO_CNT6    15
+#define M_RD_CHNL_FIFO_CNT6    0x7fU
+#define V_RD_CHNL_FIFO_CNT6(x) ((x) << S_RD_CHNL_FIFO_CNT6)
+#define G_RD_CHNL_FIFO_CNT6(x) (((x) >> S_RD_CHNL_FIFO_CNT6) & M_RD_CHNL_FIFO_CNT6)
+
+#define S_RD_DATA_EXT_FIFO_CNT6    13
+#define M_RD_DATA_EXT_FIFO_CNT6    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT6(x) ((x) << S_RD_DATA_EXT_FIFO_CNT6)
+#define G_RD_DATA_EXT_FIFO_CNT6(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT6) & M_RD_DATA_EXT_FIFO_CNT6)
+
+#define S_RD_DATA_512B_FIFO_CNT6    5
+#define M_RD_DATA_512B_FIFO_CNT6    0xffU
+#define V_RD_DATA_512B_FIFO_CNT6(x) ((x) << S_RD_DATA_512B_FIFO_CNT6)
+#define G_RD_DATA_512B_FIFO_CNT6(x) (((x) >> S_RD_DATA_512B_FIFO_CNT6) & M_RD_DATA_512B_FIFO_CNT6)
+
+#define S_RD_REQ_TAG_FIFO_CNT6    1
+#define M_RD_REQ_TAG_FIFO_CNT6    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT6(x) ((x) << S_RD_REQ_TAG_FIFO_CNT6)
+#define G_RD_REQ_TAG_FIFO_CNT6(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT6) & M_RD_REQ_TAG_FIFO_CNT6)
+
+#define A_MA_LE_CLIENT_INTERFACE_INTERNAL_REG0 0xa407
+
+#define S_CMD_IN_FIFO_CNT7    30
+#define M_CMD_IN_FIFO_CNT7    0x3U
+#define V_CMD_IN_FIFO_CNT7(x) ((x) << S_CMD_IN_FIFO_CNT7)
+#define G_CMD_IN_FIFO_CNT7(x) (((x) >> S_CMD_IN_FIFO_CNT7) & M_CMD_IN_FIFO_CNT7)
+
+#define S_CMD_SPLIT_FIFO_CNT7    28
+#define M_CMD_SPLIT_FIFO_CNT7    0x3U
+#define V_CMD_SPLIT_FIFO_CNT7(x) ((x) << S_CMD_SPLIT_FIFO_CNT7)
+#define G_CMD_SPLIT_FIFO_CNT7(x) (((x) >> S_CMD_SPLIT_FIFO_CNT7) & M_CMD_SPLIT_FIFO_CNT7)
+
+#define S_CMD_THROTTLE_FIFO_CNT7    22
+#define M_CMD_THROTTLE_FIFO_CNT7    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT7(x) ((x) << S_CMD_THROTTLE_FIFO_CNT7)
+#define G_CMD_THROTTLE_FIFO_CNT7(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT7) & M_CMD_THROTTLE_FIFO_CNT7)
+
+#define S_RD_CHNL_FIFO_CNT7    15
+#define M_RD_CHNL_FIFO_CNT7    0x7fU
+#define V_RD_CHNL_FIFO_CNT7(x) ((x) << S_RD_CHNL_FIFO_CNT7)
+#define G_RD_CHNL_FIFO_CNT7(x) (((x) >> S_RD_CHNL_FIFO_CNT7) & M_RD_CHNL_FIFO_CNT7)
+
+#define S_RD_DATA_EXT_FIFO_CNT7    13
+#define M_RD_DATA_EXT_FIFO_CNT7    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT7(x) ((x) << S_RD_DATA_EXT_FIFO_CNT7)
+#define G_RD_DATA_EXT_FIFO_CNT7(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT7) & M_RD_DATA_EXT_FIFO_CNT7)
+
+#define S_RD_DATA_512B_FIFO_CNT7    5
+#define M_RD_DATA_512B_FIFO_CNT7    0xffU
+#define V_RD_DATA_512B_FIFO_CNT7(x) ((x) << S_RD_DATA_512B_FIFO_CNT7)
+#define G_RD_DATA_512B_FIFO_CNT7(x) (((x) >> S_RD_DATA_512B_FIFO_CNT7) & M_RD_DATA_512B_FIFO_CNT7)
+
+#define S_RD_REQ_TAG_FIFO_CNT7    1
+#define M_RD_REQ_TAG_FIFO_CNT7    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT7(x) ((x) << S_RD_REQ_TAG_FIFO_CNT7)
+#define G_RD_REQ_TAG_FIFO_CNT7(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT7) & M_RD_REQ_TAG_FIFO_CNT7)
+
+#define A_MA_CIM_CLIENT_INTERFACE_INTERNAL_REG0 0xa408
+
+#define S_CMD_IN_FIFO_CNT8    30
+#define M_CMD_IN_FIFO_CNT8    0x3U
+#define V_CMD_IN_FIFO_CNT8(x) ((x) << S_CMD_IN_FIFO_CNT8)
+#define G_CMD_IN_FIFO_CNT8(x) (((x) >> S_CMD_IN_FIFO_CNT8) & M_CMD_IN_FIFO_CNT8)
+
+#define S_CMD_SPLIT_FIFO_CNT8    28
+#define M_CMD_SPLIT_FIFO_CNT8    0x3U
+#define V_CMD_SPLIT_FIFO_CNT8(x) ((x) << S_CMD_SPLIT_FIFO_CNT8)
+#define G_CMD_SPLIT_FIFO_CNT8(x) (((x) >> S_CMD_SPLIT_FIFO_CNT8) & M_CMD_SPLIT_FIFO_CNT8)
+
+#define S_CMD_THROTTLE_FIFO_CNT8    22
+#define M_CMD_THROTTLE_FIFO_CNT8    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT8(x) ((x) << S_CMD_THROTTLE_FIFO_CNT8)
+#define G_CMD_THROTTLE_FIFO_CNT8(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT8) & M_CMD_THROTTLE_FIFO_CNT8)
+
+#define S_RD_CHNL_FIFO_CNT8    15
+#define M_RD_CHNL_FIFO_CNT8    0x7fU
+#define V_RD_CHNL_FIFO_CNT8(x) ((x) << S_RD_CHNL_FIFO_CNT8)
+#define G_RD_CHNL_FIFO_CNT8(x) (((x) >> S_RD_CHNL_FIFO_CNT8) & M_RD_CHNL_FIFO_CNT8)
+
+#define S_RD_DATA_EXT_FIFO_CNT8    13
+#define M_RD_DATA_EXT_FIFO_CNT8    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT8(x) ((x) << S_RD_DATA_EXT_FIFO_CNT8)
+#define G_RD_DATA_EXT_FIFO_CNT8(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT8) & M_RD_DATA_EXT_FIFO_CNT8)
+
+#define S_RD_DATA_512B_FIFO_CNT8    5
+#define M_RD_DATA_512B_FIFO_CNT8    0xffU
+#define V_RD_DATA_512B_FIFO_CNT8(x) ((x) << S_RD_DATA_512B_FIFO_CNT8)
+#define G_RD_DATA_512B_FIFO_CNT8(x) (((x) >> S_RD_DATA_512B_FIFO_CNT8) & M_RD_DATA_512B_FIFO_CNT8)
+
+#define S_RD_REQ_TAG_FIFO_CNT8    1
+#define M_RD_REQ_TAG_FIFO_CNT8    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT8(x) ((x) << S_RD_REQ_TAG_FIFO_CNT8)
+#define G_RD_REQ_TAG_FIFO_CNT8(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT8) & M_RD_REQ_TAG_FIFO_CNT8)
+
+#define A_MA_PCIE_CLIENT_INTERFACE_INTERNAL_REG0 0xa409
+
+#define S_CMD_IN_FIFO_CNT9    30
+#define M_CMD_IN_FIFO_CNT9    0x3U
+#define V_CMD_IN_FIFO_CNT9(x) ((x) << S_CMD_IN_FIFO_CNT9)
+#define G_CMD_IN_FIFO_CNT9(x) (((x) >> S_CMD_IN_FIFO_CNT9) & M_CMD_IN_FIFO_CNT9)
+
+#define S_CMD_SPLIT_FIFO_CNT9    28
+#define M_CMD_SPLIT_FIFO_CNT9    0x3U
+#define V_CMD_SPLIT_FIFO_CNT9(x) ((x) << S_CMD_SPLIT_FIFO_CNT9)
+#define G_CMD_SPLIT_FIFO_CNT9(x) (((x) >> S_CMD_SPLIT_FIFO_CNT9) & M_CMD_SPLIT_FIFO_CNT9)
+
+#define S_CMD_THROTTLE_FIFO_CNT9    22
+#define M_CMD_THROTTLE_FIFO_CNT9    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT9(x) ((x) << S_CMD_THROTTLE_FIFO_CNT9)
+#define G_CMD_THROTTLE_FIFO_CNT9(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT9) & M_CMD_THROTTLE_FIFO_CNT9)
+
+#define S_RD_CHNL_FIFO_CNT9    15
+#define M_RD_CHNL_FIFO_CNT9    0x7fU
+#define V_RD_CHNL_FIFO_CNT9(x) ((x) << S_RD_CHNL_FIFO_CNT9)
+#define G_RD_CHNL_FIFO_CNT9(x) (((x) >> S_RD_CHNL_FIFO_CNT9) & M_RD_CHNL_FIFO_CNT9)
+
+#define S_RD_DATA_EXT_FIFO_CNT9    13
+#define M_RD_DATA_EXT_FIFO_CNT9    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT9(x) ((x) << S_RD_DATA_EXT_FIFO_CNT9)
+#define G_RD_DATA_EXT_FIFO_CNT9(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT9) & M_RD_DATA_EXT_FIFO_CNT9)
+
+#define S_RD_DATA_512B_FIFO_CNT9    5
+#define M_RD_DATA_512B_FIFO_CNT9    0xffU
+#define V_RD_DATA_512B_FIFO_CNT9(x) ((x) << S_RD_DATA_512B_FIFO_CNT9)
+#define G_RD_DATA_512B_FIFO_CNT9(x) (((x) >> S_RD_DATA_512B_FIFO_CNT9) & M_RD_DATA_512B_FIFO_CNT9)
+
+#define S_RD_REQ_TAG_FIFO_CNT9    1
+#define M_RD_REQ_TAG_FIFO_CNT9    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT9(x) ((x) << S_RD_REQ_TAG_FIFO_CNT9)
+#define G_RD_REQ_TAG_FIFO_CNT9(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT9) & M_RD_REQ_TAG_FIFO_CNT9)
+
+#define A_MA_PM_TX_CLIENT_INTERFACE_INTERNAL_REG0 0xa40a
+
+#define S_CMD_IN_FIFO_CNT10    30
+#define M_CMD_IN_FIFO_CNT10    0x3U
+#define V_CMD_IN_FIFO_CNT10(x) ((x) << S_CMD_IN_FIFO_CNT10)
+#define G_CMD_IN_FIFO_CNT10(x) (((x) >> S_CMD_IN_FIFO_CNT10) & M_CMD_IN_FIFO_CNT10)
+
+#define S_CMD_SPLIT_FIFO_CNT10    28
+#define M_CMD_SPLIT_FIFO_CNT10    0x3U
+#define V_CMD_SPLIT_FIFO_CNT10(x) ((x) << S_CMD_SPLIT_FIFO_CNT10)
+#define G_CMD_SPLIT_FIFO_CNT10(x) (((x) >> S_CMD_SPLIT_FIFO_CNT10) & M_CMD_SPLIT_FIFO_CNT10)
+
+#define S_CMD_THROTTLE_FIFO_CNT10    22
+#define M_CMD_THROTTLE_FIFO_CNT10    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT10(x) ((x) << S_CMD_THROTTLE_FIFO_CNT10)
+#define G_CMD_THROTTLE_FIFO_CNT10(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT10) & M_CMD_THROTTLE_FIFO_CNT10)
+
+#define S_RD_CHNL_FIFO_CNT10    15
+#define M_RD_CHNL_FIFO_CNT10    0x7fU
+#define V_RD_CHNL_FIFO_CNT10(x) ((x) << S_RD_CHNL_FIFO_CNT10)
+#define G_RD_CHNL_FIFO_CNT10(x) (((x) >> S_RD_CHNL_FIFO_CNT10) & M_RD_CHNL_FIFO_CNT10)
+
+#define S_RD_DATA_EXT_FIFO_CNT10    13
+#define M_RD_DATA_EXT_FIFO_CNT10    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT10(x) ((x) << S_RD_DATA_EXT_FIFO_CNT10)
+#define G_RD_DATA_EXT_FIFO_CNT10(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT10) & M_RD_DATA_EXT_FIFO_CNT10)
+
+#define S_RD_DATA_512B_FIFO_CNT10    5
+#define M_RD_DATA_512B_FIFO_CNT10    0xffU
+#define V_RD_DATA_512B_FIFO_CNT10(x) ((x) << S_RD_DATA_512B_FIFO_CNT10)
+#define G_RD_DATA_512B_FIFO_CNT10(x) (((x) >> S_RD_DATA_512B_FIFO_CNT10) & M_RD_DATA_512B_FIFO_CNT10)
+
+#define S_RD_REQ_TAG_FIFO_CNT10    1
+#define M_RD_REQ_TAG_FIFO_CNT10    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT10(x) ((x) << S_RD_REQ_TAG_FIFO_CNT10)
+#define G_RD_REQ_TAG_FIFO_CNT10(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT10) & M_RD_REQ_TAG_FIFO_CNT10)
+
+#define A_MA_PM_RX_CLIENT_INTERFACE_INTERNAL_REG0 0xa40b
+
+#define S_CMD_IN_FIFO_CNT11    30
+#define M_CMD_IN_FIFO_CNT11    0x3U
+#define V_CMD_IN_FIFO_CNT11(x) ((x) << S_CMD_IN_FIFO_CNT11)
+#define G_CMD_IN_FIFO_CNT11(x) (((x) >> S_CMD_IN_FIFO_CNT11) & M_CMD_IN_FIFO_CNT11)
+
+#define S_CMD_SPLIT_FIFO_CNT11    28
+#define M_CMD_SPLIT_FIFO_CNT11    0x3U
+#define V_CMD_SPLIT_FIFO_CNT11(x) ((x) << S_CMD_SPLIT_FIFO_CNT11)
+#define G_CMD_SPLIT_FIFO_CNT11(x) (((x) >> S_CMD_SPLIT_FIFO_CNT11) & M_CMD_SPLIT_FIFO_CNT11)
+
+#define S_CMD_THROTTLE_FIFO_CNT11    22
+#define M_CMD_THROTTLE_FIFO_CNT11    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT11(x) ((x) << S_CMD_THROTTLE_FIFO_CNT11)
+#define G_CMD_THROTTLE_FIFO_CNT11(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT11) & M_CMD_THROTTLE_FIFO_CNT11)
+
+#define S_RD_CHNL_FIFO_CNT11    15
+#define M_RD_CHNL_FIFO_CNT11    0x7fU
+#define V_RD_CHNL_FIFO_CNT11(x) ((x) << S_RD_CHNL_FIFO_CNT11)
+#define G_RD_CHNL_FIFO_CNT11(x) (((x) >> S_RD_CHNL_FIFO_CNT11) & M_RD_CHNL_FIFO_CNT11)
+
+#define S_RD_DATA_EXT_FIFO_CNT11    13
+#define M_RD_DATA_EXT_FIFO_CNT11    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT11(x) ((x) << S_RD_DATA_EXT_FIFO_CNT11)
+#define G_RD_DATA_EXT_FIFO_CNT11(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT11) & M_RD_DATA_EXT_FIFO_CNT11)
+
+#define S_RD_DATA_512B_FIFO_CNT11    5
+#define M_RD_DATA_512B_FIFO_CNT11    0xffU
+#define V_RD_DATA_512B_FIFO_CNT11(x) ((x) << S_RD_DATA_512B_FIFO_CNT11)
+#define G_RD_DATA_512B_FIFO_CNT11(x) (((x) >> S_RD_DATA_512B_FIFO_CNT11) & M_RD_DATA_512B_FIFO_CNT11)
+
+#define S_RD_REQ_TAG_FIFO_CNT11    1
+#define M_RD_REQ_TAG_FIFO_CNT11    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT11(x) ((x) << S_RD_REQ_TAG_FIFO_CNT11)
+#define G_RD_REQ_TAG_FIFO_CNT11(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT11) & M_RD_REQ_TAG_FIFO_CNT11)
+
+#define A_MA_HMA_CLIENT_INTERFACE_INTERNAL_REG0 0xa40c
+
+#define S_CMD_IN_FIFO_CNT12    30
+#define M_CMD_IN_FIFO_CNT12    0x3U
+#define V_CMD_IN_FIFO_CNT12(x) ((x) << S_CMD_IN_FIFO_CNT12)
+#define G_CMD_IN_FIFO_CNT12(x) (((x) >> S_CMD_IN_FIFO_CNT12) & M_CMD_IN_FIFO_CNT12)
+
+#define S_CMD_SPLIT_FIFO_CNT12    28
+#define M_CMD_SPLIT_FIFO_CNT12    0x3U
+#define V_CMD_SPLIT_FIFO_CNT12(x) ((x) << S_CMD_SPLIT_FIFO_CNT12)
+#define G_CMD_SPLIT_FIFO_CNT12(x) (((x) >> S_CMD_SPLIT_FIFO_CNT12) & M_CMD_SPLIT_FIFO_CNT12)
+
+#define S_CMD_THROTTLE_FIFO_CNT12    22
+#define M_CMD_THROTTLE_FIFO_CNT12    0x3fU
+#define V_CMD_THROTTLE_FIFO_CNT12(x) ((x) << S_CMD_THROTTLE_FIFO_CNT12)
+#define G_CMD_THROTTLE_FIFO_CNT12(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT12) & M_CMD_THROTTLE_FIFO_CNT12)
+
+#define S_RD_CHNL_FIFO_CNT12    15
+#define M_RD_CHNL_FIFO_CNT12    0x7fU
+#define V_RD_CHNL_FIFO_CNT12(x) ((x) << S_RD_CHNL_FIFO_CNT12)
+#define G_RD_CHNL_FIFO_CNT12(x) (((x) >> S_RD_CHNL_FIFO_CNT12) & M_RD_CHNL_FIFO_CNT12)
+
+#define S_RD_DATA_EXT_FIFO_CNT12    13
+#define M_RD_DATA_EXT_FIFO_CNT12    0x3U
+#define V_RD_DATA_EXT_FIFO_CNT12(x) ((x) << S_RD_DATA_EXT_FIFO_CNT12)
+#define G_RD_DATA_EXT_FIFO_CNT12(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT12) & M_RD_DATA_EXT_FIFO_CNT12)
+
+#define S_RD_DATA_512B_FIFO_CNT12    5
+#define M_RD_DATA_512B_FIFO_CNT12    0xffU
+#define V_RD_DATA_512B_FIFO_CNT12(x) ((x) << S_RD_DATA_512B_FIFO_CNT12)
+#define G_RD_DATA_512B_FIFO_CNT12(x) (((x) >> S_RD_DATA_512B_FIFO_CNT12) & M_RD_DATA_512B_FIFO_CNT12)
+
+#define S_RD_REQ_TAG_FIFO_CNT12    1
+#define M_RD_REQ_TAG_FIFO_CNT12    0xfU
+#define V_RD_REQ_TAG_FIFO_CNT12(x) ((x) << S_RD_REQ_TAG_FIFO_CNT12)
+#define G_RD_REQ_TAG_FIFO_CNT12(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT12) & M_RD_REQ_TAG_FIFO_CNT12)
+
+#define A_MA_TARGET_0_ARBITER_INTERFACE_INTERNAL_REG0 0xa40d
+
+#define S_WR_DATA_FSM0    23
+#define V_WR_DATA_FSM0(x) ((x) << S_WR_DATA_FSM0)
+#define F_WR_DATA_FSM0    V_WR_DATA_FSM0(1U)
+
+#define S_RD_DATA_FSM0    22
+#define V_RD_DATA_FSM0(x) ((x) << S_RD_DATA_FSM0)
+#define F_RD_DATA_FSM0    V_RD_DATA_FSM0(1U)
+
+#define S_TGT_CMD_FIFO_CNT0    19
+#define M_TGT_CMD_FIFO_CNT0    0x7U
+#define V_TGT_CMD_FIFO_CNT0(x) ((x) << S_TGT_CMD_FIFO_CNT0)
+#define G_TGT_CMD_FIFO_CNT0(x) (((x) >> S_TGT_CMD_FIFO_CNT0) & M_TGT_CMD_FIFO_CNT0)
+
+#define S_CLNT_NUM_FIFO_CNT0    16
+#define M_CLNT_NUM_FIFO_CNT0    0x7U
+#define V_CLNT_NUM_FIFO_CNT0(x) ((x) << S_CLNT_NUM_FIFO_CNT0)
+#define G_CLNT_NUM_FIFO_CNT0(x) (((x) >> S_CLNT_NUM_FIFO_CNT0) & M_CLNT_NUM_FIFO_CNT0)
+
+#define S_WR_CMD_TAG_FIFO_CNT_TGT0    8
+#define M_WR_CMD_TAG_FIFO_CNT_TGT0    0xffU
+#define V_WR_CMD_TAG_FIFO_CNT_TGT0(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT0)
+#define G_WR_CMD_TAG_FIFO_CNT_TGT0(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT0) & M_WR_CMD_TAG_FIFO_CNT_TGT0)
+
+#define S_WR_DATA_512B_FIFO_CNT_TGT0    0
+#define M_WR_DATA_512B_FIFO_CNT_TGT0    0xffU
+#define V_WR_DATA_512B_FIFO_CNT_TGT0(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT0)
+#define G_WR_DATA_512B_FIFO_CNT_TGT0(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT0) & M_WR_DATA_512B_FIFO_CNT_TGT0)
+
+#define A_MA_TARGET_1_ARBITER_INTERFACE_INTERNAL_REG0 0xa40e
+
+#define S_WR_DATA_FSM1    23
+#define V_WR_DATA_FSM1(x) ((x) << S_WR_DATA_FSM1)
+#define F_WR_DATA_FSM1    V_WR_DATA_FSM1(1U)
+
+#define S_RD_DATA_FSM1    22
+#define V_RD_DATA_FSM1(x) ((x) << S_RD_DATA_FSM1)
+#define F_RD_DATA_FSM1    V_RD_DATA_FSM1(1U)
+
+#define S_TGT_CMD_FIFO_CNT1    19
+#define M_TGT_CMD_FIFO_CNT1    0x7U
+#define V_TGT_CMD_FIFO_CNT1(x) ((x) << S_TGT_CMD_FIFO_CNT1)
+#define G_TGT_CMD_FIFO_CNT1(x) (((x) >> S_TGT_CMD_FIFO_CNT1) & M_TGT_CMD_FIFO_CNT1)
+
+#define S_CLNT_NUM_FIFO_CNT1    16
+#define M_CLNT_NUM_FIFO_CNT1    0x7U
+#define V_CLNT_NUM_FIFO_CNT1(x) ((x) << S_CLNT_NUM_FIFO_CNT1)
+#define G_CLNT_NUM_FIFO_CNT1(x) (((x) >> S_CLNT_NUM_FIFO_CNT1) & M_CLNT_NUM_FIFO_CNT1)
+
+#define S_WR_CMD_TAG_FIFO_CNT_TGT1    8
+#define M_WR_CMD_TAG_FIFO_CNT_TGT1    0xffU
+#define V_WR_CMD_TAG_FIFO_CNT_TGT1(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT1)
+#define G_WR_CMD_TAG_FIFO_CNT_TGT1(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT1) & M_WR_CMD_TAG_FIFO_CNT_TGT1)
+
+#define S_WR_DATA_512B_FIFO_CNT_TGT1    0
+#define M_WR_DATA_512B_FIFO_CNT_TGT1    0xffU
+#define V_WR_DATA_512B_FIFO_CNT_TGT1(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT1)
+#define G_WR_DATA_512B_FIFO_CNT_TGT1(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT1) & M_WR_DATA_512B_FIFO_CNT_TGT1)
+
+#define A_MA_TARGET_2_ARBITER_INTERFACE_INTERNAL_REG0 0xa40f
+
+#define S_WR_DATA_FSM2    23
+#define V_WR_DATA_FSM2(x) ((x) << S_WR_DATA_FSM2)
+#define F_WR_DATA_FSM2    V_WR_DATA_FSM2(1U)
+
+#define S_RD_DATA_FSM2    22
+#define V_RD_DATA_FSM2(x) ((x) << S_RD_DATA_FSM2)
+#define F_RD_DATA_FSM2    V_RD_DATA_FSM2(1U)
+
+#define S_TGT_CMD_FIFO_CNT2    19
+#define M_TGT_CMD_FIFO_CNT2    0x7U
+#define V_TGT_CMD_FIFO_CNT2(x) ((x) << S_TGT_CMD_FIFO_CNT2)
+#define G_TGT_CMD_FIFO_CNT2(x) (((x) >> S_TGT_CMD_FIFO_CNT2) & M_TGT_CMD_FIFO_CNT2)
+
+#define S_CLNT_NUM_FIFO_CNT2    16
+#define M_CLNT_NUM_FIFO_CNT2    0x7U
+#define V_CLNT_NUM_FIFO_CNT2(x) ((x) << S_CLNT_NUM_FIFO_CNT2)
+#define G_CLNT_NUM_FIFO_CNT2(x) (((x) >> S_CLNT_NUM_FIFO_CNT2) & M_CLNT_NUM_FIFO_CNT2)
+
+#define S_WR_CMD_TAG_FIFO_CNT_TGT2    8
+#define M_WR_CMD_TAG_FIFO_CNT_TGT2    0xffU
+#define V_WR_CMD_TAG_FIFO_CNT_TGT2(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT2)
+#define G_WR_CMD_TAG_FIFO_CNT_TGT2(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT2) & M_WR_CMD_TAG_FIFO_CNT_TGT2)
+
+#define S_WR_DATA_512B_FIFO_CNT_TGT2    0
+#define M_WR_DATA_512B_FIFO_CNT_TGT2    0xffU
+#define V_WR_DATA_512B_FIFO_CNT_TGT2(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT2)
+#define G_WR_DATA_512B_FIFO_CNT_TGT2(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT2) & M_WR_DATA_512B_FIFO_CNT_TGT2)
+
+#define A_MA_TARGET_3_ARBITER_INTERFACE_INTERNAL_REG0 0xa410
+
+#define S_WR_DATA_FSM3    23
+#define V_WR_DATA_FSM3(x) ((x) << S_WR_DATA_FSM3)
+#define F_WR_DATA_FSM3    V_WR_DATA_FSM3(1U)
+
+#define S_RD_DATA_FSM3    22
+#define V_RD_DATA_FSM3(x) ((x) << S_RD_DATA_FSM3)
+#define F_RD_DATA_FSM3    V_RD_DATA_FSM3(1U)
+
+#define S_TGT_CMD_FIFO_CNT3    19
+#define M_TGT_CMD_FIFO_CNT3    0x7U
+#define V_TGT_CMD_FIFO_CNT3(x) ((x) << S_TGT_CMD_FIFO_CNT3)
+#define G_TGT_CMD_FIFO_CNT3(x) (((x) >> S_TGT_CMD_FIFO_CNT3) & M_TGT_CMD_FIFO_CNT3)
+
+#define S_CLNT_NUM_FIFO_CNT3    16
+#define M_CLNT_NUM_FIFO_CNT3    0x7U
+#define V_CLNT_NUM_FIFO_CNT3(x) ((x) << S_CLNT_NUM_FIFO_CNT3)
+#define G_CLNT_NUM_FIFO_CNT3(x) (((x) >> S_CLNT_NUM_FIFO_CNT3) & M_CLNT_NUM_FIFO_CNT3)
+
+#define S_WR_CMD_TAG_FIFO_CNT_TGT3    8
+#define M_WR_CMD_TAG_FIFO_CNT_TGT3    0xffU
+#define V_WR_CMD_TAG_FIFO_CNT_TGT3(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT3)
+#define G_WR_CMD_TAG_FIFO_CNT_TGT3(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT3) & M_WR_CMD_TAG_FIFO_CNT_TGT3)
+
+#define S_WR_DATA_512B_FIFO_CNT_TGT3    0
+#define M_WR_DATA_512B_FIFO_CNT_TGT3    0xffU
+#define V_WR_DATA_512B_FIFO_CNT_TGT3(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT3)
+#define G_WR_DATA_512B_FIFO_CNT_TGT3(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT3) & M_WR_DATA_512B_FIFO_CNT_TGT3)
+
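[Editor's note: the S_/M_/V_/G_ quartet used throughout this header follows the
usual Chelsio convention: S_FOO is the field's bit offset, M_FOO its width mask,
V_FOO(x) packs a value into position, and G_FOO(x) extracts it from a register
word; single-bit fields get an F_FOO flag instead of M_/G_. A minimal sketch of
the round trip, using the target-0 arbiter fields defined above (pack_tgt0 and
demo are names local to this example, not part of the header):

/* Sketch: pack two target-0 arbiter fields into a register image and
 * extract them again; V_/G_ are inverses for in-range values. */
#include <assert.h>
#include <stdint.h>

static uint32_t
pack_tgt0(uint32_t tag, uint32_t data512b)
{
	return (V_WR_CMD_TAG_FIFO_CNT_TGT0(tag) |
	    V_WR_DATA_512B_FIFO_CNT_TGT0(data512b));
}

static void
demo(void)
{
	uint32_t reg = pack_tgt0(0x5a, 0x3c);	/* reg == 0x5a3c */

	assert(G_WR_CMD_TAG_FIFO_CNT_TGT0(reg) == 0x5a);
	assert(G_WR_DATA_512B_FIFO_CNT_TGT0(reg) == 0x3c);
}
]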
+#define A_MA_SGE_THREAD_0_CLNT_EXP_RD_CYC_CNT_LO 0xa412
+#define A_MA_SGE_THREAD_1_CLNT_EXP_RD_CYC_CNT_LO 0xa413
+#define A_MA_ULP_TX_CLNT_EXP_RD_CYC_CNT_LO 0xa414
+#define A_MA_ULP_RX_CLNT_EXP_RD_CYC_CNT_LO 0xa415
+#define A_MA_ULP_TX_RX_CLNT_EXP_RD_CYC_CNT_LO 0xa416
+#define A_MA_TP_THREAD_0_CLNT_EXP_RD_CYC_CNT_LO 0xa417
+#define A_MA_TP_THREAD_1_CLNT_EXP_RD_CYC_CNT_LO 0xa418
+#define A_MA_LE_CLNT_EXP_RD_CYC_CNT_LO 0xa419
+#define A_MA_CIM_CLNT_EXP_RD_CYC_CNT_LO 0xa41a
+#define A_MA_PCIE_CLNT_EXP_RD_CYC_CNT_LO 0xa41b
+#define A_MA_PM_TX_CLNT_EXP_RD_CYC_CNT_LO 0xa41c
+#define A_MA_PM_RX_CLNT_EXP_RD_CYC_CNT_LO 0xa41d
+#define A_MA_HMA_CLNT_EXP_RD_CYC_CNT_LO 0xa41e
+#define A_T6_MA_EDRAM0_WRDATA_CNT1 0xa800
+#define A_T6_MA_EDRAM0_WRDATA_CNT0 0xa801
+#define A_T6_MA_EDRAM1_WRDATA_CNT1 0xa802
+#define A_T6_MA_EDRAM1_WRDATA_CNT0 0xa803
+#define A_T6_MA_EXT_MEMORY0_WRDATA_CNT1 0xa804
+#define A_T6_MA_EXT_MEMORY0_WRDATA_CNT0 0xa805
+#define A_T6_MA_HOST_MEMORY_WRDATA_CNT1 0xa806
+#define A_T6_MA_HOST_MEMORY_WRDATA_CNT0 0xa807
+#define A_T6_MA_EXT_MEMORY1_WRDATA_CNT1 0xa808
+#define A_T6_MA_EXT_MEMORY1_WRDATA_CNT0 0xa809
+#define A_T6_MA_EDRAM0_RDDATA_CNT1 0xa80a
+#define A_T6_MA_EDRAM0_RDDATA_CNT0 0xa80b
+#define A_T6_MA_EDRAM1_RDDATA_CNT1 0xa80c
+#define A_T6_MA_EDRAM1_RDDATA_CNT0 0xa80d
+#define A_T6_MA_EXT_MEMORY0_RDDATA_CNT1 0xa80e
+#define A_T6_MA_EXT_MEMORY0_RDDATA_CNT0 0xa80f
+#define A_T6_MA_HOST_MEMORY_RDDATA_CNT1 0xa810
+#define A_T6_MA_HOST_MEMORY_RDDATA_CNT0 0xa811
+#define A_T6_MA_EXT_MEMORY1_RDDATA_CNT1 0xa812
+#define A_T6_MA_EXT_MEMORY1_RDDATA_CNT0 0xa813
+#define A_MA_SGE_THREAD_0_CLNT_ACT_WR_CYC_CNT_HI 0xac00
+#define A_MA_SGE_THREAD_0_CLNT_ACT_WR_CYC_CNT_LO 0xac01
+#define A_MA_SGE_THREAD_1_CLNT_ACT_WR_CYC_CNT_HI 0xac02
+#define A_MA_SGE_THREAD_1_CLNT_ACT_WR_CYC_CNT_LO 0xac03
+#define A_MA_ULP_TX_CLNT_ACT_WR_CYC_CNT_HI 0xac04
+#define A_MA_ULP_TX_CLNT_ACT_WR_CYC_CNT_LO 0xac05
+#define A_MA_ULP_RX_CLNT_ACT_WR_CYC_CNT_HI 0xac06
+#define A_MA_ULP_RX_CLNT_ACT_WR_CYC_CNT_LO 0xac07
+#define A_MA_ULP_TX_RX_CLNT_ACT_WR_CYC_CNT_HI 0xac08
+#define A_MA_ULP_TX_RX_CLNT_ACT_WR_CYC_CNT_LO 0xac09
+#define A_MA_TP_THREAD_0_CLNT_ACT_WR_CYC_CNT_HI 0xac0a
+#define A_MA_TP_THREAD_0_CLNT_ACT_WR_CYC_CNT_LO 0xac0b
+#define A_MA_TP_THREAD_1_CLNT_ACT_WR_CYC_CNT_HI 0xac0c
+#define A_MA_TP_THREAD_1_CLNT_ACT_WR_CYC_CNT_LO 0xac0d
+#define A_MA_LE_CLNT_ACT_WR_CYC_CNT_HI 0xac0e
+#define A_MA_LE_CLNT_ACT_WR_CYC_CNT_LO 0xac0f
+#define A_MA_CIM_CLNT_ACT_WR_CYC_CNT_HI 0xac10
+#define A_MA_CIM_CLNT_ACT_WR_CYC_CNT_LO 0xac11
+#define A_MA_PCIE_CLNT_ACT_WR_CYC_CNT_HI 0xac12
+#define A_MA_PCIE_CLNT_ACT_WR_CYC_CNT_LO 0xac13
+#define A_MA_PM_TX_CLNT_ACT_WR_CYC_CNT_HI 0xac14
+#define A_MA_PM_TX_CLNT_ACT_WR_CYC_CNT_LO 0xac15
+#define A_MA_PM_RX_CLNT_ACT_WR_CYC_CNT_HI 0xac16
+#define A_MA_PM_RX_CLNT_ACT_WR_CYC_CNT_LO 0xac17
+#define A_MA_HMA_CLNT_ACT_WR_CYC_CNT_HI 0xac18
+#define A_MA_HMA_CLNT_ACT_WR_CYC_CNT_LO 0xac19
+#define A_MA_SGE_THREAD_0_CLNT_WR_REQ_CNT 0xb000
+#define A_MA_SGE_THREAD_1_CLNT_WR_REQ_CNT 0xb001
+#define A_MA_ULP_TX_CLNT_WR_REQ_CNT 0xb002
+#define A_MA_ULP_RX_CLNT_WR_REQ_CNT 0xb003
+#define A_MA_ULP_TX_RX_CLNT_WR_REQ_CNT 0xb004
+#define A_MA_TP_THREAD_0_CLNT_WR_REQ_CNT 0xb005
+#define A_MA_TP_THREAD_1_CLNT_WR_REQ_CNT 0xb006
+#define A_MA_LE_CLNT_WR_REQ_CNT 0xb007
+#define A_MA_CIM_CLNT_WR_REQ_CNT 0xb008
+#define A_MA_PCIE_CLNT_WR_REQ_CNT 0xb009
+#define A_MA_PM_TX_CLNT_WR_REQ_CNT 0xb00a
+#define A_MA_PM_RX_CLNT_WR_REQ_CNT 0xb00b
+#define A_MA_HMA_CLNT_WR_REQ_CNT 0xb00c
+#define A_MA_SGE_THREAD_0_CLNT_RD_REQ_CNT 0xb00d
+#define A_MA_SGE_THREAD_1_CLNT_RD_REQ_CNT 0xb00e
+#define A_MA_ULP_TX_CLNT_RD_REQ_CNT 0xb00f
+#define A_MA_ULP_RX_CLNT_RD_REQ_CNT 0xb010
+#define A_MA_ULP_TX_RX_CLNT_RD_REQ_CNT 0xb011
+#define A_MA_TP_THREAD_0_CLNT_RD_REQ_CNT 0xb012
+#define A_MA_TP_THREAD_1_CLNT_RD_REQ_CNT 0xb013
+#define A_MA_LE_CLNT_RD_REQ_CNT 0xb014
+#define A_MA_CIM_CLNT_RD_REQ_CNT 0xb015
+#define A_MA_PCIE_CLNT_RD_REQ_CNT 0xb016
+#define A_MA_PM_TX_CLNT_RD_REQ_CNT 0xb017
+#define A_MA_PM_RX_CLNT_RD_REQ_CNT 0xb018
+#define A_MA_HMA_CLNT_RD_REQ_CNT 0xb019
+#define A_MA_SGE_THREAD_0_CLNT_EXP_RD_CYC_CNT_HI 0xb400
+#define A_MA_SGE_THREAD_1_CLNT_EXP_RD_CYC_CNT_HI 0xb401
+#define A_MA_ULP_TX_CLNT_EXP_RD_CYC_CNT_HI 0xb402
+#define A_MA_ULP_RX_CLNT_EXP_RD_CYC_CNT_HI 0xb403
+#define A_MA_ULP_TX_RX_CLNT_EXP_RD_CYC_CNT_HI 0xb404
+#define A_MA_TP_THREAD_0_CLNT_EXP_RD_CYC_CNT_HI 0xb405
+#define A_MA_TP_THREAD_1_CLNT_EXP_RD_CYC_CNT_HI 0xb406
+#define A_MA_LE_CLNT_EXP_RD_CYC_CNT_HI 0xb407
+#define A_MA_CIM_CLNT_EXP_RD_CYC_CNT_HI 0xb408
+#define A_MA_PCIE_CLNT_EXP_RD_CYC_CNT_HI 0xb409
+#define A_MA_PM_TX_CLNT_EXP_RD_CYC_CNT_HI 0xb40a
+#define A_MA_PM_RX_CLNT_EXP_RD_CYC_CNT_HI 0xb40b
+#define A_MA_HMA_CLNT_EXP_RD_CYC_CNT_HI 0xb40c
+#define A_MA_SGE_THREAD_0_CLNT_EXP_WR_CYC_CNT_HI 0xb40d
+#define A_MA_SGE_THREAD_1_CLNT_EXP_WR_CYC_CNT_HI 0xb40e
+#define A_MA_ULP_TX_CLNT_EXP_WR_CYC_CNT_HI 0xb40f
+#define A_MA_ULP_RX_CLNT_EXP_WR_CYC_CNT_HI 0xb410
+#define A_MA_ULP_TX_RX_CLNT_EXP_WR_CYC_CNT_HI 0xb411
+#define A_MA_TP_THREAD_0_CLNT_EXP_WR_CYC_CNT_HI 0xb412
+#define A_MA_TP_THREAD_1_CLNT_EXP_WR_CYC_CNT_HI 0xb413
+#define A_MA_LE_CLNT_EXP_WR_CYC_CNT_HI 0xb414
+#define A_MA_CIM_CLNT_EXP_WR_CYC_CNT_HI 0xb415
+#define A_MA_PCIE_CLNT_EXP_WR_CYC_CNT_HI 0xb416
+#define A_MA_PM_TX_CLNT_EXP_WR_CYC_CNT_HI 0xb417
+#define A_MA_PM_RX_CLNT_EXP_WR_CYC_CNT_HI 0xb418
+#define A_MA_HMA_CLNT_EXP_WR_CYC_CNT_HI 0xb419
+#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG1 0xe400
+
+#define S_WR_DATA_EXT_FIFO_CNT0    30
+#define M_WR_DATA_EXT_FIFO_CNT0    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT0(x) ((x) << S_WR_DATA_EXT_FIFO_CNT0)
+#define G_WR_DATA_EXT_FIFO_CNT0(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT0) & M_WR_DATA_EXT_FIFO_CNT0)
+
+#define S_WR_CMD_TAG_FIFO_CNT0    26
+#define M_WR_CMD_TAG_FIFO_CNT0    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT0(x) ((x) << S_WR_CMD_TAG_FIFO_CNT0)
+#define G_WR_CMD_TAG_FIFO_CNT0(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT0) & M_WR_CMD_TAG_FIFO_CNT0)
+
+#define S_WR_DATA_512B_FIFO_CNT0    18
+#define M_WR_DATA_512B_FIFO_CNT0    0xffU
+#define V_WR_DATA_512B_FIFO_CNT0(x) ((x) << S_WR_DATA_512B_FIFO_CNT0)
+#define G_WR_DATA_512B_FIFO_CNT0(x) (((x) >> S_WR_DATA_512B_FIFO_CNT0) & M_WR_DATA_512B_FIFO_CNT0)
+
+#define S_RD_DATA_ALIGN_FSM0    17
+#define V_RD_DATA_ALIGN_FSM0(x) ((x) << S_RD_DATA_ALIGN_FSM0)
+#define F_RD_DATA_ALIGN_FSM0    V_RD_DATA_ALIGN_FSM0(1U)
+
+#define S_RD_DATA_FETCH_FSM0    16
+#define V_RD_DATA_FETCH_FSM0(x) ((x) << S_RD_DATA_FETCH_FSM0)
+#define F_RD_DATA_FETCH_FSM0    V_RD_DATA_FETCH_FSM0(1U)
+
+#define S_COHERENCY_TX_FSM0    15
+#define V_COHERENCY_TX_FSM0(x) ((x) << S_COHERENCY_TX_FSM0)
+#define F_COHERENCY_TX_FSM0    V_COHERENCY_TX_FSM0(1U)
+
+#define S_COHERENCY_RX_FSM0    14
+#define V_COHERENCY_RX_FSM0(x) ((x) << S_COHERENCY_RX_FSM0)
+#define F_COHERENCY_RX_FSM0    V_COHERENCY_RX_FSM0(1U)
+
+#define S_ARB_REQ_FSM0    13
+#define V_ARB_REQ_FSM0(x) ((x) << S_ARB_REQ_FSM0)
+#define F_ARB_REQ_FSM0    V_ARB_REQ_FSM0(1U)
+
+#define S_CMD_SPLIT_FSM0    10
+#define M_CMD_SPLIT_FSM0    0x7U
+#define V_CMD_SPLIT_FSM0(x) ((x) << S_CMD_SPLIT_FSM0)
+#define G_CMD_SPLIT_FSM0(x) (((x) >> S_CMD_SPLIT_FSM0) & M_CMD_SPLIT_FSM0)
+
+#define A_MA_SGE_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG1 0xe420
+
+#define S_WR_DATA_EXT_FIFO_CNT1    30
+#define M_WR_DATA_EXT_FIFO_CNT1    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT1(x) ((x) << S_WR_DATA_EXT_FIFO_CNT1)
+#define G_WR_DATA_EXT_FIFO_CNT1(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT1) & M_WR_DATA_EXT_FIFO_CNT1)
+
+#define S_WR_CMD_TAG_FIFO_CNT1    26
+#define M_WR_CMD_TAG_FIFO_CNT1    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT1(x) ((x) << S_WR_CMD_TAG_FIFO_CNT1)
+#define G_WR_CMD_TAG_FIFO_CNT1(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT1) & M_WR_CMD_TAG_FIFO_CNT1)
+
+#define S_WR_DATA_512B_FIFO_CNT1    18
+#define M_WR_DATA_512B_FIFO_CNT1    0xffU
+#define V_WR_DATA_512B_FIFO_CNT1(x) ((x) << S_WR_DATA_512B_FIFO_CNT1)
+#define G_WR_DATA_512B_FIFO_CNT1(x) (((x) >> S_WR_DATA_512B_FIFO_CNT1) & M_WR_DATA_512B_FIFO_CNT1)
+
+#define S_RD_DATA_ALIGN_FSM1    17
+#define V_RD_DATA_ALIGN_FSM1(x) ((x) << S_RD_DATA_ALIGN_FSM1)
+#define F_RD_DATA_ALIGN_FSM1    V_RD_DATA_ALIGN_FSM1(1U)
+
+#define S_RD_DATA_FETCH_FSM1    16
+#define V_RD_DATA_FETCH_FSM1(x) ((x) << S_RD_DATA_FETCH_FSM1)
+#define F_RD_DATA_FETCH_FSM1    V_RD_DATA_FETCH_FSM1(1U)
+
+#define S_COHERENCY_TX_FSM1    15
+#define V_COHERENCY_TX_FSM1(x) ((x) << S_COHERENCY_TX_FSM1)
+#define F_COHERENCY_TX_FSM1    V_COHERENCY_TX_FSM1(1U)
+
+#define S_COHERENCY_RX_FSM1    14
+#define V_COHERENCY_RX_FSM1(x) ((x) << S_COHERENCY_RX_FSM1)
+#define F_COHERENCY_RX_FSM1    V_COHERENCY_RX_FSM1(1U)
+
+#define S_ARB_REQ_FSM1    13
+#define V_ARB_REQ_FSM1(x) ((x) << S_ARB_REQ_FSM1)
+#define F_ARB_REQ_FSM1    V_ARB_REQ_FSM1(1U)
+
+#define S_CMD_SPLIT_FSM1    10
+#define M_CMD_SPLIT_FSM1    0x7U
+#define V_CMD_SPLIT_FSM1(x) ((x) << S_CMD_SPLIT_FSM1)
+#define G_CMD_SPLIT_FSM1(x) (((x) >> S_CMD_SPLIT_FSM1) & M_CMD_SPLIT_FSM1)
+
+#define A_MA_ULP_TX_CLIENT_INTERFACE_INTERNAL_REG1 0xe440
+
+#define S_WR_DATA_EXT_FIFO_CNT2    30
+#define M_WR_DATA_EXT_FIFO_CNT2    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT2(x) ((x) << S_WR_DATA_EXT_FIFO_CNT2)
+#define G_WR_DATA_EXT_FIFO_CNT2(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT2) & M_WR_DATA_EXT_FIFO_CNT2)
+
+#define S_WR_CMD_TAG_FIFO_CNT2    26
+#define M_WR_CMD_TAG_FIFO_CNT2    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT2(x) ((x) << S_WR_CMD_TAG_FIFO_CNT2)
+#define G_WR_CMD_TAG_FIFO_CNT2(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT2) & M_WR_CMD_TAG_FIFO_CNT2)
+
+#define S_WR_DATA_512B_FIFO_CNT2    18
+#define M_WR_DATA_512B_FIFO_CNT2    0xffU
+#define V_WR_DATA_512B_FIFO_CNT2(x) ((x) << S_WR_DATA_512B_FIFO_CNT2)
+#define G_WR_DATA_512B_FIFO_CNT2(x) (((x) >> S_WR_DATA_512B_FIFO_CNT2) & M_WR_DATA_512B_FIFO_CNT2)
+
+#define S_RD_DATA_ALIGN_FSM2    17
+#define V_RD_DATA_ALIGN_FSM2(x) ((x) << S_RD_DATA_ALIGN_FSM2)
+#define F_RD_DATA_ALIGN_FSM2    V_RD_DATA_ALIGN_FSM2(1U)
+
+#define S_RD_DATA_FETCH_FSM2    16
+#define V_RD_DATA_FETCH_FSM2(x) ((x) << S_RD_DATA_FETCH_FSM2)
+#define F_RD_DATA_FETCH_FSM2    V_RD_DATA_FETCH_FSM2(1U)
+
+#define S_COHERENCY_TX_FSM2    15
+#define V_COHERENCY_TX_FSM2(x) ((x) << S_COHERENCY_TX_FSM2)
+#define F_COHERENCY_TX_FSM2    V_COHERENCY_TX_FSM2(1U)
+
+#define S_COHERENCY_RX_FSM2    14
+#define V_COHERENCY_RX_FSM2(x) ((x) << S_COHERENCY_RX_FSM2)
+#define F_COHERENCY_RX_FSM2    V_COHERENCY_RX_FSM2(1U)
+
+#define S_ARB_REQ_FSM2    13
+#define V_ARB_REQ_FSM2(x) ((x) << S_ARB_REQ_FSM2)
+#define F_ARB_REQ_FSM2    V_ARB_REQ_FSM2(1U)
+
+#define S_CMD_SPLIT_FSM2    10
+#define M_CMD_SPLIT_FSM2    0x7U
+#define V_CMD_SPLIT_FSM2(x) ((x) << S_CMD_SPLIT_FSM2)
+#define G_CMD_SPLIT_FSM2(x) (((x) >> S_CMD_SPLIT_FSM2) & M_CMD_SPLIT_FSM2)
+
+#define A_MA_ULP_RX_CLIENT_INTERFACE_INTERNAL_REG1 0xe460
+
+#define S_WR_DATA_EXT_FIFO_CNT3    30
+#define M_WR_DATA_EXT_FIFO_CNT3    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT3(x) ((x) << S_WR_DATA_EXT_FIFO_CNT3)
+#define G_WR_DATA_EXT_FIFO_CNT3(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT3) & M_WR_DATA_EXT_FIFO_CNT3)
+
+#define S_WR_CMD_TAG_FIFO_CNT3    26
+#define M_WR_CMD_TAG_FIFO_CNT3    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT3(x) ((x) << S_WR_CMD_TAG_FIFO_CNT3)
+#define G_WR_CMD_TAG_FIFO_CNT3(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT3) & M_WR_CMD_TAG_FIFO_CNT3)
+
+#define S_WR_DATA_512B_FIFO_CNT3    18
+#define M_WR_DATA_512B_FIFO_CNT3    0xffU
+#define V_WR_DATA_512B_FIFO_CNT3(x) ((x) << S_WR_DATA_512B_FIFO_CNT3)
+#define G_WR_DATA_512B_FIFO_CNT3(x) (((x) >> S_WR_DATA_512B_FIFO_CNT3) & M_WR_DATA_512B_FIFO_CNT3)
+
+#define S_RD_DATA_ALIGN_FSM3    17
+#define V_RD_DATA_ALIGN_FSM3(x) ((x) << S_RD_DATA_ALIGN_FSM3)
+#define F_RD_DATA_ALIGN_FSM3    V_RD_DATA_ALIGN_FSM3(1U)
+
+#define S_RD_DATA_FETCH_FSM3    16
+#define V_RD_DATA_FETCH_FSM3(x) ((x) << S_RD_DATA_FETCH_FSM3)
+#define F_RD_DATA_FETCH_FSM3    V_RD_DATA_FETCH_FSM3(1U)
+
+#define S_COHERENCY_TX_FSM3    15
+#define V_COHERENCY_TX_FSM3(x) ((x) << S_COHERENCY_TX_FSM3)
+#define F_COHERENCY_TX_FSM3    V_COHERENCY_TX_FSM3(1U)
+
+#define S_COHERENCY_RX_FSM3    14
+#define V_COHERENCY_RX_FSM3(x) ((x) << S_COHERENCY_RX_FSM3)
+#define F_COHERENCY_RX_FSM3    V_COHERENCY_RX_FSM3(1U)
+
+#define S_ARB_REQ_FSM3    13
+#define V_ARB_REQ_FSM3(x) ((x) << S_ARB_REQ_FSM3)
+#define F_ARB_REQ_FSM3    V_ARB_REQ_FSM3(1U)
+
+#define S_CMD_SPLIT_FSM3    10
+#define M_CMD_SPLIT_FSM3    0x7U
+#define V_CMD_SPLIT_FSM3(x) ((x) << S_CMD_SPLIT_FSM3)
+#define G_CMD_SPLIT_FSM3(x) (((x) >> S_CMD_SPLIT_FSM3) & M_CMD_SPLIT_FSM3)
+
+#define A_MA_ULP_TX_RX_CLIENT_INTERFACE_INTERNAL_REG1 0xe480
+
+#define S_WR_DATA_EXT_FIFO_CNT4    30
+#define M_WR_DATA_EXT_FIFO_CNT4    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT4(x) ((x) << S_WR_DATA_EXT_FIFO_CNT4)
+#define G_WR_DATA_EXT_FIFO_CNT4(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT4) & M_WR_DATA_EXT_FIFO_CNT4)
+
+#define S_WR_CMD_TAG_FIFO_CNT4    26
+#define M_WR_CMD_TAG_FIFO_CNT4    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT4(x) ((x) << S_WR_CMD_TAG_FIFO_CNT4)
+#define G_WR_CMD_TAG_FIFO_CNT4(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT4) & M_WR_CMD_TAG_FIFO_CNT4)
+
+#define S_WR_DATA_512B_FIFO_CNT4    18
+#define M_WR_DATA_512B_FIFO_CNT4    0xffU
+#define V_WR_DATA_512B_FIFO_CNT4(x) ((x) << S_WR_DATA_512B_FIFO_CNT4)
+#define G_WR_DATA_512B_FIFO_CNT4(x) (((x) >> S_WR_DATA_512B_FIFO_CNT4) & M_WR_DATA_512B_FIFO_CNT4)
+
+#define S_RD_DATA_ALIGN_FSM4    17
+#define V_RD_DATA_ALIGN_FSM4(x) ((x) << S_RD_DATA_ALIGN_FSM4)
+#define F_RD_DATA_ALIGN_FSM4    V_RD_DATA_ALIGN_FSM4(1U)
+
+#define S_RD_DATA_FETCH_FSM4    16
+#define V_RD_DATA_FETCH_FSM4(x) ((x) << S_RD_DATA_FETCH_FSM4)
+#define F_RD_DATA_FETCH_FSM4    V_RD_DATA_FETCH_FSM4(1U)
+
+#define S_COHERENCY_TX_FSM4    15
+#define V_COHERENCY_TX_FSM4(x) ((x) << S_COHERENCY_TX_FSM4)
+#define F_COHERENCY_TX_FSM4    V_COHERENCY_TX_FSM4(1U)
+
+#define S_COHERENCY_RX_FSM4    14
+#define V_COHERENCY_RX_FSM4(x) ((x) << S_COHERENCY_RX_FSM4)
+#define F_COHERENCY_RX_FSM4    V_COHERENCY_RX_FSM4(1U)
+
+#define S_ARB_REQ_FSM4    13
+#define V_ARB_REQ_FSM4(x) ((x) << S_ARB_REQ_FSM4)
+#define F_ARB_REQ_FSM4    V_ARB_REQ_FSM4(1U)
+
+#define S_CMD_SPLIT_FSM4    10
+#define M_CMD_SPLIT_FSM4    0x7U
+#define V_CMD_SPLIT_FSM4(x) ((x) << S_CMD_SPLIT_FSM4)
+#define G_CMD_SPLIT_FSM4(x) (((x) >> S_CMD_SPLIT_FSM4) & M_CMD_SPLIT_FSM4)
+
+#define A_MA_TP_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG1 0xe4a0
+
+#define S_WR_DATA_EXT_FIFO_CNT5    30
+#define M_WR_DATA_EXT_FIFO_CNT5    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT5(x) ((x) << S_WR_DATA_EXT_FIFO_CNT5)
+#define G_WR_DATA_EXT_FIFO_CNT5(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT5) & M_WR_DATA_EXT_FIFO_CNT5)
+
+#define S_WR_CMD_TAG_FIFO_CNT5    26
+#define M_WR_CMD_TAG_FIFO_CNT5    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT5(x) ((x) << S_WR_CMD_TAG_FIFO_CNT5)
+#define G_WR_CMD_TAG_FIFO_CNT5(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT5) & M_WR_CMD_TAG_FIFO_CNT5)
+
+#define S_WR_DATA_512B_FIFO_CNT5    18
+#define M_WR_DATA_512B_FIFO_CNT5    0xffU
+#define V_WR_DATA_512B_FIFO_CNT5(x) ((x) << S_WR_DATA_512B_FIFO_CNT5)
+#define G_WR_DATA_512B_FIFO_CNT5(x) (((x) >> S_WR_DATA_512B_FIFO_CNT5) & M_WR_DATA_512B_FIFO_CNT5)
+
+#define S_RD_DATA_ALIGN_FSM5    17
+#define V_RD_DATA_ALIGN_FSM5(x) ((x) << S_RD_DATA_ALIGN_FSM5)
+#define F_RD_DATA_ALIGN_FSM5    V_RD_DATA_ALIGN_FSM5(1U)
+
+#define S_RD_DATA_FETCH_FSM5    16
+#define V_RD_DATA_FETCH_FSM5(x) ((x) << S_RD_DATA_FETCH_FSM5)
+#define F_RD_DATA_FETCH_FSM5    V_RD_DATA_FETCH_FSM5(1U)
+
+#define S_COHERENCY_TX_FSM5    15
+#define V_COHERENCY_TX_FSM5(x) ((x) << S_COHERENCY_TX_FSM5)
+#define F_COHERENCY_TX_FSM5    V_COHERENCY_TX_FSM5(1U)
+
+#define S_COHERENCY_RX_FSM5    14
+#define V_COHERENCY_RX_FSM5(x) ((x) << S_COHERENCY_RX_FSM5)
+#define F_COHERENCY_RX_FSM5    V_COHERENCY_RX_FSM5(1U)
+
+#define S_ARB_REQ_FSM5    13
+#define V_ARB_REQ_FSM5(x) ((x) << S_ARB_REQ_FSM5)
+#define F_ARB_REQ_FSM5    V_ARB_REQ_FSM5(1U)
+
+#define S_CMD_SPLIT_FSM5    10
+#define M_CMD_SPLIT_FSM5    0x7U
+#define V_CMD_SPLIT_FSM5(x) ((x) << S_CMD_SPLIT_FSM5)
+#define G_CMD_SPLIT_FSM5(x) (((x) >> S_CMD_SPLIT_FSM5) & M_CMD_SPLIT_FSM5)
+
+#define A_MA_TP_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG1 0xe4c0
+
+#define S_WR_DATA_EXT_FIFO_CNT6    30
+#define M_WR_DATA_EXT_FIFO_CNT6    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT6(x) ((x) << S_WR_DATA_EXT_FIFO_CNT6)
+#define G_WR_DATA_EXT_FIFO_CNT6(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT6) & M_WR_DATA_EXT_FIFO_CNT6)
+
+#define S_WR_CMD_TAG_FIFO_CNT6    26
+#define M_WR_CMD_TAG_FIFO_CNT6    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT6(x) ((x) << S_WR_CMD_TAG_FIFO_CNT6)
+#define G_WR_CMD_TAG_FIFO_CNT6(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT6) & M_WR_CMD_TAG_FIFO_CNT6)
+
+#define S_WR_DATA_512B_FIFO_CNT6    18
+#define M_WR_DATA_512B_FIFO_CNT6    0xffU
+#define V_WR_DATA_512B_FIFO_CNT6(x) ((x) << S_WR_DATA_512B_FIFO_CNT6)
+#define G_WR_DATA_512B_FIFO_CNT6(x) (((x) >> S_WR_DATA_512B_FIFO_CNT6) & M_WR_DATA_512B_FIFO_CNT6)
+
+#define S_RD_DATA_ALIGN_FSM6    17
+#define V_RD_DATA_ALIGN_FSM6(x) ((x) << S_RD_DATA_ALIGN_FSM6)
+#define F_RD_DATA_ALIGN_FSM6    V_RD_DATA_ALIGN_FSM6(1U)
+
+#define S_RD_DATA_FETCH_FSM6    16
+#define V_RD_DATA_FETCH_FSM6(x) ((x) << S_RD_DATA_FETCH_FSM6)
+#define F_RD_DATA_FETCH_FSM6    V_RD_DATA_FETCH_FSM6(1U)
+
+#define S_COHERENCY_TX_FSM6    15
+#define V_COHERENCY_TX_FSM6(x) ((x) << S_COHERENCY_TX_FSM6)
+#define F_COHERENCY_TX_FSM6    V_COHERENCY_TX_FSM6(1U)
+
+#define S_COHERENCY_RX_FSM6    14
+#define V_COHERENCY_RX_FSM6(x) ((x) << S_COHERENCY_RX_FSM6)
+#define F_COHERENCY_RX_FSM6    V_COHERENCY_RX_FSM6(1U)
+
+#define S_ARB_REQ_FSM6    13
+#define V_ARB_REQ_FSM6(x) ((x) << S_ARB_REQ_FSM6)
+#define F_ARB_REQ_FSM6    V_ARB_REQ_FSM6(1U)
+
+#define S_CMD_SPLIT_FSM6    10
+#define M_CMD_SPLIT_FSM6    0x7U
+#define V_CMD_SPLIT_FSM6(x) ((x) << S_CMD_SPLIT_FSM6)
+#define G_CMD_SPLIT_FSM6(x) (((x) >> S_CMD_SPLIT_FSM6) & M_CMD_SPLIT_FSM6)
+
+#define A_MA_LE_CLIENT_INTERFACE_INTERNAL_REG1 0xe4e0
+
+#define S_WR_DATA_EXT_FIFO_CNT7    30
+#define M_WR_DATA_EXT_FIFO_CNT7    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT7(x) ((x) << S_WR_DATA_EXT_FIFO_CNT7)
+#define G_WR_DATA_EXT_FIFO_CNT7(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT7) & M_WR_DATA_EXT_FIFO_CNT7)
+
+#define S_WR_CMD_TAG_FIFO_CNT7    26
+#define M_WR_CMD_TAG_FIFO_CNT7    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT7(x) ((x) << S_WR_CMD_TAG_FIFO_CNT7)
+#define G_WR_CMD_TAG_FIFO_CNT7(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT7) & M_WR_CMD_TAG_FIFO_CNT7)
+
+#define S_WR_DATA_512B_FIFO_CNT7    18
+#define M_WR_DATA_512B_FIFO_CNT7    0xffU
+#define V_WR_DATA_512B_FIFO_CNT7(x) ((x) << S_WR_DATA_512B_FIFO_CNT7)
+#define G_WR_DATA_512B_FIFO_CNT7(x) (((x) >> S_WR_DATA_512B_FIFO_CNT7) & M_WR_DATA_512B_FIFO_CNT7)
+
+#define S_RD_DATA_ALIGN_FSM7    17
+#define V_RD_DATA_ALIGN_FSM7(x) ((x) << S_RD_DATA_ALIGN_FSM7)
+#define F_RD_DATA_ALIGN_FSM7    V_RD_DATA_ALIGN_FSM7(1U)
+
+#define S_RD_DATA_FETCH_FSM7    16
+#define V_RD_DATA_FETCH_FSM7(x) ((x) << S_RD_DATA_FETCH_FSM7)
+#define F_RD_DATA_FETCH_FSM7    V_RD_DATA_FETCH_FSM7(1U)
+
+#define S_COHERENCY_TX_FSM7    15
+#define V_COHERENCY_TX_FSM7(x) ((x) << S_COHERENCY_TX_FSM7)
+#define F_COHERENCY_TX_FSM7    V_COHERENCY_TX_FSM7(1U)
+
+#define S_COHERENCY_RX_FSM7    14
+#define V_COHERENCY_RX_FSM7(x) ((x) << S_COHERENCY_RX_FSM7)
+#define F_COHERENCY_RX_FSM7    V_COHERENCY_RX_FSM7(1U)
+
+#define S_ARB_REQ_FSM7    13
+#define V_ARB_REQ_FSM7(x) ((x) << S_ARB_REQ_FSM7)
+#define F_ARB_REQ_FSM7    V_ARB_REQ_FSM7(1U)
+
+#define S_CMD_SPLIT_FSM7    10
+#define M_CMD_SPLIT_FSM7    0x7U
+#define V_CMD_SPLIT_FSM7(x) ((x) << S_CMD_SPLIT_FSM7)
+#define G_CMD_SPLIT_FSM7(x) (((x) >> S_CMD_SPLIT_FSM7) & M_CMD_SPLIT_FSM7)
+
+#define A_MA_CIM_CLIENT_INTERFACE_INTERNAL_REG1 0xe500
+
+#define S_WR_DATA_EXT_FIFO_CNT8    30
+#define M_WR_DATA_EXT_FIFO_CNT8    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT8(x) ((x) << S_WR_DATA_EXT_FIFO_CNT8)
+#define G_WR_DATA_EXT_FIFO_CNT8(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT8) & M_WR_DATA_EXT_FIFO_CNT8)
+
+#define S_WR_CMD_TAG_FIFO_CNT8    26
+#define M_WR_CMD_TAG_FIFO_CNT8    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT8(x) ((x) << S_WR_CMD_TAG_FIFO_CNT8)
+#define G_WR_CMD_TAG_FIFO_CNT8(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT8) & M_WR_CMD_TAG_FIFO_CNT8)
+
+#define S_WR_DATA_512B_FIFO_CNT8    18
+#define M_WR_DATA_512B_FIFO_CNT8    0xffU
+#define V_WR_DATA_512B_FIFO_CNT8(x) ((x) << S_WR_DATA_512B_FIFO_CNT8)
+#define G_WR_DATA_512B_FIFO_CNT8(x) (((x) >> S_WR_DATA_512B_FIFO_CNT8) & M_WR_DATA_512B_FIFO_CNT8)
+
+#define S_RD_DATA_ALIGN_FSM8    17
+#define V_RD_DATA_ALIGN_FSM8(x) ((x) << S_RD_DATA_ALIGN_FSM8)
+#define F_RD_DATA_ALIGN_FSM8    V_RD_DATA_ALIGN_FSM8(1U)
+
+#define S_RD_DATA_FETCH_FSM8    16
+#define V_RD_DATA_FETCH_FSM8(x) ((x) << S_RD_DATA_FETCH_FSM8)
+#define F_RD_DATA_FETCH_FSM8    V_RD_DATA_FETCH_FSM8(1U)
+
+#define S_COHERENCY_TX_FSM8    15
+#define V_COHERENCY_TX_FSM8(x) ((x) << S_COHERENCY_TX_FSM8)
+#define F_COHERENCY_TX_FSM8    V_COHERENCY_TX_FSM8(1U)
+
+#define S_COHERENCY_RX_FSM8    14
+#define V_COHERENCY_RX_FSM8(x) ((x) << S_COHERENCY_RX_FSM8)
+#define F_COHERENCY_RX_FSM8    V_COHERENCY_RX_FSM8(1U)
+
+#define S_ARB_REQ_FSM8    13
+#define V_ARB_REQ_FSM8(x) ((x) << S_ARB_REQ_FSM8)
+#define F_ARB_REQ_FSM8    V_ARB_REQ_FSM8(1U)
+
+#define S_CMD_SPLIT_FSM8    10
+#define M_CMD_SPLIT_FSM8    0x7U
+#define V_CMD_SPLIT_FSM8(x) ((x) << S_CMD_SPLIT_FSM8)
+#define G_CMD_SPLIT_FSM8(x) (((x) >> S_CMD_SPLIT_FSM8) & M_CMD_SPLIT_FSM8)
+
+#define A_MA_PCIE_CLIENT_INTERFACE_INTERNAL_REG1 0xe520
+
+#define S_WR_DATA_EXT_FIFO_CNT9    30
+#define M_WR_DATA_EXT_FIFO_CNT9    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT9(x) ((x) << S_WR_DATA_EXT_FIFO_CNT9)
+#define G_WR_DATA_EXT_FIFO_CNT9(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT9) & M_WR_DATA_EXT_FIFO_CNT9)
+
+#define S_WR_CMD_TAG_FIFO_CNT9    26
+#define M_WR_CMD_TAG_FIFO_CNT9    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT9(x) ((x) << S_WR_CMD_TAG_FIFO_CNT9)
+#define G_WR_CMD_TAG_FIFO_CNT9(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT9) & M_WR_CMD_TAG_FIFO_CNT9)
+
+#define S_WR_DATA_512B_FIFO_CNT9    18
+#define M_WR_DATA_512B_FIFO_CNT9    0xffU
+#define V_WR_DATA_512B_FIFO_CNT9(x) ((x) << S_WR_DATA_512B_FIFO_CNT9)
+#define G_WR_DATA_512B_FIFO_CNT9(x) (((x) >> S_WR_DATA_512B_FIFO_CNT9) & M_WR_DATA_512B_FIFO_CNT9)
+
+#define S_RD_DATA_ALIGN_FSM9    17
+#define V_RD_DATA_ALIGN_FSM9(x) ((x) << S_RD_DATA_ALIGN_FSM9)
+#define F_RD_DATA_ALIGN_FSM9    V_RD_DATA_ALIGN_FSM9(1U)
+
+#define S_RD_DATA_FETCH_FSM9    16
+#define V_RD_DATA_FETCH_FSM9(x) ((x) << S_RD_DATA_FETCH_FSM9)
+#define F_RD_DATA_FETCH_FSM9    V_RD_DATA_FETCH_FSM9(1U)
+
+#define S_COHERENCY_TX_FSM9    15
+#define V_COHERENCY_TX_FSM9(x) ((x) << S_COHERENCY_TX_FSM9)
+#define F_COHERENCY_TX_FSM9    V_COHERENCY_TX_FSM9(1U)
+
+#define S_COHERENCY_RX_FSM9    14
+#define V_COHERENCY_RX_FSM9(x) ((x) << S_COHERENCY_RX_FSM9)
+#define F_COHERENCY_RX_FSM9    V_COHERENCY_RX_FSM9(1U)
+
+#define S_ARB_REQ_FSM9    13
+#define V_ARB_REQ_FSM9(x) ((x) << S_ARB_REQ_FSM9)
+#define F_ARB_REQ_FSM9    V_ARB_REQ_FSM9(1U)
+
+#define S_CMD_SPLIT_FSM9    10
+#define M_CMD_SPLIT_FSM9    0x7U
+#define V_CMD_SPLIT_FSM9(x) ((x) << S_CMD_SPLIT_FSM9)
+#define G_CMD_SPLIT_FSM9(x) (((x) >> S_CMD_SPLIT_FSM9) & M_CMD_SPLIT_FSM9)
+
+#define A_MA_PM_TX_CLIENT_INTERFACE_INTERNAL_REG1 0xe540
+
+#define S_WR_DATA_EXT_FIFO_CNT10    30
+#define M_WR_DATA_EXT_FIFO_CNT10    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT10(x) ((x) << S_WR_DATA_EXT_FIFO_CNT10)
+#define G_WR_DATA_EXT_FIFO_CNT10(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT10) & M_WR_DATA_EXT_FIFO_CNT10)
+
+#define S_WR_CMD_TAG_FIFO_CNT10    26
+#define M_WR_CMD_TAG_FIFO_CNT10    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT10(x) ((x) << S_WR_CMD_TAG_FIFO_CNT10)
+#define G_WR_CMD_TAG_FIFO_CNT10(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT10) & M_WR_CMD_TAG_FIFO_CNT10)
+
+#define S_WR_DATA_512B_FIFO_CNT10    18
+#define M_WR_DATA_512B_FIFO_CNT10    0xffU
+#define V_WR_DATA_512B_FIFO_CNT10(x) ((x) << S_WR_DATA_512B_FIFO_CNT10)
+#define G_WR_DATA_512B_FIFO_CNT10(x) (((x) >> S_WR_DATA_512B_FIFO_CNT10) & M_WR_DATA_512B_FIFO_CNT10)
+
+#define S_RD_DATA_ALIGN_FSM10    17
+#define V_RD_DATA_ALIGN_FSM10(x) ((x) << S_RD_DATA_ALIGN_FSM10)
+#define F_RD_DATA_ALIGN_FSM10    V_RD_DATA_ALIGN_FSM10(1U)
+
+#define S_RD_DATA_FETCH_FSM10    16
+#define V_RD_DATA_FETCH_FSM10(x) ((x) << S_RD_DATA_FETCH_FSM10)
+#define F_RD_DATA_FETCH_FSM10    V_RD_DATA_FETCH_FSM10(1U)
+
+#define S_COHERENCY_TX_FSM10    15
+#define V_COHERENCY_TX_FSM10(x) ((x) << S_COHERENCY_TX_FSM10)
+#define F_COHERENCY_TX_FSM10    V_COHERENCY_TX_FSM10(1U)
+
+#define S_COHERENCY_RX_FSM10    14
+#define V_COHERENCY_RX_FSM10(x) ((x) << S_COHERENCY_RX_FSM10)
+#define F_COHERENCY_RX_FSM10    V_COHERENCY_RX_FSM10(1U)
+
+#define S_ARB_REQ_FSM10    13
+#define V_ARB_REQ_FSM10(x) ((x) << S_ARB_REQ_FSM10)
+#define F_ARB_REQ_FSM10    V_ARB_REQ_FSM10(1U)
+
+#define S_CMD_SPLIT_FSM10    10
+#define M_CMD_SPLIT_FSM10    0x7U
+#define V_CMD_SPLIT_FSM10(x) ((x) << S_CMD_SPLIT_FSM10)
+#define G_CMD_SPLIT_FSM10(x) (((x) >> S_CMD_SPLIT_FSM10) & M_CMD_SPLIT_FSM10)
+
+#define A_MA_PM_RX_CLIENT_INTERFACE_INTERNAL_REG1 0xe560
+
+#define S_WR_DATA_EXT_FIFO_CNT11    30
+#define M_WR_DATA_EXT_FIFO_CNT11    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT11(x) ((x) << S_WR_DATA_EXT_FIFO_CNT11)
+#define G_WR_DATA_EXT_FIFO_CNT11(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT11) & M_WR_DATA_EXT_FIFO_CNT11)
+
+#define S_WR_CMD_TAG_FIFO_CNT11    26
+#define M_WR_CMD_TAG_FIFO_CNT11    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT11(x) ((x) << S_WR_CMD_TAG_FIFO_CNT11)
+#define G_WR_CMD_TAG_FIFO_CNT11(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT11) & M_WR_CMD_TAG_FIFO_CNT11)
+
+#define S_WR_DATA_512B_FIFO_CNT11    18
+#define M_WR_DATA_512B_FIFO_CNT11    0xffU
+#define V_WR_DATA_512B_FIFO_CNT11(x) ((x) << S_WR_DATA_512B_FIFO_CNT11)
+#define G_WR_DATA_512B_FIFO_CNT11(x) (((x) >> S_WR_DATA_512B_FIFO_CNT11) & M_WR_DATA_512B_FIFO_CNT11)
+
+#define S_RD_DATA_ALIGN_FSM11    17
+#define V_RD_DATA_ALIGN_FSM11(x) ((x) << S_RD_DATA_ALIGN_FSM11)
+#define F_RD_DATA_ALIGN_FSM11    V_RD_DATA_ALIGN_FSM11(1U)
+
+#define S_RD_DATA_FETCH_FSM11    16
+#define V_RD_DATA_FETCH_FSM11(x) ((x) << S_RD_DATA_FETCH_FSM11)
+#define F_RD_DATA_FETCH_FSM11    V_RD_DATA_FETCH_FSM11(1U)
+
+#define S_COHERENCY_TX_FSM11    15
+#define V_COHERENCY_TX_FSM11(x) ((x) << S_COHERENCY_TX_FSM11)
+#define F_COHERENCY_TX_FSM11    V_COHERENCY_TX_FSM11(1U)
+
+#define S_COHERENCY_RX_FSM11    14
+#define V_COHERENCY_RX_FSM11(x) ((x) << S_COHERENCY_RX_FSM11)
+#define F_COHERENCY_RX_FSM11    V_COHERENCY_RX_FSM11(1U)
+
+#define S_ARB_REQ_FSM11    13
+#define V_ARB_REQ_FSM11(x) ((x) << S_ARB_REQ_FSM11)
+#define F_ARB_REQ_FSM11    V_ARB_REQ_FSM11(1U)
+
+#define S_CMD_SPLIT_FSM11    10
+#define M_CMD_SPLIT_FSM11    0x7U
+#define V_CMD_SPLIT_FSM11(x) ((x) << S_CMD_SPLIT_FSM11)
+#define G_CMD_SPLIT_FSM11(x) (((x) >> S_CMD_SPLIT_FSM11) & M_CMD_SPLIT_FSM11)
+
+#define A_MA_HMA_CLIENT_INTERFACE_INTERNAL_REG1 0xe580
+
+#define S_WR_DATA_EXT_FIFO_CNT12    30
+#define M_WR_DATA_EXT_FIFO_CNT12    0x3U
+#define V_WR_DATA_EXT_FIFO_CNT12(x) ((x) << S_WR_DATA_EXT_FIFO_CNT12)
+#define G_WR_DATA_EXT_FIFO_CNT12(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT12) & M_WR_DATA_EXT_FIFO_CNT12)
+
+#define S_WR_CMD_TAG_FIFO_CNT12    26
+#define M_WR_CMD_TAG_FIFO_CNT12    0xfU
+#define V_WR_CMD_TAG_FIFO_CNT12(x) ((x) << S_WR_CMD_TAG_FIFO_CNT12)
+#define G_WR_CMD_TAG_FIFO_CNT12(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT12) & M_WR_CMD_TAG_FIFO_CNT12)
+
+#define S_WR_DATA_512B_FIFO_CNT12    18
+#define M_WR_DATA_512B_FIFO_CNT12    0xffU
+#define V_WR_DATA_512B_FIFO_CNT12(x) ((x) << S_WR_DATA_512B_FIFO_CNT12)
+#define G_WR_DATA_512B_FIFO_CNT12(x) (((x) >> S_WR_DATA_512B_FIFO_CNT12) & M_WR_DATA_512B_FIFO_CNT12)
+
+#define S_RD_DATA_ALIGN_FSM12    17
+#define V_RD_DATA_ALIGN_FSM12(x) ((x) << S_RD_DATA_ALIGN_FSM12)
+#define F_RD_DATA_ALIGN_FSM12    V_RD_DATA_ALIGN_FSM12(1U)
+
+#define S_RD_DATA_FETCH_FSM12    16
+#define V_RD_DATA_FETCH_FSM12(x) ((x) << S_RD_DATA_FETCH_FSM12)
+#define F_RD_DATA_FETCH_FSM12    V_RD_DATA_FETCH_FSM12(1U)
+
+#define S_COHERENCY_TX_FSM12    15
+#define V_COHERENCY_TX_FSM12(x) ((x) << S_COHERENCY_TX_FSM12)
+#define F_COHERENCY_TX_FSM12    V_COHERENCY_TX_FSM12(1U)
+
+#define S_COHERENCY_RX_FSM12    14
+#define V_COHERENCY_RX_FSM12(x) ((x) << S_COHERENCY_RX_FSM12)
+#define F_COHERENCY_RX_FSM12    V_COHERENCY_RX_FSM12(1U)
+
+#define S_ARB_REQ_FSM12    13
+#define V_ARB_REQ_FSM12(x) ((x) << S_ARB_REQ_FSM12)
+#define F_ARB_REQ_FSM12    V_ARB_REQ_FSM12(1U)
+
+#define S_CMD_SPLIT_FSM12    10
+#define M_CMD_SPLIT_FSM12    0x7U
+#define V_CMD_SPLIT_FSM12(x) ((x) << S_CMD_SPLIT_FSM12)
+#define G_CMD_SPLIT_FSM12(x) (((x) >> S_CMD_SPLIT_FSM12) & M_CMD_SPLIT_FSM12)
+
+#define A_MA_TARGET_0_ARBITER_INTERFACE_INTERNAL_REG1 0xe5a0
+
+#define S_RD_CMD_TAG_FIFO_CNT0    8
+#define M_RD_CMD_TAG_FIFO_CNT0    0xffU
+#define V_RD_CMD_TAG_FIFO_CNT0(x) ((x) << S_RD_CMD_TAG_FIFO_CNT0)
+#define G_RD_CMD_TAG_FIFO_CNT0(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT0) & M_RD_CMD_TAG_FIFO_CNT0)
+
+#define S_RD_DATA_FIFO_CNT0    0
+#define M_RD_DATA_FIFO_CNT0    0xffU
+#define V_RD_DATA_FIFO_CNT0(x) ((x) << S_RD_DATA_FIFO_CNT0)
+#define G_RD_DATA_FIFO_CNT0(x) (((x) >> S_RD_DATA_FIFO_CNT0) & M_RD_DATA_FIFO_CNT0)
+
+#define A_MA_TARGET_1_ARBITER_INTERFACE_INTERNAL_REG1 0xe5c0
+
+#define S_RD_CMD_TAG_FIFO_CNT1    8
+#define M_RD_CMD_TAG_FIFO_CNT1    0xffU
+#define V_RD_CMD_TAG_FIFO_CNT1(x) ((x) << S_RD_CMD_TAG_FIFO_CNT1)
+#define G_RD_CMD_TAG_FIFO_CNT1(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT1) & M_RD_CMD_TAG_FIFO_CNT1)
+
+#define S_RD_DATA_FIFO_CNT1    0
+#define M_RD_DATA_FIFO_CNT1    0xffU
+#define V_RD_DATA_FIFO_CNT1(x) ((x) << S_RD_DATA_FIFO_CNT1)
+#define G_RD_DATA_FIFO_CNT1(x) (((x) >> S_RD_DATA_FIFO_CNT1) & M_RD_DATA_FIFO_CNT1)
+
+#define A_MA_TARGET_2_ARBITER_INTERFACE_INTERNAL_REG1 0xe5e0
+
+#define S_RD_CMD_TAG_FIFO_CNT2    8
+#define M_RD_CMD_TAG_FIFO_CNT2    0xffU
+#define V_RD_CMD_TAG_FIFO_CNT2(x) ((x) << S_RD_CMD_TAG_FIFO_CNT2)
+#define G_RD_CMD_TAG_FIFO_CNT2(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT2) & M_RD_CMD_TAG_FIFO_CNT2)
+
+#define S_RD_DATA_FIFO_CNT2    0
+#define M_RD_DATA_FIFO_CNT2    0xffU
+#define V_RD_DATA_FIFO_CNT2(x) ((x) << S_RD_DATA_FIFO_CNT2)
+#define G_RD_DATA_FIFO_CNT2(x) (((x) >> S_RD_DATA_FIFO_CNT2) & M_RD_DATA_FIFO_CNT2)
+
+#define A_MA_TARGET_3_ARBITER_INTERFACE_INTERNAL_REG1 0xe600
+
+#define S_RD_CMD_TAG_FIFO_CNT3    8
+#define M_RD_CMD_TAG_FIFO_CNT3    0xffU
+#define V_RD_CMD_TAG_FIFO_CNT3(x) ((x) << S_RD_CMD_TAG_FIFO_CNT3)
+#define G_RD_CMD_TAG_FIFO_CNT3(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT3) & M_RD_CMD_TAG_FIFO_CNT3)
+
+#define S_RD_DATA_FIFO_CNT3    0
+#define M_RD_DATA_FIFO_CNT3    0xffU
+#define V_RD_DATA_FIFO_CNT3(x) ((x) << S_RD_DATA_FIFO_CNT3)
+#define G_RD_DATA_FIFO_CNT3(x) (((x) >> S_RD_DATA_FIFO_CNT3) & M_RD_DATA_FIFO_CNT3)
+
+#define A_MA_SGE_THREAD_0_CLNT_EXP_WR_CYC_CNT_LO 0xe640
+#define A_MA_SGE_THREAD_1_CLNT_EXP_WR_CYC_CNT_LO 0xe660
+#define A_MA_ULP_TX_CLNT_EXP_WR_CYC_CNT_LO 0xe680
+#define A_MA_ULP_RX_CLNT_EXP_WR_CYC_CNT_LO 0xe6a0
+#define A_MA_ULP_TX_RX_CLNT_EXP_WR_CYC_CNT_LO 0xe6c0
+#define A_MA_TP_THREAD_0_CLNT_EXP_WR_CYC_CNT_LO 0xe6e0
+#define A_MA_TP_THREAD_1_CLNT_EXP_WR_CYC_CNT_LO 0xe700
+#define A_MA_LE_CLNT_EXP_WR_CYC_CNT_LO 0xe720
+#define A_MA_CIM_CLNT_EXP_WR_CYC_CNT_LO 0xe740
+#define A_MA_PCIE_CLNT_EXP_WR_CYC_CNT_LO 0xe760
+#define A_MA_PM_TX_CLNT_EXP_WR_CYC_CNT_LO 0xe780
+#define A_MA_PM_RX_CLNT_EXP_WR_CYC_CNT_LO 0xe7a0
+#define A_MA_HMA_CLNT_EXP_WR_CYC_CNT_LO 0xe7c0
+#define A_MA_EDRAM0_WR_REQ_CNT_HI 0xe800
+#define A_MA_EDRAM0_WR_REQ_CNT_LO 0xe820
+#define A_MA_EDRAM1_WR_REQ_CNT_HI 0xe840
+#define A_MA_EDRAM1_WR_REQ_CNT_LO 0xe860
+#define A_MA_EXT_MEMORY0_WR_REQ_CNT_HI 0xe880
+#define A_MA_EXT_MEMORY0_WR_REQ_CNT_LO 0xe8a0
+#define A_MA_EXT_MEMORY1_WR_REQ_CNT_HI 0xe8c0
+#define A_MA_EXT_MEMORY1_WR_REQ_CNT_LO 0xe8e0
+#define A_MA_EDRAM0_RD_REQ_CNT_HI 0xe900
+#define A_MA_EDRAM0_RD_REQ_CNT_LO 0xe920
+#define A_MA_EDRAM1_RD_REQ_CNT_HI 0xe940
+#define A_MA_EDRAM1_RD_REQ_CNT_LO 0xe960
+#define A_MA_EXT_MEMORY0_RD_REQ_CNT_HI 0xe980
+#define A_MA_EXT_MEMORY0_RD_REQ_CNT_LO 0xe9a0
+#define A_MA_EXT_MEMORY1_RD_REQ_CNT_HI 0xe9c0
+#define A_MA_EXT_MEMORY1_RD_REQ_CNT_LO 0xe9e0
+#define A_MA_SGE_THREAD_0_CLNT_ACT_RD_CYC_CNT_HI 0xec00
+#define A_MA_SGE_THREAD_0_CLNT_ACT_RD_CYC_CNT_LO 0xec20
+#define A_MA_SGE_THREAD_1_CLNT_ACT_RD_CYC_CNT_HI 0xec40
+#define A_MA_SGE_THREAD_1_CLNT_ACT_RD_CYC_CNT_LO 0xec60
+#define A_MA_ULP_TX_CLNT_ACT_RD_CYC_CNT_HI 0xec80
+#define A_MA_ULP_TX_CLNT_ACT_RD_CYC_CNT_LO 0xeca0
+#define A_MA_ULP_RX_CLNT_ACT_RD_CYC_CNT_HI 0xecc0
+#define A_MA_ULP_RX_CLNT_ACT_RD_CYC_CNT_LO 0xece0
+#define A_MA_ULP_TX_RX_CLNT_ACT_RD_CYC_CNT_HI 0xed00
+#define A_MA_ULP_TX_RX_CLNT_ACT_RD_CYC_CNT_LO 0xed20
+#define A_MA_TP_THREAD_0_CLNT_ACT_RD_CYC_CNT_HI 0xed40
+#define A_MA_TP_THREAD_0_CLNT_ACT_RD_CYC_CNT_LO 0xed60
+#define A_MA_TP_THREAD_1_CLNT_ACT_RD_CYC_CNT_HI 0xed80
+#define A_MA_TP_THREAD_1_CLNT_ACT_RD_CYC_CNT_LO 0xeda0
+#define A_MA_LE_CLNT_ACT_RD_CYC_CNT_HI 0xedc0
+#define A_MA_LE_CLNT_ACT_RD_CYC_CNT_LO 0xede0
+#define A_MA_CIM_CLNT_ACT_RD_CYC_CNT_HI 0xee00
+#define A_MA_CIM_CLNT_ACT_RD_CYC_CNT_LO 0xee20
+#define A_MA_PCIE_CLNT_ACT_RD_CYC_CNT_HI 0xee40
+#define A_MA_PCIE_CLNT_ACT_RD_CYC_CNT_LO 0xee60
+#define A_MA_PM_TX_CLNT_ACT_RD_CYC_CNT_HI 0xee80
+#define A_MA_PM_TX_CLNT_ACT_RD_CYC_CNT_LO 0xeea0
+#define A_MA_PM_RX_CLNT_ACT_RD_CYC_CNT_HI 0xeec0
+#define A_MA_PM_RX_CLNT_ACT_RD_CYC_CNT_LO 0xeee0
+#define A_MA_HMA_CLNT_ACT_RD_CYC_CNT_HI 0xef00
+#define A_MA_HMA_CLNT_ACT_RD_CYC_CNT_LO 0xef20
+#define A_MA_PM_TX_RD_THROTTLE_STATUS 0xf000
+
+#define S_PTMAXTRANS    16
+#define V_PTMAXTRANS(x) ((x) << S_PTMAXTRANS)
+#define F_PTMAXTRANS    V_PTMAXTRANS(1U)
+
+#define S_PTFLITCNT    0
+#define M_PTFLITCNT    0xffU
+#define V_PTFLITCNT(x) ((x) << S_PTFLITCNT)
+#define G_PTFLITCNT(x) (((x) >> S_PTFLITCNT) & M_PTFLITCNT)
+
+#define A_MA_PM_RX_RD_THROTTLE_STATUS 0xf020
+
+#define S_PRMAXTRANS    16
+#define V_PRMAXTRANS(x) ((x) << S_PRMAXTRANS)
+#define F_PRMAXTRANS    V_PRMAXTRANS(1U)
+
+#define S_PRFLITCNT    0
+#define M_PRFLITCNT    0xffU
+#define V_PRFLITCNT(x) ((x) << S_PRFLITCNT)
+#define G_PRFLITCNT(x) (((x) >> S_PRFLITCNT) & M_PRFLITCNT)
+
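[Editor's note: a driver-side decode of the two throttle-status registers above
might look like the following sketch. It assumes cxgbe's t4_read_reg(sc, reg)
accessor and that these MA addresses are reachable through it; some MA counters
are only reachable indirectly, so treat this as illustrative, not definitive:

/* Sketch: report PM TX read-throttle state.  Assumes the cxgbe
 * t4_read_reg() accessor can reach this MA register directly. */
static void
dump_pm_tx_throttle(struct adapter *sc)
{
	uint32_t v = t4_read_reg(sc, A_MA_PM_TX_RD_THROTTLE_STATUS);

	printf("PM TX throttle: maxtrans=%d flitcnt=%u\n",
	    !!(v & F_PTMAXTRANS), G_PTFLITCNT(v));
}
]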
 /* registers for module EDC_0 */
 #define EDC_0_BASE_ADDR 0x7900
 
@@ -7011,6 +20384,7 @@
 #define V_MBMSGRDYINT(x) ((x) << S_MBMSGRDYINT)
 #define F_MBMSGRDYINT    V_MBMSGRDYINT(1U)
 
+#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290
 #define A_CIM_BOOT_CFG 0x7b00
 
 #define S_BOOTADDR    8
@@ -7180,6 +20554,46 @@
 #define V_PREFDROPINTEN(x) ((x) << S_PREFDROPINTEN)
 #define F_PREFDROPINTEN    V_PREFDROPINTEN(1U)
 
+#define S_MA_CIM_INTFPERR    28
+#define V_MA_CIM_INTFPERR(x) ((x) << S_MA_CIM_INTFPERR)
+#define F_MA_CIM_INTFPERR    V_MA_CIM_INTFPERR(1U)
+
+#define S_PLCIM_MSTRSPDATAPARERR    27
+#define V_PLCIM_MSTRSPDATAPARERR(x) ((x) << S_PLCIM_MSTRSPDATAPARERR)
+#define F_PLCIM_MSTRSPDATAPARERR    V_PLCIM_MSTRSPDATAPARERR(1U)
+
+#define S_NCSI2CIMINTFPARERR    26
+#define V_NCSI2CIMINTFPARERR(x) ((x) << S_NCSI2CIMINTFPARERR)
+#define F_NCSI2CIMINTFPARERR    V_NCSI2CIMINTFPARERR(1U)
+
+#define S_SGE2CIMINTFPARERR    25
+#define V_SGE2CIMINTFPARERR(x) ((x) << S_SGE2CIMINTFPARERR)
+#define F_SGE2CIMINTFPARERR    V_SGE2CIMINTFPARERR(1U)
+
+#define S_ULP2CIMINTFPARERR    24
+#define V_ULP2CIMINTFPARERR(x) ((x) << S_ULP2CIMINTFPARERR)
+#define F_ULP2CIMINTFPARERR    V_ULP2CIMINTFPARERR(1U)
+
+#define S_TP2CIMINTFPARERR    23
+#define V_TP2CIMINTFPARERR(x) ((x) << S_TP2CIMINTFPARERR)
+#define F_TP2CIMINTFPARERR    V_TP2CIMINTFPARERR(1U)
+
+#define S_OBQSGERX1PARERR    22
+#define V_OBQSGERX1PARERR(x) ((x) << S_OBQSGERX1PARERR)
+#define F_OBQSGERX1PARERR    V_OBQSGERX1PARERR(1U)
+
+#define S_OBQSGERX0PARERR    21
+#define V_OBQSGERX0PARERR(x) ((x) << S_OBQSGERX0PARERR)
+#define F_OBQSGERX0PARERR    V_OBQSGERX0PARERR(1U)
+
+#define S_PCIE2CIMINTFPARERR    29
+#define V_PCIE2CIMINTFPARERR(x) ((x) << S_PCIE2CIMINTFPARERR)
+#define F_PCIE2CIMINTFPARERR    V_PCIE2CIMINTFPARERR(1U)
+
+#define S_IBQPCIEPARERR    12
+#define V_IBQPCIEPARERR(x) ((x) << S_IBQPCIEPARERR)
+#define F_IBQPCIEPARERR    V_IBQPCIEPARERR(1U)
+
 #define A_CIM_HOST_INT_CAUSE 0x7b2c
 
 #define S_TIEQOUTPARERRINT    20
@@ -7508,6 +20922,10 @@
 #define V_QUEFULLTHRSH(x) ((x) << S_QUEFULLTHRSH)
 #define G_QUEFULLTHRSH(x) (((x) >> S_QUEFULLTHRSH) & M_QUEFULLTHRSH)
 
+#define S_CIMQ1KEN    30
+#define V_CIMQ1KEN(x) ((x) << S_CIMQ1KEN)
+#define F_CIMQ1KEN    V_CIMQ1KEN(1U)
+
 #define A_CIM_HOST_ACC_CTRL 0x7b50
 
 #define S_HOSTBUSY    17
@@ -7724,6 +21142,11 @@
 #define V_DADDRTIMEOUT(x) ((x) << S_DADDRTIMEOUT)
 #define G_DADDRTIMEOUT(x) (((x) >> S_DADDRTIMEOUT) & M_DADDRTIMEOUT)
 
+#define S_DADDRTIMEOUTTYPE    0
+#define M_DADDRTIMEOUTTYPE    0x3U
+#define V_DADDRTIMEOUTTYPE(x) ((x) << S_DADDRTIMEOUTTYPE)
+#define G_DADDRTIMEOUTTYPE(x) (((x) >> S_DADDRTIMEOUTTYPE) & M_DADDRTIMEOUTTYPE)
+
 #define A_CIM_DEBUG_ADDR_ILLEGAL 0x7c0c
 
 #define S_DADDRILLEGAL    2
@@ -7731,6 +21154,11 @@
 #define V_DADDRILLEGAL(x) ((x) << S_DADDRILLEGAL)
 #define G_DADDRILLEGAL(x) (((x) >> S_DADDRILLEGAL) & M_DADDRILLEGAL)
 
+#define S_DADDRILLEGALTYPE    0
+#define M_DADDRILLEGALTYPE    0x3U
+#define V_DADDRILLEGALTYPE(x) ((x) << S_DADDRILLEGALTYPE)
+#define G_DADDRILLEGALTYPE(x) (((x) >> S_DADDRILLEGALTYPE) & M_DADDRILLEGALTYPE)
+
 #define A_CIM_DEBUG_PIF_CAUSE_MASK 0x7c10
 
 #define S_DPIFHOSTMASK    0
@@ -7738,6 +21166,16 @@
 #define V_DPIFHOSTMASK(x) ((x) << S_DPIFHOSTMASK)
 #define G_DPIFHOSTMASK(x) (((x) >> S_DPIFHOSTMASK) & M_DPIFHOSTMASK)
 
+#define S_T5_DPIFHOSTMASK    0
+#define M_T5_DPIFHOSTMASK    0x1fffffffU
+#define V_T5_DPIFHOSTMASK(x) ((x) << S_T5_DPIFHOSTMASK)
+#define G_T5_DPIFHOSTMASK(x) (((x) >> S_T5_DPIFHOSTMASK) & M_T5_DPIFHOSTMASK)
+
+#define S_T6_T5_DPIFHOSTMASK    0
+#define M_T6_T5_DPIFHOSTMASK    0x3fffffffU
+#define V_T6_T5_DPIFHOSTMASK(x) ((x) << S_T6_T5_DPIFHOSTMASK)
+#define G_T6_T5_DPIFHOSTMASK(x) (((x) >> S_T6_T5_DPIFHOSTMASK) & M_T6_T5_DPIFHOSTMASK)
+
 #define A_CIM_DEBUG_PIF_UPACC_CAUSE_MASK 0x7c14
 
 #define S_DPIFHUPAMASK    0
@@ -7752,6 +21190,16 @@
 #define V_DUPMASK(x) ((x) << S_DUPMASK)
 #define G_DUPMASK(x) (((x) >> S_DUPMASK) & M_DUPMASK)
 
+#define S_T5_DUPMASK    0
+#define M_T5_DUPMASK    0x1fffffffU
+#define V_T5_DUPMASK(x) ((x) << S_T5_DUPMASK)
+#define G_T5_DUPMASK(x) (((x) >> S_T5_DUPMASK) & M_T5_DUPMASK)
+
+#define S_T6_T5_DUPMASK    0
+#define M_T6_T5_DUPMASK    0x3fffffffU
+#define V_T6_T5_DUPMASK(x) ((x) << S_T6_T5_DUPMASK)
+#define G_T6_T5_DUPMASK(x) (((x) >> S_T6_T5_DUPMASK) & M_T6_T5_DUPMASK)
+
 #define A_CIM_DEBUG_UP_UPACC_CAUSE_MASK 0x7c1c
 
 #define S_DUPUACCMASK    0
@@ -7767,6 +21215,16 @@
 #define V_PERREN(x) ((x) << S_PERREN)
 #define G_PERREN(x) (((x) >> S_PERREN) & M_PERREN)
 
+#define S_T5_PERREN    0
+#define M_T5_PERREN    0x1fffffffU
+#define V_T5_PERREN(x) ((x) << S_T5_PERREN)
+#define G_T5_PERREN(x) (((x) >> S_T5_PERREN) & M_T5_PERREN)
+
+#define S_T6_T5_PERREN    0
+#define M_T6_T5_PERREN    0x3fffffffU
+#define V_T6_T5_PERREN(x) ((x) << S_T6_T5_PERREN)
+#define G_T6_T5_PERREN(x) (((x) >> S_T6_T5_PERREN) & M_T6_T5_PERREN)
+
 #define A_CIM_EEPROM_BUSY_BIT 0x7c28
 
 #define S_EEPROMBUSY    0
@@ -7779,6 +21237,10 @@
 #define V_MA_TIMER_ENABLE(x) ((x) << S_MA_TIMER_ENABLE)
 #define F_MA_TIMER_ENABLE    V_MA_TIMER_ENABLE(1U)
 
+#define S_SLOW_TIMER_ENABLE    1
+#define V_SLOW_TIMER_ENABLE(x) ((x) << S_SLOW_TIMER_ENABLE)
+#define F_SLOW_TIMER_ENABLE    V_SLOW_TIMER_ENABLE(1U)
+
 #define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30
 
 #define S_UP_PO_SINGLE_OUTSTANDING    0
@@ -7787,7 +21249,79 @@
 
 #define A_CIM_CIM_DEBUG_SPARE 0x7c34
 #define A_CIM_UP_OPERATION_FREQ 0x7c38
+#define A_CIM_CIM_IBQ_ERR_CODE 0x7c3c
 
+#define S_CIM_ULP_TX_PKT_ERR_CODE    16
+#define M_CIM_ULP_TX_PKT_ERR_CODE    0xffU
+#define V_CIM_ULP_TX_PKT_ERR_CODE(x) ((x) << S_CIM_ULP_TX_PKT_ERR_CODE)
+#define G_CIM_ULP_TX_PKT_ERR_CODE(x) (((x) >> S_CIM_ULP_TX_PKT_ERR_CODE) & M_CIM_ULP_TX_PKT_ERR_CODE)
+
+#define S_CIM_SGE1_PKT_ERR_CODE    8
+#define M_CIM_SGE1_PKT_ERR_CODE    0xffU
+#define V_CIM_SGE1_PKT_ERR_CODE(x) ((x) << S_CIM_SGE1_PKT_ERR_CODE)
+#define G_CIM_SGE1_PKT_ERR_CODE(x) (((x) >> S_CIM_SGE1_PKT_ERR_CODE) & M_CIM_SGE1_PKT_ERR_CODE)
+
+#define S_CIM_SGE0_PKT_ERR_CODE    0
+#define M_CIM_SGE0_PKT_ERR_CODE    0xffU
+#define V_CIM_SGE0_PKT_ERR_CODE(x) ((x) << S_CIM_SGE0_PKT_ERR_CODE)
+#define G_CIM_SGE0_PKT_ERR_CODE(x) (((x) >> S_CIM_SGE0_PKT_ERR_CODE) & M_CIM_SGE0_PKT_ERR_CODE)
+
+#define S_CIM_PCIE_PKT_ERR_CODE    8
+#define M_CIM_PCIE_PKT_ERR_CODE    0xffU
+#define V_CIM_PCIE_PKT_ERR_CODE(x) ((x) << S_CIM_PCIE_PKT_ERR_CODE)
+#define G_CIM_PCIE_PKT_ERR_CODE(x) (((x) >> S_CIM_PCIE_PKT_ERR_CODE) & M_CIM_PCIE_PKT_ERR_CODE)
+
+#define A_CIM_IBQ_DBG_WAIT_COUNTER 0x7c40
+#define A_CIM_PIO_UP_MST_CFG_SEL 0x7c44
+
+#define S_PIO_UP_MST_CFG_SEL    0
+#define V_PIO_UP_MST_CFG_SEL(x) ((x) << S_PIO_UP_MST_CFG_SEL)
+#define F_PIO_UP_MST_CFG_SEL    V_PIO_UP_MST_CFG_SEL(1U)
+
+#define A_CIM_CGEN 0x7c48
+
+#define S_TSCH_CGEN    0
+#define V_TSCH_CGEN(x) ((x) << S_TSCH_CGEN)
+#define F_TSCH_CGEN    V_TSCH_CGEN(1U)
+
+#define A_CIM_QUEUE_FEATURE_DISABLE 0x7c4c
+
+#define S_OBQ_THROUTTLE_ON_EOP    4
+#define V_OBQ_THROUTTLE_ON_EOP(x) ((x) << S_OBQ_THROUTTLE_ON_EOP)
+#define F_OBQ_THROUTTLE_ON_EOP    V_OBQ_THROUTTLE_ON_EOP(1U)
+
+#define S_OBQ_READ_CTL_PERF_MODE_DISABLE    3
+#define V_OBQ_READ_CTL_PERF_MODE_DISABLE(x) ((x) << S_OBQ_READ_CTL_PERF_MODE_DISABLE)
+#define F_OBQ_READ_CTL_PERF_MODE_DISABLE    V_OBQ_READ_CTL_PERF_MODE_DISABLE(1U)
+
+#define S_OBQ_WAIT_FOR_EOP_FLUSH_DISABLE    2
+#define V_OBQ_WAIT_FOR_EOP_FLUSH_DISABLE(x) ((x) << S_OBQ_WAIT_FOR_EOP_FLUSH_DISABLE)
+#define F_OBQ_WAIT_FOR_EOP_FLUSH_DISABLE    V_OBQ_WAIT_FOR_EOP_FLUSH_DISABLE(1U)
+
+#define S_IBQ_RRA_DSBL    1
+#define V_IBQ_RRA_DSBL(x) ((x) << S_IBQ_RRA_DSBL)
+#define F_IBQ_RRA_DSBL    V_IBQ_RRA_DSBL(1U)
+
+#define S_IBQ_SKID_FIFO_EOP_FLSH_DSBL    0
+#define V_IBQ_SKID_FIFO_EOP_FLSH_DSBL(x) ((x) << S_IBQ_SKID_FIFO_EOP_FLSH_DSBL)
+#define F_IBQ_SKID_FIFO_EOP_FLSH_DSBL    V_IBQ_SKID_FIFO_EOP_FLSH_DSBL(1U)
+
+#define S_PCIE_OBQ_IF_DISABLE    5
+#define V_PCIE_OBQ_IF_DISABLE(x) ((x) << S_PCIE_OBQ_IF_DISABLE)
+#define F_PCIE_OBQ_IF_DISABLE    V_PCIE_OBQ_IF_DISABLE(1U)
+
+#define A_CIM_CGEN_GLOBAL 0x7c50
+
+#define S_CGEN_GLOBAL    0
+#define V_CGEN_GLOBAL(x) ((x) << S_CGEN_GLOBAL)
+#define F_CGEN_GLOBAL    V_CGEN_GLOBAL(1U)
+
+#define A_CIM_DPSLP_EN 0x7c54
+
+#define S_PIFDBGLA_DPSLP_EN    0
+#define V_PIFDBGLA_DPSLP_EN(x) ((x) << S_PIFDBGLA_DPSLP_EN)
+#define F_PIFDBGLA_DPSLP_EN    V_PIFDBGLA_DPSLP_EN(1U)
+
 /* registers for module TP */
 #define TP_BASE_ADDR 0x7d00
 
@@ -7897,6 +21431,70 @@
 #define V_CTUNNEL(x) ((x) << S_CTUNNEL)
 #define F_CTUNNEL    V_CTUNNEL(1U)
 
+#define S_VLANEXTENPORT3    31
+#define V_VLANEXTENPORT3(x) ((x) << S_VLANEXTENPORT3)
+#define F_VLANEXTENPORT3    V_VLANEXTENPORT3(1U)
+
+#define S_VLANEXTENPORT2    30
+#define V_VLANEXTENPORT2(x) ((x) << S_VLANEXTENPORT2)
+#define F_VLANEXTENPORT2    V_VLANEXTENPORT2(1U)
+
+#define S_VLANEXTENPORT1    29
+#define V_VLANEXTENPORT1(x) ((x) << S_VLANEXTENPORT1)
+#define F_VLANEXTENPORT1    V_VLANEXTENPORT1(1U)
+
+#define S_VLANEXTENPORT0    28
+#define V_VLANEXTENPORT0(x) ((x) << S_VLANEXTENPORT0)
+#define F_VLANEXTENPORT0    V_VLANEXTENPORT0(1U)
+
+#define S_VNTAGDEFAULTVAL    13
+#define V_VNTAGDEFAULTVAL(x) ((x) << S_VNTAGDEFAULTVAL)
+#define F_VNTAGDEFAULTVAL    V_VNTAGDEFAULTVAL(1U)
+
+#define S_ECHECKUDPLEN    12
+#define V_ECHECKUDPLEN(x) ((x) << S_ECHECKUDPLEN)
+#define F_ECHECKUDPLEN    V_ECHECKUDPLEN(1U)
+
+#define S_FCOEFPMA    10
+#define V_FCOEFPMA(x) ((x) << S_FCOEFPMA)
+#define F_FCOEFPMA    V_FCOEFPMA(1U)
+
+#define S_VNTAGETHENABLE    8
+#define V_VNTAGETHENABLE(x) ((x) << S_VNTAGETHENABLE)
+#define F_VNTAGETHENABLE    V_VNTAGETHENABLE(1U)
+
+#define S_IP_CCSM    7
+#define V_IP_CCSM(x) ((x) << S_IP_CCSM)
+#define F_IP_CCSM    V_IP_CCSM(1U)
+
+#define S_CCHECKSUMCHECKUDP    6
+#define V_CCHECKSUMCHECKUDP(x) ((x) << S_CCHECKSUMCHECKUDP)
+#define F_CCHECKSUMCHECKUDP    V_CCHECKSUMCHECKUDP(1U)
+
+#define S_TCP_CCSM    5
+#define V_TCP_CCSM(x) ((x) << S_TCP_CCSM)
+#define F_TCP_CCSM    V_TCP_CCSM(1U)
+
+#define S_CDEMUX    3
+#define V_CDEMUX(x) ((x) << S_CDEMUX)
+#define F_CDEMUX    V_CDEMUX(1U)
+
+#define S_ETHUPEN    2
+#define V_ETHUPEN(x) ((x) << S_ETHUPEN)
+#define F_ETHUPEN    V_ETHUPEN(1U)
+
+#define S_CXOFFOVERRIDE    3
+#define V_CXOFFOVERRIDE(x) ((x) << S_CXOFFOVERRIDE)
+#define F_CXOFFOVERRIDE    V_CXOFFOVERRIDE(1U)
+
+#define S_EGREDROPEN    1
+#define V_EGREDROPEN(x) ((x) << S_EGREDROPEN)
+#define F_EGREDROPEN    V_EGREDROPEN(1U)
+
+#define S_CFASTDEMUXEN    0
+#define V_CFASTDEMUXEN(x) ((x) << S_CFASTDEMUXEN)
+#define F_CFASTDEMUXEN    V_CFASTDEMUXEN(1U)
+
 #define A_TP_OUT_CONFIG 0x7d04
 
 #define S_PORTQFCEN    28
@@ -7988,6 +21586,34 @@
 #define V_CETHERNET(x) ((x) << S_CETHERNET)
 #define F_CETHERNET    V_CETHERNET(1U)
 
+#define S_EVNTAGEN    9
+#define V_EVNTAGEN(x) ((x) << S_EVNTAGEN)
+#define F_EVNTAGEN    V_EVNTAGEN(1U)
+
+#define S_CCPLACKMODE    13
+#define V_CCPLACKMODE(x) ((x) << S_CCPLACKMODE)
+#define F_CCPLACKMODE    V_CCPLACKMODE(1U)
+
+#define S_RMWHINTENABLE    12
+#define V_RMWHINTENABLE(x) ((x) << S_RMWHINTENABLE)
+#define F_RMWHINTENABLE    V_RMWHINTENABLE(1U)
+
+#define S_EV6FLWEN    8
+#define V_EV6FLWEN(x) ((x) << S_EV6FLWEN)
+#define F_EV6FLWEN    V_EV6FLWEN(1U)
+
+#define S_EVLANPRIO    6
+#define V_EVLANPRIO(x) ((x) << S_EVLANPRIO)
+#define F_EVLANPRIO    V_EVLANPRIO(1U)
+
+#define S_CRXPKTENC    3
+#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC)
+#define F_CRXPKTENC    V_CRXPKTENC(1U)
+
+#define S_CRXPKTXT    1
+#define V_CRXPKTXT(x) ((x) << S_CRXPKTXT)
+#define F_CRXPKTXT    V_CRXPKTXT(1U)
+
 #define A_TP_GLOBAL_CONFIG 0x7d08
 
 #define S_SYNCOOKIEPARAMS    26
@@ -8066,6 +21692,18 @@
 #define V_IPTTL(x) ((x) << S_IPTTL)
 #define G_IPTTL(x) (((x) >> S_IPTTL) & M_IPTTL)
 
+#define S_RSSSYNSTEERENABLE    12
+#define V_RSSSYNSTEERENABLE(x) ((x) << S_RSSSYNSTEERENABLE)
+#define F_RSSSYNSTEERENABLE    V_RSSSYNSTEERENABLE(1U)
+
+#define S_ISSFROMCPLENABLE    11
+#define V_ISSFROMCPLENABLE(x) ((x) << S_ISSFROMCPLENABLE)
+#define F_ISSFROMCPLENABLE    V_ISSFROMCPLENABLE(1U)
+
+#define S_ACTIVEFILTERCOUNTS    22
+#define V_ACTIVEFILTERCOUNTS(x) ((x) << S_ACTIVEFILTERCOUNTS)
+#define F_ACTIVEFILTERCOUNTS    V_ACTIVEFILTERCOUNTS(1U)
+
 #define A_TP_DB_CONFIG 0x7d0c
 
 #define S_DBMAXOPCNT    24
@@ -8346,6 +21984,22 @@
 #define V_TXDATAACKPAGEENABLE(x) ((x) << S_TXDATAACKPAGEENABLE)
 #define F_TXDATAACKPAGEENABLE    V_TXDATAACKPAGEENABLE(1U)
 
+#define S_ENABLEFILTERNAT    5
+#define V_ENABLEFILTERNAT(x) ((x) << S_ENABLEFILTERNAT)
+#define F_ENABLEFILTERNAT    V_ENABLEFILTERNAT(1U)
+
+#define S_ENABLEFINCHECK    31
+#define V_ENABLEFINCHECK(x) ((x) << S_ENABLEFINCHECK)
+#define F_ENABLEFINCHECK    V_ENABLEFINCHECK(1U)
+
+#define S_ENABLEMIBVFPLD    21
+#define V_ENABLEMIBVFPLD(x) ((x) << S_ENABLEMIBVFPLD)
+#define F_ENABLEMIBVFPLD    V_ENABLEMIBVFPLD(1U)
+
+#define S_DISABLESEPPSHFLAG    4
+#define V_DISABLESEPPSHFLAG(x) ((x) << S_DISABLESEPPSHFLAG)
+#define F_DISABLESEPPSHFLAG    V_DISABLESEPPSHFLAG(1U)
+
 #define A_TP_PC_CONFIG2 0x7d4c
 
 #define S_ENABLEMTUVFMODE    31
@@ -8476,6 +22130,10 @@
 #define V_ENABLETNLOFDCLOSED(x) ((x) << S_ENABLETNLOFDCLOSED)
 #define F_ENABLETNLOFDCLOSED    V_ENABLETNLOFDCLOSED(1U)
 
+#define S_ENABLEFINDDPOFF    14
+#define V_ENABLEFINDDPOFF(x) ((x) << S_ENABLEFINDDPOFF)
+#define F_ENABLEFINDDPOFF    V_ENABLEFINDDPOFF(1U)
+
 #define A_TP_TCP_BACKOFF_REG0 0x7d50
 
 #define S_TIMERBACKOFFINDEX3    24
@@ -8626,6 +22284,44 @@
 #define V_SWSTIMER(x) ((x) << S_SWSTIMER)
 #define F_SWSTIMER    V_SWSTIMER(1U)
 
+#define S_LIMTXTHRESH    28
+#define M_LIMTXTHRESH    0xfU
+#define V_LIMTXTHRESH(x) ((x) << S_LIMTXTHRESH)
+#define G_LIMTXTHRESH(x) (((x) >> S_LIMTXTHRESH) & M_LIMTXTHRESH)
+
+#define S_CHNERRENABLE    14
+#define V_CHNERRENABLE(x) ((x) << S_CHNERRENABLE)
+#define F_CHNERRENABLE    V_CHNERRENABLE(1U)
+
+#define S_SETTIMEENABLE    13
+#define V_SETTIMEENABLE(x) ((x) << S_SETTIMEENABLE)
+#define F_SETTIMEENABLE    V_SETTIMEENABLE(1U)
+
+#define S_ECNCNGFIFO    19
+#define V_ECNCNGFIFO(x) ((x) << S_ECNCNGFIFO)
+#define F_ECNCNGFIFO    V_ECNCNGFIFO(1U)
+
+#define S_ECNSYNACK    18
+#define V_ECNSYNACK(x) ((x) << S_ECNSYNACK)
+#define F_ECNSYNACK    V_ECNSYNACK(1U)
+
+#define S_ECNTHRESH    16
+#define M_ECNTHRESH    0x3U
+#define V_ECNTHRESH(x) ((x) << S_ECNTHRESH)
+#define G_ECNTHRESH(x) (((x) >> S_ECNTHRESH) & M_ECNTHRESH)
+
+#define S_ECNMODE    15
+#define V_ECNMODE(x) ((x) << S_ECNMODE)
+#define F_ECNMODE    V_ECNMODE(1U)
+
+#define S_ECNMODECWR    14
+#define V_ECNMODECWR(x) ((x) << S_ECNMODECWR)
+#define F_ECNMODECWR    V_ECNMODECWR(1U)
+
+#define S_FORCESHOVE    10
+#define V_FORCESHOVE(x) ((x) << S_FORCESHOVE)
+#define F_FORCESHOVE    V_FORCESHOVE(1U)
+
 #define A_TP_PARA_REG1 0x7d64
 
 #define S_INITRWND    16
@@ -8777,6 +22473,74 @@
 #define V_RENOCFG(x) ((x) << S_RENOCFG)
 #define G_RENOCFG(x) (((x) >> S_RENOCFG) & M_RENOCFG)
 
+#define S_IDLECWNDHIGHSPEED    28
+#define V_IDLECWNDHIGHSPEED(x) ((x) << S_IDLECWNDHIGHSPEED)
+#define F_IDLECWNDHIGHSPEED    V_IDLECWNDHIGHSPEED(1U)
+
+#define S_RXMTCWNDHIGHSPEED    27
+#define V_RXMTCWNDHIGHSPEED(x) ((x) << S_RXMTCWNDHIGHSPEED)
+#define F_RXMTCWNDHIGHSPEED    V_RXMTCWNDHIGHSPEED(1U)
+
+#define S_OVERDRIVEHIGHSPEED    25
+#define M_OVERDRIVEHIGHSPEED    0x3U
+#define V_OVERDRIVEHIGHSPEED(x) ((x) << S_OVERDRIVEHIGHSPEED)
+#define G_OVERDRIVEHIGHSPEED(x) (((x) >> S_OVERDRIVEHIGHSPEED) & M_OVERDRIVEHIGHSPEED)
+
+#define S_BYTECOUNTHIGHSPEED    24
+#define V_BYTECOUNTHIGHSPEED(x) ((x) << S_BYTECOUNTHIGHSPEED)
+#define F_BYTECOUNTHIGHSPEED    V_BYTECOUNTHIGHSPEED(1U)
+
+#define S_IDLECWNDNEWRENO    20
+#define V_IDLECWNDNEWRENO(x) ((x) << S_IDLECWNDNEWRENO)
+#define F_IDLECWNDNEWRENO    V_IDLECWNDNEWRENO(1U)
+
+#define S_RXMTCWNDNEWRENO    19
+#define V_RXMTCWNDNEWRENO(x) ((x) << S_RXMTCWNDNEWRENO)
+#define F_RXMTCWNDNEWRENO    V_RXMTCWNDNEWRENO(1U)
+
+#define S_OVERDRIVENEWRENO    17
+#define M_OVERDRIVENEWRENO    0x3U
+#define V_OVERDRIVENEWRENO(x) ((x) << S_OVERDRIVENEWRENO)
+#define G_OVERDRIVENEWRENO(x) (((x) >> S_OVERDRIVENEWRENO) & M_OVERDRIVENEWRENO)
+
+#define S_BYTECOUNTNEWRENO    16
+#define V_BYTECOUNTNEWRENO(x) ((x) << S_BYTECOUNTNEWRENO)
+#define F_BYTECOUNTNEWRENO    V_BYTECOUNTNEWRENO(1U)
+
+#define S_IDLECWNDTAHOE    12
+#define V_IDLECWNDTAHOE(x) ((x) << S_IDLECWNDTAHOE)
+#define F_IDLECWNDTAHOE    V_IDLECWNDTAHOE(1U)
+
+#define S_RXMTCWNDTAHOE    11
+#define V_RXMTCWNDTAHOE(x) ((x) << S_RXMTCWNDTAHOE)
+#define F_RXMTCWNDTAHOE    V_RXMTCWNDTAHOE(1U)
+
+#define S_OVERDRIVETAHOE    9
+#define M_OVERDRIVETAHOE    0x3U
+#define V_OVERDRIVETAHOE(x) ((x) << S_OVERDRIVETAHOE)
+#define G_OVERDRIVETAHOE(x) (((x) >> S_OVERDRIVETAHOE) & M_OVERDRIVETAHOE)
+
+#define S_BYTECOUNTTAHOE    8
+#define V_BYTECOUNTTAHOE(x) ((x) << S_BYTECOUNTTAHOE)
+#define F_BYTECOUNTTAHOE    V_BYTECOUNTTAHOE(1U)
+
+#define S_IDLECWNDRENO    4
+#define V_IDLECWNDRENO(x) ((x) << S_IDLECWNDRENO)
+#define F_IDLECWNDRENO    V_IDLECWNDRENO(1U)
+
+#define S_RXMTCWNDRENO    3
+#define V_RXMTCWNDRENO(x) ((x) << S_RXMTCWNDRENO)
+#define F_RXMTCWNDRENO    V_RXMTCWNDRENO(1U)
+
+#define S_OVERDRIVERENO    1
+#define M_OVERDRIVERENO    0x3U
+#define V_OVERDRIVERENO(x) ((x) << S_OVERDRIVERENO)
+#define G_OVERDRIVERENO(x) (((x) >> S_OVERDRIVERENO) & M_OVERDRIVERENO)
+
+#define S_BYTECOUNTRENO    0
+#define V_BYTECOUNTRENO(x) ((x) << S_BYTECOUNTRENO)
+#define F_BYTECOUNTRENO    V_BYTECOUNTRENO(1U)
+
 #define A_TP_PARA_REG5 0x7d74
 
 #define S_INDICATESIZE    16
@@ -8825,6 +22589,26 @@
 #define V_PUSHTIMERENABLE(x) ((x) << S_PUSHTIMERENABLE)
 #define F_PUSHTIMERENABLE    V_PUSHTIMERENABLE(1U)
 
+#define S_ENABLEXOFFPDU    7
+#define V_ENABLEXOFFPDU(x) ((x) << S_ENABLEXOFFPDU)
+#define F_ENABLEXOFFPDU    V_ENABLEXOFFPDU(1U)
+
+#define S_ENABLENEWFAR    6
+#define V_ENABLENEWFAR(x) ((x) << S_ENABLENEWFAR)
+#define F_ENABLENEWFAR    V_ENABLENEWFAR(1U)
+
+#define S_ENABLEFRAGCHECK    5
+#define V_ENABLEFRAGCHECK(x) ((x) << S_ENABLEFRAGCHECK)
+#define F_ENABLEFRAGCHECK    V_ENABLEFRAGCHECK(1U)
+
+#define S_ENABLEFCOECHECK    6
+#define V_ENABLEFCOECHECK(x) ((x) << S_ENABLEFCOECHECK)
+#define F_ENABLEFCOECHECK    V_ENABLEFCOECHECK(1U)
+
+#define S_ENABLERDMAFIX    1
+#define V_ENABLERDMAFIX(x) ((x) << S_ENABLERDMAFIX)
+#define F_ENABLERDMAFIX    V_ENABLERDMAFIX(1U)
+
 #define A_TP_PARA_REG6 0x7d78
 
 #define S_TXPDUSIZEADJ    24
@@ -8917,6 +22701,18 @@
 #define V_DISABLEPDUXMT(x) ((x) << S_DISABLEPDUXMT)
 #define F_DISABLEPDUXMT    V_DISABLEPDUXMT(1U)
 
+#define S_DISABLEPDUACK    20
+#define V_DISABLEPDUACK(x) ((x) << S_DISABLEPDUACK)
+#define F_DISABLEPDUACK    V_DISABLEPDUACK(1U)
+
+#define S_TXTCAMKEY    22
+#define V_TXTCAMKEY(x) ((x) << S_TXTCAMKEY)
+#define F_TXTCAMKEY    V_TXTCAMKEY(1U)
+
+#define S_ENABLECBYP    21
+#define V_ENABLECBYP(x) ((x) << S_ENABLECBYP)
+#define F_ENABLECBYP    V_ENABLECBYP(1U)
+
 #define A_TP_PARA_REG7 0x7d7c
 
 #define S_PMMAXXFERLEN1    16
@@ -8966,6 +22762,20 @@
 #define V_ENGINELATENCYBASE(x) ((x) << S_ENGINELATENCYBASE)
 #define G_ENGINELATENCYBASE(x) (((x) >> S_ENGINELATENCYBASE) & M_ENGINELATENCYBASE)
 
+#define A_TP_PARA_REG8 0x7d84
+
+#define S_ECNACKECT    2
+#define V_ECNACKECT(x) ((x) << S_ECNACKECT)
+#define F_ECNACKECT    V_ECNACKECT(1U)
+
+#define S_ECNFINECT    1
+#define V_ECNFINECT(x) ((x) << S_ECNFINECT)
+#define F_ECNFINECT    V_ECNFINECT(1U)
+
+#define S_ECNSYNECT    0
+#define V_ECNSYNECT(x) ((x) << S_ECNSYNECT)
+#define F_ECNSYNECT    V_ECNSYNECT(1U)
+
 #define A_TP_ERR_CONFIG 0x7d8c
 
 #define S_TNLERRORPING    30
@@ -9072,6 +22882,30 @@
 #define V_DROPERRORANY(x) ((x) << S_DROPERRORANY)
 #define F_DROPERRORANY    V_DROPERRORANY(1U)
 
+#define S_TNLERRORFPMA    31
+#define V_TNLERRORFPMA(x) ((x) << S_TNLERRORFPMA)
+#define F_TNLERRORFPMA    V_TNLERRORFPMA(1U)
+
+#define S_DROPERRORFPMA    15
+#define V_DROPERRORFPMA(x) ((x) << S_DROPERRORFPMA)
+#define F_DROPERRORFPMA    V_DROPERRORFPMA(1U)
+
+#define S_TNLERROROPAQUE    27
+#define V_TNLERROROPAQUE(x) ((x) << S_TNLERROROPAQUE)
+#define F_TNLERROROPAQUE    V_TNLERROROPAQUE(1U)
+
+#define S_TNLERRORIP6OPT    26
+#define V_TNLERRORIP6OPT(x) ((x) << S_TNLERRORIP6OPT)
+#define F_TNLERRORIP6OPT    V_TNLERRORIP6OPT(1U)
+
+#define S_DROPERROROPAQUE    11
+#define V_DROPERROROPAQUE(x) ((x) << S_DROPERROROPAQUE)
+#define F_DROPERROROPAQUE    V_DROPERROROPAQUE(1U)
+
+#define S_DROPERRORIP6OPT    10
+#define V_DROPERRORIP6OPT(x) ((x) << S_DROPERRORIP6OPT)
+#define F_DROPERRORIP6OPT    V_DROPERRORIP6OPT(1U)
+
 #define A_TP_TIMER_RESOLUTION 0x7d90
 
 #define S_TIMERRESOLUTION    16
@@ -9208,6 +23042,11 @@
 #define V_KEEPALIVEMAXR2(x) ((x) << S_KEEPALIVEMAXR2)
 #define G_KEEPALIVEMAXR2(x) (((x) >> S_KEEPALIVEMAXR2) & M_KEEPALIVEMAXR2)
 
+#define S_T6_SYNSHIFTMAX    24
+#define M_T6_SYNSHIFTMAX    0xfU
+#define V_T6_SYNSHIFTMAX(x) ((x) << S_T6_SYNSHIFTMAX)
+#define G_T6_SYNSHIFTMAX(x) (((x) >> S_T6_SYNSHIFTMAX) & M_T6_SYNSHIFTMAX)
+
 #define A_TP_TM_CONFIG 0x7dc4
 
 #define S_CMTIMERMAXNUM    0
@@ -9313,6 +23152,78 @@
 #define V_ULPTYPE0FIELD(x) ((x) << S_ULPTYPE0FIELD)
 #define G_ULPTYPE0FIELD(x) (((x) >> S_ULPTYPE0FIELD) & M_ULPTYPE0FIELD)
 
+#define S_ULPTYPE7LENGTH    31
+#define V_ULPTYPE7LENGTH(x) ((x) << S_ULPTYPE7LENGTH)
+#define F_ULPTYPE7LENGTH    V_ULPTYPE7LENGTH(1U)
+
+#define S_ULPTYPE7OFFSET    28
+#define M_ULPTYPE7OFFSET    0x7U
+#define V_ULPTYPE7OFFSET(x) ((x) << S_ULPTYPE7OFFSET)
+#define G_ULPTYPE7OFFSET(x) (((x) >> S_ULPTYPE7OFFSET) & M_ULPTYPE7OFFSET)
+
+#define S_ULPTYPE6LENGTH    27
+#define V_ULPTYPE6LENGTH(x) ((x) << S_ULPTYPE6LENGTH)
+#define F_ULPTYPE6LENGTH    V_ULPTYPE6LENGTH(1U)
+
+#define S_ULPTYPE6OFFSET    24
+#define M_ULPTYPE6OFFSET    0x7U
+#define V_ULPTYPE6OFFSET(x) ((x) << S_ULPTYPE6OFFSET)
+#define G_ULPTYPE6OFFSET(x) (((x) >> S_ULPTYPE6OFFSET) & M_ULPTYPE6OFFSET)
+
+#define S_ULPTYPE5LENGTH    23
+#define V_ULPTYPE5LENGTH(x) ((x) << S_ULPTYPE5LENGTH)
+#define F_ULPTYPE5LENGTH    V_ULPTYPE5LENGTH(1U)
+
+#define S_ULPTYPE5OFFSET    20
+#define M_ULPTYPE5OFFSET    0x7U
+#define V_ULPTYPE5OFFSET(x) ((x) << S_ULPTYPE5OFFSET)
+#define G_ULPTYPE5OFFSET(x) (((x) >> S_ULPTYPE5OFFSET) & M_ULPTYPE5OFFSET)
+
+#define S_ULPTYPE4LENGTH    19
+#define V_ULPTYPE4LENGTH(x) ((x) << S_ULPTYPE4LENGTH)
+#define F_ULPTYPE4LENGTH    V_ULPTYPE4LENGTH(1U)
+
+#define S_ULPTYPE4OFFSET    16
+#define M_ULPTYPE4OFFSET    0x7U
+#define V_ULPTYPE4OFFSET(x) ((x) << S_ULPTYPE4OFFSET)
+#define G_ULPTYPE4OFFSET(x) (((x) >> S_ULPTYPE4OFFSET) & M_ULPTYPE4OFFSET)
+
+#define S_ULPTYPE3LENGTH    15
+#define V_ULPTYPE3LENGTH(x) ((x) << S_ULPTYPE3LENGTH)
+#define F_ULPTYPE3LENGTH    V_ULPTYPE3LENGTH(1U)
+
+#define S_ULPTYPE3OFFSET    12
+#define M_ULPTYPE3OFFSET    0x7U
+#define V_ULPTYPE3OFFSET(x) ((x) << S_ULPTYPE3OFFSET)
+#define G_ULPTYPE3OFFSET(x) (((x) >> S_ULPTYPE3OFFSET) & M_ULPTYPE3OFFSET)
+
+#define S_ULPTYPE2LENGTH    11
+#define V_ULPTYPE2LENGTH(x) ((x) << S_ULPTYPE2LENGTH)
+#define F_ULPTYPE2LENGTH    V_ULPTYPE2LENGTH(1U)
+
+#define S_ULPTYPE2OFFSET    8
+#define M_ULPTYPE2OFFSET    0x7U
+#define V_ULPTYPE2OFFSET(x) ((x) << S_ULPTYPE2OFFSET)
+#define G_ULPTYPE2OFFSET(x) (((x) >> S_ULPTYPE2OFFSET) & M_ULPTYPE2OFFSET)
+
+#define S_ULPTYPE1LENGTH    7
+#define V_ULPTYPE1LENGTH(x) ((x) << S_ULPTYPE1LENGTH)
+#define F_ULPTYPE1LENGTH    V_ULPTYPE1LENGTH(1U)
+
+#define S_ULPTYPE1OFFSET    4
+#define M_ULPTYPE1OFFSET    0x7U
+#define V_ULPTYPE1OFFSET(x) ((x) << S_ULPTYPE1OFFSET)
+#define G_ULPTYPE1OFFSET(x) (((x) >> S_ULPTYPE1OFFSET) & M_ULPTYPE1OFFSET)
+
+#define S_ULPTYPE0LENGTH    3
+#define V_ULPTYPE0LENGTH(x) ((x) << S_ULPTYPE0LENGTH)
+#define F_ULPTYPE0LENGTH    V_ULPTYPE0LENGTH(1U)
+
+#define S_ULPTYPE0OFFSET    0
+#define M_ULPTYPE0OFFSET    0x7U
+#define V_ULPTYPE0OFFSET(x) ((x) << S_ULPTYPE0OFFSET)
+#define G_ULPTYPE0OFFSET(x) (((x) >> S_ULPTYPE0OFFSET) & M_ULPTYPE0OFFSET)
+
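Each ULPTYPE slot above packs a 1-bit length-select and a 3-bit offset into one
nibble, eight slots filling the 32-bit register. A compile-time sanity check
makes the packing explicit; this is a hypothetical C11 sketch (the
ULPTYPE_SLOT_MASK helper is not part of the driver), assuming this header is
in scope:

	/* Mask of ULP type N's (length, offset) nibble. */
	#define ULPTYPE_SLOT_MASK(n) (0xfU << ((n) * 4))

	/* Slot 1: length bit 7 plus offset bits 6:4 must fill nibble 1. */
	_Static_assert((F_ULPTYPE1LENGTH |
	    V_ULPTYPE1OFFSET(M_ULPTYPE1OFFSET)) == ULPTYPE_SLOT_MASK(1),
	    "ULPTYPE slot 1 layout");
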
 #define A_TP_RSS_LKP_TABLE 0x7dec
 
 #define S_LKPTBLROWVLD    31
@@ -9334,6 +23245,11 @@
 #define V_LKPTBLQUEUE0(x) ((x) << S_LKPTBLQUEUE0)
 #define G_LKPTBLQUEUE0(x) (((x) >> S_LKPTBLQUEUE0) & M_LKPTBLQUEUE0)
 
+#define S_T6_LKPTBLROWIDX    20
+#define M_T6_LKPTBLROWIDX    0x7ffU
+#define V_T6_LKPTBLROWIDX(x) ((x) << S_T6_LKPTBLROWIDX)
+#define G_T6_LKPTBLROWIDX(x) (((x) >> S_T6_LKPTBLROWIDX) & M_T6_LKPTBLROWIDX)
+
 #define A_TP_RSS_CONFIG 0x7df0
 
 #define S_TNL4TUPENIPV6    31
@@ -9448,6 +23364,22 @@
 #define V_DISABLE(x) ((x) << S_DISABLE)
 #define F_DISABLE    V_DISABLE(1U)
 
+#define S_TNLFCOEMODE    23
+#define V_TNLFCOEMODE(x) ((x) << S_TNLFCOEMODE)
+#define F_TNLFCOEMODE    V_TNLFCOEMODE(1U)
+
+#define S_TNLFCOEEN    21
+#define V_TNLFCOEEN(x) ((x) << S_TNLFCOEEN)
+#define F_TNLFCOEEN    V_TNLFCOEEN(1U)
+
+#define S_HASHXOR    20
+#define V_HASHXOR(x) ((x) << S_HASHXOR)
+#define F_HASHXOR    V_HASHXOR(1U)
+
+#define S_TNLFCOESID    22
+#define V_TNLFCOESID(x) ((x) << S_TNLFCOESID)
+#define F_TNLFCOESID    V_TNLFCOESID(1U)
+
 #define A_TP_RSS_CONFIG_TNL 0x7df4
 
 #define S_MASKSIZE    28
@@ -9464,6 +23396,14 @@
 #define V_USEWIRECH(x) ((x) << S_USEWIRECH)
 #define F_USEWIRECH    V_USEWIRECH(1U)
 
+#define S_HASHALL    2
+#define V_HASHALL(x) ((x) << S_HASHALL)
+#define F_HASHALL    V_HASHALL(1U)
+
+#define S_HASHETH    1
+#define V_HASHETH(x) ((x) << S_HASHETH)
+#define F_HASHETH    V_HASHETH(1U)
+
 #define A_TP_RSS_CONFIG_OFD 0x7df8
 
 #define S_RRCPLMAPEN    20
@@ -9475,6 +23415,11 @@
 #define V_RRCPLQUEWIDTH(x) ((x) << S_RRCPLQUEWIDTH)
 #define G_RRCPLQUEWIDTH(x) (((x) >> S_RRCPLQUEWIDTH) & M_RRCPLQUEWIDTH)
 
+#define S_FRMWRQUEMASK    12
+#define M_FRMWRQUEMASK    0xfU
+#define V_FRMWRQUEMASK(x) ((x) << S_FRMWRQUEMASK)
+#define G_FRMWRQUEMASK(x) (((x) >> S_FRMWRQUEMASK) & M_FRMWRQUEMASK)
+
 #define A_TP_RSS_CONFIG_SYN 0x7dfc
 #define A_TP_RSS_CONFIG_VRT 0x7e00
 
@@ -9530,6 +23475,28 @@
 #define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
 #define G_KEYWRADDR(x) (((x) >> S_KEYWRADDR) & M_KEYWRADDR)
 
+#define S_VFVLANEN    21
+#define V_VFVLANEN(x) ((x) << S_VFVLANEN)
+#define F_VFVLANEN    V_VFVLANEN(1U)
+
+#define S_VFFWEN    20
+#define V_VFFWEN(x) ((x) << S_VFFWEN)
+#define F_VFFWEN    V_VFFWEN(1U)
+
+#define S_KEYWRADDRX    30
+#define M_KEYWRADDRX    0x3U
+#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX)
+#define G_KEYWRADDRX(x) (((x) >> S_KEYWRADDRX) & M_KEYWRADDRX)
+
+#define S_KEYEXTEND    26
+#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND)
+#define F_KEYEXTEND    V_KEYEXTEND(1U)
+
+#define S_T6_VFWRADDR    8
+#define M_T6_VFWRADDR    0xffU
+#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR)
+#define G_T6_VFWRADDR(x) (((x) >> S_T6_VFWRADDR) & M_T6_VFWRADDR)
+
 #define A_TP_RSS_CONFIG_CNG 0x7e04
 
 #define S_CHNCOUNT3    31
@@ -9909,6 +23876,14 @@
 #define V_DELINVFIFOPERR(x) ((x) << S_DELINVFIFOPERR)
 #define F_DELINVFIFOPERR    V_DELINVFIFOPERR(1U)
 
+#define S_CTPOUTPLDFIFOPERR    7
+#define V_CTPOUTPLDFIFOPERR(x) ((x) << S_CTPOUTPLDFIFOPERR)
+#define F_CTPOUTPLDFIFOPERR    V_CTPOUTPLDFIFOPERR(1U)
+
+#define S_SRQTABLEPERR    1
+#define V_SRQTABLEPERR(x) ((x) << S_SRQTABLEPERR)
+#define F_SRQTABLEPERR    V_SRQTABLEPERR(1U)
+
 #define A_TP_INT_CAUSE 0x7e74
 #define A_TP_PER_ENABLE 0x7e78
 #define A_TP_FLM_FREE_PS_CNT 0x7e80
@@ -9958,6 +23933,7 @@
 #define V_DISABLETIMEFREEZE(x) ((x) << S_DISABLETIMEFREEZE)
 #define F_DISABLETIMEFREEZE    V_DISABLETIMEFREEZE(1U)
 
+#define A_TP_STAMP_TIME 0x7ea8
 #define A_TP_DEBUG_FLAGS 0x7eac
 
 #define S_RXTIMERDACKFIRST    26
@@ -10052,6 +24028,18 @@
 #define V_TXRCVADVLTMSS(x) ((x) << S_TXRCVADVLTMSS)
 #define F_TXRCVADVLTMSS    V_TXRCVADVLTMSS(1U)
 
+#define S_RXTIMERCOMPBUFFER    27
+#define V_RXTIMERCOMPBUFFER(x) ((x) << S_RXTIMERCOMPBUFFER)
+#define F_RXTIMERCOMPBUFFER    V_RXTIMERCOMPBUFFER(1U)
+
+#define S_TXDFRFAST    13
+#define V_TXDFRFAST(x) ((x) << S_TXDFRFAST)
+#define F_TXDFRFAST    V_TXDFRFAST(1U)
+
+#define S_TXRXMMISC    12
+#define V_TXRXMMISC(x) ((x) << S_TXRXMMISC)
+#define F_TXRXMMISC    V_TXRXMMISC(1U)
+
 #define A_TP_RX_SCHED 0x7eb0
 
 #define S_RXCOMMITRESET1    31
@@ -10601,6 +24589,14 @@
 #define V_TXMAPCHANNEL0(x) ((x) << S_TXMAPCHANNEL0)
 #define G_TXMAPCHANNEL0(x) (((x) >> S_TXMAPCHANNEL0) & M_TXMAPCHANNEL0)
 
+#define S_TXLPKCHANNEL1    17
+#define V_TXLPKCHANNEL1(x) ((x) << S_TXLPKCHANNEL1)
+#define F_TXLPKCHANNEL1    V_TXLPKCHANNEL1(1U)
+
+#define S_TXLPKCHANNEL0    16
+#define V_TXLPKCHANNEL0(x) ((x) << S_TXLPKCHANNEL0)
+#define F_TXLPKCHANNEL0    V_TXLPKCHANNEL0(1U)
+
 #define A_TP_TX_SCHED_HDR 0x23
 
 #define S_TXMAPHDRCHANNEL7    28
@@ -10852,6 +24848,28 @@
 #define V_TXPPPENPORT0(x) ((x) << S_TXPPPENPORT0)
 #define G_TXPPPENPORT0(x) (((x) >> S_TXPPPENPORT0) & M_TXPPPENPORT0)
 
+#define A_TP_RX_SCHED_FIFO 0x2b
+
+#define S_COMMITLIMIT1H    24
+#define M_COMMITLIMIT1H    0xffU
+#define V_COMMITLIMIT1H(x) ((x) << S_COMMITLIMIT1H)
+#define G_COMMITLIMIT1H(x) (((x) >> S_COMMITLIMIT1H) & M_COMMITLIMIT1H)
+
+#define S_COMMITLIMIT1L    16
+#define M_COMMITLIMIT1L    0xffU
+#define V_COMMITLIMIT1L(x) ((x) << S_COMMITLIMIT1L)
+#define G_COMMITLIMIT1L(x) (((x) >> S_COMMITLIMIT1L) & M_COMMITLIMIT1L)
+
+#define S_COMMITLIMIT0H    8
+#define M_COMMITLIMIT0H    0xffU
+#define V_COMMITLIMIT0H(x) ((x) << S_COMMITLIMIT0H)
+#define G_COMMITLIMIT0H(x) (((x) >> S_COMMITLIMIT0H) & M_COMMITLIMIT0H)
+
+#define S_COMMITLIMIT0L    0
+#define M_COMMITLIMIT0L    0xffU
+#define V_COMMITLIMIT0L(x) ((x) << S_COMMITLIMIT0L)
+#define G_COMMITLIMIT0L(x) (((x) >> S_COMMITLIMIT0L) & M_COMMITLIMIT0L)
+
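The four COMMITLIMIT fields share A_TP_RX_SCHED_FIFO, so changing one limit is
the usual read-modify-write: clear the field through its V_(M_) mask, then OR
in the new value. A minimal sketch, assuming the register contents were
already fetched through the TP indirect-access window (the helper name is
hypothetical):

	#include <stdint.h>

	static inline uint32_t
	set_commit_limit_0l(uint32_t regval, uint32_t limit)
	{
		/* Clear the old 8-bit field, then insert the bounded new one. */
		regval &= ~V_COMMITLIMIT0L(M_COMMITLIMIT0L);
		regval |= V_COMMITLIMIT0L(limit & M_COMMITLIMIT0L);
		return (regval);
	}
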
 #define A_TP_IPMI_CFG1 0x2e
 
 #define S_VLANENABLE    31
@@ -10936,13 +24954,56 @@
 #define V_CH0DEFAULTQUEUE(x) ((x) << S_CH0DEFAULTQUEUE)
 #define G_CH0DEFAULTQUEUE(x) (((x) >> S_CH0DEFAULTQUEUE) & M_CH0DEFAULTQUEUE)
 
+#define S_PRIENABLE    30
+#define V_PRIENABLE(x) ((x) << S_PRIENABLE)
+#define F_PRIENABLE    V_PRIENABLE(1U)
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF1_CONFIG 0x31
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF2_CONFIG 0x32
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF3_CONFIG 0x33
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF4_CONFIG 0x34
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF5_CONFIG 0x35
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF6_CONFIG 0x36
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF7_CONFIG 0x37
+
+#define S_T6_CHNENABLE    29
+#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
+#define F_T6_CHNENABLE    V_T6_CHNENABLE(1U)
+
 #define A_TP_RSS_PF_MAP 0x38
 
 #define S_LKPIDXSIZE    24
@@ -11120,6 +25181,205 @@
 #define V_IPV4TYPE(x) ((x) << S_IPV4TYPE)
 #define G_IPV4TYPE(x) (((x) >> S_IPV4TYPE) & M_IPV4TYPE)
 
+#define A_TP_ETHER_TYPE_FW 0x52
+
+#define S_ETHTYPE1    16
+#define M_ETHTYPE1    0xffffU
+#define V_ETHTYPE1(x) ((x) << S_ETHTYPE1)
+#define G_ETHTYPE1(x) (((x) >> S_ETHTYPE1) & M_ETHTYPE1)
+
+#define S_ETHTYPE0    0
+#define M_ETHTYPE0    0xffffU
+#define V_ETHTYPE0(x) ((x) << S_ETHTYPE0)
+#define G_ETHTYPE0(x) (((x) >> S_ETHTYPE0) & M_ETHTYPE0)
+
+#define A_TP_VXLAN_HEADER 0x53
+
+#define S_VXLANPORT    0
+#define M_VXLANPORT    0xffffU
+#define V_VXLANPORT(x) ((x) << S_VXLANPORT)
+#define G_VXLANPORT(x) (((x) >> S_VXLANPORT) & M_VXLANPORT)
+
+#define A_TP_CORE_POWER 0x54
+
+#define S_SLEEPRDYVNT    12
+#define V_SLEEPRDYVNT(x) ((x) << S_SLEEPRDYVNT)
+#define F_SLEEPRDYVNT    V_SLEEPRDYVNT(1U)
+
+#define S_SLEEPRDYTBL    11
+#define V_SLEEPRDYTBL(x) ((x) << S_SLEEPRDYTBL)
+#define F_SLEEPRDYTBL    V_SLEEPRDYTBL(1U)
+
+#define S_SLEEPRDYMIB    10
+#define V_SLEEPRDYMIB(x) ((x) << S_SLEEPRDYMIB)
+#define F_SLEEPRDYMIB    V_SLEEPRDYMIB(1U)
+
+#define S_SLEEPRDYARP    9
+#define V_SLEEPRDYARP(x) ((x) << S_SLEEPRDYARP)
+#define F_SLEEPRDYARP    V_SLEEPRDYARP(1U)
+
+#define S_SLEEPRDYRSS    8
+#define V_SLEEPRDYRSS(x) ((x) << S_SLEEPRDYRSS)
+#define F_SLEEPRDYRSS    V_SLEEPRDYRSS(1U)
+
+#define S_SLEEPREQVNT    4
+#define V_SLEEPREQVNT(x) ((x) << S_SLEEPREQVNT)
+#define F_SLEEPREQVNT    V_SLEEPREQVNT(1U)
+
+#define S_SLEEPREQTBL    3
+#define V_SLEEPREQTBL(x) ((x) << S_SLEEPREQTBL)
+#define F_SLEEPREQTBL    V_SLEEPREQTBL(1U)
+
+#define S_SLEEPREQMIB    2
+#define V_SLEEPREQMIB(x) ((x) << S_SLEEPREQMIB)
+#define F_SLEEPREQMIB    V_SLEEPREQMIB(1U)
+
+#define S_SLEEPREQARP    1
+#define V_SLEEPREQARP(x) ((x) << S_SLEEPREQARP)
+#define F_SLEEPREQARP    V_SLEEPREQARP(1U)
+
+#define S_SLEEPREQRSS    0
+#define V_SLEEPREQRSS(x) ((x) << S_SLEEPREQRSS)
+#define F_SLEEPREQRSS    V_SLEEPREQRSS(1U)
+
+#define A_TP_CORE_RDMA 0x55
+
+#define S_IMMEDIATEOP    20
+#define M_IMMEDIATEOP    0xfU
+#define V_IMMEDIATEOP(x) ((x) << S_IMMEDIATEOP)
+#define G_IMMEDIATEOP(x) (((x) >> S_IMMEDIATEOP) & M_IMMEDIATEOP)
+
+#define S_IMMEDIATESE    16
+#define M_IMMEDIATESE    0xfU
+#define V_IMMEDIATESE(x) ((x) << S_IMMEDIATESE)
+#define G_IMMEDIATESE(x) (((x) >> S_IMMEDIATESE) & M_IMMEDIATESE)
+
+#define S_ATOMICREQOP    12
+#define M_ATOMICREQOP    0xfU
+#define V_ATOMICREQOP(x) ((x) << S_ATOMICREQOP)
+#define G_ATOMICREQOP(x) (((x) >> S_ATOMICREQOP) & M_ATOMICREQOP)
+
+#define S_ATOMICRSPOP    8
+#define M_ATOMICRSPOP    0xfU
+#define V_ATOMICRSPOP(x) ((x) << S_ATOMICRSPOP)
+#define G_ATOMICRSPOP(x) (((x) >> S_ATOMICRSPOP) & M_ATOMICRSPOP)
+
+#define S_IMMEDIASEEN    1
+#define V_IMMEDIASEEN(x) ((x) << S_IMMEDIASEEN)
+#define F_IMMEDIASEEN    V_IMMEDIASEEN(1U)
+
+#define S_IMMEDIATEEN    0
+#define V_IMMEDIATEEN(x) ((x) << S_IMMEDIATEEN)
+#define F_IMMEDIATEEN    V_IMMEDIATEEN(1U)
+
+#define S_SHAREDRQEN    31
+#define V_SHAREDRQEN(x) ((x) << S_SHAREDRQEN)
+#define F_SHAREDRQEN    V_SHAREDRQEN(1U)
+
+#define S_SHAREDXRC    30
+#define V_SHAREDXRC(x) ((x) << S_SHAREDXRC)
+#define F_SHAREDXRC    V_SHAREDXRC(1U)
+
+#define A_TP_FRAG_CONFIG 0x56
+
+#define S_TLSMODE    16
+#define M_TLSMODE    0x3U
+#define V_TLSMODE(x) ((x) << S_TLSMODE)
+#define G_TLSMODE(x) (((x) >> S_TLSMODE) & M_TLSMODE)
+
+#define S_USERMODE    14
+#define M_USERMODE    0x3U
+#define V_USERMODE(x) ((x) << S_USERMODE)
+#define G_USERMODE(x) (((x) >> S_USERMODE) & M_USERMODE)
+
+#define S_FCOEMODE    12
+#define M_FCOEMODE    0x3U
+#define V_FCOEMODE(x) ((x) << S_FCOEMODE)
+#define G_FCOEMODE(x) (((x) >> S_FCOEMODE) & M_FCOEMODE)
+
+#define S_IANDPMODE    10
+#define M_IANDPMODE    0x3U
+#define V_IANDPMODE(x) ((x) << S_IANDPMODE)
+#define G_IANDPMODE(x) (((x) >> S_IANDPMODE) & M_IANDPMODE)
+
+#define S_RDDPMODE    8
+#define M_RDDPMODE    0x3U
+#define V_RDDPMODE(x) ((x) << S_RDDPMODE)
+#define G_RDDPMODE(x) (((x) >> S_RDDPMODE) & M_RDDPMODE)
+
+#define S_IWARPMODE    6
+#define M_IWARPMODE    0x3U
+#define V_IWARPMODE(x) ((x) << S_IWARPMODE)
+#define G_IWARPMODE(x) (((x) >> S_IWARPMODE) & M_IWARPMODE)
+
+#define S_ISCSIMODE    4
+#define M_ISCSIMODE    0x3U
+#define V_ISCSIMODE(x) ((x) << S_ISCSIMODE)
+#define G_ISCSIMODE(x) (((x) >> S_ISCSIMODE) & M_ISCSIMODE)
+
+#define S_DDPMODE    2
+#define M_DDPMODE    0x3U
+#define V_DDPMODE(x) ((x) << S_DDPMODE)
+#define G_DDPMODE(x) (((x) >> S_DDPMODE) & M_DDPMODE)
+
+#define S_PASSMODE    0
+#define M_PASSMODE    0x3U
+#define V_PASSMODE(x) ((x) << S_PASSMODE)
+#define G_PASSMODE(x) (((x) >> S_PASSMODE) & M_PASSMODE)
+
+#define A_TP_CMM_CONFIG 0x57
+
+#define S_WRCNTIDLE    16
+#define M_WRCNTIDLE    0xffffU
+#define V_WRCNTIDLE(x) ((x) << S_WRCNTIDLE)
+#define G_WRCNTIDLE(x) (((x) >> S_WRCNTIDLE) & M_WRCNTIDLE)
+
+#define S_RDTHRESHOLD    8
+#define M_RDTHRESHOLD    0x3fU
+#define V_RDTHRESHOLD(x) ((x) << S_RDTHRESHOLD)
+#define G_RDTHRESHOLD(x) (((x) >> S_RDTHRESHOLD) & M_RDTHRESHOLD)
+
+#define S_WRTHRLEVEL2    7
+#define V_WRTHRLEVEL2(x) ((x) << S_WRTHRLEVEL2)
+#define F_WRTHRLEVEL2    V_WRTHRLEVEL2(1U)
+
+#define S_WRTHRLEVEL1    6
+#define V_WRTHRLEVEL1(x) ((x) << S_WRTHRLEVEL1)
+#define F_WRTHRLEVEL1    V_WRTHRLEVEL1(1U)
+
+#define S_WRTHRTHRESHEN    5
+#define V_WRTHRTHRESHEN(x) ((x) << S_WRTHRTHRESHEN)
+#define F_WRTHRTHRESHEN    V_WRTHRTHRESHEN(1U)
+
+#define S_WRTHRTHRESH    0
+#define M_WRTHRTHRESH    0x1fU
+#define V_WRTHRTHRESH(x) ((x) << S_WRTHRTHRESH)
+#define G_WRTHRTHRESH(x) (((x) >> S_WRTHRTHRESH) & M_WRTHRTHRESH)
+
+#define A_TP_VXLAN_CONFIG 0x58
+
+#define S_VXLANFLAGS    16
+#define M_VXLANFLAGS    0xffffU
+#define V_VXLANFLAGS(x) ((x) << S_VXLANFLAGS)
+#define G_VXLANFLAGS(x) (((x) >> S_VXLANFLAGS) & M_VXLANFLAGS)
+
+#define S_VXLANTYPE    0
+#define M_VXLANTYPE    0xffffU
+#define V_VXLANTYPE(x) ((x) << S_VXLANTYPE)
+#define G_VXLANTYPE(x) (((x) >> S_VXLANTYPE) & M_VXLANTYPE)
+
+#define A_TP_NVGRE_CONFIG 0x59
+
+#define S_GREFLAGS    16
+#define M_GREFLAGS    0xffffU
+#define V_GREFLAGS(x) ((x) << S_GREFLAGS)
+#define G_GREFLAGS(x) (((x) >> S_GREFLAGS) & M_GREFLAGS)
+
+#define S_GRETYPE    0
+#define M_GRETYPE    0xffffU
+#define V_GRETYPE(x) ((x) << S_GRETYPE)
+#define G_GRETYPE(x) (((x) >> S_GRETYPE) & M_GRETYPE)
+
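A_TP_VXLAN_CONFIG and A_TP_NVGRE_CONFIG share one layout: a 16-bit flags field
in the upper half and a 16-bit type field in the lower half, and every G_
macro exactly undoes its V_ counterpart. A sketch under that assumption
(0x6558, the transparent-Ethernet-bridging protocol type commonly carried in
NVGRE headers, is only an illustrative operand; whether A_TP_NVGRE_CONFIG
expects it is not established here):

	#include <stdint.h>

	static inline uint32_t
	mk_nvgre_config(uint16_t flags, uint16_t ptype)
	{
		/* Widen first so the shift in V_GREFLAGS stays unsigned. */
		return (V_GREFLAGS((uint32_t)flags) |
		    V_GRETYPE((uint32_t)ptype));
	}
	/* G_GRETYPE(mk_nvgre_config(0, 0x6558)) == 0x6558 */
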
 #define A_TP_DBG_CLEAR 0x60
 #define A_TP_DBG_CORE_HDR0 0x61
 
@@ -11239,16 +25499,16 @@
 #define V_SRAMFATAL(x) ((x) << S_SRAMFATAL)
 #define F_SRAMFATAL    V_SRAMFATAL(1U)
 
-#define S_EPCMDCONG    24
-#define M_EPCMDCONG    0xfU
+#define S_CPCMDCONG    24
+#define M_CPCMDCONG    0xfU
+#define V_CPCMDCONG(x) ((x) << S_CPCMDCONG)
+#define G_CPCMDCONG(x) (((x) >> S_CPCMDCONG) & M_CPCMDCONG)
+
+#define S_EPCMDCONG    22
+#define M_EPCMDCONG    0x3U
 #define V_EPCMDCONG(x) ((x) << S_EPCMDCONG)
 #define G_EPCMDCONG(x) (((x) >> S_EPCMDCONG) & M_EPCMDCONG)
 
-#define S_CPCMDCONG    22
-#define M_CPCMDCONG    0x3U
-#define V_CPCMDCONG(x) ((x) << S_CPCMDCONG)
-#define G_CPCMDCONG(x) (((x) >> S_CPCMDCONG) & M_CPCMDCONG)
-
 #define S_CPCMDLENFATAL    21
 #define V_CPCMDLENFATAL(x) ((x) << S_CPCMDLENFATAL)
 #define F_CPCMDLENFATAL    V_CPCMDLENFATAL(1U)
@@ -11295,6 +25555,14 @@
 #define V_CPCMDEOICNT(x) ((x) << S_CPCMDEOICNT)
 #define G_CPCMDEOICNT(x) (((x) >> S_CPCMDEOICNT) & M_CPCMDEOICNT)
 
+#define S_CPCMDTTLFATAL    6
+#define V_CPCMDTTLFATAL(x) ((x) << S_CPCMDTTLFATAL)
+#define F_CPCMDTTLFATAL    V_CPCMDTTLFATAL(1U)
+
+#define S_CDATACHNFATAL    5
+#define V_CDATACHNFATAL(x) ((x) << S_CDATACHNFATAL)
+#define F_CDATACHNFATAL    V_CDATACHNFATAL(1U)
+
 #define A_TP_DBG_CORE_OUT 0x64
 
 #define S_CCPLENC    26
@@ -11385,6 +25653,46 @@
 #define V_EPLDTXZEROPDRDY(x) ((x) << S_EPLDTXZEROPDRDY)
 #define F_EPLDTXZEROPDRDY    V_EPLDTXZEROPDRDY(1U)
 
+#define S_CRXBUSYOUT    31
+#define V_CRXBUSYOUT(x) ((x) << S_CRXBUSYOUT)
+#define F_CRXBUSYOUT    V_CRXBUSYOUT(1U)
+
+#define S_CTXBUSYOUT    30
+#define V_CTXBUSYOUT(x) ((x) << S_CTXBUSYOUT)
+#define F_CTXBUSYOUT    V_CTXBUSYOUT(1U)
+
+#define S_CRDCPLPKT    29
+#define V_CRDCPLPKT(x) ((x) << S_CRDCPLPKT)
+#define F_CRDCPLPKT    V_CRDCPLPKT(1U)
+
+#define S_CRDTCPPKT    28
+#define V_CRDTCPPKT(x) ((x) << S_CRDTCPPKT)
+#define F_CRDTCPPKT    V_CRDTCPPKT(1U)
+
+#define S_CNEWMSG    27
+#define V_CNEWMSG(x) ((x) << S_CNEWMSG)
+#define F_CNEWMSG    V_CNEWMSG(1U)
+
+#define S_ERXBUSYOUT    15
+#define V_ERXBUSYOUT(x) ((x) << S_ERXBUSYOUT)
+#define F_ERXBUSYOUT    V_ERXBUSYOUT(1U)
+
+#define S_ETXBUSYOUT    14
+#define V_ETXBUSYOUT(x) ((x) << S_ETXBUSYOUT)
+#define F_ETXBUSYOUT    V_ETXBUSYOUT(1U)
+
+#define S_ERDCPLPKT    13
+#define V_ERDCPLPKT(x) ((x) << S_ERDCPLPKT)
+#define F_ERDCPLPKT    V_ERDCPLPKT(1U)
+
+#define S_ERDTCPPKT    12
+#define V_ERDTCPPKT(x) ((x) << S_ERDTCPPKT)
+#define F_ERDTCPPKT    V_ERDTCPPKT(1U)
+
+#define S_ENEWMSG    11
+#define V_ENEWMSG(x) ((x) << S_ENEWMSG)
+#define F_ENEWMSG    V_ENEWMSG(1U)
+
 #define A_TP_DBG_CORE_TID 0x65
 
 #define S_LINENUMBER    24
@@ -11405,6 +25713,11 @@
 #define V_TIDVALUE(x) ((x) << S_TIDVALUE)
 #define G_TIDVALUE(x) (((x) >> S_TIDVALUE) & M_TIDVALUE)
 
+#define S_SRC    21
+#define M_SRC    0x3U
+#define V_SRC(x) ((x) << S_SRC)
+#define G_SRC(x) (((x) >> S_SRC) & M_SRC)
+
 #define A_TP_DBG_ENG_RES0 0x66
 
 #define S_RESOURCESREADY    31
@@ -11502,14 +25815,39 @@
 #define V_CPCMDBUSY(x) ((x) << S_CPCMDBUSY)
 #define F_CPCMDBUSY    V_CPCMDBUSY(1U)
 
-#define S_ETXBUSY    1
+#define S_EPCMDBUSY    1
+#define V_EPCMDBUSY(x) ((x) << S_EPCMDBUSY)
+#define F_EPCMDBUSY    V_EPCMDBUSY(1U)
+
+#define S_ETXBUSY    0
 #define V_ETXBUSY(x) ((x) << S_ETXBUSY)
 #define F_ETXBUSY    V_ETXBUSY(1U)
 
-#define S_EPCMDBUSY    0
-#define V_EPCMDBUSY(x) ((x) << S_EPCMDBUSY)
-#define F_EPCMDBUSY    V_EPCMDBUSY(1U)
+#define S_EFFOPCODEOUT    16
+#define M_EFFOPCODEOUT    0xfU
+#define V_EFFOPCODEOUT(x) ((x) << S_EFFOPCODEOUT)
+#define G_EFFOPCODEOUT(x) (((x) >> S_EFFOPCODEOUT) & M_EFFOPCODEOUT)
 
+#define S_DELDRDY    14
+#define V_DELDRDY(x) ((x) << S_DELDRDY)
+#define F_DELDRDY    V_DELDRDY(1U)
+
+#define S_T5_ETXBUSY    1
+#define V_T5_ETXBUSY(x) ((x) << S_T5_ETXBUSY)
+#define F_T5_ETXBUSY    V_T5_ETXBUSY(1U)
+
+#define S_T5_EPCMDBUSY    0
+#define V_T5_EPCMDBUSY(x) ((x) << S_T5_EPCMDBUSY)
+#define F_T5_EPCMDBUSY    V_T5_EPCMDBUSY(1U)
+
+#define S_T6_ETXBUSY    1
+#define V_T6_ETXBUSY(x) ((x) << S_T6_ETXBUSY)
+#define F_T6_ETXBUSY    V_T6_ETXBUSY(1U)
+
+#define S_T6_EPCMDBUSY    0
+#define V_T6_EPCMDBUSY(x) ((x) << S_T6_EPCMDBUSY)
+#define F_T6_EPCMDBUSY    V_T6_EPCMDBUSY(1U)
+
 #define A_TP_DBG_ENG_RES1 0x67
 
 #define S_RXCPLSRDY    31
@@ -11599,6 +25937,10 @@
 #define V_RCFDATACMRDY(x) ((x) << S_RCFDATACMRDY)
 #define F_RCFDATACMRDY    V_RCFDATACMRDY(1U)
 
+#define S_RXISSSRDY    28
+#define V_RXISSSRDY(x) ((x) << S_RXISSSRDY)
+#define F_RXISSSRDY    V_RXISSSRDY(1U)
+
 #define A_TP_DBG_ENG_RES2 0x68
 
 #define S_CPLCMDRAW    24
@@ -11759,6 +26101,97 @@
 #define V_RXMODXOFF(x) ((x) << S_RXMODXOFF)
 #define G_RXMODXOFF(x) (((x) >> S_RXMODXOFF) & M_RXMODXOFF)
 
+#define S_T5_RXFIFOCNG    20
+#define M_T5_RXFIFOCNG    0xfU
+#define V_T5_RXFIFOCNG(x) ((x) << S_T5_RXFIFOCNG)
+#define G_T5_RXFIFOCNG(x) (((x) >> S_T5_RXFIFOCNG) & M_T5_RXFIFOCNG)
+
+#define S_T5_RXPCMDCNG    14
+#define M_T5_RXPCMDCNG    0x3U
+#define V_T5_RXPCMDCNG(x) ((x) << S_T5_RXPCMDCNG)
+#define G_T5_RXPCMDCNG(x) (((x) >> S_T5_RXPCMDCNG) & M_T5_RXPCMDCNG)
+
+#define S_T6_RXFIFOCNG    20
+#define M_T6_RXFIFOCNG    0xfU
+#define V_T6_RXFIFOCNG(x) ((x) << S_T6_RXFIFOCNG)
+#define G_T6_RXFIFOCNG(x) (((x) >> S_T6_RXFIFOCNG) & M_T6_RXFIFOCNG)
+
+#define S_T6_RXPCMDCNG    14
+#define M_T6_RXPCMDCNG    0x3U
+#define V_T6_RXPCMDCNG(x) ((x) << S_T6_RXPCMDCNG)
+#define G_T6_RXPCMDCNG(x) (((x) >> S_T6_RXPCMDCNG) & M_T6_RXPCMDCNG)
+
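Several debug registers in this update grow parallel T5_/T6_ macro families
for the same logical field; RXFIFOCNG happens to sit at the same offset on
both revisions, but callers still select the family for the chip at hand. A
sketch of that dispatch (is_t6_chip is a placeholder predicate, not a driver
function):

	#include <stdint.h>

	static inline uint32_t
	rx_fifo_cng(uint32_t regval, int is_t6_chip)
	{
		return (is_t6_chip ? G_T6_RXFIFOCNG(regval) :
		    G_T5_RXFIFOCNG(regval));
	}
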
+#define A_TP_DBG_ERROR_CNT 0x6c
+#define A_TP_DBG_CORE_CPL 0x6d
+
+#define S_CPLCMDOUT3    24
+#define M_CPLCMDOUT3    0xffU
+#define V_CPLCMDOUT3(x) ((x) << S_CPLCMDOUT3)
+#define G_CPLCMDOUT3(x) (((x) >> S_CPLCMDOUT3) & M_CPLCMDOUT3)
+
+#define S_CPLCMDOUT2    16
+#define M_CPLCMDOUT2    0xffU
+#define V_CPLCMDOUT2(x) ((x) << S_CPLCMDOUT2)
+#define G_CPLCMDOUT2(x) (((x) >> S_CPLCMDOUT2) & M_CPLCMDOUT2)
+
+#define S_CPLCMDOUT1    8
+#define M_CPLCMDOUT1    0xffU
+#define V_CPLCMDOUT1(x) ((x) << S_CPLCMDOUT1)
+#define G_CPLCMDOUT1(x) (((x) >> S_CPLCMDOUT1) & M_CPLCMDOUT1)
+
+#define S_CPLCMDOUT0    0
+#define M_CPLCMDOUT0    0xffU
+#define V_CPLCMDOUT0(x) ((x) << S_CPLCMDOUT0)
+#define G_CPLCMDOUT0(x) (((x) >> S_CPLCMDOUT0) & M_CPLCMDOUT0)
+
+#define A_TP_MIB_DEBUG 0x6f
+
+#define S_SRC3    31
+#define V_SRC3(x) ((x) << S_SRC3)
+#define F_SRC3    V_SRC3(1U)
+
+#define S_LINENUM3    24
+#define M_LINENUM3    0x7fU
+#define V_LINENUM3(x) ((x) << S_LINENUM3)
+#define G_LINENUM3(x) (((x) >> S_LINENUM3) & M_LINENUM3)
+
+#define S_SRC2    23
+#define V_SRC2(x) ((x) << S_SRC2)
+#define F_SRC2    V_SRC2(1U)
+
+#define S_LINENUM2    16
+#define M_LINENUM2    0x7fU
+#define V_LINENUM2(x) ((x) << S_LINENUM2)
+#define G_LINENUM2(x) (((x) >> S_LINENUM2) & M_LINENUM2)
+
+#define S_SRC1    15
+#define V_SRC1(x) ((x) << S_SRC1)
+#define F_SRC1    V_SRC1(1U)
+
+#define S_LINENUM1    8
+#define M_LINENUM1    0x7fU
+#define V_LINENUM1(x) ((x) << S_LINENUM1)
+#define G_LINENUM1(x) (((x) >> S_LINENUM1) & M_LINENUM1)
+
+#define S_SRC0    7
+#define V_SRC0(x) ((x) << S_SRC0)
+#define F_SRC0    V_SRC0(1U)
+
+#define S_LINENUM0    0
+#define M_LINENUM0    0x7fU
+#define V_LINENUM0(x) ((x) << S_LINENUM0)
+#define G_LINENUM0(x) (((x) >> S_LINENUM0) & M_LINENUM0)
+
+#define A_TP_DBG_CACHE_WR_ALL 0x70
+#define A_TP_DBG_CACHE_WR_HIT 0x71
+#define A_TP_DBG_CACHE_RD_ALL 0x72
+#define A_TP_DBG_CACHE_RD_HIT 0x73
+#define A_TP_DBG_CACHE_MC_REQ 0x74
+#define A_TP_DBG_CACHE_MC_RSP 0x75
+#define A_TP_T5_TX_DROP_CNT_CH0 0x120
+#define A_TP_T5_TX_DROP_CNT_CH1 0x121
+#define A_TP_TX_DROP_CNT_CH2 0x122
+#define A_TP_TX_DROP_CNT_CH3 0x123
 #define A_TP_TX_DROP_CFG_CH0 0x12b
 
 #define S_TIMERENABLED    31
@@ -12224,7 +26657,36 @@
 #define V_TXFULL(x) ((x) << S_TXFULL)
 #define F_TXFULL    V_TXFULL(1U)
 
+#define S_FIFOGRERXVALID    15
+#define V_FIFOGRERXVALID(x) ((x) << S_FIFOGRERXVALID)
+#define F_FIFOGRERXVALID    V_FIFOGRERXVALID(1U)
+
+#define S_FIFOGRERXREADY    14
+#define V_FIFOGRERXREADY(x) ((x) << S_FIFOGRERXREADY)
+#define F_FIFOGRERXREADY    V_FIFOGRERXREADY(1U)
+
+#define S_FIFOGRERXSOCP    13
+#define V_FIFOGRERXSOCP(x) ((x) << S_FIFOGRERXSOCP)
+#define F_FIFOGRERXSOCP    V_FIFOGRERXSOCP(1U)
+
+#define S_T6_ESTATIC4    12
+#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
+#define F_T6_ESTATIC4    V_T6_ESTATIC4(1U)
+
+#define S_TXFULL_ESIDE0    0
+#define V_TXFULL_ESIDE0(x) ((x) << S_TXFULL_ESIDE0)
+#define F_TXFULL_ESIDE0    V_TXFULL_ESIDE0(1U)
+
 #define A_TP_DBG_ESIDE_DISP1 0x137
+
+#define S_T6_ESTATIC4    12
+#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
+#define F_T6_ESTATIC4    V_T6_ESTATIC4(1U)
+
+#define S_TXFULL_ESIDE1    0
+#define V_TXFULL_ESIDE1(x) ((x) << S_TXFULL_ESIDE1)
+#define F_TXFULL_ESIDE1    V_TXFULL_ESIDE1(1U)
+
 #define A_TP_MAC_MATCH_MAP0 0x138
 
 #define S_MAPVALUEWR    16
@@ -12253,7 +26715,25 @@
 #define G_MAPVALUERD(x) (((x) >> S_MAPVALUERD) & M_MAPVALUERD)
 
 #define A_TP_DBG_ESIDE_DISP2 0x13a
+
+#define S_T6_ESTATIC4    12
+#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
+#define F_T6_ESTATIC4    V_T6_ESTATIC4(1U)
+
+#define S_TXFULL_ESIDE2    0
+#define V_TXFULL_ESIDE2(x) ((x) << S_TXFULL_ESIDE2)
+#define F_TXFULL_ESIDE2    V_TXFULL_ESIDE2(1U)
+
 #define A_TP_DBG_ESIDE_DISP3 0x13b
+
+#define S_T6_ESTATIC4    12
+#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
+#define F_T6_ESTATIC4    V_T6_ESTATIC4(1U)
+
+#define S_TXFULL_ESIDE3    0
+#define V_TXFULL_ESIDE3(x) ((x) << S_TXFULL_ESIDE3)
+#define F_TXFULL_ESIDE3    V_TXFULL_ESIDE3(1U)
+
 #define A_TP_DBG_ESIDE_HDR0 0x13c
 
 #define S_TCPSOPCNT    28
@@ -12341,6 +26821,18 @@
 #define V_FCOE(x) ((x) << S_FCOE)
 #define F_FCOE    V_FCOE(1U)
 
+#define S_FILTERMODE    15
+#define V_FILTERMODE(x) ((x) << S_FILTERMODE)
+#define F_FILTERMODE    V_FILTERMODE(1U)
+
+#define S_FCOEMASK    14
+#define V_FCOEMASK(x) ((x) << S_FCOEMASK)
+#define F_FCOEMASK    V_FCOEMASK(1U)
+
+#define S_SRVRSRAM    13
+#define V_SRVRSRAM(x) ((x) << S_SRVRSRAM)
+#define F_SRVRSRAM    V_SRVRSRAM(1U)
+
 #define A_TP_INGRESS_CONFIG 0x141
 
 #define S_OPAQUE_TYPE    16
@@ -12385,6 +26877,14 @@
 #define V_IPV6_EXT_HDR_SKIP(x) ((x) << S_IPV6_EXT_HDR_SKIP)
 #define G_IPV6_EXT_HDR_SKIP(x) (((x) >> S_IPV6_EXT_HDR_SKIP) & M_IPV6_EXT_HDR_SKIP)
 
+#define S_FRAG_LEN_MOD8_COMPAT    12
+#define V_FRAG_LEN_MOD8_COMPAT(x) ((x) << S_FRAG_LEN_MOD8_COMPAT)
+#define F_FRAG_LEN_MOD8_COMPAT    V_FRAG_LEN_MOD8_COMPAT(1U)
+
+#define S_USE_ENC_IDX    13
+#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX)
+#define F_USE_ENC_IDX    V_USE_ENC_IDX(1U)
+
 #define A_TP_TX_DROP_CFG_CH2 0x142
 #define A_TP_TX_DROP_CFG_CH3 0x143
 #define A_TP_EGRESS_CONFIG 0x145
@@ -12393,6 +26893,31 @@
 #define V_REWRITEFORCETOSIZE(x) ((x) << S_REWRITEFORCETOSIZE)
 #define F_REWRITEFORCETOSIZE    V_REWRITEFORCETOSIZE(1U)
 
+#define A_TP_INGRESS_CONFIG2 0x145
+
+#define S_IPV6_UDP_CSUM_COMPAT    31
+#define V_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_IPV6_UDP_CSUM_COMPAT)
+#define F_IPV6_UDP_CSUM_COMPAT    V_IPV6_UDP_CSUM_COMPAT(1U)
+
+#define S_VNTAGPLDENABLE    30
+#define V_VNTAGPLDENABLE(x) ((x) << S_VNTAGPLDENABLE)
+#define F_VNTAGPLDENABLE    V_VNTAGPLDENABLE(1U)
+
+#define S_TCP_PLD_FILTER_OFFSET    20
+#define M_TCP_PLD_FILTER_OFFSET    0x3ffU
+#define V_TCP_PLD_FILTER_OFFSET(x) ((x) << S_TCP_PLD_FILTER_OFFSET)
+#define G_TCP_PLD_FILTER_OFFSET(x) (((x) >> S_TCP_PLD_FILTER_OFFSET) & M_TCP_PLD_FILTER_OFFSET)
+
+#define S_UDP_PLD_FILTER_OFFSET    10
+#define M_UDP_PLD_FILTER_OFFSET    0x3ffU
+#define V_UDP_PLD_FILTER_OFFSET(x) ((x) << S_UDP_PLD_FILTER_OFFSET)
+#define G_UDP_PLD_FILTER_OFFSET(x) (((x) >> S_UDP_PLD_FILTER_OFFSET) & M_UDP_PLD_FILTER_OFFSET)
+
+#define S_TNL_PLD_FILTER_OFFSET    0
+#define M_TNL_PLD_FILTER_OFFSET    0x3ffU
+#define V_TNL_PLD_FILTER_OFFSET(x) ((x) << S_TNL_PLD_FILTER_OFFSET)
+#define G_TNL_PLD_FILTER_OFFSET(x) (((x) >> S_TNL_PLD_FILTER_OFFSET) & M_TNL_PLD_FILTER_OFFSET)
+
 #define A_TP_EHDR_CONFIG_LO 0x146
 
 #define S_CPLLIMIT    24
@@ -12480,6 +27005,14 @@
 #define V_TCPOPTTXFULL(x) ((x) << S_TCPOPTTXFULL)
 #define F_TCPOPTTXFULL    V_TCPOPTTXFULL(1U)
 
+#define S_PKTATTRSRDY    3
+#define V_PKTATTRSRDY(x) ((x) << S_PKTATTRSRDY)
+#define F_PKTATTRSRDY    V_PKTATTRSRDY(1U)
+
+#define S_PKTATTRDRDY    2
+#define V_PKTATTRDRDY(x) ((x) << S_PKTATTRDRDY)
+#define F_PKTATTRDRDY    V_PKTATTRDRDY(1U)
+
 #define A_TP_DBG_ESIDE_DEMUX 0x149
 
 #define S_EALLDONE    28
@@ -12652,6 +27185,22 @@
 #define V_RX_PKT_ATTR_DRDY(x) ((x) << S_RX_PKT_ATTR_DRDY)
 #define F_RX_PKT_ATTR_DRDY    V_RX_PKT_ATTR_DRDY(1U)
 
+#define S_RXRUNT    25
+#define V_RXRUNT(x) ((x) << S_RXRUNT)
+#define F_RXRUNT    V_RXRUNT(1U)
+
+#define S_RXRUNTPARSER    24
+#define V_RXRUNTPARSER(x) ((x) << S_RXRUNTPARSER)
+#define F_RXRUNTPARSER    V_RXRUNTPARSER(1U)
+
+#define S_ERROR_SRDY    5
+#define V_ERROR_SRDY(x) ((x) << S_ERROR_SRDY)
+#define F_ERROR_SRDY    V_ERROR_SRDY(1U)
+
+#define S_ERROR_DRDY    4
+#define V_ERROR_DRDY(x) ((x) << S_ERROR_DRDY)
+#define F_ERROR_DRDY    V_ERROR_DRDY(1U)
+
 #define A_TP_DBG_ESIDE_IN1 0x14b
 #define A_TP_DBG_ESIDE_IN2 0x14c
 #define A_TP_DBG_ESIDE_IN3 0x14d
@@ -12731,6 +27280,11 @@
 #define V_ETXFULL(x) ((x) << S_ETXFULL)
 #define G_ETXFULL(x) (((x) >> S_ETXFULL) & M_ETXFULL)
 
+#define S_TXERRORCNT    8
+#define M_TXERRORCNT    0xffffffU
+#define V_TXERRORCNT(x) ((x) << S_TXERRORCNT)
+#define G_TXERRORCNT(x) (((x) >> S_TXERRORCNT) & M_TXERRORCNT)
+
 #define A_TP_ESIDE_SVID_MASK 0x151
 #define A_TP_ESIDE_DVID_MASK 0x152
 #define A_TP_ESIDE_ALIGN_MASK 0x153
@@ -12754,6 +27308,185 @@
 #define V_SVID_ID_OFFSET(x) ((x) << S_SVID_ID_OFFSET)
 #define G_SVID_ID_OFFSET(x) (((x) >> S_SVID_ID_OFFSET) & M_SVID_ID_OFFSET)
 
+#define A_TP_DBG_ESIDE_OP 0x154
+
+#define S_OPT_PARSER_FATAL_CHANNEL0    29
+#define V_OPT_PARSER_FATAL_CHANNEL0(x) ((x) << S_OPT_PARSER_FATAL_CHANNEL0)
+#define F_OPT_PARSER_FATAL_CHANNEL0    V_OPT_PARSER_FATAL_CHANNEL0(1U)
+
+#define S_OPT_PARSER_BUSY_CHANNEL0    28
+#define V_OPT_PARSER_BUSY_CHANNEL0(x) ((x) << S_OPT_PARSER_BUSY_CHANNEL0)
+#define F_OPT_PARSER_BUSY_CHANNEL0    V_OPT_PARSER_BUSY_CHANNEL0(1U)
+
+#define S_OPT_PARSER_ITCP_STATE_CHANNEL0    26
+#define M_OPT_PARSER_ITCP_STATE_CHANNEL0    0x3U
+#define V_OPT_PARSER_ITCP_STATE_CHANNEL0(x) ((x) << S_OPT_PARSER_ITCP_STATE_CHANNEL0)
+#define G_OPT_PARSER_ITCP_STATE_CHANNEL0(x) (((x) >> S_OPT_PARSER_ITCP_STATE_CHANNEL0) & M_OPT_PARSER_ITCP_STATE_CHANNEL0)
+
+#define S_OPT_PARSER_OTK_STATE_CHANNEL0    24
+#define M_OPT_PARSER_OTK_STATE_CHANNEL0    0x3U
+#define V_OPT_PARSER_OTK_STATE_CHANNEL0(x) ((x) << S_OPT_PARSER_OTK_STATE_CHANNEL0)
+#define G_OPT_PARSER_OTK_STATE_CHANNEL0(x) (((x) >> S_OPT_PARSER_OTK_STATE_CHANNEL0) & M_OPT_PARSER_OTK_STATE_CHANNEL0)
+
+#define S_OPT_PARSER_FATAL_CHANNEL1    21
+#define V_OPT_PARSER_FATAL_CHANNEL1(x) ((x) << S_OPT_PARSER_FATAL_CHANNEL1)
+#define F_OPT_PARSER_FATAL_CHANNEL1    V_OPT_PARSER_FATAL_CHANNEL1(1U)
+
+#define S_OPT_PARSER_BUSY_CHANNEL1    20
+#define V_OPT_PARSER_BUSY_CHANNEL1(x) ((x) << S_OPT_PARSER_BUSY_CHANNEL1)
+#define F_OPT_PARSER_BUSY_CHANNEL1    V_OPT_PARSER_BUSY_CHANNEL1(1U)
+
+#define S_OPT_PARSER_ITCP_STATE_CHANNEL1    18
+#define M_OPT_PARSER_ITCP_STATE_CHANNEL1    0x3U
+#define V_OPT_PARSER_ITCP_STATE_CHANNEL1(x) ((x) << S_OPT_PARSER_ITCP_STATE_CHANNEL1)
+#define G_OPT_PARSER_ITCP_STATE_CHANNEL1(x) (((x) >> S_OPT_PARSER_ITCP_STATE_CHANNEL1) & M_OPT_PARSER_ITCP_STATE_CHANNEL1)
+
+#define S_OPT_PARSER_OTK_STATE_CHANNEL1    16
+#define M_OPT_PARSER_OTK_STATE_CHANNEL1    0x3U
+#define V_OPT_PARSER_OTK_STATE_CHANNEL1(x) ((x) << S_OPT_PARSER_OTK_STATE_CHANNEL1)
+#define G_OPT_PARSER_OTK_STATE_CHANNEL1(x) (((x) >> S_OPT_PARSER_OTK_STATE_CHANNEL1) & M_OPT_PARSER_OTK_STATE_CHANNEL1)
+
+#define S_OPT_PARSER_FATAL_CHANNEL2    13
+#define V_OPT_PARSER_FATAL_CHANNEL2(x) ((x) << S_OPT_PARSER_FATAL_CHANNEL2)
+#define F_OPT_PARSER_FATAL_CHANNEL2    V_OPT_PARSER_FATAL_CHANNEL2(1U)
+
+#define S_OPT_PARSER_BUSY_CHANNEL2    12
+#define V_OPT_PARSER_BUSY_CHANNEL2(x) ((x) << S_OPT_PARSER_BUSY_CHANNEL2)
+#define F_OPT_PARSER_BUSY_CHANNEL2    V_OPT_PARSER_BUSY_CHANNEL2(1U)
+
+#define S_OPT_PARSER_ITCP_STATE_CHANNEL2    10
+#define M_OPT_PARSER_ITCP_STATE_CHANNEL2    0x3U
+#define V_OPT_PARSER_ITCP_STATE_CHANNEL2(x) ((x) << S_OPT_PARSER_ITCP_STATE_CHANNEL2)
+#define G_OPT_PARSER_ITCP_STATE_CHANNEL2(x) (((x) >> S_OPT_PARSER_ITCP_STATE_CHANNEL2) & M_OPT_PARSER_ITCP_STATE_CHANNEL2)
+
+#define S_OPT_PARSER_OTK_STATE_CHANNEL2    8
+#define M_OPT_PARSER_OTK_STATE_CHANNEL2    0x3U
+#define V_OPT_PARSER_OTK_STATE_CHANNEL2(x) ((x) << S_OPT_PARSER_OTK_STATE_CHANNEL2)
+#define G_OPT_PARSER_OTK_STATE_CHANNEL2(x) (((x) >> S_OPT_PARSER_OTK_STATE_CHANNEL2) & M_OPT_PARSER_OTK_STATE_CHANNEL2)
+
+#define S_OPT_PARSER_FATAL_CHANNEL3    5
+#define V_OPT_PARSER_FATAL_CHANNEL3(x) ((x) << S_OPT_PARSER_FATAL_CHANNEL3)
+#define F_OPT_PARSER_FATAL_CHANNEL3    V_OPT_PARSER_FATAL_CHANNEL3(1U)
+
+#define S_OPT_PARSER_BUSY_CHANNEL3    4
+#define V_OPT_PARSER_BUSY_CHANNEL3(x) ((x) << S_OPT_PARSER_BUSY_CHANNEL3)
+#define F_OPT_PARSER_BUSY_CHANNEL3    V_OPT_PARSER_BUSY_CHANNEL3(1U)
+
+#define S_OPT_PARSER_ITCP_STATE_CHANNEL3    2
+#define M_OPT_PARSER_ITCP_STATE_CHANNEL3    0x3U
+#define V_OPT_PARSER_ITCP_STATE_CHANNEL3(x) ((x) << S_OPT_PARSER_ITCP_STATE_CHANNEL3)
+#define G_OPT_PARSER_ITCP_STATE_CHANNEL3(x) (((x) >> S_OPT_PARSER_ITCP_STATE_CHANNEL3) & M_OPT_PARSER_ITCP_STATE_CHANNEL3)
+
+#define S_OPT_PARSER_OTK_STATE_CHANNEL3    0
+#define M_OPT_PARSER_OTK_STATE_CHANNEL3    0x3U
+#define V_OPT_PARSER_OTK_STATE_CHANNEL3(x) ((x) << S_OPT_PARSER_OTK_STATE_CHANNEL3)
+#define G_OPT_PARSER_OTK_STATE_CHANNEL3(x) (((x) >> S_OPT_PARSER_OTK_STATE_CHANNEL3) & M_OPT_PARSER_OTK_STATE_CHANNEL3)
+
+#define A_TP_DBG_ESIDE_OP_ALT 0x155
+
+#define S_OPT_PARSER_PSTATE_FATAL_CHANNEL0    29
+#define V_OPT_PARSER_PSTATE_FATAL_CHANNEL0(x) ((x) << S_OPT_PARSER_PSTATE_FATAL_CHANNEL0)
+#define F_OPT_PARSER_PSTATE_FATAL_CHANNEL0    V_OPT_PARSER_PSTATE_FATAL_CHANNEL0(1U)
+
+#define S_OPT_PARSER_PSTATE_ERRNO_CHANNEL0    24
+#define M_OPT_PARSER_PSTATE_ERRNO_CHANNEL0    0x1fU
+#define V_OPT_PARSER_PSTATE_ERRNO_CHANNEL0(x) ((x) << S_OPT_PARSER_PSTATE_ERRNO_CHANNEL0)
+#define G_OPT_PARSER_PSTATE_ERRNO_CHANNEL0(x) (((x) >> S_OPT_PARSER_PSTATE_ERRNO_CHANNEL0) & M_OPT_PARSER_PSTATE_ERRNO_CHANNEL0)
+
+#define S_OPT_PARSER_PSTATE_FATAL_CHANNEL1    21
+#define V_OPT_PARSER_PSTATE_FATAL_CHANNEL1(x) ((x) << S_OPT_PARSER_PSTATE_FATAL_CHANNEL1)
+#define F_OPT_PARSER_PSTATE_FATAL_CHANNEL1    V_OPT_PARSER_PSTATE_FATAL_CHANNEL1(1U)
+
+#define S_OPT_PARSER_PSTATE_ERRNO_CHANNEL1    16
+#define M_OPT_PARSER_PSTATE_ERRNO_CHANNEL1    0x1fU
+#define V_OPT_PARSER_PSTATE_ERRNO_CHANNEL1(x) ((x) << S_OPT_PARSER_PSTATE_ERRNO_CHANNEL1)
+#define G_OPT_PARSER_PSTATE_ERRNO_CHANNEL1(x) (((x) >> S_OPT_PARSER_PSTATE_ERRNO_CHANNEL1) & M_OPT_PARSER_PSTATE_ERRNO_CHANNEL1)
+
+#define S_OPT_PARSER_PSTATE_FATAL_CHANNEL2    13
+#define V_OPT_PARSER_PSTATE_FATAL_CHANNEL2(x) ((x) << S_OPT_PARSER_PSTATE_FATAL_CHANNEL2)
+#define F_OPT_PARSER_PSTATE_FATAL_CHANNEL2    V_OPT_PARSER_PSTATE_FATAL_CHANNEL2(1U)
+
+#define S_OPT_PARSER_PSTATE_ERRNO_CHANNEL2    8
+#define M_OPT_PARSER_PSTATE_ERRNO_CHANNEL2    0x1fU
+#define V_OPT_PARSER_PSTATE_ERRNO_CHANNEL2(x) ((x) << S_OPT_PARSER_PSTATE_ERRNO_CHANNEL2)
+#define G_OPT_PARSER_PSTATE_ERRNO_CHANNEL2(x) (((x) >> S_OPT_PARSER_PSTATE_ERRNO_CHANNEL2) & M_OPT_PARSER_PSTATE_ERRNO_CHANNEL2)
+
+#define S_OPT_PARSER_PSTATE_FATAL_CHANNEL3    5
+#define V_OPT_PARSER_PSTATE_FATAL_CHANNEL3(x) ((x) << S_OPT_PARSER_PSTATE_FATAL_CHANNEL3)
+#define F_OPT_PARSER_PSTATE_FATAL_CHANNEL3    V_OPT_PARSER_PSTATE_FATAL_CHANNEL3(1U)
+
+#define S_OPT_PARSER_PSTATE_ERRNO_CHANNEL3    0
+#define M_OPT_PARSER_PSTATE_ERRNO_CHANNEL3    0x1fU
+#define V_OPT_PARSER_PSTATE_ERRNO_CHANNEL3(x) ((x) << S_OPT_PARSER_PSTATE_ERRNO_CHANNEL3)
+#define G_OPT_PARSER_PSTATE_ERRNO_CHANNEL3(x) (((x) >> S_OPT_PARSER_PSTATE_ERRNO_CHANNEL3) & M_OPT_PARSER_PSTATE_ERRNO_CHANNEL3)
+
+#define A_TP_DBG_ESIDE_OP_BUSY 0x156
+
+#define S_OPT_PARSER_BUSY_VEC_CHANNEL3    24
+#define M_OPT_PARSER_BUSY_VEC_CHANNEL3    0xffU
+#define V_OPT_PARSER_BUSY_VEC_CHANNEL3(x) ((x) << S_OPT_PARSER_BUSY_VEC_CHANNEL3)
+#define G_OPT_PARSER_BUSY_VEC_CHANNEL3(x) (((x) >> S_OPT_PARSER_BUSY_VEC_CHANNEL3) & M_OPT_PARSER_BUSY_VEC_CHANNEL3)
+
+#define S_OPT_PARSER_BUSY_VEC_CHANNEL2    16
+#define M_OPT_PARSER_BUSY_VEC_CHANNEL2    0xffU
+#define V_OPT_PARSER_BUSY_VEC_CHANNEL2(x) ((x) << S_OPT_PARSER_BUSY_VEC_CHANNEL2)
+#define G_OPT_PARSER_BUSY_VEC_CHANNEL2(x) (((x) >> S_OPT_PARSER_BUSY_VEC_CHANNEL2) & M_OPT_PARSER_BUSY_VEC_CHANNEL2)
+
+#define S_OPT_PARSER_BUSY_VEC_CHANNEL1    8
+#define M_OPT_PARSER_BUSY_VEC_CHANNEL1    0xffU
+#define V_OPT_PARSER_BUSY_VEC_CHANNEL1(x) ((x) << S_OPT_PARSER_BUSY_VEC_CHANNEL1)
+#define G_OPT_PARSER_BUSY_VEC_CHANNEL1(x) (((x) >> S_OPT_PARSER_BUSY_VEC_CHANNEL1) & M_OPT_PARSER_BUSY_VEC_CHANNEL1)
+
+#define S_OPT_PARSER_BUSY_VEC_CHANNEL0    0
+#define M_OPT_PARSER_BUSY_VEC_CHANNEL0    0xffU
+#define V_OPT_PARSER_BUSY_VEC_CHANNEL0(x) ((x) << S_OPT_PARSER_BUSY_VEC_CHANNEL0)
+#define G_OPT_PARSER_BUSY_VEC_CHANNEL0(x) (((x) >> S_OPT_PARSER_BUSY_VEC_CHANNEL0) & M_OPT_PARSER_BUSY_VEC_CHANNEL0)
+
+#define A_TP_DBG_ESIDE_OP_COOKIE 0x157
+
+#define S_OPT_PARSER_COOKIE_CHANNEL3    24
+#define M_OPT_PARSER_COOKIE_CHANNEL3    0xffU
+#define V_OPT_PARSER_COOKIE_CHANNEL3(x) ((x) << S_OPT_PARSER_COOKIE_CHANNEL3)
+#define G_OPT_PARSER_COOKIE_CHANNEL3(x) (((x) >> S_OPT_PARSER_COOKIE_CHANNEL3) & M_OPT_PARSER_COOKIE_CHANNEL3)
+
+#define S_OPT_PARSER_COOKIE_CHANNEL2    16
+#define M_OPT_PARSER_COOKIE_CHANNEL2    0xffU
+#define V_OPT_PARSER_COOKIE_CHANNEL2(x) ((x) << S_OPT_PARSER_COOKIE_CHANNEL2)
+#define G_OPT_PARSER_COOKIE_CHANNEL2(x) (((x) >> S_OPT_PARSER_COOKIE_CHANNEL2) & M_OPT_PARSER_COOKIE_CHANNEL2)
+
+#define S_OPT_PARSER_COOKIE_CHANNEL1    8
+#define M_OPT_PARSER_COOKIE_CHANNEL1    0xffU
+#define V_OPT_PARSER_COOKIE_CHANNEL1(x) ((x) << S_OPT_PARSER_COOKIE_CHANNEL1)
+#define G_OPT_PARSER_COOKIE_CHANNEL1(x) (((x) >> S_OPT_PARSER_COOKIE_CHANNEL1) & M_OPT_PARSER_COOKIE_CHANNEL1)
+
+#define S_OPT_PARSER_COOKIE_CHANNEL0    0
+#define M_OPT_PARSER_COOKIE_CHANNEL0    0xffU
+#define V_OPT_PARSER_COOKIE_CHANNEL0(x) ((x) << S_OPT_PARSER_COOKIE_CHANNEL0)
+#define G_OPT_PARSER_COOKIE_CHANNEL0(x) (((x) >> S_OPT_PARSER_COOKIE_CHANNEL0) & M_OPT_PARSER_COOKIE_CHANNEL0)
+
+#define A_TP_DBG_ESIDE_DEMUX_WAIT0 0x158
+#define A_TP_DBG_ESIDE_DEMUX_WAIT1 0x159
+#define A_TP_DBG_ESIDE_DEMUX_CNT0 0x15a
+#define A_TP_DBG_ESIDE_DEMUX_CNT1 0x15b
+#define A_TP_ESIDE_CONFIG 0x160
+
+#define S_VNI_EN    26
+#define V_VNI_EN(x) ((x) << S_VNI_EN)
+#define F_VNI_EN    V_VNI_EN(1U)
+
+#define S_ENC_RX_EN    25
+#define V_ENC_RX_EN(x) ((x) << S_ENC_RX_EN)
+#define F_ENC_RX_EN    V_ENC_RX_EN(1U)
+
+#define S_TNL_LKP_INNER_SEL    24
+#define V_TNL_LKP_INNER_SEL(x) ((x) << S_TNL_LKP_INNER_SEL)
+#define F_TNL_LKP_INNER_SEL    V_TNL_LKP_INNER_SEL(1U)
+
+#define S_ROCEV2UDPPORT    0
+#define M_ROCEV2UDPPORT    0xffffU
+#define V_ROCEV2UDPPORT(x) ((x) << S_ROCEV2UDPPORT)
+#define G_ROCEV2UDPPORT(x) (((x) >> S_ROCEV2UDPPORT) & M_ROCEV2UDPPORT)
+
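Single-bit fields are tested with their F_ constant and multi-bit fields
pulled out with G_; decoding an A_TP_ESIDE_CONFIG value read elsewhere might
look like the sketch below (4791 is the IANA-assigned RoCEv2 UDP port,
mentioned only for context):

	#include <stdint.h>

	static inline int
	eside_vni_enabled(uint32_t regval)
	{
		return ((regval & F_VNI_EN) != 0);
	}

	static inline uint16_t
	eside_rocev2_port(uint32_t regval)
	{
		return (G_ROCEV2UDPPORT(regval));
	}
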
 #define A_TP_DBG_CSIDE_RX0 0x230
 
 #define S_CRXSOPCNT    28
@@ -13166,7 +27899,116 @@
 #define V_CMD_SEL(x) ((x) << S_CMD_SEL)
 #define F_CMD_SEL    V_CMD_SEL(1U)
 
+#define S_T5_TXFULL    31
+#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL)
+#define F_T5_TXFULL    V_T5_TXFULL(1U)
+
+#define S_CPL5RXFULL    26
+#define V_CPL5RXFULL(x) ((x) << S_CPL5RXFULL)
+#define F_CPL5RXFULL    V_CPL5RXFULL(1U)
+
+#define S_T5_PLD_RXZEROP_SRDY    25
+#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY)
+#define F_T5_PLD_RXZEROP_SRDY    V_T5_PLD_RXZEROP_SRDY(1U)
+
+#define S_PLD2XRXVALID    23
+#define V_PLD2XRXVALID(x) ((x) << S_PLD2XRXVALID)
+#define F_PLD2XRXVALID    V_PLD2XRXVALID(1U)
+
+#define S_T5_DDP_SRDY    22
+#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY)
+#define F_T5_DDP_SRDY    V_T5_DDP_SRDY(1U)
+
+#define S_T5_DDP_DRDY    21
+#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY)
+#define F_T5_DDP_DRDY    V_T5_DDP_DRDY(1U)
+
+#define S_DDPSTATE    16
+#define M_DDPSTATE    0x1fU
+#define V_DDPSTATE(x) ((x) << S_DDPSTATE)
+#define G_DDPSTATE(x) (((x) >> S_DDPSTATE) & M_DDPSTATE)
+
+#define S_DDPMSGCODE    12
+#define M_DDPMSGCODE    0xfU
+#define V_DDPMSGCODE(x) ((x) << S_DDPMSGCODE)
+#define G_DDPMSGCODE(x) (((x) >> S_DDPMSGCODE) & M_DDPMSGCODE)
+
+#define S_CPL5SOCPCNT    8
+#define M_CPL5SOCPCNT    0xfU
+#define V_CPL5SOCPCNT(x) ((x) << S_CPL5SOCPCNT)
+#define G_CPL5SOCPCNT(x) (((x) >> S_CPL5SOCPCNT) & M_CPL5SOCPCNT)
+
+#define S_PLDRXZEROPCNT    4
+#define M_PLDRXZEROPCNT    0xfU
+#define V_PLDRXZEROPCNT(x) ((x) << S_PLDRXZEROPCNT)
+#define G_PLDRXZEROPCNT(x) (((x) >> S_PLDRXZEROPCNT) & M_PLDRXZEROPCNT)
+
+#define S_TXFRMERR2    3
+#define V_TXFRMERR2(x) ((x) << S_TXFRMERR2)
+#define F_TXFRMERR2    V_TXFRMERR2(1U)
+
+#define S_TXFRMERR1    2
+#define V_TXFRMERR1(x) ((x) << S_TXFRMERR1)
+#define F_TXFRMERR1    V_TXFRMERR1(1U)
+
+#define S_TXVALID2X    1
+#define V_TXVALID2X(x) ((x) << S_TXVALID2X)
+#define F_TXVALID2X    V_TXVALID2X(1U)
+
+#define S_TXFULL2X    0
+#define V_TXFULL2X(x) ((x) << S_TXFULL2X)
+#define F_TXFULL2X    V_TXFULL2X(1U)
+
+#define S_T6_TXFULL    31
+#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
+#define F_T6_TXFULL    V_T6_TXFULL(1U)
+
+#define S_T6_PLD_RXZEROP_SRDY    25
+#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
+#define F_T6_PLD_RXZEROP_SRDY    V_T6_PLD_RXZEROP_SRDY(1U)
+
+#define S_T6_DDP_SRDY    22
+#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
+#define F_T6_DDP_SRDY    V_T6_DDP_SRDY(1U)
+
+#define S_T6_DDP_DRDY    21
+#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
+#define F_T6_DDP_DRDY    V_T6_DDP_DRDY(1U)
+
 #define A_TP_DBG_CSIDE_DISP1 0x23b
+
+#define S_T5_TXFULL    31
+#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL)
+#define F_T5_TXFULL    V_T5_TXFULL(1U)
+
+#define S_T5_PLD_RXZEROP_SRDY    25
+#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY)
+#define F_T5_PLD_RXZEROP_SRDY    V_T5_PLD_RXZEROP_SRDY(1U)
+
+#define S_T5_DDP_SRDY    22
+#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY)
+#define F_T5_DDP_SRDY    V_T5_DDP_SRDY(1U)
+
+#define S_T5_DDP_DRDY    21
+#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY)
+#define F_T5_DDP_DRDY    V_T5_DDP_DRDY(1U)
+
+#define S_T6_TXFULL    31
+#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
+#define F_T6_TXFULL    V_T6_TXFULL(1U)
+
+#define S_T6_PLD_RXZEROP_SRDY    25
+#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
+#define F_T6_PLD_RXZEROP_SRDY    V_T6_PLD_RXZEROP_SRDY(1U)
+
+#define S_T6_DDP_SRDY    22
+#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
+#define F_T6_DDP_SRDY    V_T6_DDP_SRDY(1U)
+
+#define S_T6_DDP_DRDY    21
+#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
+#define F_T6_DDP_DRDY    V_T6_DDP_DRDY(1U)
+
 #define A_TP_DBG_CSIDE_DDP0 0x23c
 
 #define S_DDPMSGLATEST7    28
@@ -13365,6 +28207,52 @@
 #define V_WRITEZEROOP(x) ((x) << S_WRITEZEROOP)
 #define G_WRITEZEROOP(x) (((x) >> S_WRITEZEROOP) & M_WRITEZEROOP)
 
+#define S_STARTSKIPPLD    7
+#define V_STARTSKIPPLD(x) ((x) << S_STARTSKIPPLD)
+#define F_STARTSKIPPLD    V_STARTSKIPPLD(1U)
+
+#define S_ATOMICCMDEN    5
+#define V_ATOMICCMDEN(x) ((x) << S_ATOMICCMDEN)
+#define F_ATOMICCMDEN    V_ATOMICCMDEN(1U)
+
+#define S_ISCSICMDMODE    28
+#define V_ISCSICMDMODE(x) ((x) << S_ISCSICMDMODE)
+#define F_ISCSICMDMODE    V_ISCSICMDMODE(1U)
+
+#define A_TP_CSPI_POWER 0x243
+
+#define S_GATECHNTX3    11
+#define V_GATECHNTX3(x) ((x) << S_GATECHNTX3)
+#define F_GATECHNTX3    V_GATECHNTX3(1U)
+
+#define S_GATECHNTX2    10
+#define V_GATECHNTX2(x) ((x) << S_GATECHNTX2)
+#define F_GATECHNTX2    V_GATECHNTX2(1U)
+
+#define S_GATECHNTX1    9
+#define V_GATECHNTX1(x) ((x) << S_GATECHNTX1)
+#define F_GATECHNTX1    V_GATECHNTX1(1U)
+
+#define S_GATECHNTX0    8
+#define V_GATECHNTX0(x) ((x) << S_GATECHNTX0)
+#define F_GATECHNTX0    V_GATECHNTX0(1U)
+
+#define S_GATECHNRX1    7
+#define V_GATECHNRX1(x) ((x) << S_GATECHNRX1)
+#define F_GATECHNRX1    V_GATECHNRX1(1U)
+
+#define S_GATECHNRX0    6
+#define V_GATECHNRX0(x) ((x) << S_GATECHNRX0)
+#define F_GATECHNRX0    V_GATECHNRX0(1U)
+
+#define S_SLEEPRDYUTRN    4
+#define V_SLEEPRDYUTRN(x) ((x) << S_SLEEPRDYUTRN)
+#define F_SLEEPRDYUTRN    V_SLEEPRDYUTRN(1U)
+
+#define S_SLEEPREQUTRN    0
+#define V_SLEEPREQUTRN(x) ((x) << S_SLEEPREQUTRN)
+#define F_SLEEPREQUTRN    V_SLEEPREQUTRN(1U)
+
 #define A_TP_TRC_CONFIG 0x244
 
 #define S_TRCRR    1
@@ -13404,6 +28292,110 @@
 #define V_CPRSSTATE0(x) ((x) << S_CPRSSTATE0)
 #define G_CPRSSTATE0(x) (((x) >> S_CPRSSTATE0) & M_CPRSSTATE0)
 
+#define S_C4TUPBUSY3    31
+#define V_C4TUPBUSY3(x) ((x) << S_C4TUPBUSY3)
+#define F_C4TUPBUSY3    V_C4TUPBUSY3(1U)
+
+#define S_CDBVALID3    30
+#define V_CDBVALID3(x) ((x) << S_CDBVALID3)
+#define F_CDBVALID3    V_CDBVALID3(1U)
+
+#define S_CRXVALID3    29
+#define V_CRXVALID3(x) ((x) << S_CRXVALID3)
+#define F_CRXVALID3    V_CRXVALID3(1U)
+
+#define S_CRXFULL3    28
+#define V_CRXFULL3(x) ((x) << S_CRXFULL3)
+#define F_CRXFULL3    V_CRXFULL3(1U)
+
+#define S_T5_CPRSSTATE3    24
+#define M_T5_CPRSSTATE3    0xfU
+#define V_T5_CPRSSTATE3(x) ((x) << S_T5_CPRSSTATE3)
+#define G_T5_CPRSSTATE3(x) (((x) >> S_T5_CPRSSTATE3) & M_T5_CPRSSTATE3)
+
+#define S_C4TUPBUSY2    23
+#define V_C4TUPBUSY2(x) ((x) << S_C4TUPBUSY2)
+#define F_C4TUPBUSY2    V_C4TUPBUSY2(1U)
+
+#define S_CDBVALID2    22
+#define V_CDBVALID2(x) ((x) << S_CDBVALID2)
+#define F_CDBVALID2    V_CDBVALID2(1U)
+
+#define S_CRXVALID2    21
+#define V_CRXVALID2(x) ((x) << S_CRXVALID2)
+#define F_CRXVALID2    V_CRXVALID2(1U)
+
+#define S_CRXFULL2    20
+#define V_CRXFULL2(x) ((x) << S_CRXFULL2)
+#define F_CRXFULL2    V_CRXFULL2(1U)
+
+#define S_T5_CPRSSTATE2    16
+#define M_T5_CPRSSTATE2    0xfU
+#define V_T5_CPRSSTATE2(x) ((x) << S_T5_CPRSSTATE2)
+#define G_T5_CPRSSTATE2(x) (((x) >> S_T5_CPRSSTATE2) & M_T5_CPRSSTATE2)
+
+#define S_C4TUPBUSY1    15
+#define V_C4TUPBUSY1(x) ((x) << S_C4TUPBUSY1)
+#define F_C4TUPBUSY1    V_C4TUPBUSY1(1U)
+
+#define S_CDBVALID1    14
+#define V_CDBVALID1(x) ((x) << S_CDBVALID1)
+#define F_CDBVALID1    V_CDBVALID1(1U)
+
+#define S_CRXVALID1    13
+#define V_CRXVALID1(x) ((x) << S_CRXVALID1)
+#define F_CRXVALID1    V_CRXVALID1(1U)
+
+#define S_CRXFULL1    12
+#define V_CRXFULL1(x) ((x) << S_CRXFULL1)
+#define F_CRXFULL1    V_CRXFULL1(1U)
+
+#define S_T5_CPRSSTATE1    8
+#define M_T5_CPRSSTATE1    0xfU
+#define V_T5_CPRSSTATE1(x) ((x) << S_T5_CPRSSTATE1)
+#define G_T5_CPRSSTATE1(x) (((x) >> S_T5_CPRSSTATE1) & M_T5_CPRSSTATE1)
+
+#define S_C4TUPBUSY0    7
+#define V_C4TUPBUSY0(x) ((x) << S_C4TUPBUSY0)
+#define F_C4TUPBUSY0    V_C4TUPBUSY0(1U)
+
+#define S_CDBVALID0    6
+#define V_CDBVALID0(x) ((x) << S_CDBVALID0)
+#define F_CDBVALID0    V_CDBVALID0(1U)
+
+#define S_CRXVALID0    5
+#define V_CRXVALID0(x) ((x) << S_CRXVALID0)
+#define F_CRXVALID0    V_CRXVALID0(1U)
+
+#define S_CRXFULL0    4
+#define V_CRXFULL0(x) ((x) << S_CRXFULL0)
+#define F_CRXFULL0    V_CRXFULL0(1U)
+
+#define S_T5_CPRSSTATE0    0
+#define M_T5_CPRSSTATE0    0xfU
+#define V_T5_CPRSSTATE0(x) ((x) << S_T5_CPRSSTATE0)
+#define G_T5_CPRSSTATE0(x) (((x) >> S_T5_CPRSSTATE0) & M_T5_CPRSSTATE0)
+
+#define S_T6_CPRSSTATE3    24
+#define M_T6_CPRSSTATE3    0xfU
+#define V_T6_CPRSSTATE3(x) ((x) << S_T6_CPRSSTATE3)
+#define G_T6_CPRSSTATE3(x) (((x) >> S_T6_CPRSSTATE3) & M_T6_CPRSSTATE3)
+
+#define S_T6_CPRSSTATE2    16
+#define M_T6_CPRSSTATE2    0xfU
+#define V_T6_CPRSSTATE2(x) ((x) << S_T6_CPRSSTATE2)
+#define G_T6_CPRSSTATE2(x) (((x) >> S_T6_CPRSSTATE2) & M_T6_CPRSSTATE2)
+
+#define S_T6_CPRSSTATE1    8
+#define M_T6_CPRSSTATE1    0xfU
+#define V_T6_CPRSSTATE1(x) ((x) << S_T6_CPRSSTATE1)
+#define G_T6_CPRSSTATE1(x) (((x) >> S_T6_CPRSSTATE1) & M_T6_CPRSSTATE1)
+
+#define S_T6_CPRSSTATE0    0
+#define M_T6_CPRSSTATE0    0xfU
+#define V_T6_CPRSSTATE0(x) ((x) << S_T6_CPRSSTATE0)
+#define G_T6_CPRSSTATE0(x) (((x) >> S_T6_CPRSSTATE0) & M_T6_CPRSSTATE0)
+
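[editorial note] The four T5_CPRSSTATE fields above are 4-bit per-channel state nibbles packed into one 32-bit debug value; the G_/M_ pairs decode them. A small illustrative helper (the surrounding register read is not shown and is assumed to come from the driver's normal debug path):

static inline void
decode_cprs_state(uint32_t val, u_int state[4])
{
	state[0] = G_T5_CPRSSTATE0(val);	/* bits  3:0  */
	state[1] = G_T5_CPRSSTATE1(val);	/* bits 11:8  */
	state[2] = G_T5_CPRSSTATE2(val);	/* bits 19:16 */
	state[3] = G_T5_CPRSSTATE3(val);	/* bits 27:24 */
}
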
 #define A_TP_DBG_CSIDE_DEMUX 0x247
 
 #define S_CALLDONE    28
@@ -13446,6 +28438,195 @@
 #define V_CTXPKTCSUMDONE(x) ((x) << S_CTXPKTCSUMDONE)
 #define G_CTXPKTCSUMDONE(x) (((x) >> S_CTXPKTCSUMDONE) & M_CTXPKTCSUMDONE)
 
+#define S_CARBVALID    28
+#define M_CARBVALID    0xfU
+#define V_CARBVALID(x) ((x) << S_CARBVALID)
+#define G_CARBVALID(x) (((x) >> S_CARBVALID) & M_CARBVALID)
+
+#define S_CCPL5DONE    24
+#define M_CCPL5DONE    0xfU
+#define V_CCPL5DONE(x) ((x) << S_CCPL5DONE)
+#define G_CCPL5DONE(x) (((x) >> S_CCPL5DONE) & M_CCPL5DONE)
+
+#define S_CTCPOPDONE    12
+#define M_CTCPOPDONE    0xfU
+#define V_CTCPOPDONE(x) ((x) << S_CTCPOPDONE)
+#define G_CTCPOPDONE(x) (((x) >> S_CTCPOPDONE) & M_CTCPOPDONE)
+
+#define A_TP_DBG_CSIDE_ARBIT 0x248
+
+#define S_CPLVALID3    31
+#define V_CPLVALID3(x) ((x) << S_CPLVALID3)
+#define F_CPLVALID3    V_CPLVALID3(1U)
+
+#define S_PLDVALID3    30
+#define V_PLDVALID3(x) ((x) << S_PLDVALID3)
+#define F_PLDVALID3    V_PLDVALID3(1U)
+
+#define S_CRCVALID3    29
+#define V_CRCVALID3(x) ((x) << S_CRCVALID3)
+#define F_CRCVALID3    V_CRCVALID3(1U)
+
+#define S_ISSVALID3    28
+#define V_ISSVALID3(x) ((x) << S_ISSVALID3)
+#define F_ISSVALID3    V_ISSVALID3(1U)
+
+#define S_DBVALID3    27
+#define V_DBVALID3(x) ((x) << S_DBVALID3)
+#define F_DBVALID3    V_DBVALID3(1U)
+
+#define S_CHKVALID3    26
+#define V_CHKVALID3(x) ((x) << S_CHKVALID3)
+#define F_CHKVALID3    V_CHKVALID3(1U)
+
+#define S_ZRPVALID3    25
+#define V_ZRPVALID3(x) ((x) << S_ZRPVALID3)
+#define F_ZRPVALID3    V_ZRPVALID3(1U)
+
+#define S_ERRVALID3    24
+#define V_ERRVALID3(x) ((x) << S_ERRVALID3)
+#define F_ERRVALID3    V_ERRVALID3(1U)
+
+#define S_CPLVALID2    23
+#define V_CPLVALID2(x) ((x) << S_CPLVALID2)
+#define F_CPLVALID2    V_CPLVALID2(1U)
+
+#define S_PLDVALID2    22
+#define V_PLDVALID2(x) ((x) << S_PLDVALID2)
+#define F_PLDVALID2    V_PLDVALID2(1U)
+
+#define S_CRCVALID2    21
+#define V_CRCVALID2(x) ((x) << S_CRCVALID2)
+#define F_CRCVALID2    V_CRCVALID2(1U)
+
+#define S_ISSVALID2    20
+#define V_ISSVALID2(x) ((x) << S_ISSVALID2)
+#define F_ISSVALID2    V_ISSVALID2(1U)
+
+#define S_DBVALID2    19
+#define V_DBVALID2(x) ((x) << S_DBVALID2)
+#define F_DBVALID2    V_DBVALID2(1U)
+
+#define S_CHKVALID2    18
+#define V_CHKVALID2(x) ((x) << S_CHKVALID2)
+#define F_CHKVALID2    V_CHKVALID2(1U)
+
+#define S_ZRPVALID2    17
+#define V_ZRPVALID2(x) ((x) << S_ZRPVALID2)
+#define F_ZRPVALID2    V_ZRPVALID2(1U)
+
+#define S_ERRVALID2    16
+#define V_ERRVALID2(x) ((x) << S_ERRVALID2)
+#define F_ERRVALID2    V_ERRVALID2(1U)
+
+#define S_CPLVALID1    15
+#define V_CPLVALID1(x) ((x) << S_CPLVALID1)
+#define F_CPLVALID1    V_CPLVALID1(1U)
+
+#define S_PLDVALID1    14
+#define V_PLDVALID1(x) ((x) << S_PLDVALID1)
+#define F_PLDVALID1    V_PLDVALID1(1U)
+
+#define S_CRCVALID1    13
+#define V_CRCVALID1(x) ((x) << S_CRCVALID1)
+#define F_CRCVALID1    V_CRCVALID1(1U)
+
+#define S_ISSVALID1    12
+#define V_ISSVALID1(x) ((x) << S_ISSVALID1)
+#define F_ISSVALID1    V_ISSVALID1(1U)
+
+#define S_DBVALID1    11
+#define V_DBVALID1(x) ((x) << S_DBVALID1)
+#define F_DBVALID1    V_DBVALID1(1U)
+
+#define S_CHKVALID1    10
+#define V_CHKVALID1(x) ((x) << S_CHKVALID1)
+#define F_CHKVALID1    V_CHKVALID1(1U)
+
+#define S_ZRPVALID1    9
+#define V_ZRPVALID1(x) ((x) << S_ZRPVALID1)
+#define F_ZRPVALID1    V_ZRPVALID1(1U)
+
+#define S_ERRVALID1    8
+#define V_ERRVALID1(x) ((x) << S_ERRVALID1)
+#define F_ERRVALID1    V_ERRVALID1(1U)
+
+#define S_CPLVALID0    7
+#define V_CPLVALID0(x) ((x) << S_CPLVALID0)
+#define F_CPLVALID0    V_CPLVALID0(1U)
+
+#define S_PLDVALID0    6
+#define V_PLDVALID0(x) ((x) << S_PLDVALID0)
+#define F_PLDVALID0    V_PLDVALID0(1U)
+
+#define S_CRCVALID0    5
+#define V_CRCVALID0(x) ((x) << S_CRCVALID0)
+#define F_CRCVALID0    V_CRCVALID0(1U)
+
+#define S_ISSVALID0    4
+#define V_ISSVALID0(x) ((x) << S_ISSVALID0)
+#define F_ISSVALID0    V_ISSVALID0(1U)
+
+#define S_DBVALID0    3
+#define V_DBVALID0(x) ((x) << S_DBVALID0)
+#define F_DBVALID0    V_DBVALID0(1U)
+
+#define S_CHKVALID0    2
+#define V_CHKVALID0(x) ((x) << S_CHKVALID0)
+#define F_CHKVALID0    V_CHKVALID0(1U)
+
+#define S_ZRPVALID0    1
+#define V_ZRPVALID0(x) ((x) << S_ZRPVALID0)
+#define F_ZRPVALID0    V_ZRPVALID0(1U)
+
+#define S_ERRVALID0    0
+#define V_ERRVALID0(x) ((x) << S_ERRVALID0)
+#define F_ERRVALID0    V_ERRVALID0(1U)
+
+#define A_TP_DBG_CSIDE_TRACE_CNT 0x24a
+
+#define S_TRCSOPCNT    24
+#define M_TRCSOPCNT    0xffU
+#define V_TRCSOPCNT(x) ((x) << S_TRCSOPCNT)
+#define G_TRCSOPCNT(x) (((x) >> S_TRCSOPCNT) & M_TRCSOPCNT)
+
+#define S_TRCEOPCNT    16
+#define M_TRCEOPCNT    0xffU
+#define V_TRCEOPCNT(x) ((x) << S_TRCEOPCNT)
+#define G_TRCEOPCNT(x) (((x) >> S_TRCEOPCNT) & M_TRCEOPCNT)
+
+#define S_TRCFLTHIT    12
+#define M_TRCFLTHIT    0xfU
+#define V_TRCFLTHIT(x) ((x) << S_TRCFLTHIT)
+#define G_TRCFLTHIT(x) (((x) >> S_TRCFLTHIT) & M_TRCFLTHIT)
+
+#define S_TRCRNTPKT    8
+#define M_TRCRNTPKT    0xfU
+#define V_TRCRNTPKT(x) ((x) << S_TRCRNTPKT)
+#define G_TRCRNTPKT(x) (((x) >> S_TRCRNTPKT) & M_TRCRNTPKT)
+
+#define S_TRCPKTLEN    0
+#define M_TRCPKTLEN    0xffU
+#define V_TRCPKTLEN(x) ((x) << S_TRCPKTLEN)
+#define G_TRCPKTLEN(x) (((x) >> S_TRCPKTLEN) & M_TRCPKTLEN)
+
+#define A_TP_DBG_CSIDE_TRACE_RSS 0x24b
+#define A_TP_VLN_CONFIG 0x24c
+
+#define S_ETHTYPEQINQ    16
+#define M_ETHTYPEQINQ    0xffffU
+#define V_ETHTYPEQINQ(x) ((x) << S_ETHTYPEQINQ)
+#define G_ETHTYPEQINQ(x) (((x) >> S_ETHTYPEQINQ) & M_ETHTYPEQINQ)
+
+#define S_ETHTYPEVLAN    0
+#define M_ETHTYPEVLAN    0xffffU
+#define V_ETHTYPEVLAN(x) ((x) << S_ETHTYPEVLAN)
+#define G_ETHTYPEVLAN(x) (((x) >> S_ETHTYPEVLAN) & M_ETHTYPEVLAN)
+
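[editorial note] A_TP_VLN_CONFIG packs two 16-bit ethertypes, and the V_ macros compose the register value directly. A sketch using the standard IEEE values (0x88a8 for the 802.1ad S-tag, 0x8100 for the 802.1Q C-tag; the hardware's default contents are not shown in this diff):

uint32_t vln = V_ETHTYPEQINQ(0x88a8) | V_ETHTYPEVLAN(0x8100);
/* vln == 0x88a88100: QinQ type in bits 31:16, 802.1Q in bits 15:0 */
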
+#define A_TP_DBG_CSIDE_ARBIT_WAIT0 0x24d
+#define A_TP_DBG_CSIDE_ARBIT_WAIT1 0x24e
+#define A_TP_DBG_CSIDE_ARBIT_CNT0 0x24f
+#define A_TP_DBG_CSIDE_ARBIT_CNT1 0x250
 #define A_TP_FIFO_CONFIG 0x8c0
 
 #define S_CH1_OUTPUT    27
@@ -13573,12 +28754,20 @@
 #define A_TP_MIB_TID_INV 0x61
 #define A_TP_MIB_TID_ACT 0x62
 #define A_TP_MIB_TID_PAS 0x63
-#define A_TP_MIB_RQE_DFR_MOD 0x64
-#define A_TP_MIB_RQE_DFR_PKT 0x65
+#define A_TP_MIB_RQE_DFR_PKT 0x64
+#define A_TP_MIB_RQE_DFR_MOD 0x65
 #define A_TP_MIB_CPL_OUT_ERR_0 0x68
 #define A_TP_MIB_CPL_OUT_ERR_1 0x69
 #define A_TP_MIB_CPL_OUT_ERR_2 0x6a
 #define A_TP_MIB_CPL_OUT_ERR_3 0x6b
+#define A_TP_MIB_ENG_LINE_0 0x6c
+#define A_TP_MIB_ENG_LINE_1 0x6d
+#define A_TP_MIB_ENG_LINE_2 0x6e
+#define A_TP_MIB_ENG_LINE_3 0x6f
+#define A_TP_MIB_TNL_ERR_0 0x70
+#define A_TP_MIB_TNL_ERR_1 0x71
+#define A_TP_MIB_TNL_ERR_2 0x72
+#define A_TP_MIB_TNL_ERR_3 0x73
 
 /* registers for module ULP_TX */
 #define ULP_TX_BASE_ADDR 0x8dc0
@@ -13597,6 +28786,70 @@
 #define V_EXTRA_TAG_INSERTION_ENABLE(x) ((x) << S_EXTRA_TAG_INSERTION_ENABLE)
 #define F_EXTRA_TAG_INSERTION_ENABLE    V_EXTRA_TAG_INSERTION_ENABLE(1U)
 
+#define S_PHYS_ADDR_RESP_EN    6
+#define V_PHYS_ADDR_RESP_EN(x) ((x) << S_PHYS_ADDR_RESP_EN)
+#define F_PHYS_ADDR_RESP_EN    V_PHYS_ADDR_RESP_EN(1U)
+
+#define S_ENDIANESS_CHANGE    5
+#define V_ENDIANESS_CHANGE(x) ((x) << S_ENDIANESS_CHANGE)
+#define F_ENDIANESS_CHANGE    V_ENDIANESS_CHANGE(1U)
+
+#define S_ERR_RTAG_EN    4
+#define V_ERR_RTAG_EN(x) ((x) << S_ERR_RTAG_EN)
+#define F_ERR_RTAG_EN    V_ERR_RTAG_EN(1U)
+
+#define S_TSO_ETHLEN_EN    3
+#define V_TSO_ETHLEN_EN(x) ((x) << S_TSO_ETHLEN_EN)
+#define F_TSO_ETHLEN_EN    V_TSO_ETHLEN_EN(1U)
+
+#define S_EMSG_MORE_INFO    2
+#define V_EMSG_MORE_INFO(x) ((x) << S_EMSG_MORE_INFO)
+#define F_EMSG_MORE_INFO    V_EMSG_MORE_INFO(1U)
+
+#define S_LOSDR    1
+#define V_LOSDR(x) ((x) << S_LOSDR)
+#define F_LOSDR    V_LOSDR(1U)
+
+#define S_ULIMIT_EXCLUSIVE_FIX    16
+#define V_ULIMIT_EXCLUSIVE_FIX(x) ((x) << S_ULIMIT_EXCLUSIVE_FIX)
+#define F_ULIMIT_EXCLUSIVE_FIX    V_ULIMIT_EXCLUSIVE_FIX(1U)
+
+#define S_ISO_A_FLAG_EN    15
+#define V_ISO_A_FLAG_EN(x) ((x) << S_ISO_A_FLAG_EN)
+#define F_ISO_A_FLAG_EN    V_ISO_A_FLAG_EN(1U)
+
+#define S_IWARP_SEQ_FLIT_DIS    14
+#define V_IWARP_SEQ_FLIT_DIS(x) ((x) << S_IWARP_SEQ_FLIT_DIS)
+#define F_IWARP_SEQ_FLIT_DIS    V_IWARP_SEQ_FLIT_DIS(1U)
+
+#define S_MR_SIZE_FIX_EN    13
+#define V_MR_SIZE_FIX_EN(x) ((x) << S_MR_SIZE_FIX_EN)
+#define F_MR_SIZE_FIX_EN    V_MR_SIZE_FIX_EN(1U)
+
+#define S_T10_ISO_FIX_EN    12
+#define V_T10_ISO_FIX_EN(x) ((x) << S_T10_ISO_FIX_EN)
+#define F_T10_ISO_FIX_EN    V_T10_ISO_FIX_EN(1U)
+
+#define S_CPL_FLAGS_UPDATE_EN    11
+#define V_CPL_FLAGS_UPDATE_EN(x) ((x) << S_CPL_FLAGS_UPDATE_EN)
+#define F_CPL_FLAGS_UPDATE_EN    V_CPL_FLAGS_UPDATE_EN(1U)
+
+#define S_IWARP_SEQ_UPDATE_EN    10
+#define V_IWARP_SEQ_UPDATE_EN(x) ((x) << S_IWARP_SEQ_UPDATE_EN)
+#define F_IWARP_SEQ_UPDATE_EN    V_IWARP_SEQ_UPDATE_EN(1U)
+
+#define S_SEQ_UPDATE_EN    9
+#define V_SEQ_UPDATE_EN(x) ((x) << S_SEQ_UPDATE_EN)
+#define F_SEQ_UPDATE_EN    V_SEQ_UPDATE_EN(1U)
+
+#define S_ERR_ITT_EN    8
+#define V_ERR_ITT_EN(x) ((x) << S_ERR_ITT_EN)
+#define F_ERR_ITT_EN    V_ERR_ITT_EN(1U)
+
+#define S_ATOMIC_FIX_DIS    7
+#define V_ATOMIC_FIX_DIS(x) ((x) << S_ATOMIC_FIX_DIS)
+#define F_ATOMIC_FIX_DIS    V_ATOMIC_FIX_DIS(1U)
+
 #define A_ULP_TX_PERR_INJECT 0x8dc4
 #define A_ULP_TX_INT_ENABLE 0x8dc8
 
@@ -13735,6 +28988,28 @@
 #define A_ULP_TX_PBL_LLIMIT 0x8ddc
 #define A_ULP_TX_PBL_ULIMIT 0x8de0
 #define A_ULP_TX_CPL_ERR_OFFSET 0x8de4
+#define A_ULP_TX_TLS_CTL 0x8de4
+
+#define S_TLSPERREN    4
+#define V_TLSPERREN(x) ((x) << S_TLSPERREN)
+#define F_TLSPERREN    V_TLSPERREN(1U)
+
+#define S_TLSPATHCTL    3
+#define V_TLSPATHCTL(x) ((x) << S_TLSPATHCTL)
+#define F_TLSPATHCTL    V_TLSPATHCTL(1U)
+
+#define S_TLSDISABLEIFUSE    2
+#define V_TLSDISABLEIFUSE(x) ((x) << S_TLSDISABLEIFUSE)
+#define F_TLSDISABLEIFUSE    V_TLSDISABLEIFUSE(1U)
+
+#define S_TLSDISABLECFUSE    1
+#define V_TLSDISABLECFUSE(x) ((x) << S_TLSDISABLECFUSE)
+#define F_TLSDISABLECFUSE    V_TLSDISABLECFUSE(1U)
+
+#define S_TLSDISABLE    0
+#define V_TLSDISABLE(x) ((x) << S_TLSDISABLE)
+#define F_TLSDISABLE    V_TLSDISABLE(1U)
+
 #define A_ULP_TX_CPL_ERR_MASK_L 0x8de8
 #define A_ULP_TX_CPL_ERR_MASK_H 0x8dec
 #define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0
@@ -13813,25 +29088,300 @@
 #define V_ERR_CNT3(x) ((x) << S_ERR_CNT3)
 #define G_ERR_CNT3(x) (((x) >> S_ERR_CNT3) & M_ERR_CNT3)
 
+#define A_ULP_TX_FC_SOF 0x8e20
+
+#define S_SOF_FS3    24
+#define M_SOF_FS3    0xffU
+#define V_SOF_FS3(x) ((x) << S_SOF_FS3)
+#define G_SOF_FS3(x) (((x) >> S_SOF_FS3) & M_SOF_FS3)
+
+#define S_SOF_FS2    16
+#define M_SOF_FS2    0xffU
+#define V_SOF_FS2(x) ((x) << S_SOF_FS2)
+#define G_SOF_FS2(x) (((x) >> S_SOF_FS2) & M_SOF_FS2)
+
+#define S_SOF_3    8
+#define M_SOF_3    0xffU
+#define V_SOF_3(x) ((x) << S_SOF_3)
+#define G_SOF_3(x) (((x) >> S_SOF_3) & M_SOF_3)
+
+#define S_SOF_2    0
+#define M_SOF_2    0xffU
+#define V_SOF_2(x) ((x) << S_SOF_2)
+#define G_SOF_2(x) (((x) >> S_SOF_2) & M_SOF_2)
+
+#define A_ULP_TX_FC_EOF 0x8e24
+
+#define S_EOF_LS3    24
+#define M_EOF_LS3    0xffU
+#define V_EOF_LS3(x) ((x) << S_EOF_LS3)
+#define G_EOF_LS3(x) (((x) >> S_EOF_LS3) & M_EOF_LS3)
+
+#define S_EOF_LS2    16
+#define M_EOF_LS2    0xffU
+#define V_EOF_LS2(x) ((x) << S_EOF_LS2)
+#define G_EOF_LS2(x) (((x) >> S_EOF_LS2) & M_EOF_LS2)
+
+#define S_EOF_3    8
+#define M_EOF_3    0xffU
+#define V_EOF_3(x) ((x) << S_EOF_3)
+#define G_EOF_3(x) (((x) >> S_EOF_3) & M_EOF_3)
+
+#define S_EOF_2    0
+#define M_EOF_2    0xffU
+#define V_EOF_2(x) ((x) << S_EOF_2)
+#define G_EOF_2(x) (((x) >> S_EOF_2) & M_EOF_2)
+
+#define A_ULP_TX_CGEN_GLOBAL 0x8e28
+
+#define S_ULP_TX_GLOBAL_CGEN    0
+#define V_ULP_TX_GLOBAL_CGEN(x) ((x) << S_ULP_TX_GLOBAL_CGEN)
+#define F_ULP_TX_GLOBAL_CGEN    V_ULP_TX_GLOBAL_CGEN(1U)
+
+#define A_ULP_TX_CGEN 0x8e2c
+
+#define S_ULP_TX_CGEN_STORAGE    8
+#define M_ULP_TX_CGEN_STORAGE    0xfU
+#define V_ULP_TX_CGEN_STORAGE(x) ((x) << S_ULP_TX_CGEN_STORAGE)
+#define G_ULP_TX_CGEN_STORAGE(x) (((x) >> S_ULP_TX_CGEN_STORAGE) & M_ULP_TX_CGEN_STORAGE)
+
+#define S_ULP_TX_CGEN_RDMA    4
+#define M_ULP_TX_CGEN_RDMA    0xfU
+#define V_ULP_TX_CGEN_RDMA(x) ((x) << S_ULP_TX_CGEN_RDMA)
+#define G_ULP_TX_CGEN_RDMA(x) (((x) >> S_ULP_TX_CGEN_RDMA) & M_ULP_TX_CGEN_RDMA)
+
+#define S_ULP_TX_CGEN_CHANNEL    0
+#define M_ULP_TX_CGEN_CHANNEL    0xfU
+#define V_ULP_TX_CGEN_CHANNEL(x) ((x) << S_ULP_TX_CGEN_CHANNEL)
+#define G_ULP_TX_CGEN_CHANNEL(x) (((x) >> S_ULP_TX_CGEN_CHANNEL) & M_ULP_TX_CGEN_CHANNEL)
+
 #define A_ULP_TX_ULP2TP_BIST_CMD 0x8e30
+#define A_ULP_TX_MEM_CFG 0x8e30
+
+#define S_WRREQ_SZ    0
+#define M_WRREQ_SZ    0x7U
+#define V_WRREQ_SZ(x) ((x) << S_WRREQ_SZ)
+#define G_WRREQ_SZ(x) (((x) >> S_WRREQ_SZ) & M_WRREQ_SZ)
+
 #define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34
+#define A_ULP_TX_PERR_INJECT_2 0x8e34
+
+#define S_T5_MEMSEL    1
+#define M_T5_MEMSEL    0x7U
+#define V_T5_MEMSEL(x) ((x) << S_T5_MEMSEL)
+#define G_T5_MEMSEL(x) (((x) >> S_T5_MEMSEL) & M_T5_MEMSEL)
+
+#define S_MEMSEL_ULPTX    1
+#define M_MEMSEL_ULPTX    0x1fU
+#define V_MEMSEL_ULPTX(x) ((x) << S_MEMSEL_ULPTX)
+#define G_MEMSEL_ULPTX(x) (((x) >> S_MEMSEL_ULPTX) & M_MEMSEL_ULPTX)
+
 #define A_ULP_TX_FPGA_CMD_CTRL 0x8e38
+#define A_ULP_TX_T5_FPGA_CMD_CTRL 0x8e38
+
+#define S_CHANNEL_SEL    12
+#define M_CHANNEL_SEL    0x3U
+#define V_CHANNEL_SEL(x) ((x) << S_CHANNEL_SEL)
+#define G_CHANNEL_SEL(x) (((x) >> S_CHANNEL_SEL) & M_CHANNEL_SEL)
+
+#define S_INTF_SEL    4
+#define M_INTF_SEL    0xfU
+#define V_INTF_SEL(x) ((x) << S_INTF_SEL)
+#define G_INTF_SEL(x) (((x) >> S_INTF_SEL) & M_INTF_SEL)
+
+#define S_NUM_FLITS    1
+#define M_NUM_FLITS    0x7U
+#define V_NUM_FLITS(x) ((x) << S_NUM_FLITS)
+#define G_NUM_FLITS(x) (((x) >> S_NUM_FLITS) & M_NUM_FLITS)
+
+#define S_CMD_GEN_EN    0
+#define V_CMD_GEN_EN(x) ((x) << S_CMD_GEN_EN)
+#define F_CMD_GEN_EN    V_CMD_GEN_EN(1U)
+
 #define A_ULP_TX_FPGA_CMD_0 0x8e3c
+#define A_ULP_TX_T5_FPGA_CMD_0 0x8e3c
 #define A_ULP_TX_FPGA_CMD_1 0x8e40
+#define A_ULP_TX_T5_FPGA_CMD_1 0x8e40
 #define A_ULP_TX_FPGA_CMD_2 0x8e44
+#define A_ULP_TX_T5_FPGA_CMD_2 0x8e44
 #define A_ULP_TX_FPGA_CMD_3 0x8e48
+#define A_ULP_TX_T5_FPGA_CMD_3 0x8e48
 #define A_ULP_TX_FPGA_CMD_4 0x8e4c
+#define A_ULP_TX_T5_FPGA_CMD_4 0x8e4c
 #define A_ULP_TX_FPGA_CMD_5 0x8e50
+#define A_ULP_TX_T5_FPGA_CMD_5 0x8e50
 #define A_ULP_TX_FPGA_CMD_6 0x8e54
+#define A_ULP_TX_T5_FPGA_CMD_6 0x8e54
 #define A_ULP_TX_FPGA_CMD_7 0x8e58
+#define A_ULP_TX_T5_FPGA_CMD_7 0x8e58
 #define A_ULP_TX_FPGA_CMD_8 0x8e5c
+#define A_ULP_TX_T5_FPGA_CMD_8 0x8e5c
 #define A_ULP_TX_FPGA_CMD_9 0x8e60
+#define A_ULP_TX_T5_FPGA_CMD_9 0x8e60
 #define A_ULP_TX_FPGA_CMD_10 0x8e64
+#define A_ULP_TX_T5_FPGA_CMD_10 0x8e64
 #define A_ULP_TX_FPGA_CMD_11 0x8e68
+#define A_ULP_TX_T5_FPGA_CMD_11 0x8e68
 #define A_ULP_TX_FPGA_CMD_12 0x8e6c
+#define A_ULP_TX_T5_FPGA_CMD_12 0x8e6c
 #define A_ULP_TX_FPGA_CMD_13 0x8e70
+#define A_ULP_TX_T5_FPGA_CMD_13 0x8e70
 #define A_ULP_TX_FPGA_CMD_14 0x8e74
+#define A_ULP_TX_T5_FPGA_CMD_14 0x8e74
 #define A_ULP_TX_FPGA_CMD_15 0x8e78
+#define A_ULP_TX_T5_FPGA_CMD_15 0x8e78
+#define A_ULP_TX_INT_ENABLE_2 0x8e7c
+
+#define S_SMARBT2ULP_DATA_PERR_SET    12
+#define V_SMARBT2ULP_DATA_PERR_SET(x) ((x) << S_SMARBT2ULP_DATA_PERR_SET)
+#define F_SMARBT2ULP_DATA_PERR_SET    V_SMARBT2ULP_DATA_PERR_SET(1U)
+
+#define S_ULP2TP_DATA_PERR_SET    11
+#define V_ULP2TP_DATA_PERR_SET(x) ((x) << S_ULP2TP_DATA_PERR_SET)
+#define F_ULP2TP_DATA_PERR_SET    V_ULP2TP_DATA_PERR_SET(1U)
+
+#define S_MA2ULP_DATA_PERR_SET    10
+#define V_MA2ULP_DATA_PERR_SET(x) ((x) << S_MA2ULP_DATA_PERR_SET)
+#define F_MA2ULP_DATA_PERR_SET    V_MA2ULP_DATA_PERR_SET(1U)
+
+#define S_SGE2ULP_DATA_PERR_SET    9
+#define V_SGE2ULP_DATA_PERR_SET(x) ((x) << S_SGE2ULP_DATA_PERR_SET)
+#define F_SGE2ULP_DATA_PERR_SET    V_SGE2ULP_DATA_PERR_SET(1U)
+
+#define S_CIM2ULP_DATA_PERR_SET    8
+#define V_CIM2ULP_DATA_PERR_SET(x) ((x) << S_CIM2ULP_DATA_PERR_SET)
+#define F_CIM2ULP_DATA_PERR_SET    V_CIM2ULP_DATA_PERR_SET(1U)
+
+#define S_FSO_HDR_SRAM_PERR_SET3    7
+#define V_FSO_HDR_SRAM_PERR_SET3(x) ((x) << S_FSO_HDR_SRAM_PERR_SET3)
+#define F_FSO_HDR_SRAM_PERR_SET3    V_FSO_HDR_SRAM_PERR_SET3(1U)
+
+#define S_FSO_HDR_SRAM_PERR_SET2    6
+#define V_FSO_HDR_SRAM_PERR_SET2(x) ((x) << S_FSO_HDR_SRAM_PERR_SET2)
+#define F_FSO_HDR_SRAM_PERR_SET2    V_FSO_HDR_SRAM_PERR_SET2(1U)
+
+#define S_FSO_HDR_SRAM_PERR_SET1    5
+#define V_FSO_HDR_SRAM_PERR_SET1(x) ((x) << S_FSO_HDR_SRAM_PERR_SET1)
+#define F_FSO_HDR_SRAM_PERR_SET1    V_FSO_HDR_SRAM_PERR_SET1(1U)
+
+#define S_FSO_HDR_SRAM_PERR_SET0    4
+#define V_FSO_HDR_SRAM_PERR_SET0(x) ((x) << S_FSO_HDR_SRAM_PERR_SET0)
+#define F_FSO_HDR_SRAM_PERR_SET0    V_FSO_HDR_SRAM_PERR_SET0(1U)
+
+#define S_T10_PI_SRAM_PERR_SET3    3
+#define V_T10_PI_SRAM_PERR_SET3(x) ((x) << S_T10_PI_SRAM_PERR_SET3)
+#define F_T10_PI_SRAM_PERR_SET3    V_T10_PI_SRAM_PERR_SET3(1U)
+
+#define S_T10_PI_SRAM_PERR_SET2    2
+#define V_T10_PI_SRAM_PERR_SET2(x) ((x) << S_T10_PI_SRAM_PERR_SET2)
+#define F_T10_PI_SRAM_PERR_SET2    V_T10_PI_SRAM_PERR_SET2(1U)
+
+#define S_T10_PI_SRAM_PERR_SET1    1
+#define V_T10_PI_SRAM_PERR_SET1(x) ((x) << S_T10_PI_SRAM_PERR_SET1)
+#define F_T10_PI_SRAM_PERR_SET1    V_T10_PI_SRAM_PERR_SET1(1U)
+
+#define S_T10_PI_SRAM_PERR_SET0    0
+#define V_T10_PI_SRAM_PERR_SET0(x) ((x) << S_T10_PI_SRAM_PERR_SET0)
+#define F_T10_PI_SRAM_PERR_SET0    V_T10_PI_SRAM_PERR_SET0(1U)
+
+#define S_EDMA_IN_FIFO_PERR_SET3    31
+#define V_EDMA_IN_FIFO_PERR_SET3(x) ((x) << S_EDMA_IN_FIFO_PERR_SET3)
+#define F_EDMA_IN_FIFO_PERR_SET3    V_EDMA_IN_FIFO_PERR_SET3(1U)
+
+#define S_EDMA_IN_FIFO_PERR_SET2    30
+#define V_EDMA_IN_FIFO_PERR_SET2(x) ((x) << S_EDMA_IN_FIFO_PERR_SET2)
+#define F_EDMA_IN_FIFO_PERR_SET2    V_EDMA_IN_FIFO_PERR_SET2(1U)
+
+#define S_EDMA_IN_FIFO_PERR_SET1    29
+#define V_EDMA_IN_FIFO_PERR_SET1(x) ((x) << S_EDMA_IN_FIFO_PERR_SET1)
+#define F_EDMA_IN_FIFO_PERR_SET1    V_EDMA_IN_FIFO_PERR_SET1(1U)
+
+#define S_EDMA_IN_FIFO_PERR_SET0    28
+#define V_EDMA_IN_FIFO_PERR_SET0(x) ((x) << S_EDMA_IN_FIFO_PERR_SET0)
+#define F_EDMA_IN_FIFO_PERR_SET0    V_EDMA_IN_FIFO_PERR_SET0(1U)
+
+#define S_ALIGN_CTL_FIFO_PERR_SET3    27
+#define V_ALIGN_CTL_FIFO_PERR_SET3(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET3)
+#define F_ALIGN_CTL_FIFO_PERR_SET3    V_ALIGN_CTL_FIFO_PERR_SET3(1U)
+
+#define S_ALIGN_CTL_FIFO_PERR_SET2    26
+#define V_ALIGN_CTL_FIFO_PERR_SET2(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET2)
+#define F_ALIGN_CTL_FIFO_PERR_SET2    V_ALIGN_CTL_FIFO_PERR_SET2(1U)
+
+#define S_ALIGN_CTL_FIFO_PERR_SET1    25
+#define V_ALIGN_CTL_FIFO_PERR_SET1(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET1)
+#define F_ALIGN_CTL_FIFO_PERR_SET1    V_ALIGN_CTL_FIFO_PERR_SET1(1U)
+
+#define S_ALIGN_CTL_FIFO_PERR_SET0    24
+#define V_ALIGN_CTL_FIFO_PERR_SET0(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET0)
+#define F_ALIGN_CTL_FIFO_PERR_SET0    V_ALIGN_CTL_FIFO_PERR_SET0(1U)
+
+#define S_SGE_FIFO_PERR_SET3    23
+#define V_SGE_FIFO_PERR_SET3(x) ((x) << S_SGE_FIFO_PERR_SET3)
+#define F_SGE_FIFO_PERR_SET3    V_SGE_FIFO_PERR_SET3(1U)
+
+#define S_SGE_FIFO_PERR_SET2    22
+#define V_SGE_FIFO_PERR_SET2(x) ((x) << S_SGE_FIFO_PERR_SET2)
+#define F_SGE_FIFO_PERR_SET2    V_SGE_FIFO_PERR_SET2(1U)
+
+#define S_SGE_FIFO_PERR_SET1    21
+#define V_SGE_FIFO_PERR_SET1(x) ((x) << S_SGE_FIFO_PERR_SET1)
+#define F_SGE_FIFO_PERR_SET1    V_SGE_FIFO_PERR_SET1(1U)
+
+#define S_SGE_FIFO_PERR_SET0    20
+#define V_SGE_FIFO_PERR_SET0(x) ((x) << S_SGE_FIFO_PERR_SET0)
+#define F_SGE_FIFO_PERR_SET0    V_SGE_FIFO_PERR_SET0(1U)
+
+#define S_STAG_FIFO_PERR_SET3    19
+#define V_STAG_FIFO_PERR_SET3(x) ((x) << S_STAG_FIFO_PERR_SET3)
+#define F_STAG_FIFO_PERR_SET3    V_STAG_FIFO_PERR_SET3(1U)
+
+#define S_STAG_FIFO_PERR_SET2    18
+#define V_STAG_FIFO_PERR_SET2(x) ((x) << S_STAG_FIFO_PERR_SET2)
+#define F_STAG_FIFO_PERR_SET2    V_STAG_FIFO_PERR_SET2(1U)
+
+#define S_STAG_FIFO_PERR_SET1    17
+#define V_STAG_FIFO_PERR_SET1(x) ((x) << S_STAG_FIFO_PERR_SET1)
+#define F_STAG_FIFO_PERR_SET1    V_STAG_FIFO_PERR_SET1(1U)
+
+#define S_STAG_FIFO_PERR_SET0    16
+#define V_STAG_FIFO_PERR_SET0(x) ((x) << S_STAG_FIFO_PERR_SET0)
+#define F_STAG_FIFO_PERR_SET0    V_STAG_FIFO_PERR_SET0(1U)
+
+#define S_MAP_FIFO_PERR_SET3    15
+#define V_MAP_FIFO_PERR_SET3(x) ((x) << S_MAP_FIFO_PERR_SET3)
+#define F_MAP_FIFO_PERR_SET3    V_MAP_FIFO_PERR_SET3(1U)
+
+#define S_MAP_FIFO_PERR_SET2    14
+#define V_MAP_FIFO_PERR_SET2(x) ((x) << S_MAP_FIFO_PERR_SET2)
+#define F_MAP_FIFO_PERR_SET2    V_MAP_FIFO_PERR_SET2(1U)
+
+#define S_MAP_FIFO_PERR_SET1    13
+#define V_MAP_FIFO_PERR_SET1(x) ((x) << S_MAP_FIFO_PERR_SET1)
+#define F_MAP_FIFO_PERR_SET1    V_MAP_FIFO_PERR_SET1(1U)
+
+#define S_MAP_FIFO_PERR_SET0    12
+#define V_MAP_FIFO_PERR_SET0(x) ((x) << S_MAP_FIFO_PERR_SET0)
+#define F_MAP_FIFO_PERR_SET0    V_MAP_FIFO_PERR_SET0(1U)
+
+#define S_DMA_FIFO_PERR_SET3    11
+#define V_DMA_FIFO_PERR_SET3(x) ((x) << S_DMA_FIFO_PERR_SET3)
+#define F_DMA_FIFO_PERR_SET3    V_DMA_FIFO_PERR_SET3(1U)
+
+#define S_DMA_FIFO_PERR_SET2    10
+#define V_DMA_FIFO_PERR_SET2(x) ((x) << S_DMA_FIFO_PERR_SET2)
+#define F_DMA_FIFO_PERR_SET2    V_DMA_FIFO_PERR_SET2(1U)
+
+#define S_DMA_FIFO_PERR_SET1    9
+#define V_DMA_FIFO_PERR_SET1(x) ((x) << S_DMA_FIFO_PERR_SET1)
+#define F_DMA_FIFO_PERR_SET1    V_DMA_FIFO_PERR_SET1(1U)
+
+#define S_DMA_FIFO_PERR_SET0    8
+#define V_DMA_FIFO_PERR_SET0(x) ((x) << S_DMA_FIFO_PERR_SET0)
+#define F_DMA_FIFO_PERR_SET0    V_DMA_FIFO_PERR_SET0(1U)
+
+#define A_ULP_TX_INT_CAUSE_2 0x8e80
+#define A_ULP_TX_PERR_ENABLE_2 0x8e84
 #define A_ULP_TX_SE_CNT_ERR 0x8ea0
 
 #define S_ERR_CH3    12
@@ -13854,6 +29404,7 @@
 #define V_ERR_CH0(x) ((x) << S_ERR_CH0)
 #define G_ERR_CH0(x) (((x) >> S_ERR_CH0) & M_ERR_CH0)
 
+#define A_ULP_TX_T5_SE_CNT_ERR 0x8ea0
 #define A_ULP_TX_SE_CNT_CLR 0x8ea4
 
 #define S_CLR_DROP    16
@@ -13881,6 +29432,7 @@
 #define V_CLR_CH0(x) ((x) << S_CLR_CH0)
 #define G_CLR_CH0(x) (((x) >> S_CLR_CH0) & M_CLR_CH0)
 
+#define A_ULP_TX_T5_SE_CNT_CLR 0x8ea4
 #define A_ULP_TX_SE_CNT_CH0 0x8ea8
 
 #define S_SOP_CNT_ULP2TP    28
@@ -13923,9 +29475,13 @@
 #define V_EOP_CNT_CIM2ULP(x) ((x) << S_EOP_CNT_CIM2ULP)
 #define G_EOP_CNT_CIM2ULP(x) (((x) >> S_EOP_CNT_CIM2ULP) & M_EOP_CNT_CIM2ULP)
 
+#define A_ULP_TX_T5_SE_CNT_CH0 0x8ea8
 #define A_ULP_TX_SE_CNT_CH1 0x8eac
+#define A_ULP_TX_T5_SE_CNT_CH1 0x8eac
 #define A_ULP_TX_SE_CNT_CH2 0x8eb0
+#define A_ULP_TX_T5_SE_CNT_CH2 0x8eb0
 #define A_ULP_TX_SE_CNT_CH3 0x8eb4
+#define A_ULP_TX_T5_SE_CNT_CH3 0x8eb4
 #define A_ULP_TX_DROP_CNT 0x8eb8
 
 #define S_DROP_CH3    12
@@ -13948,6 +29504,29 @@
 #define V_DROP_CH0(x) ((x) << S_DROP_CH0)
 #define G_DROP_CH0(x) (((x) >> S_DROP_CH0) & M_DROP_CH0)
 
+#define A_ULP_TX_T5_DROP_CNT 0x8eb8
+
+#define S_DROP_INVLD_MC_CH3    28
+#define M_DROP_INVLD_MC_CH3    0xfU
+#define V_DROP_INVLD_MC_CH3(x) ((x) << S_DROP_INVLD_MC_CH3)
+#define G_DROP_INVLD_MC_CH3(x) (((x) >> S_DROP_INVLD_MC_CH3) & M_DROP_INVLD_MC_CH3)
+
+#define S_DROP_INVLD_MC_CH2    24
+#define M_DROP_INVLD_MC_CH2    0xfU
+#define V_DROP_INVLD_MC_CH2(x) ((x) << S_DROP_INVLD_MC_CH2)
+#define G_DROP_INVLD_MC_CH2(x) (((x) >> S_DROP_INVLD_MC_CH2) & M_DROP_INVLD_MC_CH2)
+
+#define S_DROP_INVLD_MC_CH1    20
+#define M_DROP_INVLD_MC_CH1    0xfU
+#define V_DROP_INVLD_MC_CH1(x) ((x) << S_DROP_INVLD_MC_CH1)
+#define G_DROP_INVLD_MC_CH1(x) (((x) >> S_DROP_INVLD_MC_CH1) & M_DROP_INVLD_MC_CH1)
+
+#define S_DROP_INVLD_MC_CH0    16
+#define M_DROP_INVLD_MC_CH0    0xfU
+#define V_DROP_INVLD_MC_CH0(x) ((x) << S_DROP_INVLD_MC_CH0)
+#define G_DROP_INVLD_MC_CH0(x) (((x) >> S_DROP_INVLD_MC_CH0) & M_DROP_INVLD_MC_CH0)
+
+#define A_ULP_TX_CSU_REVISION 0x8ebc
 #define A_ULP_TX_LA_RDPTR_0 0x8ec0
 #define A_ULP_TX_LA_RDDATA_0 0x8ec4
 #define A_ULP_TX_LA_WRPTR_0 0x8ec8
@@ -13992,7 +29571,112 @@
 #define A_ULP_TX_LA_RDDATA_10 0x8f64
 #define A_ULP_TX_LA_WRPTR_10 0x8f68
 #define A_ULP_TX_LA_RESERVED_10 0x8f6c
+#define A_ULP_TX_ASIC_DEBUG_CTRL 0x8f70
 
+#define S_LA_WR0    0
+#define V_LA_WR0(x) ((x) << S_LA_WR0)
+#define F_LA_WR0    V_LA_WR0(1U)
+
+#define A_ULP_TX_ASIC_DEBUG_0 0x8f74
+#define A_ULP_TX_ASIC_DEBUG_1 0x8f78
+#define A_ULP_TX_ASIC_DEBUG_2 0x8f7c
+#define A_ULP_TX_ASIC_DEBUG_3 0x8f80
+#define A_ULP_TX_ASIC_DEBUG_4 0x8f84
+#define A_ULP_TX_CPL_TX_DATA_FLAGS_MASK 0x8f88
+
+#define S_BYPASS_FIRST    26
+#define V_BYPASS_FIRST(x) ((x) << S_BYPASS_FIRST)
+#define F_BYPASS_FIRST    V_BYPASS_FIRST(1U)
+
+#define S_BYPASS_MIDDLE    25
+#define V_BYPASS_MIDDLE(x) ((x) << S_BYPASS_MIDDLE)
+#define F_BYPASS_MIDDLE    V_BYPASS_MIDDLE(1U)
+
+#define S_BYPASS_LAST    24
+#define V_BYPASS_LAST(x) ((x) << S_BYPASS_LAST)
+#define F_BYPASS_LAST    V_BYPASS_LAST(1U)
+
+#define S_PUSH_FIRST    22
+#define V_PUSH_FIRST(x) ((x) << S_PUSH_FIRST)
+#define F_PUSH_FIRST    V_PUSH_FIRST(1U)
+
+#define S_PUSH_MIDDLE    21
+#define V_PUSH_MIDDLE(x) ((x) << S_PUSH_MIDDLE)
+#define F_PUSH_MIDDLE    V_PUSH_MIDDLE(1U)
+
+#define S_PUSH_LAST    20
+#define V_PUSH_LAST(x) ((x) << S_PUSH_LAST)
+#define F_PUSH_LAST    V_PUSH_LAST(1U)
+
+#define S_SAVE_FIRST    18
+#define V_SAVE_FIRST(x) ((x) << S_SAVE_FIRST)
+#define F_SAVE_FIRST    V_SAVE_FIRST(1U)
+
+#define S_SAVE_MIDDLE    17
+#define V_SAVE_MIDDLE(x) ((x) << S_SAVE_MIDDLE)
+#define F_SAVE_MIDDLE    V_SAVE_MIDDLE(1U)
+
+#define S_SAVE_LAST    16
+#define V_SAVE_LAST(x) ((x) << S_SAVE_LAST)
+#define F_SAVE_LAST    V_SAVE_LAST(1U)
+
+#define S_FLUSH_FIRST    14
+#define V_FLUSH_FIRST(x) ((x) << S_FLUSH_FIRST)
+#define F_FLUSH_FIRST    V_FLUSH_FIRST(1U)
+
+#define S_FLUSH_MIDDLE    13
+#define V_FLUSH_MIDDLE(x) ((x) << S_FLUSH_MIDDLE)
+#define F_FLUSH_MIDDLE    V_FLUSH_MIDDLE(1U)
+
+#define S_FLUSH_LAST    12
+#define V_FLUSH_LAST(x) ((x) << S_FLUSH_LAST)
+#define F_FLUSH_LAST    V_FLUSH_LAST(1U)
+
+#define S_URGENT_FIRST    10
+#define V_URGENT_FIRST(x) ((x) << S_URGENT_FIRST)
+#define F_URGENT_FIRST    V_URGENT_FIRST(1U)
+
+#define S_URGENT_MIDDLE    9
+#define V_URGENT_MIDDLE(x) ((x) << S_URGENT_MIDDLE)
+#define F_URGENT_MIDDLE    V_URGENT_MIDDLE(1U)
+
+#define S_URGENT_LAST    8
+#define V_URGENT_LAST(x) ((x) << S_URGENT_LAST)
+#define F_URGENT_LAST    V_URGENT_LAST(1U)
+
+#define S_MORE_FIRST    6
+#define V_MORE_FIRST(x) ((x) << S_MORE_FIRST)
+#define F_MORE_FIRST    V_MORE_FIRST(1U)
+
+#define S_MORE_MIDDLE    5
+#define V_MORE_MIDDLE(x) ((x) << S_MORE_MIDDLE)
+#define F_MORE_MIDDLE    V_MORE_MIDDLE(1U)
+
+#define S_MORE_LAST    4
+#define V_MORE_LAST(x) ((x) << S_MORE_LAST)
+#define F_MORE_LAST    V_MORE_LAST(1U)
+
+#define S_SHOVE_FIRST    2
+#define V_SHOVE_FIRST(x) ((x) << S_SHOVE_FIRST)
+#define F_SHOVE_FIRST    V_SHOVE_FIRST(1U)
+
+#define S_SHOVE_MIDDLE    1
+#define V_SHOVE_MIDDLE(x) ((x) << S_SHOVE_MIDDLE)
+#define F_SHOVE_MIDDLE    V_SHOVE_MIDDLE(1U)
+
+#define S_SHOVE_LAST    0
+#define V_SHOVE_LAST(x) ((x) << S_SHOVE_LAST)
+#define F_SHOVE_LAST    V_SHOVE_LAST(1U)
+
+#define A_ULP_TX_TLS_IND_CMD 0x8fb8
+
+#define S_TLS_TX_REG_OFF_ADDR    0
+#define M_TLS_TX_REG_OFF_ADDR    0x3ffU
+#define V_TLS_TX_REG_OFF_ADDR(x) ((x) << S_TLS_TX_REG_OFF_ADDR)
+#define G_TLS_TX_REG_OFF_ADDR(x) (((x) >> S_TLS_TX_REG_OFF_ADDR) & M_TLS_TX_REG_OFF_ADDR)
+
+#define A_ULP_TX_TLS_IND_DATA 0x8fbc
+
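[editorial note] The TLS_IND_CMD/TLS_IND_DATA pair above looks like Chelsio's usual indirect-window idiom (write an offset into the CMD register, then read the DATA register). A sketch on that assumption, using the t4_read_reg()/t4_write_reg() accessors the driver defines elsewhere; this is not code from the diff:

static uint32_t
ulp_tx_tls_read(struct adapter *sc, u_int off)
{
	t4_write_reg(sc, A_ULP_TX_TLS_IND_CMD, V_TLS_TX_REG_OFF_ADDR(off));
	return (t4_read_reg(sc, A_ULP_TX_TLS_IND_DATA));
}
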
 /* registers for module PM_RX */
 #define PM_RX_BASE_ADDR 0x8fc0
 
@@ -14019,7 +29703,25 @@
 #define A_PM_RX_STAT_CONFIG 0x8fc8
 #define A_PM_RX_STAT_COUNT 0x8fcc
 #define A_PM_RX_STAT_LSB 0x8fd0
+#define A_PM_RX_DBG_CTRL 0x8fd0
+
+#define S_OSPIWRBUSY_T5    21
+#define M_OSPIWRBUSY_T5    0x3U
+#define V_OSPIWRBUSY_T5(x) ((x) << S_OSPIWRBUSY_T5)
+#define G_OSPIWRBUSY_T5(x) (((x) >> S_OSPIWRBUSY_T5) & M_OSPIWRBUSY_T5)
+
+#define S_ISPIWRBUSY    17
+#define M_ISPIWRBUSY    0xfU
+#define V_ISPIWRBUSY(x) ((x) << S_ISPIWRBUSY)
+#define G_ISPIWRBUSY(x) (((x) >> S_ISPIWRBUSY) & M_ISPIWRBUSY)
+
+#define S_PMDBGADDR    0
+#define M_PMDBGADDR    0x1ffffU
+#define V_PMDBGADDR(x) ((x) << S_PMDBGADDR)
+#define G_PMDBGADDR(x) (((x) >> S_PMDBGADDR) & M_PMDBGADDR)
+
 #define A_PM_RX_STAT_MSB 0x8fd4
+#define A_PM_RX_DBG_DATA 0x8fd4
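[editorial note] The 17-bit PMDBGADDR field spans exactly the 0x10000-0x1ffff range given to the PM_RX debug registers later in this hunk, so those addresses are presumably reached through this DBG_CTRL/DBG_DATA pair rather than being directly mapped; that inference is not confirmed by the diff. A sketch under that assumption, reusing the driver's register accessors:

static uint32_t
pm_rx_dbg_read(struct adapter *sc, u_int addr)
{
	t4_write_reg(sc, A_PM_RX_DBG_CTRL, V_PMDBGADDR(addr));
	return (t4_read_reg(sc, A_PM_RX_DBG_DATA));
}
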
 #define A_PM_RX_INT_ENABLE 0x8fd8
 
 #define S_ZERO_E_CMD_ERROR    22
@@ -14114,8 +29816,621 @@
 #define V_E_PCMD_PAR_ERROR(x) ((x) << S_E_PCMD_PAR_ERROR)
 #define F_E_PCMD_PAR_ERROR    V_E_PCMD_PAR_ERROR(1U)
 
+#define S_OSPI_OVERFLOW1    28
+#define V_OSPI_OVERFLOW1(x) ((x) << S_OSPI_OVERFLOW1)
+#define F_OSPI_OVERFLOW1    V_OSPI_OVERFLOW1(1U)
+
+#define S_OSPI_OVERFLOW0    27
+#define V_OSPI_OVERFLOW0(x) ((x) << S_OSPI_OVERFLOW0)
+#define F_OSPI_OVERFLOW0    V_OSPI_OVERFLOW0(1U)
+
+#define S_MA_INTF_SDC_ERR    26
+#define V_MA_INTF_SDC_ERR(x) ((x) << S_MA_INTF_SDC_ERR)
+#define F_MA_INTF_SDC_ERR    V_MA_INTF_SDC_ERR(1U)
+
+#define S_BUNDLE_LEN_PARERR    25
+#define V_BUNDLE_LEN_PARERR(x) ((x) << S_BUNDLE_LEN_PARERR)
+#define F_BUNDLE_LEN_PARERR    V_BUNDLE_LEN_PARERR(1U)
+
+#define S_BUNDLE_LEN_OVFL    24
+#define V_BUNDLE_LEN_OVFL(x) ((x) << S_BUNDLE_LEN_OVFL)
+#define F_BUNDLE_LEN_OVFL    V_BUNDLE_LEN_OVFL(1U)
+
+#define S_SDC_ERR    23
+#define V_SDC_ERR(x) ((x) << S_SDC_ERR)
+#define F_SDC_ERR    V_SDC_ERR(1U)
+
 #define A_PM_RX_INT_CAUSE 0x8fdc
+#define A_PM_RX_ISPI_DBG_4B_DATA0 0x10000
+#define A_PM_RX_ISPI_DBG_4B_DATA1 0x10001
+#define A_PM_RX_ISPI_DBG_4B_DATA2 0x10002
+#define A_PM_RX_ISPI_DBG_4B_DATA3 0x10003
+#define A_PM_RX_ISPI_DBG_4B_DATA4 0x10004
+#define A_PM_RX_ISPI_DBG_4B_DATA5 0x10005
+#define A_PM_RX_ISPI_DBG_4B_DATA6 0x10006
+#define A_PM_RX_ISPI_DBG_4B_DATA7 0x10007
+#define A_PM_RX_ISPI_DBG_4B_DATA8 0x10008
+#define A_PM_RX_OSPI_DBG_4B_DATA0 0x10009
+#define A_PM_RX_OSPI_DBG_4B_DATA1 0x1000a
+#define A_PM_RX_OSPI_DBG_4B_DATA2 0x1000b
+#define A_PM_RX_OSPI_DBG_4B_DATA3 0x1000c
+#define A_PM_RX_OSPI_DBG_4B_DATA4 0x1000d
+#define A_PM_RX_OSPI_DBG_4B_DATA5 0x1000e
+#define A_PM_RX_OSPI_DBG_4B_DATA6 0x1000f
+#define A_PM_RX_OSPI_DBG_4B_DATA7 0x10010
+#define A_PM_RX_OSPI_DBG_4B_DATA8 0x10011
+#define A_PM_RX_OSPI_DBG_4B_DATA9 0x10012
+#define A_PM_RX_DBG_STAT_MSB 0x10013
+#define A_PM_RX_DBG_STAT_LSB 0x10014
+#define A_PM_RX_DBG_RSVD_FLIT_CNT 0x10015
 
+#define S_I_TO_O_PATH_RSVD_FLIT_BACKUP    12
+#define M_I_TO_O_PATH_RSVD_FLIT_BACKUP    0xfU
+#define V_I_TO_O_PATH_RSVD_FLIT_BACKUP(x) ((x) << S_I_TO_O_PATH_RSVD_FLIT_BACKUP)
+#define G_I_TO_O_PATH_RSVD_FLIT_BACKUP(x) (((x) >> S_I_TO_O_PATH_RSVD_FLIT_BACKUP) & M_I_TO_O_PATH_RSVD_FLIT_BACKUP)
+
+#define S_I_TO_O_PATH_RSVD_FLIT    8
+#define M_I_TO_O_PATH_RSVD_FLIT    0xfU
+#define V_I_TO_O_PATH_RSVD_FLIT(x) ((x) << S_I_TO_O_PATH_RSVD_FLIT)
+#define G_I_TO_O_PATH_RSVD_FLIT(x) (((x) >> S_I_TO_O_PATH_RSVD_FLIT) & M_I_TO_O_PATH_RSVD_FLIT)
+
+#define S_PRFCH_RSVD_FLIT    4
+#define M_PRFCH_RSVD_FLIT    0xfU
+#define V_PRFCH_RSVD_FLIT(x) ((x) << S_PRFCH_RSVD_FLIT)
+#define G_PRFCH_RSVD_FLIT(x) (((x) >> S_PRFCH_RSVD_FLIT) & M_PRFCH_RSVD_FLIT)
+
+#define S_OSPI_RSVD_FLIT    0
+#define M_OSPI_RSVD_FLIT    0xfU
+#define V_OSPI_RSVD_FLIT(x) ((x) << S_OSPI_RSVD_FLIT)
+#define G_OSPI_RSVD_FLIT(x) (((x) >> S_OSPI_RSVD_FLIT) & M_OSPI_RSVD_FLIT)
+
+#define A_PM_RX_SDC_EN 0x10016
+
+#define S_SDC_EN    0
+#define V_SDC_EN(x) ((x) << S_SDC_EN)
+#define F_SDC_EN    V_SDC_EN(1U)
+
+#define A_PM_RX_INOUT_FIFO_DBG_CHNL_SEL 0x10017
+
+#define S_CHNL_3_SEL    3
+#define V_CHNL_3_SEL(x) ((x) << S_CHNL_3_SEL)
+#define F_CHNL_3_SEL    V_CHNL_3_SEL(1U)
+
+#define S_CHNL_2_SEL    2
+#define V_CHNL_2_SEL(x) ((x) << S_CHNL_2_SEL)
+#define F_CHNL_2_SEL    V_CHNL_2_SEL(1U)
+
+#define S_CHNL_1_SEL    1
+#define V_CHNL_1_SEL(x) ((x) << S_CHNL_1_SEL)
+#define F_CHNL_1_SEL    V_CHNL_1_SEL(1U)
+
+#define S_CHNL_0_SEL    0
+#define V_CHNL_0_SEL(x) ((x) << S_CHNL_0_SEL)
+#define F_CHNL_0_SEL    V_CHNL_0_SEL(1U)
+
+#define A_PM_RX_INOUT_FIFO_DBG_WR 0x10018
+
+#define S_O_FIFO_WRITE    3
+#define V_O_FIFO_WRITE(x) ((x) << S_O_FIFO_WRITE)
+#define F_O_FIFO_WRITE    V_O_FIFO_WRITE(1U)
+
+#define S_I_FIFO_WRITE    2
+#define V_I_FIFO_WRITE(x) ((x) << S_I_FIFO_WRITE)
+#define F_I_FIFO_WRITE    V_I_FIFO_WRITE(1U)
+
+#define S_O_FIFO_READ    1
+#define V_O_FIFO_READ(x) ((x) << S_O_FIFO_READ)
+#define F_O_FIFO_READ    V_O_FIFO_READ(1U)
+
+#define S_I_FIFO_READ    0
+#define V_I_FIFO_READ(x) ((x) << S_I_FIFO_READ)
+#define F_I_FIFO_READ    V_I_FIFO_READ(1U)
+
+#define A_PM_RX_INPUT_FIFO_STR_FWD_EN 0x10019
+
+#define S_ISPI_STR_FWD_EN    0
+#define V_ISPI_STR_FWD_EN(x) ((x) << S_ISPI_STR_FWD_EN)
+#define F_ISPI_STR_FWD_EN    V_ISPI_STR_FWD_EN(1U)
+
+#define A_PM_RX_PRFTCH_ACROSS_BNDLE_EN 0x1001a
+
+#define S_PRFTCH_ACROSS_BNDLE_EN    0
+#define V_PRFTCH_ACROSS_BNDLE_EN(x) ((x) << S_PRFTCH_ACROSS_BNDLE_EN)
+#define F_PRFTCH_ACROSS_BNDLE_EN    V_PRFTCH_ACROSS_BNDLE_EN(1U)
+
+#define A_PM_RX_PRFTCH_WRR_ENABLE 0x1001b
+
+#define S_PRFTCH_WRR_ENABLE    0
+#define V_PRFTCH_WRR_ENABLE(x) ((x) << S_PRFTCH_WRR_ENABLE)
+#define F_PRFTCH_WRR_ENABLE    V_PRFTCH_WRR_ENABLE(1U)
+
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT 0x1001c
+
+#define S_CHNL1_MAX_DEFICIT_CNT    16
+#define M_CHNL1_MAX_DEFICIT_CNT    0xffffU
+#define V_CHNL1_MAX_DEFICIT_CNT(x) ((x) << S_CHNL1_MAX_DEFICIT_CNT)
+#define G_CHNL1_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL1_MAX_DEFICIT_CNT) & M_CHNL1_MAX_DEFICIT_CNT)
+
+#define S_CHNL0_MAX_DEFICIT_CNT    0
+#define M_CHNL0_MAX_DEFICIT_CNT    0xffffU
+#define V_CHNL0_MAX_DEFICIT_CNT(x) ((x) << S_CHNL0_MAX_DEFICIT_CNT)
+#define G_CHNL0_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL0_MAX_DEFICIT_CNT) & M_CHNL0_MAX_DEFICIT_CNT)
+
+#define A_PM_RX_FEATURE_EN 0x1001d
+
+#define S_PIO_CH_DEFICIT_CTL_EN_RX    0
+#define V_PIO_CH_DEFICIT_CTL_EN_RX(x) ((x) << S_PIO_CH_DEFICIT_CTL_EN_RX)
+#define F_PIO_CH_DEFICIT_CTL_EN_RX    V_PIO_CH_DEFICIT_CTL_EN_RX(1U)
+
+#define A_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x1001e
+
+#define S_CH0_OSPI_DEFICIT_THRSHLD    0
+#define M_CH0_OSPI_DEFICIT_THRSHLD    0xfffU
+#define V_CH0_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH0_OSPI_DEFICIT_THRSHLD)
+#define G_CH0_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH0_OSPI_DEFICIT_THRSHLD) & M_CH0_OSPI_DEFICIT_THRSHLD)
+
+#define A_PM_RX_CH1_OSPI_DEFICIT_THRSHLD 0x1001f
+
+#define S_CH1_OSPI_DEFICIT_THRSHLD    0
+#define M_CH1_OSPI_DEFICIT_THRSHLD    0xfffU
+#define V_CH1_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH1_OSPI_DEFICIT_THRSHLD)
+#define G_CH1_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH1_OSPI_DEFICIT_THRSHLD) & M_CH1_OSPI_DEFICIT_THRSHLD)
+
+#define A_PM_RX_INT_CAUSE_MASK_HALT 0x10020
+#define A_PM_RX_DBG_STAT0 0x10021
+
+#define S_RX_RD_I_BUSY    29
+#define V_RX_RD_I_BUSY(x) ((x) << S_RX_RD_I_BUSY)
+#define F_RX_RD_I_BUSY    V_RX_RD_I_BUSY(1U)
+
+#define S_RX_WR_TO_O_BUSY    28
+#define V_RX_WR_TO_O_BUSY(x) ((x) << S_RX_WR_TO_O_BUSY)
+#define F_RX_WR_TO_O_BUSY    V_RX_WR_TO_O_BUSY(1U)
+
+#define S_RX_M_TO_O_BUSY    27
+#define V_RX_M_TO_O_BUSY(x) ((x) << S_RX_M_TO_O_BUSY)
+#define F_RX_M_TO_O_BUSY    V_RX_M_TO_O_BUSY(1U)
+
+#define S_RX_I_TO_M_BUSY    26
+#define V_RX_I_TO_M_BUSY(x) ((x) << S_RX_I_TO_M_BUSY)
+#define F_RX_I_TO_M_BUSY    V_RX_I_TO_M_BUSY(1U)
+
+#define S_RX_PCMD_FB_ONLY    25
+#define V_RX_PCMD_FB_ONLY(x) ((x) << S_RX_PCMD_FB_ONLY)
+#define F_RX_PCMD_FB_ONLY    V_RX_PCMD_FB_ONLY(1U)
+
+#define S_RX_PCMD_MEM    24
+#define V_RX_PCMD_MEM(x) ((x) << S_RX_PCMD_MEM)
+#define F_RX_PCMD_MEM    V_RX_PCMD_MEM(1U)
+
+#define S_RX_PCMD_BYPASS    23
+#define V_RX_PCMD_BYPASS(x) ((x) << S_RX_PCMD_BYPASS)
+#define F_RX_PCMD_BYPASS    V_RX_PCMD_BYPASS(1U)
+
+#define S_RX_PCMD_EOP    22
+#define V_RX_PCMD_EOP(x) ((x) << S_RX_PCMD_EOP)
+#define F_RX_PCMD_EOP    V_RX_PCMD_EOP(1U)
+
+#define S_RX_DUMPLICATE_PCMD_EOP    21
+#define V_RX_DUMPLICATE_PCMD_EOP(x) ((x) << S_RX_DUMPLICATE_PCMD_EOP)
+#define F_RX_DUMPLICATE_PCMD_EOP    V_RX_DUMPLICATE_PCMD_EOP(1U)
+
+#define S_RX_PCMD_EOB    20
+#define V_RX_PCMD_EOB(x) ((x) << S_RX_PCMD_EOB)
+#define F_RX_PCMD_EOB    V_RX_PCMD_EOB(1U)
+
+#define S_RX_PCMD_FB    16
+#define M_RX_PCMD_FB    0xfU
+#define V_RX_PCMD_FB(x) ((x) << S_RX_PCMD_FB)
+#define G_RX_PCMD_FB(x) (((x) >> S_RX_PCMD_FB) & M_RX_PCMD_FB)
+
+#define S_RX_PCMD_LEN    0
+#define M_RX_PCMD_LEN    0xffffU
+#define V_RX_PCMD_LEN(x) ((x) << S_RX_PCMD_LEN)
+#define G_RX_PCMD_LEN(x) (((x) >> S_RX_PCMD_LEN) & M_RX_PCMD_LEN)
+
+#define A_PM_RX_DBG_STAT1 0x10022
+
+#define S_RX_PCMD0_MEM    30
+#define V_RX_PCMD0_MEM(x) ((x) << S_RX_PCMD0_MEM)
+#define F_RX_PCMD0_MEM    V_RX_PCMD0_MEM(1U)
+
+#define S_RX_FREE_OSPI_CNT0    18
+#define M_RX_FREE_OSPI_CNT0    0xfffU
+#define V_RX_FREE_OSPI_CNT0(x) ((x) << S_RX_FREE_OSPI_CNT0)
+#define G_RX_FREE_OSPI_CNT0(x) (((x) >> S_RX_FREE_OSPI_CNT0) & M_RX_FREE_OSPI_CNT0)
+
+#define S_RX_PCMD0_FLIT_LEN    6
+#define M_RX_PCMD0_FLIT_LEN    0xfffU
+#define V_RX_PCMD0_FLIT_LEN(x) ((x) << S_RX_PCMD0_FLIT_LEN)
+#define G_RX_PCMD0_FLIT_LEN(x) (((x) >> S_RX_PCMD0_FLIT_LEN) & M_RX_PCMD0_FLIT_LEN)
+
+#define S_RX_PCMD0_CMD    2
+#define M_RX_PCMD0_CMD    0xfU
+#define V_RX_PCMD0_CMD(x) ((x) << S_RX_PCMD0_CMD)
+#define G_RX_PCMD0_CMD(x) (((x) >> S_RX_PCMD0_CMD) & M_RX_PCMD0_CMD)
+
+#define S_RX_OFIFO_FULL0    1
+#define V_RX_OFIFO_FULL0(x) ((x) << S_RX_OFIFO_FULL0)
+#define F_RX_OFIFO_FULL0    V_RX_OFIFO_FULL0(1U)
+
+#define S_RX_PCMD0_BYPASS    0
+#define V_RX_PCMD0_BYPASS(x) ((x) << S_RX_PCMD0_BYPASS)
+#define F_RX_PCMD0_BYPASS    V_RX_PCMD0_BYPASS(1U)
+
+#define A_PM_RX_DBG_STAT2 0x10023
+
+#define S_RX_PCMD1_MEM    30
+#define V_RX_PCMD1_MEM(x) ((x) << S_RX_PCMD1_MEM)
+#define F_RX_PCMD1_MEM    V_RX_PCMD1_MEM(1U)
+
+#define S_RX_FREE_OSPI_CNT1    18
+#define M_RX_FREE_OSPI_CNT1    0xfffU
+#define V_RX_FREE_OSPI_CNT1(x) ((x) << S_RX_FREE_OSPI_CNT1)
+#define G_RX_FREE_OSPI_CNT1(x) (((x) >> S_RX_FREE_OSPI_CNT1) & M_RX_FREE_OSPI_CNT1)
+
+#define S_RX_PCMD1_FLIT_LEN    6
+#define M_RX_PCMD1_FLIT_LEN    0xfffU
+#define V_RX_PCMD1_FLIT_LEN(x) ((x) << S_RX_PCMD1_FLIT_LEN)
+#define G_RX_PCMD1_FLIT_LEN(x) (((x) >> S_RX_PCMD1_FLIT_LEN) & M_RX_PCMD1_FLIT_LEN)
+
+#define S_RX_PCMD1_CMD    2
+#define M_RX_PCMD1_CMD    0xfU
+#define V_RX_PCMD1_CMD(x) ((x) << S_RX_PCMD1_CMD)
+#define G_RX_PCMD1_CMD(x) (((x) >> S_RX_PCMD1_CMD) & M_RX_PCMD1_CMD)
+
+#define S_RX_OFIFO_FULL1    1
+#define V_RX_OFIFO_FULL1(x) ((x) << S_RX_OFIFO_FULL1)
+#define F_RX_OFIFO_FULL1    V_RX_OFIFO_FULL1(1U)
+
+#define S_RX_PCMD1_BYPASS    0
+#define V_RX_PCMD1_BYPASS(x) ((x) << S_RX_PCMD1_BYPASS)
+#define F_RX_PCMD1_BYPASS    V_RX_PCMD1_BYPASS(1U)
+
+#define A_PM_RX_DBG_STAT3 0x10024
+
+#define S_RX_SET_PCMD_RES_RDY_RD    10
+#define M_RX_SET_PCMD_RES_RDY_RD    0x3U
+#define V_RX_SET_PCMD_RES_RDY_RD(x) ((x) << S_RX_SET_PCMD_RES_RDY_RD)
+#define G_RX_SET_PCMD_RES_RDY_RD(x) (((x) >> S_RX_SET_PCMD_RES_RDY_RD) & M_RX_SET_PCMD_RES_RDY_RD)
+
+#define S_RX_ISSUED_PREFETCH_RD_E_CLR    8
+#define M_RX_ISSUED_PREFETCH_RD_E_CLR    0x3U
+#define V_RX_ISSUED_PREFETCH_RD_E_CLR(x) ((x) << S_RX_ISSUED_PREFETCH_RD_E_CLR)
+#define G_RX_ISSUED_PREFETCH_RD_E_CLR(x) (((x) >> S_RX_ISSUED_PREFETCH_RD_E_CLR) & M_RX_ISSUED_PREFETCH_RD_E_CLR)
+
+#define S_RX_ISSUED_PREFETCH_RD    6
+#define M_RX_ISSUED_PREFETCH_RD    0x3U
+#define V_RX_ISSUED_PREFETCH_RD(x) ((x) << S_RX_ISSUED_PREFETCH_RD)
+#define G_RX_ISSUED_PREFETCH_RD(x) (((x) >> S_RX_ISSUED_PREFETCH_RD) & M_RX_ISSUED_PREFETCH_RD)
+
+#define S_RX_PCMD_RES_RDY    4
+#define M_RX_PCMD_RES_RDY    0x3U
+#define V_RX_PCMD_RES_RDY(x) ((x) << S_RX_PCMD_RES_RDY)
+#define G_RX_PCMD_RES_RDY(x) (((x) >> S_RX_PCMD_RES_RDY) & M_RX_PCMD_RES_RDY)
+
+#define S_RX_DB_VLD    3
+#define V_RX_DB_VLD(x) ((x) << S_RX_DB_VLD)
+#define F_RX_DB_VLD    V_RX_DB_VLD(1U)
+
+#define S_RX_FIRST_BUNDLE    1
+#define M_RX_FIRST_BUNDLE    0x3U
+#define V_RX_FIRST_BUNDLE(x) ((x) << S_RX_FIRST_BUNDLE)
+#define G_RX_FIRST_BUNDLE(x) (((x) >> S_RX_FIRST_BUNDLE) & M_RX_FIRST_BUNDLE)
+
+#define S_RX_SDC_DRDY    0
+#define V_RX_SDC_DRDY(x) ((x) << S_RX_SDC_DRDY)
+#define F_RX_SDC_DRDY    V_RX_SDC_DRDY(1U)
+
+#define A_PM_RX_DBG_STAT4 0x10025
+
+#define S_RX_PCMD_VLD    26
+#define V_RX_PCMD_VLD(x) ((x) << S_RX_PCMD_VLD)
+#define F_RX_PCMD_VLD    V_RX_PCMD_VLD(1U)
+
+#define S_RX_PCMD_TO_CH    25
+#define V_RX_PCMD_TO_CH(x) ((x) << S_RX_PCMD_TO_CH)
+#define F_RX_PCMD_TO_CH    V_RX_PCMD_TO_CH(1U)
+
+#define S_RX_PCMD_FROM_CH    23
+#define M_RX_PCMD_FROM_CH    0x3U
+#define V_RX_PCMD_FROM_CH(x) ((x) << S_RX_PCMD_FROM_CH)
+#define G_RX_PCMD_FROM_CH(x) (((x) >> S_RX_PCMD_FROM_CH) & M_RX_PCMD_FROM_CH)
+
+#define S_RX_LINE    18
+#define M_RX_LINE    0x1fU
+#define V_RX_LINE(x) ((x) << S_RX_LINE)
+#define G_RX_LINE(x) (((x) >> S_RX_LINE) & M_RX_LINE)
+
+#define S_RX_IESPI_TXVALID    14
+#define M_RX_IESPI_TXVALID    0xfU
+#define V_RX_IESPI_TXVALID(x) ((x) << S_RX_IESPI_TXVALID)
+#define G_RX_IESPI_TXVALID(x) (((x) >> S_RX_IESPI_TXVALID) & M_RX_IESPI_TXVALID)
+
+#define S_RX_IESPI_TXFULL    10
+#define M_RX_IESPI_TXFULL    0xfU
+#define V_RX_IESPI_TXFULL(x) ((x) << S_RX_IESPI_TXFULL)
+#define G_RX_IESPI_TXFULL(x) (((x) >> S_RX_IESPI_TXFULL) & M_RX_IESPI_TXFULL)
+
+#define S_RX_PCMD_SRDY    8
+#define M_RX_PCMD_SRDY    0x3U
+#define V_RX_PCMD_SRDY(x) ((x) << S_RX_PCMD_SRDY)
+#define G_RX_PCMD_SRDY(x) (((x) >> S_RX_PCMD_SRDY) & M_RX_PCMD_SRDY)
+
+#define S_RX_PCMD_DRDY    6
+#define M_RX_PCMD_DRDY    0x3U
+#define V_RX_PCMD_DRDY(x) ((x) << S_RX_PCMD_DRDY)
+#define G_RX_PCMD_DRDY(x) (((x) >> S_RX_PCMD_DRDY) & M_RX_PCMD_DRDY)
+
+#define S_RX_PCMD_CMD    2
+#define M_RX_PCMD_CMD    0xfU
+#define V_RX_PCMD_CMD(x) ((x) << S_RX_PCMD_CMD)
+#define G_RX_PCMD_CMD(x) (((x) >> S_RX_PCMD_CMD) & M_RX_PCMD_CMD)
+
+#define S_DUPLICATE    0
+#define M_DUPLICATE    0x3U
+#define V_DUPLICATE(x) ((x) << S_DUPLICATE)
+#define G_DUPLICATE(x) (((x) >> S_DUPLICATE) & M_DUPLICATE)
+
+#define S_RX_PCMD_SRDY_STAT4    8
+#define M_RX_PCMD_SRDY_STAT4    0x3U
+#define V_RX_PCMD_SRDY_STAT4(x) ((x) << S_RX_PCMD_SRDY_STAT4)
+#define G_RX_PCMD_SRDY_STAT4(x) (((x) >> S_RX_PCMD_SRDY_STAT4) & M_RX_PCMD_SRDY_STAT4)
+
+#define S_RX_PCMD_DRDY_STAT4    6
+#define M_RX_PCMD_DRDY_STAT4    0x3U
+#define V_RX_PCMD_DRDY_STAT4(x) ((x) << S_RX_PCMD_DRDY_STAT4)
+#define G_RX_PCMD_DRDY_STAT4(x) (((x) >> S_RX_PCMD_DRDY_STAT4) & M_RX_PCMD_DRDY_STAT4)
+
+#define A_PM_RX_DBG_STAT5 0x10026
+
+#define S_RX_ATLST_1_PCMD_CH1    29
+#define V_RX_ATLST_1_PCMD_CH1(x) ((x) << S_RX_ATLST_1_PCMD_CH1)
+#define F_RX_ATLST_1_PCMD_CH1    V_RX_ATLST_1_PCMD_CH1(1U)
+
+#define S_RX_ATLST_1_PCMD_CH0    28
+#define V_RX_ATLST_1_PCMD_CH0(x) ((x) << S_RX_ATLST_1_PCMD_CH0)
+#define F_RX_ATLST_1_PCMD_CH0    V_RX_ATLST_1_PCMD_CH0(1U)
+
+#define S_T5_RX_PCMD_DRDY    26
+#define M_T5_RX_PCMD_DRDY    0x3U
+#define V_T5_RX_PCMD_DRDY(x) ((x) << S_T5_RX_PCMD_DRDY)
+#define G_T5_RX_PCMD_DRDY(x) (((x) >> S_T5_RX_PCMD_DRDY) & M_T5_RX_PCMD_DRDY)
+
+#define S_T5_RX_PCMD_SRDY    24
+#define M_T5_RX_PCMD_SRDY    0x3U
+#define V_T5_RX_PCMD_SRDY(x) ((x) << S_T5_RX_PCMD_SRDY)
+#define G_T5_RX_PCMD_SRDY(x) (((x) >> S_T5_RX_PCMD_SRDY) & M_T5_RX_PCMD_SRDY)
+
+#define S_RX_ISPI_TXVALID    20
+#define M_RX_ISPI_TXVALID    0xfU
+#define V_RX_ISPI_TXVALID(x) ((x) << S_RX_ISPI_TXVALID)
+#define G_RX_ISPI_TXVALID(x) (((x) >> S_RX_ISPI_TXVALID) & M_RX_ISPI_TXVALID)
+
+#define S_RX_ISPI_FULL    16
+#define M_RX_ISPI_FULL    0xfU
+#define V_RX_ISPI_FULL(x) ((x) << S_RX_ISPI_FULL)
+#define G_RX_ISPI_FULL(x) (((x) >> S_RX_ISPI_FULL) & M_RX_ISPI_FULL)
+
+#define S_RX_OSPI_TXVALID    14
+#define M_RX_OSPI_TXVALID    0x3U
+#define V_RX_OSPI_TXVALID(x) ((x) << S_RX_OSPI_TXVALID)
+#define G_RX_OSPI_TXVALID(x) (((x) >> S_RX_OSPI_TXVALID) & M_RX_OSPI_TXVALID)
+
+#define S_RX_OSPI_FULL    12
+#define M_RX_OSPI_FULL    0x3U
+#define V_RX_OSPI_FULL(x) ((x) << S_RX_OSPI_FULL)
+#define G_RX_OSPI_FULL(x) (((x) >> S_RX_OSPI_FULL) & M_RX_OSPI_FULL)
+
+#define S_RX_E_RXVALID    8
+#define M_RX_E_RXVALID    0xfU
+#define V_RX_E_RXVALID(x) ((x) << S_RX_E_RXVALID)
+#define G_RX_E_RXVALID(x) (((x) >> S_RX_E_RXVALID) & M_RX_E_RXVALID)
+
+#define S_RX_E_RXAFULL    4
+#define M_RX_E_RXAFULL    0xfU
+#define V_RX_E_RXAFULL(x) ((x) << S_RX_E_RXAFULL)
+#define G_RX_E_RXAFULL(x) (((x) >> S_RX_E_RXAFULL) & M_RX_E_RXAFULL)
+
+#define S_RX_C_TXVALID    2
+#define M_RX_C_TXVALID    0x3U
+#define V_RX_C_TXVALID(x) ((x) << S_RX_C_TXVALID)
+#define G_RX_C_TXVALID(x) (((x) >> S_RX_C_TXVALID) & M_RX_C_TXVALID)
+
+#define S_RX_C_TXAFULL    0
+#define M_RX_C_TXAFULL    0x3U
+#define V_RX_C_TXAFULL(x) ((x) << S_RX_C_TXAFULL)
+#define G_RX_C_TXAFULL(x) (((x) >> S_RX_C_TXAFULL) & M_RX_C_TXAFULL)
+
+#define S_T6_RX_PCMD_DRDY    26
+#define M_T6_RX_PCMD_DRDY    0x3U
+#define V_T6_RX_PCMD_DRDY(x) ((x) << S_T6_RX_PCMD_DRDY)
+#define G_T6_RX_PCMD_DRDY(x) (((x) >> S_T6_RX_PCMD_DRDY) & M_T6_RX_PCMD_DRDY)
+
+#define S_T6_RX_PCMD_SRDY    24
+#define M_T6_RX_PCMD_SRDY    0x3U
+#define V_T6_RX_PCMD_SRDY(x) ((x) << S_T6_RX_PCMD_SRDY)
+#define G_T6_RX_PCMD_SRDY(x) (((x) >> S_T6_RX_PCMD_SRDY) & M_T6_RX_PCMD_SRDY)
+
+#define A_PM_RX_DBG_STAT6 0x10027
+
+#define S_RX_M_INTRNL_FIFO_CNT    4
+#define M_RX_M_INTRNL_FIFO_CNT    0x3U
+#define V_RX_M_INTRNL_FIFO_CNT(x) ((x) << S_RX_M_INTRNL_FIFO_CNT)
+#define G_RX_M_INTRNL_FIFO_CNT(x) (((x) >> S_RX_M_INTRNL_FIFO_CNT) & M_RX_M_INTRNL_FIFO_CNT)
+
+#define S_RX_M_REQADDRRDY    3
+#define V_RX_M_REQADDRRDY(x) ((x) << S_RX_M_REQADDRRDY)
+#define F_RX_M_REQADDRRDY    V_RX_M_REQADDRRDY(1U)
+
+#define S_RX_M_REQWRITE    2
+#define V_RX_M_REQWRITE(x) ((x) << S_RX_M_REQWRITE)
+#define F_RX_M_REQWRITE    V_RX_M_REQWRITE(1U)
+
+#define S_RX_M_REQDATAVLD    1
+#define V_RX_M_REQDATAVLD(x) ((x) << S_RX_M_REQDATAVLD)
+#define F_RX_M_REQDATAVLD    V_RX_M_REQDATAVLD(1U)
+
+#define S_RX_M_REQDATARDY    0
+#define V_RX_M_REQDATARDY(x) ((x) << S_RX_M_REQDATARDY)
+#define F_RX_M_REQDATARDY    V_RX_M_REQDATARDY(1U)
+
+#define S_T6_RX_M_INTRNL_FIFO_CNT    7
+#define M_T6_RX_M_INTRNL_FIFO_CNT    0x3U
+#define V_T6_RX_M_INTRNL_FIFO_CNT(x) ((x) << S_T6_RX_M_INTRNL_FIFO_CNT)
+#define G_T6_RX_M_INTRNL_FIFO_CNT(x) (((x) >> S_T6_RX_M_INTRNL_FIFO_CNT) & M_T6_RX_M_INTRNL_FIFO_CNT)
+
+#define S_RX_M_RSPVLD    6
+#define V_RX_M_RSPVLD(x) ((x) << S_RX_M_RSPVLD)
+#define F_RX_M_RSPVLD    V_RX_M_RSPVLD(1U)
+
+#define S_RX_M_RSPRDY    5
+#define V_RX_M_RSPRDY(x) ((x) << S_RX_M_RSPRDY)
+#define F_RX_M_RSPRDY    V_RX_M_RSPRDY(1U)
+
+#define S_RX_M_REQADDRVLD    4
+#define V_RX_M_REQADDRVLD(x) ((x) << S_RX_M_REQADDRVLD)
+#define F_RX_M_REQADDRVLD    V_RX_M_REQADDRVLD(1U)
+
+#define A_PM_RX_DBG_STAT7 0x10028
+
+#define S_RX_PCMD1_FREE_CNT    7
+#define M_RX_PCMD1_FREE_CNT    0x7fU
+#define V_RX_PCMD1_FREE_CNT(x) ((x) << S_RX_PCMD1_FREE_CNT)
+#define G_RX_PCMD1_FREE_CNT(x) (((x) >> S_RX_PCMD1_FREE_CNT) & M_RX_PCMD1_FREE_CNT)
+
+#define S_RX_PCMD0_FREE_CNT    0
+#define M_RX_PCMD0_FREE_CNT    0x7fU
+#define V_RX_PCMD0_FREE_CNT(x) ((x) << S_RX_PCMD0_FREE_CNT)
+#define G_RX_PCMD0_FREE_CNT(x) (((x) >> S_RX_PCMD0_FREE_CNT) & M_RX_PCMD0_FREE_CNT)
+
+#define A_PM_RX_DBG_STAT8 0x10029
+
+#define S_RX_IN_EOP_CNT3    28
+#define M_RX_IN_EOP_CNT3    0xfU
+#define V_RX_IN_EOP_CNT3(x) ((x) << S_RX_IN_EOP_CNT3)
+#define G_RX_IN_EOP_CNT3(x) (((x) >> S_RX_IN_EOP_CNT3) & M_RX_IN_EOP_CNT3)
+
+#define S_RX_IN_EOP_CNT2    24
+#define M_RX_IN_EOP_CNT2    0xfU
+#define V_RX_IN_EOP_CNT2(x) ((x) << S_RX_IN_EOP_CNT2)
+#define G_RX_IN_EOP_CNT2(x) (((x) >> S_RX_IN_EOP_CNT2) & M_RX_IN_EOP_CNT2)
+
+#define S_RX_IN_EOP_CNT1    20
+#define M_RX_IN_EOP_CNT1    0xfU
+#define V_RX_IN_EOP_CNT1(x) ((x) << S_RX_IN_EOP_CNT1)
+#define G_RX_IN_EOP_CNT1(x) (((x) >> S_RX_IN_EOP_CNT1) & M_RX_IN_EOP_CNT1)
+
+#define S_RX_IN_EOP_CNT0    16
+#define M_RX_IN_EOP_CNT0    0xfU
+#define V_RX_IN_EOP_CNT0(x) ((x) << S_RX_IN_EOP_CNT0)
+#define G_RX_IN_EOP_CNT0(x) (((x) >> S_RX_IN_EOP_CNT0) & M_RX_IN_EOP_CNT0)
+
+#define S_RX_IN_SOP_CNT3    12
+#define M_RX_IN_SOP_CNT3    0xfU
+#define V_RX_IN_SOP_CNT3(x) ((x) << S_RX_IN_SOP_CNT3)
+#define G_RX_IN_SOP_CNT3(x) (((x) >> S_RX_IN_SOP_CNT3) & M_RX_IN_SOP_CNT3)
+
+#define S_RX_IN_SOP_CNT2    8
+#define M_RX_IN_SOP_CNT2    0xfU
+#define V_RX_IN_SOP_CNT2(x) ((x) << S_RX_IN_SOP_CNT2)
+#define G_RX_IN_SOP_CNT2(x) (((x) >> S_RX_IN_SOP_CNT2) & M_RX_IN_SOP_CNT2)
+
+#define S_RX_IN_SOP_CNT1    4
+#define M_RX_IN_SOP_CNT1    0xfU
+#define V_RX_IN_SOP_CNT1(x) ((x) << S_RX_IN_SOP_CNT1)
+#define G_RX_IN_SOP_CNT1(x) (((x) >> S_RX_IN_SOP_CNT1) & M_RX_IN_SOP_CNT1)
+
+#define S_RX_IN_SOP_CNT0    0
+#define M_RX_IN_SOP_CNT0    0xfU
+#define V_RX_IN_SOP_CNT0(x) ((x) << S_RX_IN_SOP_CNT0)
+#define G_RX_IN_SOP_CNT0(x) (((x) >> S_RX_IN_SOP_CNT0) & M_RX_IN_SOP_CNT0)
+
+#define A_PM_RX_DBG_STAT9 0x1002a
+
+#define S_RX_RSVD0    28
+#define M_RX_RSVD0    0xfU
+#define V_RX_RSVD0(x) ((x) << S_RX_RSVD0)
+#define G_RX_RSVD0(x) (((x) >> S_RX_RSVD0) & M_RX_RSVD0)
+
+#define S_RX_RSVD1    24
+#define M_RX_RSVD1    0xfU
+#define V_RX_RSVD1(x) ((x) << S_RX_RSVD1)
+#define G_RX_RSVD1(x) (((x) >> S_RX_RSVD1) & M_RX_RSVD1)
+
+#define S_RX_OUT_EOP_CNT1    20
+#define M_RX_OUT_EOP_CNT1    0xfU
+#define V_RX_OUT_EOP_CNT1(x) ((x) << S_RX_OUT_EOP_CNT1)
+#define G_RX_OUT_EOP_CNT1(x) (((x) >> S_RX_OUT_EOP_CNT1) & M_RX_OUT_EOP_CNT1)
+
+#define S_RX_OUT_EOP_CNT0    16
+#define M_RX_OUT_EOP_CNT0    0xfU
+#define V_RX_OUT_EOP_CNT0(x) ((x) << S_RX_OUT_EOP_CNT0)
+#define G_RX_OUT_EOP_CNT0(x) (((x) >> S_RX_OUT_EOP_CNT0) & M_RX_OUT_EOP_CNT0)
+
+#define S_RX_RSVD2    12
+#define M_RX_RSVD2    0xfU
+#define V_RX_RSVD2(x) ((x) << S_RX_RSVD2)
+#define G_RX_RSVD2(x) (((x) >> S_RX_RSVD2) & M_RX_RSVD2)
+
+#define S_RX_RSVD3    8
+#define M_RX_RSVD3    0xfU
+#define V_RX_RSVD3(x) ((x) << S_RX_RSVD3)
+#define G_RX_RSVD3(x) (((x) >> S_RX_RSVD3) & M_RX_RSVD3)
+
+#define S_RX_OUT_SOP_CNT1    4
+#define M_RX_OUT_SOP_CNT1    0xfU
+#define V_RX_OUT_SOP_CNT1(x) ((x) << S_RX_OUT_SOP_CNT1)
+#define G_RX_OUT_SOP_CNT1(x) (((x) >> S_RX_OUT_SOP_CNT1) & M_RX_OUT_SOP_CNT1)
+
+#define S_RX_OUT_SOP_CNT0    0
+#define M_RX_OUT_SOP_CNT0    0xfU
+#define V_RX_OUT_SOP_CNT0(x) ((x) << S_RX_OUT_SOP_CNT0)
+#define G_RX_OUT_SOP_CNT0(x) (((x) >> S_RX_OUT_SOP_CNT0) & M_RX_OUT_SOP_CNT0)
+
+#define A_PM_RX_DBG_STAT10 0x1002b
+
+#define S_RX_CH_DEFICIT_BLOWED    24
+#define V_RX_CH_DEFICIT_BLOWED(x) ((x) << S_RX_CH_DEFICIT_BLOWED)
+#define F_RX_CH_DEFICIT_BLOWED    V_RX_CH_DEFICIT_BLOWED(1U)
+
+#define S_RX_CH1_DEFICIT    12
+#define M_RX_CH1_DEFICIT    0xfffU
+#define V_RX_CH1_DEFICIT(x) ((x) << S_RX_CH1_DEFICIT)
+#define G_RX_CH1_DEFICIT(x) (((x) >> S_RX_CH1_DEFICIT) & M_RX_CH1_DEFICIT)
+
+#define S_RX_CH0_DEFICIT    0
+#define M_RX_CH0_DEFICIT    0xfffU
+#define V_RX_CH0_DEFICIT(x) ((x) << S_RX_CH0_DEFICIT)
+#define G_RX_CH0_DEFICIT(x) (((x) >> S_RX_CH0_DEFICIT) & M_RX_CH0_DEFICIT)
+
+#define A_PM_RX_DBG_STAT11 0x1002c
+
+#define S_RX_BUNDLE_LEN_SRDY    30
+#define M_RX_BUNDLE_LEN_SRDY    0x3U
+#define V_RX_BUNDLE_LEN_SRDY(x) ((x) << S_RX_BUNDLE_LEN_SRDY)
+#define G_RX_BUNDLE_LEN_SRDY(x) (((x) >> S_RX_BUNDLE_LEN_SRDY) & M_RX_BUNDLE_LEN_SRDY)
+
+#define S_RX_RSVD11_1    28
+#define M_RX_RSVD11_1    0x3U
+#define V_RX_RSVD11_1(x) ((x) << S_RX_RSVD11_1)
+#define G_RX_RSVD11_1(x) (((x) >> S_RX_RSVD11_1) & M_RX_RSVD11_1)
+
+#define S_RX_BUNDLE_LEN1    16
+#define M_RX_BUNDLE_LEN1    0xfffU
+#define V_RX_BUNDLE_LEN1(x) ((x) << S_RX_BUNDLE_LEN1)
+#define G_RX_BUNDLE_LEN1(x) (((x) >> S_RX_BUNDLE_LEN1) & M_RX_BUNDLE_LEN1)
+
+#define S_RX_RSVD11    12
+#define M_RX_RSVD11    0xfU
+#define V_RX_RSVD11(x) ((x) << S_RX_RSVD11)
+#define G_RX_RSVD11(x) (((x) >> S_RX_RSVD11) & M_RX_RSVD11)
+
+#define S_RX_BUNDLE_LEN0    0
+#define M_RX_BUNDLE_LEN0    0xfffU
+#define V_RX_BUNDLE_LEN0(x) ((x) << S_RX_BUNDLE_LEN0)
+#define G_RX_BUNDLE_LEN0(x) (((x) >> S_RX_BUNDLE_LEN0) & M_RX_BUNDLE_LEN0)
+
 /* registers for module PM_TX */
 #define PM_TX_BASE_ADDR 0x8fe0
 
@@ -14160,7 +30475,15 @@
 #define A_PM_TX_STAT_CONFIG 0x8fe8
 #define A_PM_TX_STAT_COUNT 0x8fec
 #define A_PM_TX_STAT_LSB 0x8ff0
+#define A_PM_TX_DBG_CTRL 0x8ff0
+
+#define S_OSPIWRBUSY    21
+#define M_OSPIWRBUSY    0xfU
+#define V_OSPIWRBUSY(x) ((x) << S_OSPIWRBUSY)
+#define G_OSPIWRBUSY(x) (((x) >> S_OSPIWRBUSY) & M_OSPIWRBUSY)
+
 #define A_PM_TX_STAT_MSB 0x8ff4
+#define A_PM_TX_DBG_DATA 0x8ff4
 #define A_PM_TX_INT_ENABLE 0x8ff8
 
 #define S_PCMD_LEN_OVFL0    31
@@ -14293,6 +30616,729 @@
 #define V_ZERO_C_CMD_ERROR(x) ((x) << S_ZERO_C_CMD_ERROR)
 #define F_ZERO_C_CMD_ERROR    V_ZERO_C_CMD_ERROR(1U)
 
+#define S_OSPI_OR_BUNDLE_LEN_PAR_ERR    3
+#define V_OSPI_OR_BUNDLE_LEN_PAR_ERR(x) ((x) << S_OSPI_OR_BUNDLE_LEN_PAR_ERR)
+#define F_OSPI_OR_BUNDLE_LEN_PAR_ERR    V_OSPI_OR_BUNDLE_LEN_PAR_ERR(1U)
+
+#define A_PM_TX_ISPI_DBG_4B_DATA0 0x10000
+#define A_PM_TX_ISPI_DBG_4B_DATA1 0x10001
+#define A_PM_TX_ISPI_DBG_4B_DATA2 0x10002
+#define A_PM_TX_ISPI_DBG_4B_DATA3 0x10003
+#define A_PM_TX_ISPI_DBG_4B_DATA4 0x10004
+#define A_PM_TX_ISPI_DBG_4B_DATA5 0x10005
+#define A_PM_TX_ISPI_DBG_4B_DATA6 0x10006
+#define A_PM_TX_ISPI_DBG_4B_DATA7 0x10007
+#define A_PM_TX_ISPI_DBG_4B_DATA8 0x10008
+#define A_PM_TX_OSPI_DBG_4B_DATA0 0x10009
+#define A_PM_TX_OSPI_DBG_4B_DATA1 0x1000a
+#define A_PM_TX_OSPI_DBG_4B_DATA2 0x1000b
+#define A_PM_TX_OSPI_DBG_4B_DATA3 0x1000c
+#define A_PM_TX_OSPI_DBG_4B_DATA4 0x1000d
+#define A_PM_TX_OSPI_DBG_4B_DATA5 0x1000e
+#define A_PM_TX_OSPI_DBG_4B_DATA6 0x1000f
+#define A_PM_TX_OSPI_DBG_4B_DATA7 0x10010
+#define A_PM_TX_OSPI_DBG_4B_DATA8 0x10011
+#define A_PM_TX_OSPI_DBG_4B_DATA9 0x10012
+#define A_PM_TX_OSPI_DBG_4B_DATA10 0x10013
+#define A_PM_TX_OSPI_DBG_4B_DATA11 0x10014
+#define A_PM_TX_OSPI_DBG_4B_DATA12 0x10015
+#define A_PM_TX_OSPI_DBG_4B_DATA13 0x10016
+#define A_PM_TX_OSPI_DBG_4B_DATA14 0x10017
+#define A_PM_TX_OSPI_DBG_4B_DATA15 0x10018
+#define A_PM_TX_OSPI_DBG_4B_DATA16 0x10019
+#define A_PM_TX_DBG_STAT_MSB 0x1001a
+#define A_PM_TX_DBG_STAT_LSB 0x1001b
+#define A_PM_TX_DBG_RSVD_FLIT_CNT 0x1001c
+#define A_PM_TX_SDC_EN 0x1001d
+#define A_PM_TX_INOUT_FIFO_DBG_CHNL_SEL 0x1001e
+#define A_PM_TX_INOUT_FIFO_DBG_WR 0x1001f
+#define A_PM_TX_INPUT_FIFO_STR_FWD_EN 0x10020
+#define A_PM_TX_FEATURE_EN 0x10021
+
+#define S_PIO_CH_DEFICIT_CTL_EN    2
+#define V_PIO_CH_DEFICIT_CTL_EN(x) ((x) << S_PIO_CH_DEFICIT_CTL_EN)
+#define F_PIO_CH_DEFICIT_CTL_EN    V_PIO_CH_DEFICIT_CTL_EN(1U)
+
+#define S_PIO_WRR_BASED_PRFTCH_EN    1
+#define V_PIO_WRR_BASED_PRFTCH_EN(x) ((x) << S_PIO_WRR_BASED_PRFTCH_EN)
+#define F_PIO_WRR_BASED_PRFTCH_EN    V_PIO_WRR_BASED_PRFTCH_EN(1U)
+
+#define A_PM_TX_T5_PM_TX_INT_ENABLE 0x10022
+
+#define S_OSPI_OVERFLOW3    7
+#define V_OSPI_OVERFLOW3(x) ((x) << S_OSPI_OVERFLOW3)
+#define F_OSPI_OVERFLOW3    V_OSPI_OVERFLOW3(1U)
+
+#define S_OSPI_OVERFLOW2    6
+#define V_OSPI_OVERFLOW2(x) ((x) << S_OSPI_OVERFLOW2)
+#define F_OSPI_OVERFLOW2    V_OSPI_OVERFLOW2(1U)
+
+#define S_T5_OSPI_OVERFLOW1    5
+#define V_T5_OSPI_OVERFLOW1(x) ((x) << S_T5_OSPI_OVERFLOW1)
+#define F_T5_OSPI_OVERFLOW1    V_T5_OSPI_OVERFLOW1(1U)
+
+#define S_T5_OSPI_OVERFLOW0    4
+#define V_T5_OSPI_OVERFLOW0(x) ((x) << S_T5_OSPI_OVERFLOW0)
+#define F_T5_OSPI_OVERFLOW0    V_T5_OSPI_OVERFLOW0(1U)
+
+#define S_M_INTFPERREN    3
+#define V_M_INTFPERREN(x) ((x) << S_M_INTFPERREN)
+#define F_M_INTFPERREN    V_M_INTFPERREN(1U)
+
+#define S_BUNDLE_LEN_PARERR_EN    2
+#define V_BUNDLE_LEN_PARERR_EN(x) ((x) << S_BUNDLE_LEN_PARERR_EN)
+#define F_BUNDLE_LEN_PARERR_EN    V_BUNDLE_LEN_PARERR_EN(1U)
+
+#define S_BUNDLE_LEN_OVFL_EN    1
+#define V_BUNDLE_LEN_OVFL_EN(x) ((x) << S_BUNDLE_LEN_OVFL_EN)
+#define F_BUNDLE_LEN_OVFL_EN    V_BUNDLE_LEN_OVFL_EN(1U)
+
+#define S_SDC_ERR_EN    0
+#define V_SDC_ERR_EN(x) ((x) << S_SDC_ERR_EN)
+#define F_SDC_ERR_EN    V_SDC_ERR_EN(1U)
+
+#define S_OSPI_OVERFLOW3_T5    7
+#define V_OSPI_OVERFLOW3_T5(x) ((x) << S_OSPI_OVERFLOW3_T5)
+#define F_OSPI_OVERFLOW3_T5    V_OSPI_OVERFLOW3_T5(1U)
+
+#define S_OSPI_OVERFLOW2_T5    6
+#define V_OSPI_OVERFLOW2_T5(x) ((x) << S_OSPI_OVERFLOW2_T5)
+#define F_OSPI_OVERFLOW2_T5    V_OSPI_OVERFLOW2_T5(1U)
+
+#define S_OSPI_OVERFLOW1_T5    5
+#define V_OSPI_OVERFLOW1_T5(x) ((x) << S_OSPI_OVERFLOW1_T5)
+#define F_OSPI_OVERFLOW1_T5    V_OSPI_OVERFLOW1_T5(1U)
+
+#define S_OSPI_OVERFLOW0_T5    4
+#define V_OSPI_OVERFLOW0_T5(x) ((x) << S_OSPI_OVERFLOW0_T5)
+#define F_OSPI_OVERFLOW0_T5    V_OSPI_OVERFLOW0_T5(1U)
+
+#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD0 0x10023
+#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD1 0x10024
+#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD2 0x10025
+#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x10026
+#define A_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x10027
+#define A_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x10028
+#define A_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x10029
+
+#define S_CH2_OSPI_DEFICIT_THRSHLD    0
+#define M_CH2_OSPI_DEFICIT_THRSHLD    0xfffU
+#define V_CH2_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH2_OSPI_DEFICIT_THRSHLD)
+#define G_CH2_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH2_OSPI_DEFICIT_THRSHLD) & M_CH2_OSPI_DEFICIT_THRSHLD)
+
+#define A_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x1002a
+
+#define S_CH3_OSPI_DEFICIT_THRSHLD    0
+#define M_CH3_OSPI_DEFICIT_THRSHLD    0xfffU
+#define V_CH3_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH3_OSPI_DEFICIT_THRSHLD)
+#define G_CH3_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH3_OSPI_DEFICIT_THRSHLD) & M_CH3_OSPI_DEFICIT_THRSHLD)
+
+#define A_PM_TX_INT_CAUSE_MASK_HALT 0x1002b
+#define A_PM_TX_DBG_STAT0 0x1002c
+
+#define S_RD_I_BUSY    29
+#define V_RD_I_BUSY(x) ((x) << S_RD_I_BUSY)
+#define F_RD_I_BUSY    V_RD_I_BUSY(1U)
+
+#define S_WR_O_BUSY    28
+#define V_WR_O_BUSY(x) ((x) << S_WR_O_BUSY)
+#define F_WR_O_BUSY    V_WR_O_BUSY(1U)
+
+#define S_M_TO_O_BUSY    27
+#define V_M_TO_O_BUSY(x) ((x) << S_M_TO_O_BUSY)
+#define F_M_TO_O_BUSY    V_M_TO_O_BUSY(1U)
+
+#define S_I_TO_M_BUSY    26
+#define V_I_TO_M_BUSY(x) ((x) << S_I_TO_M_BUSY)
+#define F_I_TO_M_BUSY    V_I_TO_M_BUSY(1U)
+
+#define S_PCMD_FB_ONLY    25
+#define V_PCMD_FB_ONLY(x) ((x) << S_PCMD_FB_ONLY)
+#define F_PCMD_FB_ONLY    V_PCMD_FB_ONLY(1U)
+
+#define S_PCMD_MEM    24
+#define V_PCMD_MEM(x) ((x) << S_PCMD_MEM)
+#define F_PCMD_MEM    V_PCMD_MEM(1U)
+
+#define S_PCMD_BYPASS    23
+#define V_PCMD_BYPASS(x) ((x) << S_PCMD_BYPASS)
+#define F_PCMD_BYPASS    V_PCMD_BYPASS(1U)
+
+#define S_PCMD_EOP2    22
+#define V_PCMD_EOP2(x) ((x) << S_PCMD_EOP2)
+#define F_PCMD_EOP2    V_PCMD_EOP2(1U)
+
+#define S_PCMD_EOP    21
+#define V_PCMD_EOP(x) ((x) << S_PCMD_EOP)
+#define F_PCMD_EOP    V_PCMD_EOP(1U)
+
+#define S_PCMD_END_BUNDLE    20
+#define V_PCMD_END_BUNDLE(x) ((x) << S_PCMD_END_BUNDLE)
+#define F_PCMD_END_BUNDLE    V_PCMD_END_BUNDLE(1U)
+
+#define S_PCMD_FB_CMD    16
+#define M_PCMD_FB_CMD    0xfU
+#define V_PCMD_FB_CMD(x) ((x) << S_PCMD_FB_CMD)
+#define G_PCMD_FB_CMD(x) (((x) >> S_PCMD_FB_CMD) & M_PCMD_FB_CMD)
+
+#define S_CUR_PCMD_LEN    0
+#define M_CUR_PCMD_LEN    0xffffU
+#define V_CUR_PCMD_LEN(x) ((x) << S_CUR_PCMD_LEN)
+#define G_CUR_PCMD_LEN(x) (((x) >> S_CUR_PCMD_LEN) & M_CUR_PCMD_LEN)
+
+#define S_T6_RD_I_BUSY    28
+#define V_T6_RD_I_BUSY(x) ((x) << S_T6_RD_I_BUSY)
+#define F_T6_RD_I_BUSY    V_T6_RD_I_BUSY(1U)
+
+#define S_T6_WR_O_BUSY    27
+#define V_T6_WR_O_BUSY(x) ((x) << S_T6_WR_O_BUSY)
+#define F_T6_WR_O_BUSY    V_T6_WR_O_BUSY(1U)
+
+#define S_T6_M_TO_O_BUSY    26
+#define V_T6_M_TO_O_BUSY(x) ((x) << S_T6_M_TO_O_BUSY)
+#define F_T6_M_TO_O_BUSY    V_T6_M_TO_O_BUSY(1U)
+
+#define S_T6_I_TO_M_BUSY    25
+#define V_T6_I_TO_M_BUSY(x) ((x) << S_T6_I_TO_M_BUSY)
+#define F_T6_I_TO_M_BUSY    V_T6_I_TO_M_BUSY(1U)
+
+#define S_T6_PCMD_FB_ONLY    24
+#define V_T6_PCMD_FB_ONLY(x) ((x) << S_T6_PCMD_FB_ONLY)
+#define F_T6_PCMD_FB_ONLY    V_T6_PCMD_FB_ONLY(1U)
+
+#define S_T6_PCMD_MEM    23
+#define V_T6_PCMD_MEM(x) ((x) << S_T6_PCMD_MEM)
+#define F_T6_PCMD_MEM    V_T6_PCMD_MEM(1U)
+
+#define S_T6_PCMD_BYPASS    22
+#define V_T6_PCMD_BYPASS(x) ((x) << S_T6_PCMD_BYPASS)
+#define F_T6_PCMD_BYPASS    V_T6_PCMD_BYPASS(1U)
+
+#define A_PM_TX_DBG_STAT1 0x1002d
+
+#define S_PCMD_MEM0    31
+#define V_PCMD_MEM0(x) ((x) << S_PCMD_MEM0)
+#define F_PCMD_MEM0    V_PCMD_MEM0(1U)
+
+#define S_FREE_OESPI_CNT0    19
+#define M_FREE_OESPI_CNT0    0xfffU
+#define V_FREE_OESPI_CNT0(x) ((x) << S_FREE_OESPI_CNT0)
+#define G_FREE_OESPI_CNT0(x) (((x) >> S_FREE_OESPI_CNT0) & M_FREE_OESPI_CNT0)
+
+#define S_PCMD_FLIT_LEN0    7
+#define M_PCMD_FLIT_LEN0    0xfffU
+#define V_PCMD_FLIT_LEN0(x) ((x) << S_PCMD_FLIT_LEN0)
+#define G_PCMD_FLIT_LEN0(x) (((x) >> S_PCMD_FLIT_LEN0) & M_PCMD_FLIT_LEN0)
+
+#define S_PCMD_CMD0    3
+#define M_PCMD_CMD0    0xfU
+#define V_PCMD_CMD0(x) ((x) << S_PCMD_CMD0)
+#define G_PCMD_CMD0(x) (((x) >> S_PCMD_CMD0) & M_PCMD_CMD0)
+
+#define S_OFIFO_FULL0    2
+#define V_OFIFO_FULL0(x) ((x) << S_OFIFO_FULL0)
+#define F_OFIFO_FULL0    V_OFIFO_FULL0(1U)
+
+#define S_GCSUM_DRDY0    1
+#define V_GCSUM_DRDY0(x) ((x) << S_GCSUM_DRDY0)
+#define F_GCSUM_DRDY0    V_GCSUM_DRDY0(1U)
+
+#define S_BYPASS0    0
+#define V_BYPASS0(x) ((x) << S_BYPASS0)
+#define F_BYPASS0    V_BYPASS0(1U)
+
+#define A_PM_TX_DBG_STAT2 0x1002e
+
+#define S_PCMD_MEM1    31
+#define V_PCMD_MEM1(x) ((x) << S_PCMD_MEM1)
+#define F_PCMD_MEM1    V_PCMD_MEM1(1U)
+
+#define S_FREE_OESPI_CNT1    19
+#define M_FREE_OESPI_CNT1    0xfffU
+#define V_FREE_OESPI_CNT1(x) ((x) << S_FREE_OESPI_CNT1)
+#define G_FREE_OESPI_CNT1(x) (((x) >> S_FREE_OESPI_CNT1) & M_FREE_OESPI_CNT1)
+
+#define S_PCMD_FLIT_LEN1    7
+#define M_PCMD_FLIT_LEN1    0xfffU
+#define V_PCMD_FLIT_LEN1(x) ((x) << S_PCMD_FLIT_LEN1)
+#define G_PCMD_FLIT_LEN1(x) (((x) >> S_PCMD_FLIT_LEN1) & M_PCMD_FLIT_LEN1)
+
+#define S_PCMD_CMD1    3
+#define M_PCMD_CMD1    0xfU
+#define V_PCMD_CMD1(x) ((x) << S_PCMD_CMD1)
+#define G_PCMD_CMD1(x) (((x) >> S_PCMD_CMD1) & M_PCMD_CMD1)
+
+#define S_OFIFO_FULL1    2
+#define V_OFIFO_FULL1(x) ((x) << S_OFIFO_FULL1)
+#define F_OFIFO_FULL1    V_OFIFO_FULL1(1U)
+
+#define S_GCSUM_DRDY1    1
+#define V_GCSUM_DRDY1(x) ((x) << S_GCSUM_DRDY1)
+#define F_GCSUM_DRDY1    V_GCSUM_DRDY1(1U)
+
+#define S_BYPASS1    0
+#define V_BYPASS1(x) ((x) << S_BYPASS1)
+#define F_BYPASS1    V_BYPASS1(1U)
+
+#define A_PM_TX_DBG_STAT3 0x1002f
+
+#define S_PCMD_MEM2    31
+#define V_PCMD_MEM2(x) ((x) << S_PCMD_MEM2)
+#define F_PCMD_MEM2    V_PCMD_MEM2(1U)
+
+#define S_FREE_OESPI_CNT2    19
+#define M_FREE_OESPI_CNT2    0xfffU
+#define V_FREE_OESPI_CNT2(x) ((x) << S_FREE_OESPI_CNT2)
+#define G_FREE_OESPI_CNT2(x) (((x) >> S_FREE_OESPI_CNT2) & M_FREE_OESPI_CNT2)
+
+#define S_PCMD_FLIT_LEN2    7
+#define M_PCMD_FLIT_LEN2    0xfffU
+#define V_PCMD_FLIT_LEN2(x) ((x) << S_PCMD_FLIT_LEN2)
+#define G_PCMD_FLIT_LEN2(x) (((x) >> S_PCMD_FLIT_LEN2) & M_PCMD_FLIT_LEN2)
+
+#define S_PCMD_CMD2    3
+#define M_PCMD_CMD2    0xfU
+#define V_PCMD_CMD2(x) ((x) << S_PCMD_CMD2)
+#define G_PCMD_CMD2(x) (((x) >> S_PCMD_CMD2) & M_PCMD_CMD2)
+
+#define S_OFIFO_FULL2    2
+#define V_OFIFO_FULL2(x) ((x) << S_OFIFO_FULL2)
+#define F_OFIFO_FULL2    V_OFIFO_FULL2(1U)
+
+#define S_GCSUM_DRDY2    1
+#define V_GCSUM_DRDY2(x) ((x) << S_GCSUM_DRDY2)
+#define F_GCSUM_DRDY2    V_GCSUM_DRDY2(1U)
+
+#define S_BYPASS2    0
+#define V_BYPASS2(x) ((x) << S_BYPASS2)
+#define F_BYPASS2    V_BYPASS2(1U)
+
+#define A_PM_TX_DBG_STAT4 0x10030
+
+#define S_PCMD_MEM3    31
+#define V_PCMD_MEM3(x) ((x) << S_PCMD_MEM3)
+#define F_PCMD_MEM3    V_PCMD_MEM3(1U)
+
+#define S_FREE_OESPI_CNT3    19
+#define M_FREE_OESPI_CNT3    0xfffU
+#define V_FREE_OESPI_CNT3(x) ((x) << S_FREE_OESPI_CNT3)
+#define G_FREE_OESPI_CNT3(x) (((x) >> S_FREE_OESPI_CNT3) & M_FREE_OESPI_CNT3)
+
+#define S_PCMD_FLIT_LEN3    7
+#define M_PCMD_FLIT_LEN3    0xfffU
+#define V_PCMD_FLIT_LEN3(x) ((x) << S_PCMD_FLIT_LEN3)
+#define G_PCMD_FLIT_LEN3(x) (((x) >> S_PCMD_FLIT_LEN3) & M_PCMD_FLIT_LEN3)
+
+#define S_PCMD_CMD3    3
+#define M_PCMD_CMD3    0xfU
+#define V_PCMD_CMD3(x) ((x) << S_PCMD_CMD3)
+#define G_PCMD_CMD3(x) (((x) >> S_PCMD_CMD3) & M_PCMD_CMD3)
+
+#define S_OFIFO_FULL3    2
+#define V_OFIFO_FULL3(x) ((x) << S_OFIFO_FULL3)
+#define F_OFIFO_FULL3    V_OFIFO_FULL3(1U)
+
+#define S_GCSUM_DRDY3    1
+#define V_GCSUM_DRDY3(x) ((x) << S_GCSUM_DRDY3)
+#define F_GCSUM_DRDY3    V_GCSUM_DRDY3(1U)
+
+#define S_BYPASS3    0
+#define V_BYPASS3(x) ((x) << S_BYPASS3)
+#define F_BYPASS3    V_BYPASS3(1U)
+
+#define A_PM_TX_DBG_STAT5 0x10031
+
+#define S_SET_PCMD_RES_RDY_RD    24
+#define M_SET_PCMD_RES_RDY_RD    0xfU
+#define V_SET_PCMD_RES_RDY_RD(x) ((x) << S_SET_PCMD_RES_RDY_RD)
+#define G_SET_PCMD_RES_RDY_RD(x) (((x) >> S_SET_PCMD_RES_RDY_RD) & M_SET_PCMD_RES_RDY_RD)
+
+#define S_ISSUED_PREF_RD_ER_CLR    20
+#define M_ISSUED_PREF_RD_ER_CLR    0xfU
+#define V_ISSUED_PREF_RD_ER_CLR(x) ((x) << S_ISSUED_PREF_RD_ER_CLR)
+#define G_ISSUED_PREF_RD_ER_CLR(x) (((x) >> S_ISSUED_PREF_RD_ER_CLR) & M_ISSUED_PREF_RD_ER_CLR)
+
+#define S_ISSUED_PREF_RD    16
+#define M_ISSUED_PREF_RD    0xfU
+#define V_ISSUED_PREF_RD(x) ((x) << S_ISSUED_PREF_RD)
+#define G_ISSUED_PREF_RD(x) (((x) >> S_ISSUED_PREF_RD) & M_ISSUED_PREF_RD)
+
+#define S_PCMD_RES_RDY    12
+#define M_PCMD_RES_RDY    0xfU
+#define V_PCMD_RES_RDY(x) ((x) << S_PCMD_RES_RDY)
+#define G_PCMD_RES_RDY(x) (((x) >> S_PCMD_RES_RDY) & M_PCMD_RES_RDY)
+
+#define S_DB_VLD    11
+#define V_DB_VLD(x) ((x) << S_DB_VLD)
+#define F_DB_VLD    V_DB_VLD(1U)
+
+#define S_INJECT0_DRDY    10
+#define V_INJECT0_DRDY(x) ((x) << S_INJECT0_DRDY)
+#define F_INJECT0_DRDY    V_INJECT0_DRDY(1U)
+
+#define S_INJECT1_DRDY    9
+#define V_INJECT1_DRDY(x) ((x) << S_INJECT1_DRDY)
+#define F_INJECT1_DRDY    V_INJECT1_DRDY(1U)
+
+#define S_FIRST_BUNDLE    5
+#define M_FIRST_BUNDLE    0xfU
+#define V_FIRST_BUNDLE(x) ((x) << S_FIRST_BUNDLE)
+#define G_FIRST_BUNDLE(x) (((x) >> S_FIRST_BUNDLE) & M_FIRST_BUNDLE)
+
+#define S_GCSUM_MORE_THAN_2_LEFT    1
+#define M_GCSUM_MORE_THAN_2_LEFT    0xfU
+#define V_GCSUM_MORE_THAN_2_LEFT(x) ((x) << S_GCSUM_MORE_THAN_2_LEFT)
+#define G_GCSUM_MORE_THAN_2_LEFT(x) (((x) >> S_GCSUM_MORE_THAN_2_LEFT) & M_GCSUM_MORE_THAN_2_LEFT)
+
+#define S_SDC_DRDY    0
+#define V_SDC_DRDY(x) ((x) << S_SDC_DRDY)
+#define F_SDC_DRDY    V_SDC_DRDY(1U)
+
+#define A_PM_TX_DBG_STAT6 0x10032
+
+#define S_PCMD_VLD    31
+#define V_PCMD_VLD(x) ((x) << S_PCMD_VLD)
+#define F_PCMD_VLD    V_PCMD_VLD(1U)
+
+#define S_PCMD_CH    29
+#define M_PCMD_CH    0x3U
+#define V_PCMD_CH(x) ((x) << S_PCMD_CH)
+#define G_PCMD_CH(x) (((x) >> S_PCMD_CH) & M_PCMD_CH)
+
+#define S_STATE_MACHINE_LOC    24
+#define M_STATE_MACHINE_LOC    0x1fU
+#define V_STATE_MACHINE_LOC(x) ((x) << S_STATE_MACHINE_LOC)
+#define G_STATE_MACHINE_LOC(x) (((x) >> S_STATE_MACHINE_LOC) & M_STATE_MACHINE_LOC)
+
+#define S_ICSPI_TXVALID    20
+#define M_ICSPI_TXVALID    0xfU
+#define V_ICSPI_TXVALID(x) ((x) << S_ICSPI_TXVALID)
+#define G_ICSPI_TXVALID(x) (((x) >> S_ICSPI_TXVALID) & M_ICSPI_TXVALID)
+
+#define S_ICSPI_TXFULL    16
+#define M_ICSPI_TXFULL    0xfU
+#define V_ICSPI_TXFULL(x) ((x) << S_ICSPI_TXFULL)
+#define G_ICSPI_TXFULL(x) (((x) >> S_ICSPI_TXFULL) & M_ICSPI_TXFULL)
+
+#define S_PCMD_SRDY    12
+#define M_PCMD_SRDY    0xfU
+#define V_PCMD_SRDY(x) ((x) << S_PCMD_SRDY)
+#define G_PCMD_SRDY(x) (((x) >> S_PCMD_SRDY) & M_PCMD_SRDY)
+
+#define S_PCMD_DRDY    8
+#define M_PCMD_DRDY    0xfU
+#define V_PCMD_DRDY(x) ((x) << S_PCMD_DRDY)
+#define G_PCMD_DRDY(x) (((x) >> S_PCMD_DRDY) & M_PCMD_DRDY)
+
+#define S_PCMD_CMD    4
+#define M_PCMD_CMD    0xfU
+#define V_PCMD_CMD(x) ((x) << S_PCMD_CMD)
+#define G_PCMD_CMD(x) (((x) >> S_PCMD_CMD) & M_PCMD_CMD)
+
+#define S_OEFIFO_FULL3    3
+#define V_OEFIFO_FULL3(x) ((x) << S_OEFIFO_FULL3)
+#define F_OEFIFO_FULL3    V_OEFIFO_FULL3(1U)
+
+#define S_OEFIFO_FULL2    2
+#define V_OEFIFO_FULL2(x) ((x) << S_OEFIFO_FULL2)
+#define F_OEFIFO_FULL2    V_OEFIFO_FULL2(1U)
+
+#define S_OEFIFO_FULL1    1
+#define V_OEFIFO_FULL1(x) ((x) << S_OEFIFO_FULL1)
+#define F_OEFIFO_FULL1    V_OEFIFO_FULL1(1U)
+
+#define S_OEFIFO_FULL0    0
+#define V_OEFIFO_FULL0(x) ((x) << S_OEFIFO_FULL0)
+#define F_OEFIFO_FULL0    V_OEFIFO_FULL0(1U)
+
+#define A_PM_TX_DBG_STAT7 0x10033
+
+#define S_ICSPI_RXVALID    28
+#define M_ICSPI_RXVALID    0xfU
+#define V_ICSPI_RXVALID(x) ((x) << S_ICSPI_RXVALID)
+#define G_ICSPI_RXVALID(x) (((x) >> S_ICSPI_RXVALID) & M_ICSPI_RXVALID)
+
+#define S_ICSPI_RXFULL    24
+#define M_ICSPI_RXFULL    0xfU
+#define V_ICSPI_RXFULL(x) ((x) << S_ICSPI_RXFULL)
+#define G_ICSPI_RXFULL(x) (((x) >> S_ICSPI_RXFULL) & M_ICSPI_RXFULL)
+
+#define S_OESPI_VALID    20
+#define M_OESPI_VALID    0xfU
+#define V_OESPI_VALID(x) ((x) << S_OESPI_VALID)
+#define G_OESPI_VALID(x) (((x) >> S_OESPI_VALID) & M_OESPI_VALID)
+
+#define S_OESPI_FULL    16
+#define M_OESPI_FULL    0xfU
+#define V_OESPI_FULL(x) ((x) << S_OESPI_FULL)
+#define G_OESPI_FULL(x) (((x) >> S_OESPI_FULL) & M_OESPI_FULL)
+
+#define S_C_RXVALID    12
+#define M_C_RXVALID    0xfU
+#define V_C_RXVALID(x) ((x) << S_C_RXVALID)
+#define G_C_RXVALID(x) (((x) >> S_C_RXVALID) & M_C_RXVALID)
+
+#define S_C_RXAFULL    8
+#define M_C_RXAFULL    0xfU
+#define V_C_RXAFULL(x) ((x) << S_C_RXAFULL)
+#define G_C_RXAFULL(x) (((x) >> S_C_RXAFULL) & M_C_RXAFULL)
+
+#define S_E_TXVALID3    7
+#define V_E_TXVALID3(x) ((x) << S_E_TXVALID3)
+#define F_E_TXVALID3    V_E_TXVALID3(1U)
+
+#define S_E_TXVALID2    6
+#define V_E_TXVALID2(x) ((x) << S_E_TXVALID2)
+#define F_E_TXVALID2    V_E_TXVALID2(1U)
+
+#define S_E_TXVALID1    5
+#define V_E_TXVALID1(x) ((x) << S_E_TXVALID1)
+#define F_E_TXVALID1    V_E_TXVALID1(1U)
+
+#define S_E_TXVALID0    4
+#define V_E_TXVALID0(x) ((x) << S_E_TXVALID0)
+#define F_E_TXVALID0    V_E_TXVALID0(1U)
+
+#define S_E_TXFULL3    3
+#define V_E_TXFULL3(x) ((x) << S_E_TXFULL3)
+#define F_E_TXFULL3    V_E_TXFULL3(1U)
+
+#define S_E_TXFULL2    2
+#define V_E_TXFULL2(x) ((x) << S_E_TXFULL2)
+#define F_E_TXFULL2    V_E_TXFULL2(1U)
+
+#define S_E_TXFULL1    1
+#define V_E_TXFULL1(x) ((x) << S_E_TXFULL1)
+#define F_E_TXFULL1    V_E_TXFULL1(1U)
+
+#define S_E_TXFULL0    0
+#define V_E_TXFULL0(x) ((x) << S_E_TXFULL0)
+#define F_E_TXFULL0    V_E_TXFULL0(1U)
+
+#define A_PM_TX_DBG_STAT8 0x10034
+
+#define S_MC_RSP_FIFO_CNT    24
+#define M_MC_RSP_FIFO_CNT    0x3U
+#define V_MC_RSP_FIFO_CNT(x) ((x) << S_MC_RSP_FIFO_CNT)
+#define G_MC_RSP_FIFO_CNT(x) (((x) >> S_MC_RSP_FIFO_CNT) & M_MC_RSP_FIFO_CNT)
+
+#define S_PCMD_FREE_CNT0    14
+#define M_PCMD_FREE_CNT0    0x3ffU
+#define V_PCMD_FREE_CNT0(x) ((x) << S_PCMD_FREE_CNT0)
+#define G_PCMD_FREE_CNT0(x) (((x) >> S_PCMD_FREE_CNT0) & M_PCMD_FREE_CNT0)
+
+#define S_PCMD_FREE_CNT1    4
+#define M_PCMD_FREE_CNT1    0x3ffU
+#define V_PCMD_FREE_CNT1(x) ((x) << S_PCMD_FREE_CNT1)
+#define G_PCMD_FREE_CNT1(x) (((x) >> S_PCMD_FREE_CNT1) & M_PCMD_FREE_CNT1)
+
+#define S_M_REQADDRRDY    3
+#define V_M_REQADDRRDY(x) ((x) << S_M_REQADDRRDY)
+#define F_M_REQADDRRDY    V_M_REQADDRRDY(1U)
+
+#define S_M_REQWRITE    2
+#define V_M_REQWRITE(x) ((x) << S_M_REQWRITE)
+#define F_M_REQWRITE    V_M_REQWRITE(1U)
+
+#define S_M_REQDATAVLD    1
+#define V_M_REQDATAVLD(x) ((x) << S_M_REQDATAVLD)
+#define F_M_REQDATAVLD    V_M_REQDATAVLD(1U)
+
+#define S_M_REQDATARDY    0
+#define V_M_REQDATARDY(x) ((x) << S_M_REQDATARDY)
+#define F_M_REQDATARDY    V_M_REQDATARDY(1U)
+
+#define S_T6_MC_RSP_FIFO_CNT    27
+#define M_T6_MC_RSP_FIFO_CNT    0x3U
+#define V_T6_MC_RSP_FIFO_CNT(x) ((x) << S_T6_MC_RSP_FIFO_CNT)
+#define G_T6_MC_RSP_FIFO_CNT(x) (((x) >> S_T6_MC_RSP_FIFO_CNT) & M_T6_MC_RSP_FIFO_CNT)
+
+#define S_T6_PCMD_FREE_CNT0    17
+#define M_T6_PCMD_FREE_CNT0    0x3ffU
+#define V_T6_PCMD_FREE_CNT0(x) ((x) << S_T6_PCMD_FREE_CNT0)
+#define G_T6_PCMD_FREE_CNT0(x) (((x) >> S_T6_PCMD_FREE_CNT0) & M_T6_PCMD_FREE_CNT0)
+
+#define S_T6_PCMD_FREE_CNT1    7
+#define M_T6_PCMD_FREE_CNT1    0x3ffU
+#define V_T6_PCMD_FREE_CNT1(x) ((x) << S_T6_PCMD_FREE_CNT1)
+#define G_T6_PCMD_FREE_CNT1(x) (((x) >> S_T6_PCMD_FREE_CNT1) & M_T6_PCMD_FREE_CNT1)
+
+#define S_M_RSPVLD    6
+#define V_M_RSPVLD(x) ((x) << S_M_RSPVLD)
+#define F_M_RSPVLD    V_M_RSPVLD(1U)
+
+#define S_M_RSPRDY    5
+#define V_M_RSPRDY(x) ((x) << S_M_RSPRDY)
+#define F_M_RSPRDY    V_M_RSPRDY(1U)
+
+#define S_M_REQADDRVLD    4
+#define V_M_REQADDRVLD(x) ((x) << S_M_REQADDRVLD)
+#define F_M_REQADDRVLD    V_M_REQADDRVLD(1U)
+
+#define A_PM_TX_DBG_STAT9 0x10035
+
+#define S_PCMD_FREE_CNT2    10
+#define M_PCMD_FREE_CNT2    0x3ffU
+#define V_PCMD_FREE_CNT2(x) ((x) << S_PCMD_FREE_CNT2)
+#define G_PCMD_FREE_CNT2(x) (((x) >> S_PCMD_FREE_CNT2) & M_PCMD_FREE_CNT2)
+
+#define S_PCMD_FREE_CNT3    0
+#define M_PCMD_FREE_CNT3    0x3ffU
+#define V_PCMD_FREE_CNT3(x) ((x) << S_PCMD_FREE_CNT3)
+#define G_PCMD_FREE_CNT3(x) (((x) >> S_PCMD_FREE_CNT3) & M_PCMD_FREE_CNT3)
+
+#define A_PM_TX_DBG_STAT10 0x10036
+
+#define S_IN_EOP_CNT3    28
+#define M_IN_EOP_CNT3    0xfU
+#define V_IN_EOP_CNT3(x) ((x) << S_IN_EOP_CNT3)
+#define G_IN_EOP_CNT3(x) (((x) >> S_IN_EOP_CNT3) & M_IN_EOP_CNT3)
+
+#define S_IN_EOP_CNT2    24
+#define M_IN_EOP_CNT2    0xfU
+#define V_IN_EOP_CNT2(x) ((x) << S_IN_EOP_CNT2)
+#define G_IN_EOP_CNT2(x) (((x) >> S_IN_EOP_CNT2) & M_IN_EOP_CNT2)
+
+#define S_IN_EOP_CNT1    20
+#define M_IN_EOP_CNT1    0xfU
+#define V_IN_EOP_CNT1(x) ((x) << S_IN_EOP_CNT1)
+#define G_IN_EOP_CNT1(x) (((x) >> S_IN_EOP_CNT1) & M_IN_EOP_CNT1)
+
+#define S_IN_EOP_CNT0    16
+#define M_IN_EOP_CNT0    0xfU
+#define V_IN_EOP_CNT0(x) ((x) << S_IN_EOP_CNT0)
+#define G_IN_EOP_CNT0(x) (((x) >> S_IN_EOP_CNT0) & M_IN_EOP_CNT0)
+
+#define S_IN_SOP_CNT3    12
+#define M_IN_SOP_CNT3    0xfU
+#define V_IN_SOP_CNT3(x) ((x) << S_IN_SOP_CNT3)
+#define G_IN_SOP_CNT3(x) (((x) >> S_IN_SOP_CNT3) & M_IN_SOP_CNT3)
+
+#define S_IN_SOP_CNT2    8
+#define M_IN_SOP_CNT2    0xfU
+#define V_IN_SOP_CNT2(x) ((x) << S_IN_SOP_CNT2)
+#define G_IN_SOP_CNT2(x) (((x) >> S_IN_SOP_CNT2) & M_IN_SOP_CNT2)
+
+#define S_IN_SOP_CNT1    4
+#define M_IN_SOP_CNT1    0xfU
+#define V_IN_SOP_CNT1(x) ((x) << S_IN_SOP_CNT1)
+#define G_IN_SOP_CNT1(x) (((x) >> S_IN_SOP_CNT1) & M_IN_SOP_CNT1)
+
+#define S_IN_SOP_CNT0    0
+#define M_IN_SOP_CNT0    0xfU
+#define V_IN_SOP_CNT0(x) ((x) << S_IN_SOP_CNT0)
+#define G_IN_SOP_CNT0(x) (((x) >> S_IN_SOP_CNT0) & M_IN_SOP_CNT0)
+
+#define A_PM_TX_DBG_STAT11 0x10037
+
+#define S_OUT_EOP_CNT3    28
+#define M_OUT_EOP_CNT3    0xfU
+#define V_OUT_EOP_CNT3(x) ((x) << S_OUT_EOP_CNT3)
+#define G_OUT_EOP_CNT3(x) (((x) >> S_OUT_EOP_CNT3) & M_OUT_EOP_CNT3)
+
+#define S_OUT_EOP_CNT2    24
+#define M_OUT_EOP_CNT2    0xfU
+#define V_OUT_EOP_CNT2(x) ((x) << S_OUT_EOP_CNT2)
+#define G_OUT_EOP_CNT2(x) (((x) >> S_OUT_EOP_CNT2) & M_OUT_EOP_CNT2)
+
+#define S_OUT_EOP_CNT1    20
+#define M_OUT_EOP_CNT1    0xfU
+#define V_OUT_EOP_CNT1(x) ((x) << S_OUT_EOP_CNT1)
+#define G_OUT_EOP_CNT1(x) (((x) >> S_OUT_EOP_CNT1) & M_OUT_EOP_CNT1)
+
+#define S_OUT_EOP_CNT0    16
+#define M_OUT_EOP_CNT0    0xfU
+#define V_OUT_EOP_CNT0(x) ((x) << S_OUT_EOP_CNT0)
+#define G_OUT_EOP_CNT0(x) (((x) >> S_OUT_EOP_CNT0) & M_OUT_EOP_CNT0)
+
+#define S_OUT_SOP_CNT3    12
+#define M_OUT_SOP_CNT3    0xfU
+#define V_OUT_SOP_CNT3(x) ((x) << S_OUT_SOP_CNT3)
+#define G_OUT_SOP_CNT3(x) (((x) >> S_OUT_SOP_CNT3) & M_OUT_SOP_CNT3)
+
+#define S_OUT_SOP_CNT2    8
+#define M_OUT_SOP_CNT2    0xfU
+#define V_OUT_SOP_CNT2(x) ((x) << S_OUT_SOP_CNT2)
+#define G_OUT_SOP_CNT2(x) (((x) >> S_OUT_SOP_CNT2) & M_OUT_SOP_CNT2)
+
+#define S_OUT_SOP_CNT1    4
+#define M_OUT_SOP_CNT1    0xfU
+#define V_OUT_SOP_CNT1(x) ((x) << S_OUT_SOP_CNT1)
+#define G_OUT_SOP_CNT1(x) (((x) >> S_OUT_SOP_CNT1) & M_OUT_SOP_CNT1)
+
+#define S_OUT_SOP_CNT0    0
+#define M_OUT_SOP_CNT0    0xfU
+#define V_OUT_SOP_CNT0(x) ((x) << S_OUT_SOP_CNT0)
+#define G_OUT_SOP_CNT0(x) (((x) >> S_OUT_SOP_CNT0) & M_OUT_SOP_CNT0)
+
+#define A_PM_TX_DBG_STAT12 0x10038
+#define A_PM_TX_DBG_STAT13 0x10039
+
+#define S_CH_DEFICIT_BLOWED    31
+#define V_CH_DEFICIT_BLOWED(x) ((x) << S_CH_DEFICIT_BLOWED)
+#define F_CH_DEFICIT_BLOWED    V_CH_DEFICIT_BLOWED(1U)
+
+#define S_CH1_DEFICIT    16
+#define M_CH1_DEFICIT    0xfffU
+#define V_CH1_DEFICIT(x) ((x) << S_CH1_DEFICIT)
+#define G_CH1_DEFICIT(x) (((x) >> S_CH1_DEFICIT) & M_CH1_DEFICIT)
+
+#define S_CH0_DEFICIT    0
+#define M_CH0_DEFICIT    0xfffU
+#define V_CH0_DEFICIT(x) ((x) << S_CH0_DEFICIT)
+#define G_CH0_DEFICIT(x) (((x) >> S_CH0_DEFICIT) & M_CH0_DEFICIT)
+
+#define A_PM_TX_DBG_STAT14 0x1003a
+
+#define S_CH3_DEFICIT    16
+#define M_CH3_DEFICIT    0xfffU
+#define V_CH3_DEFICIT(x) ((x) << S_CH3_DEFICIT)
+#define G_CH3_DEFICIT(x) (((x) >> S_CH3_DEFICIT) & M_CH3_DEFICIT)
+
+#define S_CH2_DEFICIT    0
+#define M_CH2_DEFICIT    0xfffU
+#define V_CH2_DEFICIT(x) ((x) << S_CH2_DEFICIT)
+#define G_CH2_DEFICIT(x) (((x) >> S_CH2_DEFICIT) & M_CH2_DEFICIT)
+
+#define A_PM_TX_DBG_STAT15 0x1003b
+
+#define S_BUNDLE_LEN_SRDY    28
+#define M_BUNDLE_LEN_SRDY    0xfU
+#define V_BUNDLE_LEN_SRDY(x) ((x) << S_BUNDLE_LEN_SRDY)
+#define G_BUNDLE_LEN_SRDY(x) (((x) >> S_BUNDLE_LEN_SRDY) & M_BUNDLE_LEN_SRDY)
+
+#define S_BUNDLE_LEN1    16
+#define M_BUNDLE_LEN1    0xfffU
+#define V_BUNDLE_LEN1(x) ((x) << S_BUNDLE_LEN1)
+#define G_BUNDLE_LEN1(x) (((x) >> S_BUNDLE_LEN1) & M_BUNDLE_LEN1)
+
+#define S_BUNDLE_LEN0    0
+#define M_BUNDLE_LEN0    0xfffU
+#define V_BUNDLE_LEN0(x) ((x) << S_BUNDLE_LEN0)
+#define G_BUNDLE_LEN0(x) (((x) >> S_BUNDLE_LEN0) & M_BUNDLE_LEN0)
+
+#define S_T6_BUNDLE_LEN_SRDY    24
+#define M_T6_BUNDLE_LEN_SRDY    0x3U
+#define V_T6_BUNDLE_LEN_SRDY(x) ((x) << S_T6_BUNDLE_LEN_SRDY)
+#define G_T6_BUNDLE_LEN_SRDY(x) (((x) >> S_T6_BUNDLE_LEN_SRDY) & M_T6_BUNDLE_LEN_SRDY)
+
+#define S_T6_BUNDLE_LEN1    12
+#define M_T6_BUNDLE_LEN1    0xfffU
+#define V_T6_BUNDLE_LEN1(x) ((x) << S_T6_BUNDLE_LEN1)
+#define G_T6_BUNDLE_LEN1(x) (((x) >> S_T6_BUNDLE_LEN1) & M_T6_BUNDLE_LEN1)
+
+#define A_PM_TX_DBG_STAT16 0x1003c
+
+#define S_BUNDLE_LEN3    16
+#define M_BUNDLE_LEN3    0xfffU
+#define V_BUNDLE_LEN3(x) ((x) << S_BUNDLE_LEN3)
+#define G_BUNDLE_LEN3(x) (((x) >> S_BUNDLE_LEN3) & M_BUNDLE_LEN3)
+
+#define S_BUNDLE_LEN2    0
+#define M_BUNDLE_LEN2    0xfffU
+#define V_BUNDLE_LEN2(x) ((x) << S_BUNDLE_LEN2)
+#define G_BUNDLE_LEN2(x) (((x) >> S_BUNDLE_LEN2) & M_BUNDLE_LEN2)
+
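Each multi-bit field in this file gets the same generated quartet: S_x is the bit offset, M_x the width mask, V_x(v) positions a value, and G_x(reg) extracts it; single-bit fields add F_x as the positioned 1. A small sketch of the convention using the A_PM_TX_DBG_STAT13 fields defined above (the values are illustrative only):

    /* Pack: position each field and OR the pieces together. */
    uint32_t v = V_CH1_DEFICIT(0x40) | V_CH0_DEFICIT(0x10) |
        F_CH_DEFICIT_BLOWED;

    /* Extract: G_x() shifts down and masks with M_x. */
    if (v & F_CH_DEFICIT_BLOWED)
            printf("ch0 deficit %u, ch1 deficit %u\n",
                G_CH0_DEFICIT(v), G_CH1_DEFICIT(v));
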
 /* registers for module MPS */
 #define MPS_BASE_ADDR 0x9000
 
@@ -14456,6 +31502,48 @@
 #define V_PRTY0(x) ((x) << S_PRTY0)
 #define G_PRTY0(x) (((x) >> S_PRTY0) & M_PRTY0)
 
+#define A_MPS_PORT_PRTY_BUFFER_GROUP_TH_MAP 0x30
+
+#define S_TXPRTY7    28
+#define M_TXPRTY7    0xfU
+#define V_TXPRTY7(x) ((x) << S_TXPRTY7)
+#define G_TXPRTY7(x) (((x) >> S_TXPRTY7) & M_TXPRTY7)
+
+#define S_TXPRTY6    24
+#define M_TXPRTY6    0xfU
+#define V_TXPRTY6(x) ((x) << S_TXPRTY6)
+#define G_TXPRTY6(x) (((x) >> S_TXPRTY6) & M_TXPRTY6)
+
+#define S_TXPRTY5    20
+#define M_TXPRTY5    0xfU
+#define V_TXPRTY5(x) ((x) << S_TXPRTY5)
+#define G_TXPRTY5(x) (((x) >> S_TXPRTY5) & M_TXPRTY5)
+
+#define S_TXPRTY4    16
+#define M_TXPRTY4    0xfU
+#define V_TXPRTY4(x) ((x) << S_TXPRTY4)
+#define G_TXPRTY4(x) (((x) >> S_TXPRTY4) & M_TXPRTY4)
+
+#define S_TXPRTY3    12
+#define M_TXPRTY3    0xfU
+#define V_TXPRTY3(x) ((x) << S_TXPRTY3)
+#define G_TXPRTY3(x) (((x) >> S_TXPRTY3) & M_TXPRTY3)
+
+#define S_TXPRTY2    8
+#define M_TXPRTY2    0xfU
+#define V_TXPRTY2(x) ((x) << S_TXPRTY2)
+#define G_TXPRTY2(x) (((x) >> S_TXPRTY2) & M_TXPRTY2)
+
+#define S_TXPRTY1    4
+#define M_TXPRTY1    0xfU
+#define V_TXPRTY1(x) ((x) << S_TXPRTY1)
+#define G_TXPRTY1(x) (((x) >> S_TXPRTY1) & M_TXPRTY1)
+
+#define S_TXPRTY0    0
+#define M_TXPRTY0    0xfU
+#define V_TXPRTY0(x) ((x) << S_TXPRTY0)
+#define G_TXPRTY0(x) (((x) >> S_TXPRTY0) & M_TXPRTY0)
+
 #define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
 #define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_H 0x84
 #define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
@@ -14571,6 +31659,26 @@
 #define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
 #define F_OVLAN_EN0    V_OVLAN_EN0(1U)
 
+#define S_PTP_FWD_UP    21
+#define V_PTP_FWD_UP(x) ((x) << S_PTP_FWD_UP)
+#define F_PTP_FWD_UP    V_PTP_FWD_UP(1U)
+
+#define S_HASH_PRIO_SEL_LPBK    25
+#define V_HASH_PRIO_SEL_LPBK(x) ((x) << S_HASH_PRIO_SEL_LPBK)
+#define F_HASH_PRIO_SEL_LPBK    V_HASH_PRIO_SEL_LPBK(1U)
+
+#define S_HASH_PRIO_SEL_MAC    24
+#define V_HASH_PRIO_SEL_MAC(x) ((x) << S_HASH_PRIO_SEL_MAC)
+#define F_HASH_PRIO_SEL_MAC    V_HASH_PRIO_SEL_MAC(1U)
+
+#define S_HASH_EN_LPBK    23
+#define V_HASH_EN_LPBK(x) ((x) << S_HASH_EN_LPBK)
+#define F_HASH_EN_LPBK    V_HASH_EN_LPBK(1U)
+
+#define S_HASH_EN_MAC    22
+#define V_HASH_EN_MAC(x) ((x) << S_HASH_EN_MAC)
+#define F_HASH_EN_MAC    V_HASH_EN_MAC(1U)
+
 #define A_MPS_PORT_RX_MTU 0x104
 #define A_MPS_PORT_RX_PF_MAP 0x108
 #define A_MPS_PORT_RX_VF_MAP0 0x10c
@@ -14640,7 +31748,179 @@
 #define V_FIXED_VF(x) ((x) << S_FIXED_VF)
 #define G_FIXED_VF(x) (((x) >> S_FIXED_VF) & M_FIXED_VF)
 
+#define S_T6_FIXED_PFVF_MAC    14
+#define V_T6_FIXED_PFVF_MAC(x) ((x) << S_T6_FIXED_PFVF_MAC)
+#define F_T6_FIXED_PFVF_MAC    V_T6_FIXED_PFVF_MAC(1U)
+
+#define S_T6_FIXED_PFVF_LPBK    13
+#define V_T6_FIXED_PFVF_LPBK(x) ((x) << S_T6_FIXED_PFVF_LPBK)
+#define F_T6_FIXED_PFVF_LPBK    V_T6_FIXED_PFVF_LPBK(1U)
+
+#define S_T6_FIXED_PFVF_LPBK_OV    12
+#define V_T6_FIXED_PFVF_LPBK_OV(x) ((x) << S_T6_FIXED_PFVF_LPBK_OV)
+#define F_T6_FIXED_PFVF_LPBK_OV    V_T6_FIXED_PFVF_LPBK_OV(1U)
+
+#define S_T6_FIXED_PF    9
+#define M_T6_FIXED_PF    0x7U
+#define V_T6_FIXED_PF(x) ((x) << S_T6_FIXED_PF)
+#define G_T6_FIXED_PF(x) (((x) >> S_T6_FIXED_PF) & M_T6_FIXED_PF)
+
+#define S_T6_FIXED_VF_VLD    8
+#define V_T6_FIXED_VF_VLD(x) ((x) << S_T6_FIXED_VF_VLD)
+#define F_T6_FIXED_VF_VLD    V_T6_FIXED_VF_VLD(1U)
+
+#define S_T6_FIXED_VF    0
+#define M_T6_FIXED_VF    0xffU
+#define V_T6_FIXED_VF(x) ((x) << S_T6_FIXED_VF)
+#define G_T6_FIXED_VF(x) (((x) >> S_T6_FIXED_VF) & M_T6_FIXED_VF)
+
 #define A_MPS_PORT_RX_SPARE 0x13c
+#define A_MPS_PORT_RX_PTP_RSS_HASH 0x140
+#define A_MPS_PORT_RX_PTP_RSS_CONTROL 0x144
+#define A_MPS_PORT_RX_TS_VLD 0x148
+
+#define S_TS_VLD    0
+#define M_TS_VLD    0x3U
+#define V_TS_VLD(x) ((x) << S_TS_VLD)
+#define G_TS_VLD(x) (((x) >> S_TS_VLD) & M_TS_VLD)
+
+#define A_MPS_PORT_RX_TNL_LKP_INNER_SEL 0x14c
+
+#define S_LKP_SEL    0
+#define V_LKP_SEL(x) ((x) << S_LKP_SEL)
+#define F_LKP_SEL    V_LKP_SEL(1U)
+
+#define A_MPS_PORT_RX_VF_MAP4 0x150
+#define A_MPS_PORT_RX_VF_MAP5 0x154
+#define A_MPS_PORT_RX_VF_MAP6 0x158
+#define A_MPS_PORT_RX_VF_MAP7 0x15c
+#define A_MPS_PORT_RX_PRS_DEBUG_FLAG_MAC 0x160
+
+#define S_OUTER_IPV4_N_INNER_IPV4    31
+#define V_OUTER_IPV4_N_INNER_IPV4(x) ((x) << S_OUTER_IPV4_N_INNER_IPV4)
+#define F_OUTER_IPV4_N_INNER_IPV4    V_OUTER_IPV4_N_INNER_IPV4(1U)
+
+#define S_OUTER_IPV4_N_INNER_IPV6    30
+#define V_OUTER_IPV4_N_INNER_IPV6(x) ((x) << S_OUTER_IPV4_N_INNER_IPV6)
+#define F_OUTER_IPV4_N_INNER_IPV6    V_OUTER_IPV4_N_INNER_IPV6(1U)
+
+#define S_OUTER_IPV6_N_INNER_IPV4    29
+#define V_OUTER_IPV6_N_INNER_IPV4(x) ((x) << S_OUTER_IPV6_N_INNER_IPV4)
+#define F_OUTER_IPV6_N_INNER_IPV4    V_OUTER_IPV6_N_INNER_IPV4(1U)
+
+#define S_OUTER_IPV6_N_INNER_IPV6    28
+#define V_OUTER_IPV6_N_INNER_IPV6(x) ((x) << S_OUTER_IPV6_N_INNER_IPV6)
+#define F_OUTER_IPV6_N_INNER_IPV6    V_OUTER_IPV6_N_INNER_IPV6(1U)
+
+#define S_OUTER_IPV4_N_VLAN_NVGRE    27
+#define V_OUTER_IPV4_N_VLAN_NVGRE(x) ((x) << S_OUTER_IPV4_N_VLAN_NVGRE)
+#define F_OUTER_IPV4_N_VLAN_NVGRE    V_OUTER_IPV4_N_VLAN_NVGRE(1U)
+
+#define S_OUTER_IPV6_N_VLAN_NVGRE    26
+#define V_OUTER_IPV6_N_VLAN_NVGRE(x) ((x) << S_OUTER_IPV6_N_VLAN_NVGRE)
+#define F_OUTER_IPV6_N_VLAN_NVGRE    V_OUTER_IPV6_N_VLAN_NVGRE(1U)
+
+#define S_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE    25
+#define V_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE)
+#define F_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE    V_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE(1U)
+
+#define S_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE    24
+#define V_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE)
+#define F_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE    V_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE(1U)
+
+#define S_OUTER_IPV4_N_VLAN_GRE    23
+#define V_OUTER_IPV4_N_VLAN_GRE(x) ((x) << S_OUTER_IPV4_N_VLAN_GRE)
+#define F_OUTER_IPV4_N_VLAN_GRE    V_OUTER_IPV4_N_VLAN_GRE(1U)
+
+#define S_OUTER_IPV6_N_VLAN_GRE    22
+#define V_OUTER_IPV6_N_VLAN_GRE(x) ((x) << S_OUTER_IPV6_N_VLAN_GRE)
+#define F_OUTER_IPV6_N_VLAN_GRE    V_OUTER_IPV6_N_VLAN_GRE(1U)
+
+#define S_OUTER_IPV4_N_DOUBLE_VLAN_GRE    21
+#define V_OUTER_IPV4_N_DOUBLE_VLAN_GRE(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_GRE)
+#define F_OUTER_IPV4_N_DOUBLE_VLAN_GRE    V_OUTER_IPV4_N_DOUBLE_VLAN_GRE(1U)
+
+#define S_OUTER_IPV6_N_DOUBLE_VLAN_GRE    20
+#define V_OUTER_IPV6_N_DOUBLE_VLAN_GRE(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_GRE)
+#define F_OUTER_IPV6_N_DOUBLE_VLAN_GRE    V_OUTER_IPV6_N_DOUBLE_VLAN_GRE(1U)
+
+#define S_OUTER_IPV4_N_VLAN_VXLAN    19
+#define V_OUTER_IPV4_N_VLAN_VXLAN(x) ((x) << S_OUTER_IPV4_N_VLAN_VXLAN)
+#define F_OUTER_IPV4_N_VLAN_VXLAN    V_OUTER_IPV4_N_VLAN_VXLAN(1U)
+
+#define S_OUTER_IPV6_N_VLAN_VXLAN    18
+#define V_OUTER_IPV6_N_VLAN_VXLAN(x) ((x) << S_OUTER_IPV6_N_VLAN_VXLAN)
+#define F_OUTER_IPV6_N_VLAN_VXLAN    V_OUTER_IPV6_N_VLAN_VXLAN(1U)
+
+#define S_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN    17
+#define V_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN)
+#define F_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN    V_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN(1U)
+
+#define S_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN    16
+#define V_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN)
+#define F_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN    V_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN(1U)
+
+#define S_OUTER_IPV4_N_VLAN_GENEVE    15
+#define V_OUTER_IPV4_N_VLAN_GENEVE(x) ((x) << S_OUTER_IPV4_N_VLAN_GENEVE)
+#define F_OUTER_IPV4_N_VLAN_GENEVE    V_OUTER_IPV4_N_VLAN_GENEVE(1U)
+
+#define S_OUTER_IPV6_N_VLAN_GENEVE    14
+#define V_OUTER_IPV6_N_VLAN_GENEVE(x) ((x) << S_OUTER_IPV6_N_VLAN_GENEVE)
+#define F_OUTER_IPV6_N_VLAN_GENEVE    V_OUTER_IPV6_N_VLAN_GENEVE(1U)
+
+#define S_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE    13
+#define V_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE)
+#define F_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE    V_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE(1U)
+
+#define S_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE    12
+#define V_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE)
+#define F_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE    V_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE(1U)
+
+#define S_ERR_TNL_HDR_LEN    11
+#define V_ERR_TNL_HDR_LEN(x) ((x) << S_ERR_TNL_HDR_LEN)
+#define F_ERR_TNL_HDR_LEN    V_ERR_TNL_HDR_LEN(1U)
+
+#define S_NON_RUNT_FRAME    10
+#define V_NON_RUNT_FRAME(x) ((x) << S_NON_RUNT_FRAME)
+#define F_NON_RUNT_FRAME    V_NON_RUNT_FRAME(1U)
+
+#define S_INNER_VLAN_VLD    9
+#define V_INNER_VLAN_VLD(x) ((x) << S_INNER_VLAN_VLD)
+#define F_INNER_VLAN_VLD    V_INNER_VLAN_VLD(1U)
+
+#define S_ERR_IP_PAYLOAD_LEN    8
+#define V_ERR_IP_PAYLOAD_LEN(x) ((x) << S_ERR_IP_PAYLOAD_LEN)
+#define F_ERR_IP_PAYLOAD_LEN    V_ERR_IP_PAYLOAD_LEN(1U)
+
+#define S_ERR_UDP_PAYLOAD_LEN    7
+#define V_ERR_UDP_PAYLOAD_LEN(x) ((x) << S_ERR_UDP_PAYLOAD_LEN)
+#define F_ERR_UDP_PAYLOAD_LEN    V_ERR_UDP_PAYLOAD_LEN(1U)
+
+#define A_MPS_PORT_RX_PRS_DEBUG_FLAG_LPBK 0x164
+
+#define S_T6_INNER_VLAN_VLD    10
+#define V_T6_INNER_VLAN_VLD(x) ((x) << S_T6_INNER_VLAN_VLD)
+#define F_T6_INNER_VLAN_VLD    V_T6_INNER_VLAN_VLD(1U)
+
+#define S_T6_ERR_IP_PAYLOAD_LEN    9
+#define V_T6_ERR_IP_PAYLOAD_LEN(x) ((x) << S_T6_ERR_IP_PAYLOAD_LEN)
+#define F_T6_ERR_IP_PAYLOAD_LEN    V_T6_ERR_IP_PAYLOAD_LEN(1U)
+
+#define S_T6_ERR_UDP_PAYLOAD_LEN    8
+#define V_T6_ERR_UDP_PAYLOAD_LEN(x) ((x) << S_T6_ERR_UDP_PAYLOAD_LEN)
+#define F_T6_ERR_UDP_PAYLOAD_LEN    V_T6_ERR_UDP_PAYLOAD_LEN(1U)
+
+#define A_MPS_PORT_RX_REPL_VECT_SEL 0x168
+
+#define S_DIS_REPL_VECT_SEL    4
+#define V_DIS_REPL_VECT_SEL(x) ((x) << S_DIS_REPL_VECT_SEL)
+#define F_DIS_REPL_VECT_SEL    V_DIS_REPL_VECT_SEL(1U)
+
+#define S_REPL_VECT_SEL    0
+#define M_REPL_VECT_SEL    0xfU
+#define V_REPL_VECT_SEL(x) ((x) << S_REPL_VECT_SEL)
+#define G_REPL_VECT_SEL(x) (((x) >> S_REPL_VECT_SEL) & M_REPL_VECT_SEL)
+
 #define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190
 
 #define S_CREDIT    0
@@ -14673,7 +31953,34 @@
 #define V_MAXPKTCNT(x) ((x) << S_MAXPKTCNT)
 #define G_MAXPKTCNT(x) (((x) >> S_MAXPKTCNT) & M_MAXPKTCNT)
 
+#define S_OUT_TH    22
+#define M_OUT_TH    0xffU
+#define V_OUT_TH(x) ((x) << S_OUT_TH)
+#define G_OUT_TH(x) (((x) >> S_OUT_TH) & M_OUT_TH)
+
+#define S_IN_TH    14
+#define M_IN_TH    0xffU
+#define V_IN_TH(x) ((x) << S_IN_TH)
+#define G_IN_TH(x) (((x) >> S_IN_TH) & M_IN_TH)
+
 #define A_MPS_PORT_FPGA_PAUSE_CTL 0x1c8
+
+#define S_FPGAPAUSEEN    0
+#define V_FPGAPAUSEEN(x) ((x) << S_FPGAPAUSEEN)
+#define F_FPGAPAUSEEN    V_FPGAPAUSEEN(1U)
+
+#define A_MPS_PORT_TX_PAUSE_PENDING_STATUS 0x1d0
+
+#define S_OFF_PENDING    8
+#define M_OFF_PENDING    0xffU
+#define V_OFF_PENDING(x) ((x) << S_OFF_PENDING)
+#define G_OFF_PENDING(x) (((x) >> S_OFF_PENDING) & M_OFF_PENDING)
+
+#define S_ON_PENDING    0
+#define M_ON_PENDING    0xffU
+#define V_ON_PENDING(x) ((x) << S_ON_PENDING)
+#define G_ON_PENDING(x) (((x) >> S_ON_PENDING) & M_ON_PENDING)
+
 #define A_MPS_PORT_CLS_HASH_SRAM 0x200
 
 #define S_VALID    20
@@ -14712,6 +32019,50 @@
 #define V_VF(x) ((x) << S_VF)
 #define G_VF(x) (((x) >> S_VF) & M_VF)
 
+#define S_DISENCAPOUTERRPLCT    23
+#define V_DISENCAPOUTERRPLCT(x) ((x) << S_DISENCAPOUTERRPLCT)
+#define F_DISENCAPOUTERRPLCT    V_DISENCAPOUTERRPLCT(1U)
+
+#define S_DISENCAP    22
+#define V_DISENCAP(x) ((x) << S_DISENCAP)
+#define F_DISENCAP    V_DISENCAP(1U)
+
+#define S_T6_VALID    21
+#define V_T6_VALID(x) ((x) << S_T6_VALID)
+#define F_T6_VALID    V_T6_VALID(1U)
+
+#define S_T6_HASHPORTMAP    17
+#define M_T6_HASHPORTMAP    0xfU
+#define V_T6_HASHPORTMAP(x) ((x) << S_T6_HASHPORTMAP)
+#define G_T6_HASHPORTMAP(x) (((x) >> S_T6_HASHPORTMAP) & M_T6_HASHPORTMAP)
+
+#define S_T6_MULTILISTEN    16
+#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN)
+#define F_T6_MULTILISTEN    V_T6_MULTILISTEN(1U)
+
+#define S_T6_PRIORITY    13
+#define M_T6_PRIORITY    0x7U
+#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY)
+#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY)
+
+#define S_T6_REPLICATE    12
+#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
+#define F_T6_REPLICATE    V_T6_REPLICATE(1U)
+
+#define S_T6_PF    9
+#define M_T6_PF    0x7U
+#define V_T6_PF(x) ((x) << S_T6_PF)
+#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
+
+#define S_T6_VF_VALID    8
+#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
+#define F_T6_VF_VALID    V_T6_VF_VALID(1U)
+
+#define S_T6_VF    0
+#define M_T6_VF    0xffU
+#define V_T6_VF(x) ((x) << S_T6_VF)
+#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
+
 #define A_MPS_PF_CTL 0x2c0
 
 #define S_TXEN    1
@@ -14758,6 +32109,33 @@
 #define V_PROMISCEN(x) ((x) << S_PROMISCEN)
 #define F_PROMISCEN    V_PROMISCEN(1U)
 
+#define S_T6_MULTILISTEN    16
+#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN)
+#define F_T6_MULTILISTEN    V_T6_MULTILISTEN(1U)
+
+#define S_T6_PRIORITY    13
+#define M_T6_PRIORITY    0x7U
+#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY)
+#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY)
+
+#define S_T6_REPLICATE    12
+#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
+#define F_T6_REPLICATE    V_T6_REPLICATE(1U)
+
+#define S_T6_PF    9
+#define M_T6_PF    0x7U
+#define V_T6_PF(x) ((x) << S_T6_PF)
+#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
+
+#define S_T6_VF_VALID    8
+#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
+#define F_T6_VF_VALID    V_T6_VF_VALID(1U)
+
+#define S_T6_VF    0
+#define M_T6_VF    0xffU
+#define V_T6_VF(x) ((x) << S_T6_VF)
+#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
+
 #define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c
 #define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c
 #define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310
@@ -14771,6 +32149,10 @@
 #define V_BMC_VLD(x) ((x) << S_BMC_VLD)
 #define F_BMC_VLD    V_BMC_VLD(1U)
 
+#define S_MATCHALL    18
+#define V_MATCHALL(x) ((x) << S_MATCHALL)
+#define F_MATCHALL    V_MATCHALL(1U)
+
 #define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314
 #define A_MPS_PORT_CLS_BMC_VLAN 0x314
 
@@ -14789,8 +32171,69 @@
 #define V_PF_VLAN_SEL(x) ((x) << S_PF_VLAN_SEL)
 #define F_PF_VLAN_SEL    V_PF_VLAN_SEL(1U)
 
+#define S_LPBK_TCAM1_HIT_PRIORITY    14
+#define V_LPBK_TCAM1_HIT_PRIORITY(x) ((x) << S_LPBK_TCAM1_HIT_PRIORITY)
+#define F_LPBK_TCAM1_HIT_PRIORITY    V_LPBK_TCAM1_HIT_PRIORITY(1U)
+
+#define S_LPBK_TCAM0_HIT_PRIORITY    13
+#define V_LPBK_TCAM0_HIT_PRIORITY(x) ((x) << S_LPBK_TCAM0_HIT_PRIORITY)
+#define F_LPBK_TCAM0_HIT_PRIORITY    V_LPBK_TCAM0_HIT_PRIORITY(1U)
+
+#define S_LPBK_TCAM_PRIORITY    12
+#define V_LPBK_TCAM_PRIORITY(x) ((x) << S_LPBK_TCAM_PRIORITY)
+#define F_LPBK_TCAM_PRIORITY    V_LPBK_TCAM_PRIORITY(1U)
+
+#define S_LPBK_SMAC_TCAM_SEL    10
+#define M_LPBK_SMAC_TCAM_SEL    0x3U
+#define V_LPBK_SMAC_TCAM_SEL(x) ((x) << S_LPBK_SMAC_TCAM_SEL)
+#define G_LPBK_SMAC_TCAM_SEL(x) (((x) >> S_LPBK_SMAC_TCAM_SEL) & M_LPBK_SMAC_TCAM_SEL)
+
+#define S_LPBK_DMAC_TCAM_SEL    8
+#define M_LPBK_DMAC_TCAM_SEL    0x3U
+#define V_LPBK_DMAC_TCAM_SEL(x) ((x) << S_LPBK_DMAC_TCAM_SEL)
+#define G_LPBK_DMAC_TCAM_SEL(x) (((x) >> S_LPBK_DMAC_TCAM_SEL) & M_LPBK_DMAC_TCAM_SEL)
+
+#define S_TCAM1_HIT_PRIORITY    7
+#define V_TCAM1_HIT_PRIORITY(x) ((x) << S_TCAM1_HIT_PRIORITY)
+#define F_TCAM1_HIT_PRIORITY    V_TCAM1_HIT_PRIORITY(1U)
+
+#define S_TCAM0_HIT_PRIORITY    6
+#define V_TCAM0_HIT_PRIORITY(x) ((x) << S_TCAM0_HIT_PRIORITY)
+#define F_TCAM0_HIT_PRIORITY    V_TCAM0_HIT_PRIORITY(1U)
+
+#define S_TCAM_PRIORITY    5
+#define V_TCAM_PRIORITY(x) ((x) << S_TCAM_PRIORITY)
+#define F_TCAM_PRIORITY    V_TCAM_PRIORITY(1U)
+
+#define S_SMAC_TCAM_SEL    3
+#define M_SMAC_TCAM_SEL    0x3U
+#define V_SMAC_TCAM_SEL(x) ((x) << S_SMAC_TCAM_SEL)
+#define G_SMAC_TCAM_SEL(x) (((x) >> S_SMAC_TCAM_SEL) & M_SMAC_TCAM_SEL)
+
+#define S_DMAC_TCAM_SEL    1
+#define M_DMAC_TCAM_SEL    0x3U
+#define V_DMAC_TCAM_SEL(x) ((x) << S_DMAC_TCAM_SEL)
+#define G_DMAC_TCAM_SEL(x) (((x) >> S_DMAC_TCAM_SEL) & M_DMAC_TCAM_SEL)
+
 #define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c
+#define A_MPS_PORT_CLS_NCSI_ETH_TYPE 0x31c
+
+#define S_ETHTYPE2    0
+#define M_ETHTYPE2    0xffffU
+#define V_ETHTYPE2(x) ((x) << S_ETHTYPE2)
+#define G_ETHTYPE2(x) (((x) >> S_ETHTYPE2) & M_ETHTYPE2)
+
 #define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_L 0x320
+#define A_MPS_PORT_CLS_NCSI_ETH_TYPE_EN 0x320
+
+#define S_EN1    1
+#define V_EN1(x) ((x) << S_EN1)
+#define F_EN1    V_EN1(1U)
+
+#define S_EN2    0
+#define V_EN2(x) ((x) << S_EN2)
+#define F_EN2    V_EN2(1U)
+
 #define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324
 #define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328
 #define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c
@@ -14889,6 +32332,8 @@
 #define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
 #define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
 #define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c
 #define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
 #define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
 #define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
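
The _L/_H pair added above gives the loopback drop counter the same 64-bit low/high layout as the surrounding byte and frame counters. A sketch of the usual read, assuming the driver's t4_read_reg64() and a PORT_REG(port, offset) helper for per-port addressing:

    /* One 64-bit read starting at the _L offset covers both halves. */
    uint64_t drops = t4_read_reg64(sc,
        PORT_REG(port, A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L));
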
@@ -14943,6 +32388,8 @@
 #define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
 #define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
 #define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_L 0x618
+#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_H 0x61c
 #define A_MPS_CMN_CTL 0x9000
 
 #define S_DETECT8023    3
@@ -14958,6 +32405,23 @@
 #define V_NUMPORTS(x) ((x) << S_NUMPORTS)
 #define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS)
 
+#define S_LPBKCRDTCTRL    4
+#define V_LPBKCRDTCTRL(x) ((x) << S_LPBKCRDTCTRL)
+#define F_LPBKCRDTCTRL    V_LPBKCRDTCTRL(1U)
+
+#define S_TX_PORT_STATS_MODE    8
+#define V_TX_PORT_STATS_MODE(x) ((x) << S_TX_PORT_STATS_MODE)
+#define F_TX_PORT_STATS_MODE    V_TX_PORT_STATS_MODE(1U)
+
+#define S_T5MODE    7
+#define V_T5MODE(x) ((x) << S_T5MODE)
+#define F_T5MODE    V_T5MODE(1U)
+
+#define S_SPEEDMODE    5
+#define M_SPEEDMODE    0x3U
+#define V_SPEEDMODE(x) ((x) << S_SPEEDMODE)
+#define G_SPEEDMODE(x) (((x) >> S_SPEEDMODE) & M_SPEEDMODE)
+
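The new T5MODE and SPEEDMODE fields share the A_MPS_CMN_CTL word with the pre-existing NUMPORTS field. A minimal sketch of decoding it, assuming the driver's t4_read_reg():

    uint32_t ctl = t4_read_reg(sc, A_MPS_CMN_CTL);

    /* NUMPORTS is pre-existing; T5MODE/SPEEDMODE are added by this diff. */
    printf("ports %u t5mode %d speedmode %u\n",
        G_NUMPORTS(ctl), (ctl & F_T5MODE) != 0, G_SPEEDMODE(ctl));
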
 #define A_MPS_INT_ENABLE 0x9004
 
 #define S_STATINTENB    5
@@ -15010,6 +32474,12 @@
 #define V_PLINT(x) ((x) << S_PLINT)
 #define F_PLINT    V_PLINT(1U)
 
+#define A_MPS_CGEN_GLOBAL 0x900c
+
+#define S_MPS_GLOBAL_CGEN    0
+#define V_MPS_GLOBAL_CGEN(x) ((x) << S_MPS_GLOBAL_CGEN)
+#define F_MPS_GLOBAL_CGEN    V_MPS_GLOBAL_CGEN(1U)
+
 #define A_MPS_VF_TX_CTL_31_0 0x9010
 #define A_MPS_VF_TX_CTL_63_32 0x9014
 #define A_MPS_VF_TX_CTL_95_64 0x9018
@@ -15072,6 +32542,11 @@
 #define V_CH_MAP0(x) ((x) << S_CH_MAP0)
 #define G_CH_MAP0(x) (((x) >> S_CH_MAP0) & M_CH_MAP0)
 
+#define S_FPGA_PTP_PORT    9
+#define M_FPGA_PTP_PORT    0x3U
+#define V_FPGA_PTP_PORT(x) ((x) << S_FPGA_PTP_PORT)
+#define G_FPGA_PTP_PORT(x) (((x) >> S_FPGA_PTP_PORT) & M_FPGA_PTP_PORT)
+
 #define A_MPS_DEBUG_CTL 0x9068
 
 #define S_DBGMODECTL_H    11
@@ -15096,16 +32571,11 @@
 #define A_MPS_DEBUG_DATA_REG_H 0x9070
 #define A_MPS_TOP_SPARE 0x9074
 
-#define S_TOPSPARE    12
-#define M_TOPSPARE    0xfffffU
+#define S_TOPSPARE    8
+#define M_TOPSPARE    0xffffffU
 #define V_TOPSPARE(x) ((x) << S_TOPSPARE)
 #define G_TOPSPARE(x) (((x) >> S_TOPSPARE) & M_TOPSPARE)
 
-#define S_CHIKN_14463    8
-#define M_CHIKN_14463    0xfU
-#define V_CHIKN_14463(x) ((x) << S_CHIKN_14463)
-#define G_CHIKN_14463(x) (((x) >> S_CHIKN_14463) & M_CHIKN_14463)
-
 #define S_OVLANSELLPBK3    7
 #define V_OVLANSELLPBK3(x) ((x) << S_OVLANSELLPBK3)
 #define F_OVLANSELLPBK3    V_OVLANSELLPBK3(1U)
@@ -15138,7 +32608,72 @@
 #define V_OVLANSELMAC0(x) ((x) << S_OVLANSELMAC0)
 #define F_OVLANSELMAC0    V_OVLANSELMAC0(1U)
 
+#define S_T5_TOPSPARE    8
+#define M_T5_TOPSPARE    0xffffffU
+#define V_T5_TOPSPARE(x) ((x) << S_T5_TOPSPARE)
+#define G_T5_TOPSPARE(x) (((x) >> S_T5_TOPSPARE) & M_T5_TOPSPARE)
+
+#define A_MPS_T5_BUILD_REVISION 0x9078
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH0 0x907c
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH1 0x9080
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH2 0x9084
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH3 0x9088
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH4 0x908c
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH5 0x9090
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH6 0x9094
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH7 0x9098
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH8 0x909c
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH9 0x90a0
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH10 0x90a4
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH11 0x90a8
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH12 0x90ac
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH13 0x90b0
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH14 0x90b4
+#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH15 0x90b8
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH0 0x90bc
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH1 0x90c0
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH2 0x90c4
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH3 0x90c8
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH4 0x90cc
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH5 0x90d0
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH6 0x90d4
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH7 0x90d8
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH8 0x90dc
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH9 0x90e0
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH10 0x90e4
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH11 0x90e8
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH12 0x90ec
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH13 0x90f0
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH14 0x90f4
+#define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH15 0x90f8
 #define A_MPS_BUILD_REVISION 0x90fc
+#define A_MPS_VF_TX_CTL_159_128 0x9100
+#define A_MPS_VF_TX_CTL_191_160 0x9104
+#define A_MPS_VF_TX_CTL_223_192 0x9108
+#define A_MPS_VF_TX_CTL_255_224 0x910c
+#define A_MPS_VF_RX_CTL_159_128 0x9110
+#define A_MPS_VF_RX_CTL_191_160 0x9114
+#define A_MPS_VF_RX_CTL_223_192 0x9118
+#define A_MPS_VF_RX_CTL_255_224 0x911c
+#define A_MPS_FPGA_BIST_CFG_P0 0x9120
+
+#define S_ADDRMASK    16
+#define M_ADDRMASK    0xffffU
+#define V_ADDRMASK(x) ((x) << S_ADDRMASK)
+#define G_ADDRMASK(x) (((x) >> S_ADDRMASK) & M_ADDRMASK)
+
+#define S_T6_BASEADDR    0
+#define M_T6_BASEADDR    0xffffU
+#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR)
+#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
+
+#define A_MPS_FPGA_BIST_CFG_P1 0x9124
+
+#define S_T6_BASEADDR    0
+#define M_T6_BASEADDR    0xffffU
+#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR)
+#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
+
 #define A_MPS_TX_PRTY_SEL 0x9400
 
 #define S_CH4_PRTY    20
@@ -15214,6 +32749,7 @@
 #define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO)
 
 #define A_MPS_TX_INT_CAUSE 0x9408
+#define A_MPS_TX_NCSI2MPS_CNT 0x940c
 #define A_MPS_TX_PERR_ENABLE 0x9410
 #define A_MPS_TX_PERR_INJECT 0x9414
 
@@ -15250,6 +32786,15 @@
 #define V_BUBBLECLR(x) ((x) << S_BUBBLECLR)
 #define G_BUBBLECLR(x) (((x) >> S_BUBBLECLR) & M_BUBBLECLR)
 
+#define S_NCSISECNT    20
+#define V_NCSISECNT(x) ((x) << S_NCSISECNT)
+#define F_NCSISECNT    V_NCSISECNT(1U)
+
+#define S_LPBKSECNT    16
+#define M_LPBKSECNT    0xfU
+#define V_LPBKSECNT(x) ((x) << S_LPBKSECNT)
+#define G_LPBKSECNT(x) (((x) >> S_LPBKSECNT) & M_LPBKSECNT)
+
 #define A_MPS_TX_PORT_ERR 0x9430
 
 #define S_LPBKPT3    7
@@ -15359,6 +32904,50 @@
 #define V_DATACH0(x) ((x) << S_DATACH0)
 #define G_DATACH0(x) (((x) >> S_DATACH0) & M_DATACH0)
 
+#define S_T5_SIZECH1    26
+#define M_T5_SIZECH1    0xfU
+#define V_T5_SIZECH1(x) ((x) << S_T5_SIZECH1)
+#define G_T5_SIZECH1(x) (((x) >> S_T5_SIZECH1) & M_T5_SIZECH1)
+
+#define S_T5_ERRCH1    25
+#define V_T5_ERRCH1(x) ((x) << S_T5_ERRCH1)
+#define F_T5_ERRCH1    V_T5_ERRCH1(1U)
+
+#define S_T5_FULLCH1    24
+#define V_T5_FULLCH1(x) ((x) << S_T5_FULLCH1)
+#define F_T5_FULLCH1    V_T5_FULLCH1(1U)
+
+#define S_T5_VALIDCH1    23
+#define V_T5_VALIDCH1(x) ((x) << S_T5_VALIDCH1)
+#define F_T5_VALIDCH1    V_T5_VALIDCH1(1U)
+
+#define S_T5_DATACH1    16
+#define M_T5_DATACH1    0x7fU
+#define V_T5_DATACH1(x) ((x) << S_T5_DATACH1)
+#define G_T5_DATACH1(x) (((x) >> S_T5_DATACH1) & M_T5_DATACH1)
+
+#define S_T5_SIZECH0    10
+#define M_T5_SIZECH0    0xfU
+#define V_T5_SIZECH0(x) ((x) << S_T5_SIZECH0)
+#define G_T5_SIZECH0(x) (((x) >> S_T5_SIZECH0) & M_T5_SIZECH0)
+
+#define S_T5_ERRCH0    9
+#define V_T5_ERRCH0(x) ((x) << S_T5_ERRCH0)
+#define F_T5_ERRCH0    V_T5_ERRCH0(1U)
+
+#define S_T5_FULLCH0    8
+#define V_T5_FULLCH0(x) ((x) << S_T5_FULLCH0)
+#define F_T5_FULLCH0    V_T5_FULLCH0(1U)
+
+#define S_T5_VALIDCH0    7
+#define V_T5_VALIDCH0(x) ((x) << S_T5_VALIDCH0)
+#define F_T5_VALIDCH0    V_T5_VALIDCH0(1U)
+
+#define S_T5_DATACH0    0
+#define M_T5_DATACH0    0x7fU
+#define V_T5_DATACH0(x) ((x) << S_T5_DATACH0)
+#define G_T5_DATACH0(x) (((x) >> S_T5_DATACH0) & M_T5_DATACH0)
+
 #define A_MPS_TX_DEBUG_REG_TP2TX_32 0x9448
 
 #define S_SOPCH3    31
@@ -15421,6 +33010,50 @@
 #define V_DATACH2(x) ((x) << S_DATACH2)
 #define G_DATACH2(x) (((x) >> S_DATACH2) & M_DATACH2)
 
+#define S_T5_SIZECH3    26
+#define M_T5_SIZECH3    0xfU
+#define V_T5_SIZECH3(x) ((x) << S_T5_SIZECH3)
+#define G_T5_SIZECH3(x) (((x) >> S_T5_SIZECH3) & M_T5_SIZECH3)
+
+#define S_T5_ERRCH3    25
+#define V_T5_ERRCH3(x) ((x) << S_T5_ERRCH3)
+#define F_T5_ERRCH3    V_T5_ERRCH3(1U)
+
+#define S_T5_FULLCH3    24
+#define V_T5_FULLCH3(x) ((x) << S_T5_FULLCH3)
+#define F_T5_FULLCH3    V_T5_FULLCH3(1U)
+
+#define S_T5_VALIDCH3    23
+#define V_T5_VALIDCH3(x) ((x) << S_T5_VALIDCH3)
+#define F_T5_VALIDCH3    V_T5_VALIDCH3(1U)
+
+#define S_T5_DATACH3    16
+#define M_T5_DATACH3    0x7fU
+#define V_T5_DATACH3(x) ((x) << S_T5_DATACH3)
+#define G_T5_DATACH3(x) (((x) >> S_T5_DATACH3) & M_T5_DATACH3)
+
+#define S_T5_SIZECH2    10
+#define M_T5_SIZECH2    0xfU
+#define V_T5_SIZECH2(x) ((x) << S_T5_SIZECH2)
+#define G_T5_SIZECH2(x) (((x) >> S_T5_SIZECH2) & M_T5_SIZECH2)
+
+#define S_T5_ERRCH2    9
+#define V_T5_ERRCH2(x) ((x) << S_T5_ERRCH2)
+#define F_T5_ERRCH2    V_T5_ERRCH2(1U)
+
+#define S_T5_FULLCH2    8
+#define V_T5_FULLCH2(x) ((x) << S_T5_FULLCH2)
+#define F_T5_FULLCH2    V_T5_FULLCH2(1U)
+
+#define S_T5_VALIDCH2    7
+#define V_T5_VALIDCH2(x) ((x) << S_T5_VALIDCH2)
+#define F_T5_VALIDCH2    V_T5_VALIDCH2(1U)
+
+#define S_T5_DATACH2    0
+#define M_T5_DATACH2    0x7fU
+#define V_T5_DATACH2(x) ((x) << S_T5_DATACH2)
+#define G_T5_DATACH2(x) (((x) >> S_T5_DATACH2) & M_T5_DATACH2)
+
 #define A_MPS_TX_DEBUG_REG_TX2MAC_10 0x944c
 
 #define S_SOPPT1    31
@@ -15483,6 +33116,50 @@
 #define V_DATAPT0(x) ((x) << S_DATAPT0)
 #define G_DATAPT0(x) (((x) >> S_DATAPT0) & M_DATAPT0)
 
+#define S_T5_SIZEPT1    26
+#define M_T5_SIZEPT1    0xfU
+#define V_T5_SIZEPT1(x) ((x) << S_T5_SIZEPT1)
+#define G_T5_SIZEPT1(x) (((x) >> S_T5_SIZEPT1) & M_T5_SIZEPT1)
+
+#define S_T5_ERRPT1    25
+#define V_T5_ERRPT1(x) ((x) << S_T5_ERRPT1)
+#define F_T5_ERRPT1    V_T5_ERRPT1(1U)
+
+#define S_T5_FULLPT1    24
+#define V_T5_FULLPT1(x) ((x) << S_T5_FULLPT1)
+#define F_T5_FULLPT1    V_T5_FULLPT1(1U)
+
+#define S_T5_VALIDPT1    23
+#define V_T5_VALIDPT1(x) ((x) << S_T5_VALIDPT1)
+#define F_T5_VALIDPT1    V_T5_VALIDPT1(1U)
+
+#define S_T5_DATAPT1    16
+#define M_T5_DATAPT1    0x7fU
+#define V_T5_DATAPT1(x) ((x) << S_T5_DATAPT1)
+#define G_T5_DATAPT1(x) (((x) >> S_T5_DATAPT1) & M_T5_DATAPT1)
+
+#define S_T5_SIZEPT0    10
+#define M_T5_SIZEPT0    0xfU
+#define V_T5_SIZEPT0(x) ((x) << S_T5_SIZEPT0)
+#define G_T5_SIZEPT0(x) (((x) >> S_T5_SIZEPT0) & M_T5_SIZEPT0)
+
+#define S_T5_ERRPT0    9
+#define V_T5_ERRPT0(x) ((x) << S_T5_ERRPT0)
+#define F_T5_ERRPT0    V_T5_ERRPT0(1U)
+
+#define S_T5_FULLPT0    8
+#define V_T5_FULLPT0(x) ((x) << S_T5_FULLPT0)
+#define F_T5_FULLPT0    V_T5_FULLPT0(1U)
+
+#define S_T5_VALIDPT0    7
+#define V_T5_VALIDPT0(x) ((x) << S_T5_VALIDPT0)
+#define F_T5_VALIDPT0    V_T5_VALIDPT0(1U)
+
+#define S_T5_DATAPT0    0
+#define M_T5_DATAPT0    0x7fU
+#define V_T5_DATAPT0(x) ((x) << S_T5_DATAPT0)
+#define G_T5_DATAPT0(x) (((x) >> S_T5_DATAPT0) & M_T5_DATAPT0)
+
 #define A_MPS_TX_DEBUG_REG_TX2MAC_32 0x9450
 
 #define S_SOPPT3    31
@@ -15545,6 +33222,50 @@
 #define V_DATAPT2(x) ((x) << S_DATAPT2)
 #define G_DATAPT2(x) (((x) >> S_DATAPT2) & M_DATAPT2)
 
+#define S_T5_SIZEPT3    26
+#define M_T5_SIZEPT3    0xfU
+#define V_T5_SIZEPT3(x) ((x) << S_T5_SIZEPT3)
+#define G_T5_SIZEPT3(x) (((x) >> S_T5_SIZEPT3) & M_T5_SIZEPT3)
+
+#define S_T5_ERRPT3    25
+#define V_T5_ERRPT3(x) ((x) << S_T5_ERRPT3)
+#define F_T5_ERRPT3    V_T5_ERRPT3(1U)
+
+#define S_T5_FULLPT3    24
+#define V_T5_FULLPT3(x) ((x) << S_T5_FULLPT3)
+#define F_T5_FULLPT3    V_T5_FULLPT3(1U)
+
+#define S_T5_VALIDPT3    23
+#define V_T5_VALIDPT3(x) ((x) << S_T5_VALIDPT3)
+#define F_T5_VALIDPT3    V_T5_VALIDPT3(1U)
+
+#define S_T5_DATAPT3    16
+#define M_T5_DATAPT3    0x7fU
+#define V_T5_DATAPT3(x) ((x) << S_T5_DATAPT3)
+#define G_T5_DATAPT3(x) (((x) >> S_T5_DATAPT3) & M_T5_DATAPT3)
+
+#define S_T5_SIZEPT2    10
+#define M_T5_SIZEPT2    0xfU
+#define V_T5_SIZEPT2(x) ((x) << S_T5_SIZEPT2)
+#define G_T5_SIZEPT2(x) (((x) >> S_T5_SIZEPT2) & M_T5_SIZEPT2)
+
+#define S_T5_ERRPT2    9
+#define V_T5_ERRPT2(x) ((x) << S_T5_ERRPT2)
+#define F_T5_ERRPT2    V_T5_ERRPT2(1U)
+
+#define S_T5_FULLPT2    8
+#define V_T5_FULLPT2(x) ((x) << S_T5_FULLPT2)
+#define F_T5_FULLPT2    V_T5_FULLPT2(1U)
+
+#define S_T5_VALIDPT2    7
+#define V_T5_VALIDPT2(x) ((x) << S_T5_VALIDPT2)
+#define F_T5_VALIDPT2    V_T5_VALIDPT2(1U)
+
+#define S_T5_DATAPT2    0
+#define M_T5_DATAPT2    0x7fU
+#define V_T5_DATAPT2(x) ((x) << S_T5_DATAPT2)
+#define G_T5_DATAPT2(x) (((x) >> S_T5_DATAPT2) & M_T5_DATAPT2)
+
 #define A_MPS_TX_SGE_CH_PAUSE_IGNR 0x9454
 
 #define S_SGEPAUSEIGNR    0
@@ -15552,6 +33273,13 @@
 #define V_SGEPAUSEIGNR(x) ((x) << S_SGEPAUSEIGNR)
 #define G_SGEPAUSEIGNR(x) (((x) >> S_SGEPAUSEIGNR) & M_SGEPAUSEIGNR)
 
+#define A_MPS_T5_TX_SGE_CH_PAUSE_IGNR 0x9454
+
+#define S_T5SGEPAUSEIGNR    0
+#define M_T5SGEPAUSEIGNR    0xffffU
+#define V_T5SGEPAUSEIGNR(x) ((x) << S_T5SGEPAUSEIGNR)
+#define G_T5SGEPAUSEIGNR(x) (((x) >> S_T5SGEPAUSEIGNR) & M_T5SGEPAUSEIGNR)
+
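A_MPS_T5_TX_SGE_CH_PAUSE_IGNR carries a 16-channel pause-ignore mask at the same 0x9454 address as the narrower T4 field. A minimal sketch, assuming the stock t4_set_reg_field() read-modify-write helper:

    /* Let SGE channels 0 and 1 ignore MAC pause on a T5 part. */
    t4_set_reg_field(sc, A_MPS_T5_TX_SGE_CH_PAUSE_IGNR,
        V_T5SGEPAUSEIGNR(M_T5SGEPAUSEIGNR),  /* mask: the whole field */
        V_T5SGEPAUSEIGNR(0x3));              /* value: channels 0-1 */
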
 #define A_MPS_TX_DEBUG_SUBPART_SEL 0x9458
 
 #define S_SUBPRTH    11
@@ -15574,6 +33302,182 @@
 #define V_PORTL(x) ((x) << S_PORTL)
 #define G_PORTL(x) (((x) >> S_PORTL) & M_PORTL)
 
+#define A_MPS_TX_PAD_CTL 0x945c
+
+#define S_LPBKPADENPT3    7
+#define V_LPBKPADENPT3(x) ((x) << S_LPBKPADENPT3)
+#define F_LPBKPADENPT3    V_LPBKPADENPT3(1U)
+
+#define S_LPBKPADENPT2    6
+#define V_LPBKPADENPT2(x) ((x) << S_LPBKPADENPT2)
+#define F_LPBKPADENPT2    V_LPBKPADENPT2(1U)
+
+#define S_LPBKPADENPT1    5
+#define V_LPBKPADENPT1(x) ((x) << S_LPBKPADENPT1)
+#define F_LPBKPADENPT1    V_LPBKPADENPT1(1U)
+
+#define S_LPBKPADENPT0    4
+#define V_LPBKPADENPT0(x) ((x) << S_LPBKPADENPT0)
+#define F_LPBKPADENPT0    V_LPBKPADENPT0(1U)
+
+#define S_MACPADENPT3    3
+#define V_MACPADENPT3(x) ((x) << S_MACPADENPT3)
+#define F_MACPADENPT3    V_MACPADENPT3(1U)
+
+#define S_MACPADENPT2    2
+#define V_MACPADENPT2(x) ((x) << S_MACPADENPT2)
+#define F_MACPADENPT2    V_MACPADENPT2(1U)
+
+#define S_MACPADENPT1    1
+#define V_MACPADENPT1(x) ((x) << S_MACPADENPT1)
+#define F_MACPADENPT1    V_MACPADENPT1(1U)
+
+#define S_MACPADENPT0    0
+#define V_MACPADENPT0(x) ((x) << S_MACPADENPT0)
+#define F_MACPADENPT0    V_MACPADENPT0(1U)
+
+#define A_MPS_TX_PFVF_PORT_DROP_TP 0x9460
+
+#define S_TP2MPS_CH3    24
+#define M_TP2MPS_CH3    0xffU
+#define V_TP2MPS_CH3(x) ((x) << S_TP2MPS_CH3)
+#define G_TP2MPS_CH3(x) (((x) >> S_TP2MPS_CH3) & M_TP2MPS_CH3)
+
+#define S_TP2MPS_CH2    16
+#define M_TP2MPS_CH2    0xffU
+#define V_TP2MPS_CH2(x) ((x) << S_TP2MPS_CH2)
+#define G_TP2MPS_CH2(x) (((x) >> S_TP2MPS_CH2) & M_TP2MPS_CH2)
+
+#define S_TP2MPS_CH1    8
+#define M_TP2MPS_CH1    0xffU
+#define V_TP2MPS_CH1(x) ((x) << S_TP2MPS_CH1)
+#define G_TP2MPS_CH1(x) (((x) >> S_TP2MPS_CH1) & M_TP2MPS_CH1)
+
+#define S_TP2MPS_CH0    0
+#define M_TP2MPS_CH0    0xffU
+#define V_TP2MPS_CH0(x) ((x) << S_TP2MPS_CH0)
+#define G_TP2MPS_CH0(x) (((x) >> S_TP2MPS_CH0) & M_TP2MPS_CH0)
+
+#define A_MPS_TX_PFVF_PORT_DROP_NCSI 0x9464
+
+#define S_NCSI_CH4    0
+#define M_NCSI_CH4    0xffU
+#define V_NCSI_CH4(x) ((x) << S_NCSI_CH4)
+#define G_NCSI_CH4(x) (((x) >> S_NCSI_CH4) & M_NCSI_CH4)
+
+#define A_MPS_TX_PFVF_PORT_DROP_CTL 0x9468
+
+#define S_PFNOVFDROP    5
+#define V_PFNOVFDROP(x) ((x) << S_PFNOVFDROP)
+#define F_PFNOVFDROP    V_PFNOVFDROP(1U)
+
+#define S_NCSI_CH4_CLR    4
+#define V_NCSI_CH4_CLR(x) ((x) << S_NCSI_CH4_CLR)
+#define F_NCSI_CH4_CLR    V_NCSI_CH4_CLR(1U)
+
+#define S_TP2MPS_CH3_CLR    3
+#define V_TP2MPS_CH3_CLR(x) ((x) << S_TP2MPS_CH3_CLR)
+#define F_TP2MPS_CH3_CLR    V_TP2MPS_CH3_CLR(1U)
+
+#define S_TP2MPS_CH2_CLR    2
+#define V_TP2MPS_CH2_CLR(x) ((x) << S_TP2MPS_CH2_CLR)
+#define F_TP2MPS_CH2_CLR    V_TP2MPS_CH2_CLR(1U)
+
+#define S_TP2MPS_CH1_CLR    1
+#define V_TP2MPS_CH1_CLR(x) ((x) << S_TP2MPS_CH1_CLR)
+#define F_TP2MPS_CH1_CLR    V_TP2MPS_CH1_CLR(1U)
+
+#define S_TP2MPS_CH0_CLR    0
+#define V_TP2MPS_CH0_CLR(x) ((x) << S_TP2MPS_CH0_CLR)
+#define F_TP2MPS_CH0_CLR    V_TP2MPS_CH0_CLR(1U)
+
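The per-channel TP-to-MPS drop counts in A_MPS_TX_PFVF_PORT_DROP_TP pair with per-channel clear strobes in A_MPS_TX_PFVF_PORT_DROP_CTL. A sketch of snapshotting and clearing channel 0, where the write-to-clear behaviour of the _CLR bits is inferred from their names:

    uint32_t drops = G_TP2MPS_CH0(t4_read_reg(sc,
        A_MPS_TX_PFVF_PORT_DROP_TP));

    /* Assumed: strobing the _CLR bit resets the channel 0 counter. */
    t4_write_reg(sc, A_MPS_TX_PFVF_PORT_DROP_CTL, F_TP2MPS_CH0_CLR);
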
+#define A_MPS_TX_CGEN 0x946c
+
+#define S_TXOUTLPBK3_CGEN    31
+#define V_TXOUTLPBK3_CGEN(x) ((x) << S_TXOUTLPBK3_CGEN)
+#define F_TXOUTLPBK3_CGEN    V_TXOUTLPBK3_CGEN(1U)
+
+#define S_TXOUTLPBK2_CGEN    30
+#define V_TXOUTLPBK2_CGEN(x) ((x) << S_TXOUTLPBK2_CGEN)
+#define F_TXOUTLPBK2_CGEN    V_TXOUTLPBK2_CGEN(1U)
+
+#define S_TXOUTLPBK1_CGEN    29
+#define V_TXOUTLPBK1_CGEN(x) ((x) << S_TXOUTLPBK1_CGEN)
+#define F_TXOUTLPBK1_CGEN    V_TXOUTLPBK1_CGEN(1U)
+
+#define S_TXOUTLPBK0_CGEN    28
+#define V_TXOUTLPBK0_CGEN(x) ((x) << S_TXOUTLPBK0_CGEN)
+#define F_TXOUTLPBK0_CGEN    V_TXOUTLPBK0_CGEN(1U)
+
+#define S_TXOUTMAC3_CGEN    27
+#define V_TXOUTMAC3_CGEN(x) ((x) << S_TXOUTMAC3_CGEN)
+#define F_TXOUTMAC3_CGEN    V_TXOUTMAC3_CGEN(1U)
+
+#define S_TXOUTMAC2_CGEN    26
+#define V_TXOUTMAC2_CGEN(x) ((x) << S_TXOUTMAC2_CGEN)
+#define F_TXOUTMAC2_CGEN    V_TXOUTMAC2_CGEN(1U)
+
+#define S_TXOUTMAC1_CGEN    25
+#define V_TXOUTMAC1_CGEN(x) ((x) << S_TXOUTMAC1_CGEN)
+#define F_TXOUTMAC1_CGEN    V_TXOUTMAC1_CGEN(1U)
+
+#define S_TXOUTMAC0_CGEN    24
+#define V_TXOUTMAC0_CGEN(x) ((x) << S_TXOUTMAC0_CGEN)
+#define F_TXOUTMAC0_CGEN    V_TXOUTMAC0_CGEN(1U)
+
+#define S_TXSCHLPBK3_CGEN    23
+#define V_TXSCHLPBK3_CGEN(x) ((x) << S_TXSCHLPBK3_CGEN)
+#define F_TXSCHLPBK3_CGEN    V_TXSCHLPBK3_CGEN(1U)
+
+#define S_TXSCHLPBK2_CGEN    22
+#define V_TXSCHLPBK2_CGEN(x) ((x) << S_TXSCHLPBK2_CGEN)
+#define F_TXSCHLPBK2_CGEN    V_TXSCHLPBK2_CGEN(1U)
+
+#define S_TXSCHLPBK1_CGEN    21
+#define V_TXSCHLPBK1_CGEN(x) ((x) << S_TXSCHLPBK1_CGEN)
+#define F_TXSCHLPBK1_CGEN    V_TXSCHLPBK1_CGEN(1U)
+
+#define S_TXSCHLPBK0_CGEN    20
+#define V_TXSCHLPBK0_CGEN(x) ((x) << S_TXSCHLPBK0_CGEN)
+#define F_TXSCHLPBK0_CGEN    V_TXSCHLPBK0_CGEN(1U)
+
+#define S_TXSCHMAC3_CGEN    19
+#define V_TXSCHMAC3_CGEN(x) ((x) << S_TXSCHMAC3_CGEN)
+#define F_TXSCHMAC3_CGEN    V_TXSCHMAC3_CGEN(1U)
+
+#define S_TXSCHMAC2_CGEN    18
+#define V_TXSCHMAC2_CGEN(x) ((x) << S_TXSCHMAC2_CGEN)
+#define F_TXSCHMAC2_CGEN    V_TXSCHMAC2_CGEN(1U)
+
+#define S_TXSCHMAC1_CGEN    17
+#define V_TXSCHMAC1_CGEN(x) ((x) << S_TXSCHMAC1_CGEN)
+#define F_TXSCHMAC1_CGEN    V_TXSCHMAC1_CGEN(1U)
+
+#define S_TXSCHMAC0_CGEN    16
+#define V_TXSCHMAC0_CGEN(x) ((x) << S_TXSCHMAC0_CGEN)
+#define F_TXSCHMAC0_CGEN    V_TXSCHMAC0_CGEN(1U)
+
+#define S_TXINCH4_CGEN    15
+#define V_TXINCH4_CGEN(x) ((x) << S_TXINCH4_CGEN)
+#define F_TXINCH4_CGEN    V_TXINCH4_CGEN(1U)
+
+#define S_TXINCH3_CGEN    14
+#define V_TXINCH3_CGEN(x) ((x) << S_TXINCH3_CGEN)
+#define F_TXINCH3_CGEN    V_TXINCH3_CGEN(1U)
+
+#define S_TXINCH2_CGEN    13
+#define V_TXINCH2_CGEN(x) ((x) << S_TXINCH2_CGEN)
+#define F_TXINCH2_CGEN    V_TXINCH2_CGEN(1U)
+
+#define S_TXINCH1_CGEN    12
+#define V_TXINCH1_CGEN(x) ((x) << S_TXINCH1_CGEN)
+#define F_TXINCH1_CGEN    V_TXINCH1_CGEN(1U)
+
+#define S_TXINCH0_CGEN    11
+#define V_TXINCH0_CGEN(x) ((x) << S_TXINCH0_CGEN)
+#define F_TXINCH0_CGEN    V_TXINCH0_CGEN(1U)
+
+#define A_MPS_TX_CGEN_DYNAMIC 0x9470
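
A note on the conventions in these generated headers: for a field FOO, S_FOO is its bit offset, M_FOO is the unshifted mask of a multi-bit field, V_FOO(x) shifts a value into position, G_FOO(x) extracts it, and F_FOO is the one-bit flag form V_FOO(1U). A minimal standalone sketch, assuming only that the field macros above are in scope:

	/* Standalone illustration of the S_/M_/V_/F_/G_ accessor pattern. */
	#include <assert.h>
	#include <stdint.h>

	static void
	field_macro_demo(void)
	{
		uint32_t v;

		v = V_TXCH(0x5) | F_PFNOVFDROP;		/* compose fields */
		assert(G_TXCH(v) == 0x5);		/* extract a multi-bit field */
		assert((v & F_PFNOVFDROP) != 0);	/* test a single-bit flag */
	}
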
 #define A_MPS_STAT_CTL 0x9600
 
 #define S_COUNTVFINPF    1
@@ -15584,6 +33488,42 @@
 #define V_LPBKERRSTAT(x) ((x) << S_LPBKERRSTAT)
 #define F_LPBKERRSTAT    V_LPBKERRSTAT(1U)
 
+#define S_STATSTOPCTRL    10
+#define V_STATSTOPCTRL(x) ((x) << S_STATSTOPCTRL)
+#define F_STATSTOPCTRL    V_STATSTOPCTRL(1U)
+
+#define S_STOPSTAT    9
+#define V_STOPSTAT(x) ((x) << S_STOPSTAT)
+#define F_STOPSTAT    V_STOPSTAT(1U)
+
+#define S_STATWRITECTRL    8
+#define V_STATWRITECTRL(x) ((x) << S_STATWRITECTRL)
+#define F_STATWRITECTRL    V_STATWRITECTRL(1U)
+
+#define S_COUNTLBPF    7
+#define V_COUNTLBPF(x) ((x) << S_COUNTLBPF)
+#define F_COUNTLBPF    V_COUNTLBPF(1U)
+
+#define S_COUNTLBVF    6
+#define V_COUNTLBVF(x) ((x) << S_COUNTLBVF)
+#define F_COUNTLBVF    V_COUNTLBVF(1U)
+
+#define S_COUNTPAUSEMCRX    5
+#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX)
+#define F_COUNTPAUSEMCRX    V_COUNTPAUSEMCRX(1U)
+
+#define S_COUNTPAUSESTATRX    4
+#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX)
+#define F_COUNTPAUSESTATRX    V_COUNTPAUSESTATRX(1U)
+
+#define S_COUNTPAUSEMCTX    3
+#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX)
+#define F_COUNTPAUSEMCTX    V_COUNTPAUSEMCTX(1U)
+
+#define S_COUNTPAUSESTATTX    2
+#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
+#define F_COUNTPAUSESTATTX    V_COUNTPAUSESTATTX(1U)
+
 #define A_MPS_STAT_INT_ENABLE 0x9608
 
 #define S_PLREADSYNCERR    0
@@ -15632,6 +33572,36 @@
 #define V_TXPORT(x) ((x) << S_TXPORT)
 #define G_TXPORT(x) (((x) >> S_TXPORT) & M_TXPORT)
 
+#define S_T5_RXBG    27
+#define M_T5_RXBG    0x3U
+#define V_T5_RXBG(x) ((x) << S_T5_RXBG)
+#define G_T5_RXBG(x) (((x) >> S_T5_RXBG) & M_T5_RXBG)
+
+#define S_T5_RXPF    22
+#define M_T5_RXPF    0x1fU
+#define V_T5_RXPF(x) ((x) << S_T5_RXPF)
+#define G_T5_RXPF(x) (((x) >> S_T5_RXPF) & M_T5_RXPF)
+
+#define S_T5_TXPF    18
+#define M_T5_TXPF    0xfU
+#define V_T5_TXPF(x) ((x) << S_T5_TXPF)
+#define G_T5_TXPF(x) (((x) >> S_T5_TXPF) & M_T5_TXPF)
+
+#define S_T5_RXPORT    11
+#define M_T5_RXPORT    0x7fU
+#define V_T5_RXPORT(x) ((x) << S_T5_RXPORT)
+#define G_T5_RXPORT(x) (((x) >> S_T5_RXPORT) & M_T5_RXPORT)
+
+#define S_T5_LBPORT    6
+#define M_T5_LBPORT    0x1fU
+#define V_T5_LBPORT(x) ((x) << S_T5_LBPORT)
+#define G_T5_LBPORT(x) (((x) >> S_T5_LBPORT) & M_T5_LBPORT)
+
+#define S_T5_TXPORT    0
+#define M_T5_TXPORT    0x3fU
+#define V_T5_TXPORT(x) ((x) << S_T5_TXPORT)
+#define G_T5_TXPORT(x) (((x) >> S_T5_TXPORT) & M_T5_TXPORT)
+
 #define A_MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
 #define A_MPS_STAT_PERR_ENABLE_SRAM 0x9618
 #define A_MPS_STAT_PERR_INT_ENABLE_TX_FIFO 0x961c
@@ -15651,6 +33621,11 @@
 #define V_DROP(x) ((x) << S_DROP)
 #define G_DROP(x) (((x) >> S_DROP) & M_DROP)
 
+#define S_TXCH    20
+#define M_TXCH    0xfU
+#define V_TXCH(x) ((x) << S_TXCH)
+#define G_TXCH(x) (((x) >> S_TXCH) & M_TXCH)
+
 #define A_MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
 #define A_MPS_STAT_PERR_ENABLE_TX_FIFO 0x9624
 #define A_MPS_STAT_PERR_INT_ENABLE_RX_FIFO 0x9628
@@ -15690,6 +33665,22 @@
 #define G_STATMEMSEL(x) (((x) >> S_STATMEMSEL) & M_STATMEMSEL)
 
 #define A_MPS_STAT_DEBUG_SUB_SEL 0x9638
+
+#define S_STATSSUBPRTH    5
+#define M_STATSSUBPRTH    0x1fU
+#define V_STATSSUBPRTH(x) ((x) << S_STATSSUBPRTH)
+#define G_STATSSUBPRTH(x) (((x) >> S_STATSSUBPRTH) & M_STATSSUBPRTH)
+
+#define S_STATSSUBPRTL    0
+#define M_STATSSUBPRTL    0x1fU
+#define V_STATSSUBPRTL(x) ((x) << S_STATSSUBPRTL)
+#define G_STATSSUBPRTL(x) (((x) >> S_STATSSUBPRTL) & M_STATSSUBPRTL)
+
+#define S_STATSUBPRTH    5
+#define M_STATSUBPRTH    0x1fU
+#define V_STATSUBPRTH(x) ((x) << S_STATSUBPRTH)
+#define G_STATSUBPRTH(x) (((x) >> S_STATSUBPRTH) & M_STATSUBPRTH)
+
 #define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
 #define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
 #define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
@@ -15722,6 +33713,72 @@
 #define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
 #define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
 #define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define A_MPS_STAT_PERR_INT_ENABLE_SRAM1 0x96c0
+
+#define S_T5_RXVF    5
+#define M_T5_RXVF    0x7U
+#define V_T5_RXVF(x) ((x) << S_T5_RXVF)
+#define G_T5_RXVF(x) (((x) >> S_T5_RXVF) & M_T5_RXVF)
+
+#define S_T5_TXVF    0
+#define M_T5_TXVF    0x1fU
+#define V_T5_TXVF(x) ((x) << S_T5_TXVF)
+#define G_T5_TXVF(x) (((x) >> S_T5_TXVF) & M_T5_TXVF)
+
+#define A_MPS_STAT_PERR_INT_CAUSE_SRAM1 0x96c4
+#define A_MPS_STAT_PERR_ENABLE_SRAM1 0x96c8
+#define A_MPS_STAT_STOP_UPD_BG 0x96cc
+
+#define S_BGRX    0
+#define M_BGRX    0xfU
+#define V_BGRX(x) ((x) << S_BGRX)
+#define G_BGRX(x) (((x) >> S_BGRX) & M_BGRX)
+
+#define A_MPS_STAT_STOP_UPD_PORT 0x96d0
+
+#define S_PTLPBK    8
+#define M_PTLPBK    0xfU
+#define V_PTLPBK(x) ((x) << S_PTLPBK)
+#define G_PTLPBK(x) (((x) >> S_PTLPBK) & M_PTLPBK)
+
+#define S_PTTX    4
+#define M_PTTX    0xfU
+#define V_PTTX(x) ((x) << S_PTTX)
+#define G_PTTX(x) (((x) >> S_PTTX) & M_PTTX)
+
+#define S_PTRX    0
+#define M_PTRX    0xfU
+#define V_PTRX(x) ((x) << S_PTRX)
+#define G_PTRX(x) (((x) >> S_PTRX) & M_PTRX)
+
+#define A_MPS_STAT_STOP_UPD_PF 0x96d4
+
+#define S_PFTX    8
+#define M_PFTX    0xffU
+#define V_PFTX(x) ((x) << S_PFTX)
+#define G_PFTX(x) (((x) >> S_PFTX) & M_PFTX)
+
+#define S_PFRX    0
+#define M_PFRX    0xffU
+#define V_PFRX(x) ((x) << S_PFRX)
+#define G_PFRX(x) (((x) >> S_PFRX) & M_PFRX)
+
+#define A_MPS_STAT_STOP_UPD_TX_VF_0_31 0x96d8
+#define A_MPS_STAT_STOP_UPD_TX_VF_32_63 0x96dc
+#define A_MPS_STAT_STOP_UPD_TX_VF_64_95 0x96e0
+#define A_MPS_STAT_STOP_UPD_TX_VF_96_127 0x96e4
+#define A_MPS_STAT_STOP_UPD_RX_VF_0_31 0x96e8
+#define A_MPS_STAT_STOP_UPD_RX_VF_32_63 0x96ec
+#define A_MPS_STAT_STOP_UPD_RX_VF_64_95 0x96f0
+#define A_MPS_STAT_STOP_UPD_RX_VF_96_127 0x96f4
+#define A_MPS_STAT_STOP_UPD_RX_VF_128_159 0x96f8
+#define A_MPS_STAT_STOP_UPD_RX_VF_160_191 0x96fc
+#define A_MPS_STAT_STOP_UPD_RX_VF_192_223 0x9700
+#define A_MPS_STAT_STOP_UPD_RX_VF_224_255 0x9704
+#define A_MPS_STAT_STOP_UPD_TX_VF_128_159 0x9710
+#define A_MPS_STAT_STOP_UPD_TX_VF_160_191 0x9714
+#define A_MPS_STAT_STOP_UPD_TX_VF_192_223 0x9718
+#define A_MPS_STAT_STOP_UPD_TX_VF_224_255 0x971c
 #define A_MPS_TRC_CFG 0x9800
 
 #define S_TRCFIFOEMPTY    4
@@ -15744,7 +33801,12 @@
 #define V_TRCMULTIFILTER(x) ((x) << S_TRCMULTIFILTER)
 #define F_TRCMULTIFILTER    V_TRCMULTIFILTER(1U)
 
+#define S_TRCMULTIRSSFILTER    5
+#define V_TRCMULTIRSSFILTER(x) ((x) << S_TRCMULTIRSSFILTER)
+#define F_TRCMULTIRSSFILTER    V_TRCMULTIRSSFILTER(1U)
+
 #define A_MPS_TRC_RSS_HASH 0x9804
+#define A_MPS_TRC_FILTER0_RSS_HASH 0x9804
 #define A_MPS_TRC_RSS_CONTROL 0x9808
 
 #define S_RSSCONTROL    16
@@ -15757,6 +33819,7 @@
 #define V_QUEUENUMBER(x) ((x) << S_QUEUENUMBER)
 #define G_QUEUENUMBER(x) (((x) >> S_QUEUENUMBER) & M_QUEUENUMBER)
 
+#define A_MPS_TRC_FILTER0_RSS_CONTROL 0x9808
 #define A_MPS_TRC_FILTER_MATCH_CTL_A 0x9810
 
 #define S_TFINVERTMATCH    24
@@ -15794,6 +33857,31 @@
 #define V_TFOFFSET(x) ((x) << S_TFOFFSET)
 #define G_TFOFFSET(x) (((x) >> S_TFOFFSET) & M_TFOFFSET)
 
+#define S_TFINSERTACTLEN    27
+#define V_TFINSERTACTLEN(x) ((x) << S_TFINSERTACTLEN)
+#define F_TFINSERTACTLEN    V_TFINSERTACTLEN(1U)
+
+#define S_TFINSERTTIMER    26
+#define V_TFINSERTTIMER(x) ((x) << S_TFINSERTTIMER)
+#define F_TFINSERTTIMER    V_TFINSERTTIMER(1U)
+
+#define S_T5_TFINVERTMATCH    25
+#define V_T5_TFINVERTMATCH(x) ((x) << S_T5_TFINVERTMATCH)
+#define F_T5_TFINVERTMATCH    V_T5_TFINVERTMATCH(1U)
+
+#define S_T5_TFPKTTOOLARGE    24
+#define V_T5_TFPKTTOOLARGE(x) ((x) << S_T5_TFPKTTOOLARGE)
+#define F_T5_TFPKTTOOLARGE    V_T5_TFPKTTOOLARGE(1U)
+
+#define S_T5_TFEN    23
+#define V_T5_TFEN(x) ((x) << S_T5_TFEN)
+#define F_T5_TFEN    V_T5_TFEN(1U)
+
+#define S_T5_TFPORT    18
+#define M_T5_TFPORT    0x1fU
+#define V_T5_TFPORT(x) ((x) << S_T5_TFPORT)
+#define G_T5_TFPORT(x) (((x) >> S_T5_TFPORT) & M_T5_TFPORT)
+
 #define A_MPS_TRC_FILTER_MATCH_CTL_B 0x9820
 
 #define S_TFMINPKTSIZE    16
@@ -15865,6 +33953,219 @@
 #define A_MPS_TRC_FILTER2_DONT_CARE 0x9e80
 #define A_MPS_TRC_FILTER3_MATCH 0x9f00
 #define A_MPS_TRC_FILTER3_DONT_CARE 0x9f80
+#define A_MPS_TRC_FILTER1_RSS_HASH 0x9ff0
+#define A_MPS_TRC_FILTER1_RSS_CONTROL 0x9ff4
+#define A_MPS_TRC_FILTER2_RSS_HASH 0x9ff8
+#define A_MPS_TRC_FILTER2_RSS_CONTROL 0x9ffc
+#define A_MPS_TRC_FILTER3_RSS_HASH 0xa000
+#define A_MPS_TRC_FILTER3_RSS_CONTROL 0xa004
+#define A_MPS_T5_TRC_RSS_HASH 0xa008
+#define A_MPS_T5_TRC_RSS_CONTROL 0xa00c
+#define A_MPS_TRC_VF_OFF_FILTER_0 0xa010
+
+#define S_TRCMPS2TP_MACONLY    20
+#define V_TRCMPS2TP_MACONLY(x) ((x) << S_TRCMPS2TP_MACONLY)
+#define F_TRCMPS2TP_MACONLY    V_TRCMPS2TP_MACONLY(1U)
+
+#define S_TRCALLMPS2TP    19
+#define V_TRCALLMPS2TP(x) ((x) << S_TRCALLMPS2TP)
+#define F_TRCALLMPS2TP    V_TRCALLMPS2TP(1U)
+
+#define S_TRCALLTP2MPS    18
+#define V_TRCALLTP2MPS(x) ((x) << S_TRCALLTP2MPS)
+#define F_TRCALLTP2MPS    V_TRCALLTP2MPS(1U)
+
+#define S_TRCALLVF    17
+#define V_TRCALLVF(x) ((x) << S_TRCALLVF)
+#define F_TRCALLVF    V_TRCALLVF(1U)
+
+#define S_TRC_OFLD_EN    16
+#define V_TRC_OFLD_EN(x) ((x) << S_TRC_OFLD_EN)
+#define F_TRC_OFLD_EN    V_TRC_OFLD_EN(1U)
+
+#define S_VFFILTEN    15
+#define V_VFFILTEN(x) ((x) << S_VFFILTEN)
+#define F_VFFILTEN    V_VFFILTEN(1U)
+
+#define S_VFFILTMASK    8
+#define M_VFFILTMASK    0x7fU
+#define V_VFFILTMASK(x) ((x) << S_VFFILTMASK)
+#define G_VFFILTMASK(x) (((x) >> S_VFFILTMASK) & M_VFFILTMASK)
+
+#define S_VFFILTVALID    7
+#define V_VFFILTVALID(x) ((x) << S_VFFILTVALID)
+#define F_VFFILTVALID    V_VFFILTVALID(1U)
+
+#define S_VFFILTDATA    0
+#define M_VFFILTDATA    0x7fU
+#define V_VFFILTDATA(x) ((x) << S_VFFILTDATA)
+#define G_VFFILTDATA(x) (((x) >> S_VFFILTDATA) & M_VFFILTDATA)
+
+#define S_T6_TRCMPS2TP_MACONLY    22
+#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
+#define F_T6_TRCMPS2TP_MACONLY    V_T6_TRCMPS2TP_MACONLY(1U)
+
+#define S_T6_TRCALLMPS2TP    21
+#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
+#define F_T6_TRCALLMPS2TP    V_T6_TRCALLMPS2TP(1U)
+
+#define S_T6_TRCALLTP2MPS    20
+#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
+#define F_T6_TRCALLTP2MPS    V_T6_TRCALLTP2MPS(1U)
+
+#define S_T6_TRCALLVF    19
+#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
+#define F_T6_TRCALLVF    V_T6_TRCALLVF(1U)
+
+#define S_T6_TRC_OFLD_EN    18
+#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
+#define F_T6_TRC_OFLD_EN    V_T6_TRC_OFLD_EN(1U)
+
+#define S_T6_VFFILTEN    17
+#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
+#define F_T6_VFFILTEN    V_T6_VFFILTEN(1U)
+
+#define S_T6_VFFILTMASK    9
+#define M_T6_VFFILTMASK    0xffU
+#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
+#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
+
+#define S_T6_VFFILTVALID    8
+#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
+#define F_T6_VFFILTVALID    V_T6_VFFILTVALID(1U)
+
+#define S_T6_VFFILTDATA    0
+#define M_T6_VFFILTDATA    0xffU
+#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
+#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
+
+#define A_MPS_TRC_VF_OFF_FILTER_1 0xa014
+
+#define S_T6_TRCMPS2TP_MACONLY    22
+#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
+#define F_T6_TRCMPS2TP_MACONLY    V_T6_TRCMPS2TP_MACONLY(1U)
+
+#define S_T6_TRCALLMPS2TP    21
+#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
+#define F_T6_TRCALLMPS2TP    V_T6_TRCALLMPS2TP(1U)
+
+#define S_T6_TRCALLTP2MPS    20
+#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
+#define F_T6_TRCALLTP2MPS    V_T6_TRCALLTP2MPS(1U)
+
+#define S_T6_TRCALLVF    19
+#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
+#define F_T6_TRCALLVF    V_T6_TRCALLVF(1U)
+
+#define S_T6_TRC_OFLD_EN    18
+#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
+#define F_T6_TRC_OFLD_EN    V_T6_TRC_OFLD_EN(1U)
+
+#define S_T6_VFFILTEN    17
+#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
+#define F_T6_VFFILTEN    V_T6_VFFILTEN(1U)
+
+#define S_T6_VFFILTMASK    9
+#define M_T6_VFFILTMASK    0xffU
+#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
+#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
+
+#define S_T6_VFFILTVALID    8
+#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
+#define F_T6_VFFILTVALID    V_T6_VFFILTVALID(1U)
+
+#define S_T6_VFFILTDATA    0
+#define M_T6_VFFILTDATA    0xffU
+#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
+#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
+
+#define A_MPS_TRC_VF_OFF_FILTER_2 0xa018
+
+#define S_T6_TRCMPS2TP_MACONLY    22
+#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
+#define F_T6_TRCMPS2TP_MACONLY    V_T6_TRCMPS2TP_MACONLY(1U)
+
+#define S_T6_TRCALLMPS2TP    21
+#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
+#define F_T6_TRCALLMPS2TP    V_T6_TRCALLMPS2TP(1U)
+
+#define S_T6_TRCALLTP2MPS    20
+#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
+#define F_T6_TRCALLTP2MPS    V_T6_TRCALLTP2MPS(1U)
+
+#define S_T6_TRCALLVF    19
+#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
+#define F_T6_TRCALLVF    V_T6_TRCALLVF(1U)
+
+#define S_T6_TRC_OFLD_EN    18
+#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
+#define F_T6_TRC_OFLD_EN    V_T6_TRC_OFLD_EN(1U)
+
+#define S_T6_VFFILTEN    17
+#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
+#define F_T6_VFFILTEN    V_T6_VFFILTEN(1U)
+
+#define S_T6_VFFILTMASK    9
+#define M_T6_VFFILTMASK    0xffU
+#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
+#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
+
+#define S_T6_VFFILTVALID    8
+#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
+#define F_T6_VFFILTVALID    V_T6_VFFILTVALID(1U)
+
+#define S_T6_VFFILTDATA    0
+#define M_T6_VFFILTDATA    0xffU
+#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
+#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
+
+#define A_MPS_TRC_VF_OFF_FILTER_3 0xa01c
+
+#define S_T6_TRCMPS2TP_MACONLY    22
+#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
+#define F_T6_TRCMPS2TP_MACONLY    V_T6_TRCMPS2TP_MACONLY(1U)
+
+#define S_T6_TRCALLMPS2TP    21
+#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
+#define F_T6_TRCALLMPS2TP    V_T6_TRCALLMPS2TP(1U)
+
+#define S_T6_TRCALLTP2MPS    20
+#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
+#define F_T6_TRCALLTP2MPS    V_T6_TRCALLTP2MPS(1U)
+
+#define S_T6_TRCALLVF    19
+#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
+#define F_T6_TRCALLVF    V_T6_TRCALLVF(1U)
+
+#define S_T6_TRC_OFLD_EN    18
+#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
+#define F_T6_TRC_OFLD_EN    V_T6_TRC_OFLD_EN(1U)
+
+#define S_T6_VFFILTEN    17
+#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
+#define F_T6_VFFILTEN    V_T6_VFFILTEN(1U)
+
+#define S_T6_VFFILTMASK    9
+#define M_T6_VFFILTMASK    0xffU
+#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
+#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
+
+#define S_T6_VFFILTVALID    8
+#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
+#define F_T6_VFFILTVALID    V_T6_VFFILTVALID(1U)
+
+#define S_T6_VFFILTDATA    0
+#define M_T6_VFFILTDATA    0xffU
+#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
+#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
+
+#define A_MPS_TRC_CGEN 0xa020
+
+#define S_MPSTRCCGEN    0
+#define M_MPSTRCCGEN    0xfU
+#define V_MPSTRCCGEN(x) ((x) << S_MPSTRCCGEN)
+#define G_MPSTRCCGEN(x) (((x) >> S_MPSTRCCGEN) & M_MPSTRCCGEN)
+
 #define A_MPS_CLS_CTL 0xd000
 
 #define S_MEMWRITEFAULT    4
@@ -15887,6 +34188,18 @@
 #define V_VLANCLSEN(x) ((x) << S_VLANCLSEN)
 #define F_VLANCLSEN    V_VLANCLSEN(1U)
 
+#define S_VLANCLSEN_IN    7
+#define V_VLANCLSEN_IN(x) ((x) << S_VLANCLSEN_IN)
+#define F_VLANCLSEN_IN    V_VLANCLSEN_IN(1U)
+
+#define S_DISTCAMPARCHK    6
+#define V_DISTCAMPARCHK(x) ((x) << S_DISTCAMPARCHK)
+#define F_DISTCAMPARCHK    V_DISTCAMPARCHK(1U)
+
+#define S_VLANLKPEN    5
+#define V_VLANLKPEN(x) ((x) << S_VLANLKPEN)
+#define F_VLANLKPEN    V_VLANLKPEN(1U)
+
 #define A_MPS_CLS_ARB_WEIGHT 0xd004
 
 #define S_PLWEIGHT    16
@@ -15904,6 +34217,8 @@
 #define V_LPBKWEIGHT(x) ((x) << S_LPBKWEIGHT)
 #define G_LPBKWEIGHT(x) (((x) >> S_LPBKWEIGHT) & M_LPBKWEIGHT)
 
+#define A_MPS_CLS_NCSI_ETH_TYPE 0xd008
+#define A_MPS_CLS_NCSI_ETH_TYPE_EN 0xd00c
 #define A_MPS_CLS_BMC_MAC_ADDR_L 0xd010
 #define A_MPS_CLS_BMC_MAC_ADDR_H 0xd014
 #define A_MPS_CLS_BMC_VLAN 0xd018
@@ -15972,6 +34287,30 @@
 #define V_CLS_MATCH(x) ((x) << S_CLS_MATCH)
 #define G_CLS_MATCH(x) (((x) >> S_CLS_MATCH) & M_CLS_MATCH)
 
+#define S_CLS_SPARE    28
+#define M_CLS_SPARE    0xfU
+#define V_CLS_SPARE(x) ((x) << S_CLS_SPARE)
+#define G_CLS_SPARE(x) (((x) >> S_CLS_SPARE) & M_CLS_SPARE)
+
+#define S_T6_CLS_PRIORITY    25
+#define M_T6_CLS_PRIORITY    0x7U
+#define V_T6_CLS_PRIORITY(x) ((x) << S_T6_CLS_PRIORITY)
+#define G_T6_CLS_PRIORITY(x) (((x) >> S_T6_CLS_PRIORITY) & M_T6_CLS_PRIORITY)
+
+#define S_T6_CLS_REPLICATE    24
+#define V_T6_CLS_REPLICATE(x) ((x) << S_T6_CLS_REPLICATE)
+#define F_T6_CLS_REPLICATE    V_T6_CLS_REPLICATE(1U)
+
+#define S_T6_CLS_INDEX    15
+#define M_T6_CLS_INDEX    0x1ffU
+#define V_T6_CLS_INDEX(x) ((x) << S_T6_CLS_INDEX)
+#define G_T6_CLS_INDEX(x) (((x) >> S_T6_CLS_INDEX) & M_T6_CLS_INDEX)
+
+#define S_T6_CLS_VF    7
+#define M_T6_CLS_VF    0xffU
+#define V_T6_CLS_VF(x) ((x) << S_T6_CLS_VF)
+#define G_T6_CLS_VF(x) (((x) >> S_T6_CLS_VF) & M_T6_CLS_VF)
+
 #define A_MPS_CLS_PL_TEST_CTL 0xd038
 
 #define S_PLTESTCTL    0
@@ -15984,6 +34323,108 @@
 #define V_PRTBMCCTL(x) ((x) << S_PRTBMCCTL)
 #define F_PRTBMCCTL    V_PRTBMCCTL(1U)
 
+#define A_MPS_CLS_MATCH_CNT_TCAM 0xd100
+#define A_MPS_CLS_MATCH_CNT_HASH 0xd104
+#define A_MPS_CLS_MATCH_CNT_BCAST 0xd108
+#define A_MPS_CLS_MATCH_CNT_BMC 0xd10c
+#define A_MPS_CLS_MATCH_CNT_PROM 0xd110
+#define A_MPS_CLS_MATCH_CNT_HPROM 0xd114
+#define A_MPS_CLS_MISS_CNT 0xd118
+#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_L 0xd200
+#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_H 0xd204
+
+#define S_CLSTRCMACDAHI    0
+#define M_CLSTRCMACDAHI    0xffffU
+#define V_CLSTRCMACDAHI(x) ((x) << S_CLSTRCMACDAHI)
+#define G_CLSTRCMACDAHI(x) (((x) >> S_CLSTRCMACDAHI) & M_CLSTRCMACDAHI)
+
+#define A_MPS_CLS_REQUEST_TRACE_MAC_SA_L 0xd208
+#define A_MPS_CLS_REQUEST_TRACE_MAC_SA_H 0xd20c
+
+#define S_CLSTRCMACSAHI    0
+#define M_CLSTRCMACSAHI    0xffffU
+#define V_CLSTRCMACSAHI(x) ((x) << S_CLSTRCMACSAHI)
+#define G_CLSTRCMACSAHI(x) (((x) >> S_CLSTRCMACSAHI) & M_CLSTRCMACSAHI)
+
+#define A_MPS_CLS_REQUEST_TRACE_PORT_VLAN 0xd210
+
+#define S_CLSTRCVLANVLD    31
+#define V_CLSTRCVLANVLD(x) ((x) << S_CLSTRCVLANVLD)
+#define F_CLSTRCVLANVLD    V_CLSTRCVLANVLD(1U)
+
+#define S_CLSTRCVLANID    16
+#define M_CLSTRCVLANID    0xfffU
+#define V_CLSTRCVLANID(x) ((x) << S_CLSTRCVLANID)
+#define G_CLSTRCVLANID(x) (((x) >> S_CLSTRCVLANID) & M_CLSTRCVLANID)
+
+#define S_CLSTRCREQPORT    0
+#define M_CLSTRCREQPORT    0xfU
+#define V_CLSTRCREQPORT(x) ((x) << S_CLSTRCREQPORT)
+#define G_CLSTRCREQPORT(x) (((x) >> S_CLSTRCREQPORT) & M_CLSTRCREQPORT)
+
+#define A_MPS_CLS_REQUEST_TRACE_ENCAP 0xd214
+
+#define S_CLSTRCLKPTYPE    31
+#define V_CLSTRCLKPTYPE(x) ((x) << S_CLSTRCLKPTYPE)
+#define F_CLSTRCLKPTYPE    V_CLSTRCLKPTYPE(1U)
+
+#define S_CLSTRCDIPHIT    30
+#define V_CLSTRCDIPHIT(x) ((x) << S_CLSTRCDIPHIT)
+#define F_CLSTRCDIPHIT    V_CLSTRCDIPHIT(1U)
+
+#define S_CLSTRCVNI    0
+#define M_CLSTRCVNI    0xffffffU
+#define V_CLSTRCVNI(x) ((x) << S_CLSTRCVNI)
+#define G_CLSTRCVNI(x) (((x) >> S_CLSTRCVNI) & M_CLSTRCVNI)
+
+#define A_MPS_CLS_RESULT_TRACE 0xd300
+
+#define S_CLSTRCPORTNUM    31
+#define V_CLSTRCPORTNUM(x) ((x) << S_CLSTRCPORTNUM)
+#define F_CLSTRCPORTNUM    V_CLSTRCPORTNUM(1U)
+
+#define S_CLSTRCPRIORITY    28
+#define M_CLSTRCPRIORITY    0x7U
+#define V_CLSTRCPRIORITY(x) ((x) << S_CLSTRCPRIORITY)
+#define G_CLSTRCPRIORITY(x) (((x) >> S_CLSTRCPRIORITY) & M_CLSTRCPRIORITY)
+
+#define S_CLSTRCMULTILISTEN    27
+#define V_CLSTRCMULTILISTEN(x) ((x) << S_CLSTRCMULTILISTEN)
+#define F_CLSTRCMULTILISTEN    V_CLSTRCMULTILISTEN(1U)
+
+#define S_CLSTRCREPLICATE    26
+#define V_CLSTRCREPLICATE(x) ((x) << S_CLSTRCREPLICATE)
+#define F_CLSTRCREPLICATE    V_CLSTRCREPLICATE(1U)
+
+#define S_CLSTRCPORTMAP    24
+#define M_CLSTRCPORTMAP    0x3U
+#define V_CLSTRCPORTMAP(x) ((x) << S_CLSTRCPORTMAP)
+#define G_CLSTRCPORTMAP(x) (((x) >> S_CLSTRCPORTMAP) & M_CLSTRCPORTMAP)
+
+#define S_CLSTRCMATCH    21
+#define M_CLSTRCMATCH    0x7U
+#define V_CLSTRCMATCH(x) ((x) << S_CLSTRCMATCH)
+#define G_CLSTRCMATCH(x) (((x) >> S_CLSTRCMATCH) & M_CLSTRCMATCH)
+
+#define S_CLSTRCINDEX    12
+#define M_CLSTRCINDEX    0x1ffU
+#define V_CLSTRCINDEX(x) ((x) << S_CLSTRCINDEX)
+#define G_CLSTRCINDEX(x) (((x) >> S_CLSTRCINDEX) & M_CLSTRCINDEX)
+
+#define S_CLSTRCVF_VLD    11
+#define V_CLSTRCVF_VLD(x) ((x) << S_CLSTRCVF_VLD)
+#define F_CLSTRCVF_VLD    V_CLSTRCVF_VLD(1U)
+
+#define S_CLSTRCPF    3
+#define M_CLSTRCPF    0xffU
+#define V_CLSTRCPF(x) ((x) << S_CLSTRCPF)
+#define G_CLSTRCPF(x) (((x) >> S_CLSTRCPF) & M_CLSTRCPF)
+
+#define S_CLSTRCVF    0
+#define M_CLSTRCVF    0x7U
+#define V_CLSTRCVF(x) ((x) << S_CLSTRCVF)
+#define G_CLSTRCVF(x) (((x) >> S_CLSTRCVF) & M_CLSTRCVF)
+
 #define A_MPS_CLS_VLAN_TABLE 0xdfc0
 
 #define S_VLAN_MASK    16
@@ -16042,6 +34483,74 @@
 #define V_SRAM_VLD(x) ((x) << S_SRAM_VLD)
 #define F_SRAM_VLD    V_SRAM_VLD(1U)
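
The T5_/T6_ prefixed duplicates that follow (here and throughout this update) re-describe fields whose position or width changed between chip revisions, and several revision-specific registers alias older addresses outright (A_MPS_T5_CLS_SRAM_L at 0xe000, for example). Code has to choose the variant for the chip it is driving; a hedged sketch, assuming the chip_id()/CHELSIO_T5 helpers used elsewhere in the driver rather than any specific routine in this commit:

	/*
	 * Hedged sketch: select the revision-specific accessor at run time.
	 * The SRAM valid bit moved to bit 13 on T6 (F_T6_SRAM_VLD).
	 */
	static bool
	cls_sram_entry_valid(struct adapter *sc, uint32_t sram_l)
	{
		if (chip_id(sc) > CHELSIO_T5)
			return ((sram_l & F_T6_SRAM_VLD) != 0);
		return ((sram_l & F_SRAM_VLD) != 0);
	}
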
 
+#define A_MPS_T5_CLS_SRAM_L 0xe000
+
+#define S_T6_DISENCAPOUTERRPLCT    31
+#define V_T6_DISENCAPOUTERRPLCT(x) ((x) << S_T6_DISENCAPOUTERRPLCT)
+#define F_T6_DISENCAPOUTERRPLCT    V_T6_DISENCAPOUTERRPLCT(1U)
+
+#define S_T6_DISENCAP    30
+#define V_T6_DISENCAP(x) ((x) << S_T6_DISENCAP)
+#define F_T6_DISENCAP    V_T6_DISENCAP(1U)
+
+#define S_T6_MULTILISTEN3    29
+#define V_T6_MULTILISTEN3(x) ((x) << S_T6_MULTILISTEN3)
+#define F_T6_MULTILISTEN3    V_T6_MULTILISTEN3(1U)
+
+#define S_T6_MULTILISTEN2    28
+#define V_T6_MULTILISTEN2(x) ((x) << S_T6_MULTILISTEN2)
+#define F_T6_MULTILISTEN2    V_T6_MULTILISTEN2(1U)
+
+#define S_T6_MULTILISTEN1    27
+#define V_T6_MULTILISTEN1(x) ((x) << S_T6_MULTILISTEN1)
+#define F_T6_MULTILISTEN1    V_T6_MULTILISTEN1(1U)
+
+#define S_T6_MULTILISTEN0    26
+#define V_T6_MULTILISTEN0(x) ((x) << S_T6_MULTILISTEN0)
+#define F_T6_MULTILISTEN0    V_T6_MULTILISTEN0(1U)
+
+#define S_T6_SRAM_PRIO3    23
+#define M_T6_SRAM_PRIO3    0x7U
+#define V_T6_SRAM_PRIO3(x) ((x) << S_T6_SRAM_PRIO3)
+#define G_T6_SRAM_PRIO3(x) (((x) >> S_T6_SRAM_PRIO3) & M_T6_SRAM_PRIO3)
+
+#define S_T6_SRAM_PRIO2    20
+#define M_T6_SRAM_PRIO2    0x7U
+#define V_T6_SRAM_PRIO2(x) ((x) << S_T6_SRAM_PRIO2)
+#define G_T6_SRAM_PRIO2(x) (((x) >> S_T6_SRAM_PRIO2) & M_T6_SRAM_PRIO2)
+
+#define S_T6_SRAM_PRIO1    17
+#define M_T6_SRAM_PRIO1    0x7U
+#define V_T6_SRAM_PRIO1(x) ((x) << S_T6_SRAM_PRIO1)
+#define G_T6_SRAM_PRIO1(x) (((x) >> S_T6_SRAM_PRIO1) & M_T6_SRAM_PRIO1)
+
+#define S_T6_SRAM_PRIO0    14
+#define M_T6_SRAM_PRIO0    0x7U
+#define V_T6_SRAM_PRIO0(x) ((x) << S_T6_SRAM_PRIO0)
+#define G_T6_SRAM_PRIO0(x) (((x) >> S_T6_SRAM_PRIO0) & M_T6_SRAM_PRIO0)
+
+#define S_T6_SRAM_VLD    13
+#define V_T6_SRAM_VLD(x) ((x) << S_T6_SRAM_VLD)
+#define F_T6_SRAM_VLD    V_T6_SRAM_VLD(1U)
+
+#define S_T6_REPLICATE    12
+#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
+#define F_T6_REPLICATE    V_T6_REPLICATE(1U)
+
+#define S_T6_PF    9
+#define M_T6_PF    0x7U
+#define V_T6_PF(x) ((x) << S_T6_PF)
+#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
+
+#define S_T6_VF_VALID    8
+#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
+#define F_T6_VF_VALID    V_T6_VF_VALID(1U)
+
+#define S_T6_VF    0
+#define M_T6_VF    0xffU
+#define V_T6_VF(x) ((x) << S_T6_VF)
+#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
+
 #define A_MPS_CLS_SRAM_H 0xe004
 
 #define S_MACPARITY1    9
@@ -16062,7 +34571,14 @@
 #define V_PORTMAP(x) ((x) << S_PORTMAP)
 #define G_PORTMAP(x) (((x) >> S_PORTMAP) & M_PORTMAP)
 
+#define A_MPS_T5_CLS_SRAM_H 0xe004
+
+#define S_MACPARITY2    10
+#define V_MACPARITY2(x) ((x) << S_MACPARITY2)
+#define F_MACPARITY2    V_MACPARITY2(1U)
+
 #define A_MPS_CLS_TCAM_Y_L 0xf000
+#define A_MPS_CLS_TCAM_DATA0 0xf000
 #define A_MPS_CLS_TCAM_Y_H 0xf004
 
 #define S_TCAMYH    0
@@ -16070,7 +34586,65 @@
 #define V_TCAMYH(x) ((x) << S_TCAMYH)
 #define G_TCAMYH(x) (((x) >> S_TCAMYH) & M_TCAMYH)
 
+#define A_MPS_CLS_TCAM_DATA1 0xf004
+
+#define S_VIDL    16
+#define M_VIDL    0xffffU
+#define V_VIDL(x) ((x) << S_VIDL)
+#define G_VIDL(x) (((x) >> S_VIDL) & M_VIDL)
+
+#define S_DMACH    0
+#define M_DMACH    0xffffU
+#define V_DMACH(x) ((x) << S_DMACH)
+#define G_DMACH(x) (((x) >> S_DMACH) & M_DMACH)
+
 #define A_MPS_CLS_TCAM_X_L 0xf008
+#define A_MPS_CLS_TCAM_DATA2_CTL 0xf008
+
+#define S_CTLCMDTYPE    31
+#define V_CTLCMDTYPE(x) ((x) << S_CTLCMDTYPE)
+#define F_CTLCMDTYPE    V_CTLCMDTYPE(1U)
+
+#define S_CTLREQID    30
+#define V_CTLREQID(x) ((x) << S_CTLREQID)
+#define F_CTLREQID    V_CTLREQID(1U)
+
+#define S_CTLTCAMSEL    25
+#define V_CTLTCAMSEL(x) ((x) << S_CTLTCAMSEL)
+#define F_CTLTCAMSEL    V_CTLTCAMSEL(1U)
+
+#define S_CTLTCAMINDEX    17
+#define M_CTLTCAMINDEX    0xffU
+#define V_CTLTCAMINDEX(x) ((x) << S_CTLTCAMINDEX)
+#define G_CTLTCAMINDEX(x) (((x) >> S_CTLTCAMINDEX) & M_CTLTCAMINDEX)
+
+#define S_CTLXYBITSEL    16
+#define V_CTLXYBITSEL(x) ((x) << S_CTLXYBITSEL)
+#define F_CTLXYBITSEL    V_CTLXYBITSEL(1U)
+
+#define S_DATAPORTNUM    12
+#define M_DATAPORTNUM    0xfU
+#define V_DATAPORTNUM(x) ((x) << S_DATAPORTNUM)
+#define G_DATAPORTNUM(x) (((x) >> S_DATAPORTNUM) & M_DATAPORTNUM)
+
+#define S_DATALKPTYPE    10
+#define M_DATALKPTYPE    0x3U
+#define V_DATALKPTYPE(x) ((x) << S_DATALKPTYPE)
+#define G_DATALKPTYPE(x) (((x) >> S_DATALKPTYPE) & M_DATALKPTYPE)
+
+#define S_DATADIPHIT    8
+#define V_DATADIPHIT(x) ((x) << S_DATADIPHIT)
+#define F_DATADIPHIT    V_DATADIPHIT(1U)
+
+#define S_DATAVIDH2    7
+#define V_DATAVIDH2(x) ((x) << S_DATAVIDH2)
+#define F_DATAVIDH2    V_DATAVIDH2(1U)
+
+#define S_DATAVIDH1    0
+#define M_DATAVIDH1    0x7fU
+#define V_DATAVIDH1(x) ((x) << S_DATAVIDH1)
+#define G_DATAVIDH1(x) (((x) >> S_DATAVIDH1) & M_DATAVIDH1)
+
 #define A_MPS_CLS_TCAM_X_H 0xf00c
 
 #define S_TCAMXH    0
@@ -16078,6 +34652,12 @@
 #define V_TCAMXH(x) ((x) << S_TCAMXH)
 #define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH)
 
+#define A_MPS_CLS_TCAM_RDATA0_REQ_ID0 0xf010
+#define A_MPS_CLS_TCAM_RDATA1_REQ_ID0 0xf014
+#define A_MPS_CLS_TCAM_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1 0xf020
+#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1 0xf024
+#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1 0xf028
 #define A_MPS_RX_CTL 0x11000
 
 #define S_FILT_VLAN_SEL    17
@@ -16136,7 +34716,15 @@
 #define V_CNT(x) ((x) << S_CNT)
 #define G_CNT(x) (((x) >> S_CNT) & M_CNT)
 
+#define A_MPS_RX_FIFO_0_CTL 0x11008
+
+#define S_DEST_SELECT    0
+#define M_DEST_SELECT    0xfU
+#define V_DEST_SELECT(x) ((x) << S_DEST_SELECT)
+#define G_DEST_SELECT(x) (((x) >> S_DEST_SELECT) & M_DEST_SELECT)
+
 #define A_MPS_RX_PKT_FL 0x1100c
+#define A_MPS_RX_FIFO_1_CTL 0x1100c
 #define A_MPS_RX_PG_RSV0 0x11010
 
 #define S_CLR_INTR    31
@@ -16157,7 +34745,19 @@
 #define V_ALLOC(x) ((x) << S_ALLOC)
 #define G_ALLOC(x) (((x) >> S_ALLOC) & M_ALLOC)
 
+#define S_T5_USED    16
+#define M_T5_USED    0xfffU
+#define V_T5_USED(x) ((x) << S_T5_USED)
+#define G_T5_USED(x) (((x) >> S_T5_USED) & M_T5_USED)
+
+#define S_T5_ALLOC    0
+#define M_T5_ALLOC    0xfffU
+#define V_T5_ALLOC(x) ((x) << S_T5_ALLOC)
+#define G_T5_ALLOC(x) (((x) >> S_T5_ALLOC) & M_T5_ALLOC)
+
+#define A_MPS_RX_FIFO_2_CTL 0x11010
 #define A_MPS_RX_PG_RSV1 0x11014
+#define A_MPS_RX_FIFO_3_CTL 0x11014
 #define A_MPS_RX_PG_RSV2 0x11018
 #define A_MPS_RX_PG_RSV3 0x1101c
 #define A_MPS_RX_PG_RSV4 0x11020
@@ -16184,6 +34784,16 @@
 #define V_BORW(x) ((x) << S_BORW)
 #define G_BORW(x) (((x) >> S_BORW) & M_BORW)
 
+#define S_T5_MAX    16
+#define M_T5_MAX    0xfffU
+#define V_T5_MAX(x) ((x) << S_T5_MAX)
+#define G_T5_MAX(x) (((x) >> S_T5_MAX) & M_T5_MAX)
+
+#define S_T5_BORW    0
+#define M_T5_BORW    0xfffU
+#define V_T5_BORW(x) ((x) << S_T5_BORW)
+#define G_T5_BORW(x) (((x) >> S_T5_BORW) & M_T5_BORW)
+
 #define A_MPS_RX_PG_SHR_BG1 0x11034
 #define A_MPS_RX_PG_SHR_BG2 0x11038
 #define A_MPS_RX_PG_SHR_BG3 0x1103c
@@ -16199,6 +34809,16 @@
 #define V_SHR_USED(x) ((x) << S_SHR_USED)
 #define G_SHR_USED(x) (((x) >> S_SHR_USED) & M_SHR_USED)
 
+#define S_T5_QUOTA    16
+#define M_T5_QUOTA    0xfffU
+#define V_T5_QUOTA(x) ((x) << S_T5_QUOTA)
+#define G_T5_QUOTA(x) (((x) >> S_T5_QUOTA) & M_T5_QUOTA)
+
+#define S_T5_SHR_USED    0
+#define M_T5_SHR_USED    0xfffU
+#define V_T5_SHR_USED(x) ((x) << S_T5_SHR_USED)
+#define G_T5_SHR_USED(x) (((x) >> S_T5_SHR_USED) & M_T5_SHR_USED)
+
 #define A_MPS_RX_PG_SHR1 0x11044
 #define A_MPS_RX_PG_HYST_BG0 0x11048
 
@@ -16207,6 +34827,16 @@
 #define V_TH(x) ((x) << S_TH)
 #define G_TH(x) (((x) >> S_TH) & M_TH)
 
+#define S_T5_TH    0
+#define M_T5_TH    0xfffU
+#define V_T5_TH(x) ((x) << S_T5_TH)
+#define G_T5_TH(x) (((x) >> S_T5_TH) & M_T5_TH)
+
+#define S_T6_TH    0
+#define M_T6_TH    0x7ffU
+#define V_T6_TH(x) ((x) << S_T6_TH)
+#define G_T6_TH(x) (((x) >> S_T6_TH) & M_T6_TH)
+
 #define A_MPS_RX_PG_HYST_BG1 0x1104c
 #define A_MPS_RX_PG_HYST_BG2 0x11050
 #define A_MPS_RX_PG_HYST_BG3 0x11054
@@ -16353,8 +34983,22 @@
 #define V_CDM0(x) ((x) << S_CDM0)
 #define F_CDM0    V_CDM0(1U)
 
+#define S_T6_INT_ERR_INT    24
+#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
+#define F_T6_INT_ERR_INT    V_T6_INT_ERR_INT(1U)
+
 #define A_MPS_RX_PERR_INT_ENABLE 0x11078
+
+#define S_T6_INT_ERR_INT    24
+#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
+#define F_T6_INT_ERR_INT    V_T6_INT_ERR_INT(1U)
+
 #define A_MPS_RX_PERR_ENABLE 0x1107c
+
+#define S_T6_INT_ERR_INT    24
+#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
+#define F_T6_INT_ERR_INT    V_T6_INT_ERR_INT(1U)
+
 #define A_MPS_RX_PERR_INJECT 0x11080
 #define A_MPS_RX_FUNC_INT_CAUSE 0x11084
 
@@ -16395,6 +35039,34 @@
 #define V_PG_TH_INT0(x) ((x) << S_PG_TH_INT0)
 #define F_PG_TH_INT0    V_PG_TH_INT0(1U)
 
+#define S_MTU_ERR_INT3    19
+#define V_MTU_ERR_INT3(x) ((x) << S_MTU_ERR_INT3)
+#define F_MTU_ERR_INT3    V_MTU_ERR_INT3(1U)
+
+#define S_MTU_ERR_INT2    18
+#define V_MTU_ERR_INT2(x) ((x) << S_MTU_ERR_INT2)
+#define F_MTU_ERR_INT2    V_MTU_ERR_INT2(1U)
+
+#define S_MTU_ERR_INT1    17
+#define V_MTU_ERR_INT1(x) ((x) << S_MTU_ERR_INT1)
+#define F_MTU_ERR_INT1    V_MTU_ERR_INT1(1U)
+
+#define S_MTU_ERR_INT0    16
+#define V_MTU_ERR_INT0(x) ((x) << S_MTU_ERR_INT0)
+#define F_MTU_ERR_INT0    V_MTU_ERR_INT0(1U)
+
+#define S_SE_CNT_ERR_INT    15
+#define V_SE_CNT_ERR_INT(x) ((x) << S_SE_CNT_ERR_INT)
+#define F_SE_CNT_ERR_INT    V_SE_CNT_ERR_INT(1U)
+
+#define S_FRM_ERR_INT    14
+#define V_FRM_ERR_INT(x) ((x) << S_FRM_ERR_INT)
+#define F_FRM_ERR_INT    V_FRM_ERR_INT(1U)
+
+#define S_LEN_ERR_INT    13
+#define V_LEN_ERR_INT(x) ((x) << S_LEN_ERR_INT)
+#define F_LEN_ERR_INT    V_LEN_ERR_INT(1U)
+
 #define A_MPS_RX_FUNC_INT_ENABLE 0x11088
 #define A_MPS_RX_PAUSE_GEN_TH_0 0x1108c
 
@@ -16411,6 +35083,12 @@
 #define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
 #define A_MPS_RX_PAUSE_GEN_TH_2 0x11094
 #define A_MPS_RX_PAUSE_GEN_TH_3 0x11098
+#define A_MPS_RX_REPL_CTL 0x11098
+
+#define S_INDEX_SEL    0
+#define V_INDEX_SEL(x) ((x) << S_INDEX_SEL)
+#define F_INDEX_SEL    V_INDEX_SEL(1U)
+
 #define A_MPS_RX_PPP_ATRB 0x1109c
 
 #define S_ETYPE    16
@@ -16446,7 +35124,9 @@
 #define A_MPS_RX_PT_ARB1 0x110ac
 #define A_MPS_RX_PT_ARB2 0x110b0
 #define A_MPS_RX_PT_ARB3 0x110b4
+#define A_T6_MPS_PF_OUT_EN 0x110b4
 #define A_MPS_RX_PT_ARB4 0x110b8
+#define A_T6_MPS_BMC_MTU 0x110b8
 #define A_MPS_PF_OUT_EN 0x110bc
 
 #define S_OUTEN    0
@@ -16454,6 +35134,7 @@
 #define V_OUTEN(x) ((x) << S_OUTEN)
 #define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN)
 
+#define A_T6_MPS_BMC_PKT_CNT 0x110bc
 #define A_MPS_BMC_MTU 0x110c0
 
 #define S_MTU    0
@@ -16461,8 +35142,22 @@
 #define V_MTU(x) ((x) << S_MTU)
 #define G_MTU(x) (((x) >> S_MTU) & M_MTU)
 
+#define A_T6_MPS_BMC_BYTE_CNT 0x110c0
 #define A_MPS_BMC_PKT_CNT 0x110c4
+#define A_T6_MPS_PFVF_ATRB_CTL 0x110c4
+
+#define S_T6_PFVF    0
+#define M_T6_PFVF    0x1ffU
+#define V_T6_PFVF(x) ((x) << S_T6_PFVF)
+#define G_T6_PFVF(x) (((x) >> S_T6_PFVF) & M_T6_PFVF)
+
 #define A_MPS_BMC_BYTE_CNT 0x110c8
+#define A_T6_MPS_PFVF_ATRB 0x110c8
+
+#define S_FULL_FRAME_MODE    14
+#define V_FULL_FRAME_MODE(x) ((x) << S_FULL_FRAME_MODE)
+#define F_FULL_FRAME_MODE    V_FULL_FRAME_MODE(1U)
+
 #define A_MPS_PFVF_ATRB_CTL 0x110cc
 
 #define S_RD_WRN    31
@@ -16474,6 +35169,7 @@
 #define V_PFVF(x) ((x) << S_PFVF)
 #define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF)
 
+#define A_T6_MPS_PFVF_ATRB_FLTR0 0x110cc
 #define A_MPS_PFVF_ATRB 0x110d0
 
 #define S_ATTR_PF    28
@@ -16493,6 +35189,7 @@
 #define V_ATTR_MODE(x) ((x) << S_ATTR_MODE)
 #define F_ATTR_MODE    V_ATTR_MODE(1U)
 
+#define A_T6_MPS_PFVF_ATRB_FLTR1 0x110d0
 #define A_MPS_PFVF_ATRB_FLTR0 0x110d4
 
 #define S_VLAN_EN    16
@@ -16504,21 +35201,37 @@
 #define V_VLAN_ID(x) ((x) << S_VLAN_ID)
 #define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID)
 
+#define A_T6_MPS_PFVF_ATRB_FLTR2 0x110d4
 #define A_MPS_PFVF_ATRB_FLTR1 0x110d8
+#define A_T6_MPS_PFVF_ATRB_FLTR3 0x110d8
 #define A_MPS_PFVF_ATRB_FLTR2 0x110dc
+#define A_T6_MPS_PFVF_ATRB_FLTR4 0x110dc
 #define A_MPS_PFVF_ATRB_FLTR3 0x110e0
+#define A_T6_MPS_PFVF_ATRB_FLTR5 0x110e0
 #define A_MPS_PFVF_ATRB_FLTR4 0x110e4
+#define A_T6_MPS_PFVF_ATRB_FLTR6 0x110e4
 #define A_MPS_PFVF_ATRB_FLTR5 0x110e8
+#define A_T6_MPS_PFVF_ATRB_FLTR7 0x110e8
 #define A_MPS_PFVF_ATRB_FLTR6 0x110ec
+#define A_T6_MPS_PFVF_ATRB_FLTR8 0x110ec
 #define A_MPS_PFVF_ATRB_FLTR7 0x110f0
+#define A_T6_MPS_PFVF_ATRB_FLTR9 0x110f0
 #define A_MPS_PFVF_ATRB_FLTR8 0x110f4
+#define A_T6_MPS_PFVF_ATRB_FLTR10 0x110f4
 #define A_MPS_PFVF_ATRB_FLTR9 0x110f8
+#define A_T6_MPS_PFVF_ATRB_FLTR11 0x110f8
 #define A_MPS_PFVF_ATRB_FLTR10 0x110fc
+#define A_T6_MPS_PFVF_ATRB_FLTR12 0x110fc
 #define A_MPS_PFVF_ATRB_FLTR11 0x11100
+#define A_T6_MPS_PFVF_ATRB_FLTR13 0x11100
 #define A_MPS_PFVF_ATRB_FLTR12 0x11104
+#define A_T6_MPS_PFVF_ATRB_FLTR14 0x11104
 #define A_MPS_PFVF_ATRB_FLTR13 0x11108
+#define A_T6_MPS_PFVF_ATRB_FLTR15 0x11108
 #define A_MPS_PFVF_ATRB_FLTR14 0x1110c
+#define A_T6_MPS_RPLC_MAP_CTL 0x1110c
 #define A_MPS_PFVF_ATRB_FLTR15 0x11110
+#define A_T6_MPS_PF_RPLCT_MAP 0x11110
 #define A_MPS_RPLC_MAP_CTL 0x11114
 
 #define S_RPLC_MAP_ADDR    0
@@ -16526,6 +35239,7 @@
 #define V_RPLC_MAP_ADDR(x) ((x) << S_RPLC_MAP_ADDR)
 #define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR)
 
+#define A_T6_MPS_VF_RPLCT_MAP0 0x11114
 #define A_MPS_PF_RPLCT_MAP 0x11118
 
 #define S_PF_EN    0
@@ -16533,8 +35247,11 @@
 #define V_PF_EN(x) ((x) << S_PF_EN)
 #define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN)
 
+#define A_T6_MPS_VF_RPLCT_MAP1 0x11118
 #define A_MPS_VF_RPLCT_MAP0 0x1111c
+#define A_T6_MPS_VF_RPLCT_MAP2 0x1111c
 #define A_MPS_VF_RPLCT_MAP1 0x11120
+#define A_T6_MPS_VF_RPLCT_MAP3 0x11120
 #define A_MPS_VF_RPLCT_MAP2 0x11124
 #define A_MPS_VF_RPLCT_MAP3 0x11128
 #define A_MPS_MEM_DBG_CTL 0x1112c
@@ -16752,7 +35469,427 @@
 #define G_MAC_CNT3(x) (((x) >> S_MAC_CNT3) & M_MAC_CNT3)
 
 #define A_MPS_RX_SPARE 0x11190
+#define A_MPS_RX_PTP_ETYPE 0x11194
 
+#define S_PETYPE2    16
+#define M_PETYPE2    0xffffU
+#define V_PETYPE2(x) ((x) << S_PETYPE2)
+#define G_PETYPE2(x) (((x) >> S_PETYPE2) & M_PETYPE2)
+
+#define S_PETYPE1    0
+#define M_PETYPE1    0xffffU
+#define V_PETYPE1(x) ((x) << S_PETYPE1)
+#define G_PETYPE1(x) (((x) >> S_PETYPE1) & M_PETYPE1)
+
+#define A_MPS_RX_PTP_TCP 0x11198
+
+#define S_PTCPORT2    16
+#define M_PTCPORT2    0xffffU
+#define V_PTCPORT2(x) ((x) << S_PTCPORT2)
+#define G_PTCPORT2(x) (((x) >> S_PTCPORT2) & M_PTCPORT2)
+
+#define S_PTCPORT1    0
+#define M_PTCPORT1    0xffffU
+#define V_PTCPORT1(x) ((x) << S_PTCPORT1)
+#define G_PTCPORT1(x) (((x) >> S_PTCPORT1) & M_PTCPORT1)
+
+#define A_MPS_RX_PTP_UDP 0x1119c
+
+#define S_PUDPORT2    16
+#define M_PUDPORT2    0xffffU
+#define V_PUDPORT2(x) ((x) << S_PUDPORT2)
+#define G_PUDPORT2(x) (((x) >> S_PUDPORT2) & M_PUDPORT2)
+
+#define S_PUDPORT1    0
+#define M_PUDPORT1    0xffffU
+#define V_PUDPORT1(x) ((x) << S_PUDPORT1)
+#define G_PUDPORT1(x) (((x) >> S_PUDPORT1) & M_PUDPORT1)
+
+#define A_MPS_RX_PTP_CTL 0x111a0
+
+#define S_MIN_PTP_SPACE    24
+#define M_MIN_PTP_SPACE    0x7fU
+#define V_MIN_PTP_SPACE(x) ((x) << S_MIN_PTP_SPACE)
+#define G_MIN_PTP_SPACE(x) (((x) >> S_MIN_PTP_SPACE) & M_MIN_PTP_SPACE)
+
+#define S_PUDP2EN    20
+#define M_PUDP2EN    0xfU
+#define V_PUDP2EN(x) ((x) << S_PUDP2EN)
+#define G_PUDP2EN(x) (((x) >> S_PUDP2EN) & M_PUDP2EN)
+
+#define S_PUDP1EN    16
+#define M_PUDP1EN    0xfU
+#define V_PUDP1EN(x) ((x) << S_PUDP1EN)
+#define G_PUDP1EN(x) (((x) >> S_PUDP1EN) & M_PUDP1EN)
+
+#define S_PTCP2EN    12
+#define M_PTCP2EN    0xfU
+#define V_PTCP2EN(x) ((x) << S_PTCP2EN)
+#define G_PTCP2EN(x) (((x) >> S_PTCP2EN) & M_PTCP2EN)
+
+#define S_PTCP1EN    8
+#define M_PTCP1EN    0xfU
+#define V_PTCP1EN(x) ((x) << S_PTCP1EN)
+#define G_PTCP1EN(x) (((x) >> S_PTCP1EN) & M_PTCP1EN)
+
+#define S_PETYPE2EN    4
+#define M_PETYPE2EN    0xfU
+#define V_PETYPE2EN(x) ((x) << S_PETYPE2EN)
+#define G_PETYPE2EN(x) (((x) >> S_PETYPE2EN) & M_PETYPE2EN)
+
+#define S_PETYPE1EN    0
+#define M_PETYPE1EN    0xfU
+#define V_PETYPE1EN(x) ((x) << S_PETYPE1EN)
+#define G_PETYPE1EN(x) (((x) >> S_PETYPE1EN) & M_PETYPE1EN)
+
+#define A_MPS_RX_PAUSE_GEN_TH_0_0 0x111a4
+#define A_MPS_RX_PAUSE_GEN_TH_0_1 0x111a8
+#define A_MPS_RX_PAUSE_GEN_TH_0_2 0x111ac
+#define A_MPS_RX_PAUSE_GEN_TH_0_3 0x111b0
+#define A_MPS_RX_PAUSE_GEN_TH_1_0 0x111b4
+#define A_MPS_RX_PAUSE_GEN_TH_1_1 0x111b8
+#define A_MPS_RX_PAUSE_GEN_TH_1_2 0x111bc
+#define A_MPS_RX_PAUSE_GEN_TH_1_3 0x111c0
+#define A_MPS_RX_PAUSE_GEN_TH_2_0 0x111c4
+#define A_MPS_RX_PAUSE_GEN_TH_2_1 0x111c8
+#define A_MPS_RX_PAUSE_GEN_TH_2_2 0x111cc
+#define A_MPS_RX_PAUSE_GEN_TH_2_3 0x111d0
+#define A_MPS_RX_PAUSE_GEN_TH_3_0 0x111d4
+#define A_MPS_RX_PAUSE_GEN_TH_3_1 0x111d8
+#define A_MPS_RX_PAUSE_GEN_TH_3_2 0x111dc
+#define A_MPS_RX_PAUSE_GEN_TH_3_3 0x111e0
+#define A_MPS_RX_MAC_CLS_DROP_CNT0 0x111e4
+#define A_MPS_RX_MAC_CLS_DROP_CNT1 0x111e8
+#define A_MPS_RX_MAC_CLS_DROP_CNT2 0x111ec
+#define A_MPS_RX_MAC_CLS_DROP_CNT3 0x111f0
+#define A_MPS_RX_LPBK_CLS_DROP_CNT0 0x111f4
+#define A_MPS_RX_LPBK_CLS_DROP_CNT1 0x111f8
+#define A_MPS_RX_LPBK_CLS_DROP_CNT2 0x111fc
+#define A_MPS_RX_LPBK_CLS_DROP_CNT3 0x11200
+#define A_MPS_RX_CGEN 0x11204
+
+#define S_MPS_RX_CGEN_NCSI    12
+#define V_MPS_RX_CGEN_NCSI(x) ((x) << S_MPS_RX_CGEN_NCSI)
+#define F_MPS_RX_CGEN_NCSI    V_MPS_RX_CGEN_NCSI(1U)
+
+#define S_MPS_RX_CGEN_OUT    8
+#define M_MPS_RX_CGEN_OUT    0xfU
+#define V_MPS_RX_CGEN_OUT(x) ((x) << S_MPS_RX_CGEN_OUT)
+#define G_MPS_RX_CGEN_OUT(x) (((x) >> S_MPS_RX_CGEN_OUT) & M_MPS_RX_CGEN_OUT)
+
+#define S_MPS_RX_CGEN_LPBK_IN    4
+#define M_MPS_RX_CGEN_LPBK_IN    0xfU
+#define V_MPS_RX_CGEN_LPBK_IN(x) ((x) << S_MPS_RX_CGEN_LPBK_IN)
+#define G_MPS_RX_CGEN_LPBK_IN(x) (((x) >> S_MPS_RX_CGEN_LPBK_IN) & M_MPS_RX_CGEN_LPBK_IN)
+
+#define S_MPS_RX_CGEN_MAC_IN    0
+#define M_MPS_RX_CGEN_MAC_IN    0xfU
+#define V_MPS_RX_CGEN_MAC_IN(x) ((x) << S_MPS_RX_CGEN_MAC_IN)
+#define G_MPS_RX_CGEN_MAC_IN(x) (((x) >> S_MPS_RX_CGEN_MAC_IN) & M_MPS_RX_CGEN_MAC_IN)
+
+#define A_MPS_RX_MAC_BG_PG_CNT0 0x11208
+
+#define S_MAC_USED    16
+#define M_MAC_USED    0x7ffU
+#define V_MAC_USED(x) ((x) << S_MAC_USED)
+#define G_MAC_USED(x) (((x) >> S_MAC_USED) & M_MAC_USED)
+
+#define S_MAC_ALLOC    0
+#define M_MAC_ALLOC    0x7ffU
+#define V_MAC_ALLOC(x) ((x) << S_MAC_ALLOC)
+#define G_MAC_ALLOC(x) (((x) >> S_MAC_ALLOC) & M_MAC_ALLOC)
+
+#define A_MPS_RX_MAC_BG_PG_CNT1 0x1120c
+#define A_MPS_RX_MAC_BG_PG_CNT2 0x11210
+#define A_MPS_RX_MAC_BG_PG_CNT3 0x11214
+#define A_MPS_RX_LPBK_BG_PG_CNT0 0x11218
+
+#define S_LPBK_USED    16
+#define M_LPBK_USED    0x7ffU
+#define V_LPBK_USED(x) ((x) << S_LPBK_USED)
+#define G_LPBK_USED(x) (((x) >> S_LPBK_USED) & M_LPBK_USED)
+
+#define S_LPBK_ALLOC    0
+#define M_LPBK_ALLOC    0x7ffU
+#define V_LPBK_ALLOC(x) ((x) << S_LPBK_ALLOC)
+#define G_LPBK_ALLOC(x) (((x) >> S_LPBK_ALLOC) & M_LPBK_ALLOC)
+
+#define A_MPS_RX_LPBK_BG_PG_CNT1 0x1121c
+#define A_MPS_RX_CONGESTION_THRESHOLD_BG0 0x11220
+
+#define S_CONG_EN    31
+#define V_CONG_EN(x) ((x) << S_CONG_EN)
+#define F_CONG_EN    V_CONG_EN(1U)
+
+#define S_CONG_TH    0
+#define M_CONG_TH    0xfffffU
+#define V_CONG_TH(x) ((x) << S_CONG_TH)
+#define G_CONG_TH(x) (((x) >> S_CONG_TH) & M_CONG_TH)
+
+#define A_MPS_RX_CONGESTION_THRESHOLD_BG1 0x11224
+#define A_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11228
+#define A_MPS_RX_CONGESTION_THRESHOLD_BG3 0x1122c
+#define A_MPS_RX_GRE_PROT_TYPE 0x11230
+
+#define S_NVGRE_EN    9
+#define V_NVGRE_EN(x) ((x) << S_NVGRE_EN)
+#define F_NVGRE_EN    V_NVGRE_EN(1U)
+
+#define S_GRE_EN    8
+#define V_GRE_EN(x) ((x) << S_GRE_EN)
+#define F_GRE_EN    V_GRE_EN(1U)
+
+#define S_GRE    0
+#define M_GRE    0xffU
+#define V_GRE(x) ((x) << S_GRE)
+#define G_GRE(x) (((x) >> S_GRE) & M_GRE)
+
+#define A_MPS_RX_VXLAN_TYPE 0x11234
+
+#define S_VXLAN_EN    16
+#define V_VXLAN_EN(x) ((x) << S_VXLAN_EN)
+#define F_VXLAN_EN    V_VXLAN_EN(1U)
+
+#define S_VXLAN    0
+#define M_VXLAN    0xffffU
+#define V_VXLAN(x) ((x) << S_VXLAN)
+#define G_VXLAN(x) (((x) >> S_VXLAN) & M_VXLAN)
+
+#define A_MPS_RX_GENEVE_TYPE 0x11238
+
+#define S_GENEVE_EN    16
+#define V_GENEVE_EN(x) ((x) << S_GENEVE_EN)
+#define F_GENEVE_EN    V_GENEVE_EN(1U)
+
+#define S_GENEVE    0
+#define M_GENEVE    0xffffU
+#define V_GENEVE(x) ((x) << S_GENEVE)
+#define G_GENEVE(x) (((x) >> S_GENEVE) & M_GENEVE)
+
+#define A_MPS_RX_INNER_HDR_IVLAN 0x1123c
+
+#define S_T6_IVLAN_EN    16
+#define V_T6_IVLAN_EN(x) ((x) << S_T6_IVLAN_EN)
+#define F_T6_IVLAN_EN    V_T6_IVLAN_EN(1U)
+
+#define A_MPS_RX_ENCAP_NVGRE 0x11240
+
+#define S_ETYPE_EN    16
+#define V_ETYPE_EN(x) ((x) << S_ETYPE_EN)
+#define F_ETYPE_EN    V_ETYPE_EN(1U)
+
+#define S_T6_ETYPE    0
+#define M_T6_ETYPE    0xffffU
+#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
+#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
+
+#define A_MPS_RX_ENCAP_GENEVE 0x11244
+
+#define S_T6_ETYPE    0
+#define M_T6_ETYPE    0xffffU
+#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
+#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
+
+#define A_MPS_RX_TCP 0x11248
+
+#define S_PROT_TYPE_EN    8
+#define V_PROT_TYPE_EN(x) ((x) << S_PROT_TYPE_EN)
+#define F_PROT_TYPE_EN    V_PROT_TYPE_EN(1U)
+
+#define S_PROT_TYPE    0
+#define M_PROT_TYPE    0xffU
+#define V_PROT_TYPE(x) ((x) << S_PROT_TYPE)
+#define G_PROT_TYPE(x) (((x) >> S_PROT_TYPE) & M_PROT_TYPE)
+
+#define A_MPS_RX_UDP 0x1124c
+#define A_MPS_RX_PAUSE 0x11250
+#define A_MPS_RX_LENGTH 0x11254
+
+#define S_SAP_VALUE    16
+#define M_SAP_VALUE    0xffffU
+#define V_SAP_VALUE(x) ((x) << S_SAP_VALUE)
+#define G_SAP_VALUE(x) (((x) >> S_SAP_VALUE) & M_SAP_VALUE)
+
+#define S_LENGTH_ETYPE    0
+#define M_LENGTH_ETYPE    0xffffU
+#define V_LENGTH_ETYPE(x) ((x) << S_LENGTH_ETYPE)
+#define G_LENGTH_ETYPE(x) (((x) >> S_LENGTH_ETYPE) & M_LENGTH_ETYPE)
+
+#define A_MPS_RX_CTL_ORG 0x11258
+
+#define S_CTL_VALUE    24
+#define M_CTL_VALUE    0xffU
+#define V_CTL_VALUE(x) ((x) << S_CTL_VALUE)
+#define G_CTL_VALUE(x) (((x) >> S_CTL_VALUE) & M_CTL_VALUE)
+
+#define S_ORG_VALUE    0
+#define M_ORG_VALUE    0xffffffU
+#define V_ORG_VALUE(x) ((x) << S_ORG_VALUE)
+#define G_ORG_VALUE(x) (((x) >> S_ORG_VALUE) & M_ORG_VALUE)
+
+#define A_MPS_RX_IPV4 0x1125c
+
+#define S_ETYPE_IPV4    0
+#define M_ETYPE_IPV4    0xffffU
+#define V_ETYPE_IPV4(x) ((x) << S_ETYPE_IPV4)
+#define G_ETYPE_IPV4(x) (((x) >> S_ETYPE_IPV4) & M_ETYPE_IPV4)
+
+#define A_MPS_RX_IPV6 0x11260
+
+#define S_ETYPE_IPV6    0
+#define M_ETYPE_IPV6    0xffffU
+#define V_ETYPE_IPV6(x) ((x) << S_ETYPE_IPV6)
+#define G_ETYPE_IPV6(x) (((x) >> S_ETYPE_IPV6) & M_ETYPE_IPV6)
+
+#define A_MPS_RX_TTL 0x11264
+
+#define S_TTL_IPV4    10
+#define M_TTL_IPV4    0xffU
+#define V_TTL_IPV4(x) ((x) << S_TTL_IPV4)
+#define G_TTL_IPV4(x) (((x) >> S_TTL_IPV4) & M_TTL_IPV4)
+
+#define S_TTL_IPV6    2
+#define M_TTL_IPV6    0xffU
+#define V_TTL_IPV6(x) ((x) << S_TTL_IPV6)
+#define G_TTL_IPV6(x) (((x) >> S_TTL_IPV6) & M_TTL_IPV6)
+
+#define S_TTL_CHK_EN_IPV4    1
+#define V_TTL_CHK_EN_IPV4(x) ((x) << S_TTL_CHK_EN_IPV4)
+#define F_TTL_CHK_EN_IPV4    V_TTL_CHK_EN_IPV4(1U)
+
+#define S_TTL_CHK_EN_IPV6    0
+#define V_TTL_CHK_EN_IPV6(x) ((x) << S_TTL_CHK_EN_IPV6)
+#define F_TTL_CHK_EN_IPV6    V_TTL_CHK_EN_IPV6(1U)
+
+#define A_MPS_RX_DEFAULT_VNI 0x11268
+
+#define S_VNI    0
+#define M_VNI    0xffffffU
+#define V_VNI(x) ((x) << S_VNI)
+#define G_VNI(x) (((x) >> S_VNI) & M_VNI)
+
+#define A_MPS_RX_PRS_CTL 0x1126c
+
+#define S_CTL_CHK_EN    28
+#define V_CTL_CHK_EN(x) ((x) << S_CTL_CHK_EN)
+#define F_CTL_CHK_EN    V_CTL_CHK_EN(1U)
+
+#define S_ORG_CHK_EN    27
+#define V_ORG_CHK_EN(x) ((x) << S_ORG_CHK_EN)
+#define F_ORG_CHK_EN    V_ORG_CHK_EN(1U)
+
+#define S_SAP_CHK_EN    26
+#define V_SAP_CHK_EN(x) ((x) << S_SAP_CHK_EN)
+#define F_SAP_CHK_EN    V_SAP_CHK_EN(1U)
+
+#define S_VXLAN_FLAG_CHK_EN    25
+#define V_VXLAN_FLAG_CHK_EN(x) ((x) << S_VXLAN_FLAG_CHK_EN)
+#define F_VXLAN_FLAG_CHK_EN    V_VXLAN_FLAG_CHK_EN(1U)
+
+#define S_VXLAN_FLAG_MASK    17
+#define M_VXLAN_FLAG_MASK    0xffU
+#define V_VXLAN_FLAG_MASK(x) ((x) << S_VXLAN_FLAG_MASK)
+#define G_VXLAN_FLAG_MASK(x) (((x) >> S_VXLAN_FLAG_MASK) & M_VXLAN_FLAG_MASK)
+
+#define S_VXLAN_FLAG    9
+#define M_VXLAN_FLAG    0xffU
+#define V_VXLAN_FLAG(x) ((x) << S_VXLAN_FLAG)
+#define G_VXLAN_FLAG(x) (((x) >> S_VXLAN_FLAG) & M_VXLAN_FLAG)
+
+#define S_GRE_VER_CHK_EN    8
+#define V_GRE_VER_CHK_EN(x) ((x) << S_GRE_VER_CHK_EN)
+#define F_GRE_VER_CHK_EN    V_GRE_VER_CHK_EN(1U)
+
+#define S_GRE_VER    5
+#define M_GRE_VER    0x7U
+#define V_GRE_VER(x) ((x) << S_GRE_VER)
+#define G_GRE_VER(x) (((x) >> S_GRE_VER) & M_GRE_VER)
+
+#define S_GENEVE_VER_CHK_EN    4
+#define V_GENEVE_VER_CHK_EN(x) ((x) << S_GENEVE_VER_CHK_EN)
+#define F_GENEVE_VER_CHK_EN    V_GENEVE_VER_CHK_EN(1U)
+
+#define S_GENEVE_VER    2
+#define M_GENEVE_VER    0x3U
+#define V_GENEVE_VER(x) ((x) << S_GENEVE_VER)
+#define G_GENEVE_VER(x) (((x) >> S_GENEVE_VER) & M_GENEVE_VER)
+
+#define S_DIP_EN    1
+#define V_DIP_EN(x) ((x) << S_DIP_EN)
+#define F_DIP_EN    V_DIP_EN(1U)
+
+#define A_MPS_RX_PRS_CTL_2 0x11270
+
+#define S_EN_UDP_CSUM_CHK    4
+#define V_EN_UDP_CSUM_CHK(x) ((x) << S_EN_UDP_CSUM_CHK)
+#define F_EN_UDP_CSUM_CHK    V_EN_UDP_CSUM_CHK(1U)
+
+#define S_EN_UDP_LEN_CHK    3
+#define V_EN_UDP_LEN_CHK(x) ((x) << S_EN_UDP_LEN_CHK)
+#define F_EN_UDP_LEN_CHK    V_EN_UDP_LEN_CHK(1U)
+
+#define S_EN_IP_CSUM_CHK    2
+#define V_EN_IP_CSUM_CHK(x) ((x) << S_EN_IP_CSUM_CHK)
+#define F_EN_IP_CSUM_CHK    V_EN_IP_CSUM_CHK(1U)
+
+#define S_EN_IP_PAYLOAD_LEN_CHK    1
+#define V_EN_IP_PAYLOAD_LEN_CHK(x) ((x) << S_EN_IP_PAYLOAD_LEN_CHK)
+#define F_EN_IP_PAYLOAD_LEN_CHK    V_EN_IP_PAYLOAD_LEN_CHK(1U)
+
+#define S_T6_IPV6_UDP_CSUM_COMPAT    0
+#define V_T6_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_T6_IPV6_UDP_CSUM_COMPAT)
+#define F_T6_IPV6_UDP_CSUM_COMPAT    V_T6_IPV6_UDP_CSUM_COMPAT(1U)
+
+#define A_MPS_RX_MPS2NCSI_CNT 0x11274
+#define A_MPS_RX_MAX_TNL_HDR_LEN 0x11278
+
+#define S_T6_LEN    0
+#define M_T6_LEN    0x1ffU
+#define V_T6_LEN(x) ((x) << S_T6_LEN)
+#define G_T6_LEN(x) (((x) >> S_T6_LEN) & M_T6_LEN)
+
+#define A_MPS_RX_PAUSE_DA_H 0x1127c
+#define A_MPS_RX_PAUSE_DA_L 0x11280
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC0 0x11284
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11288
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC0 0x1128c
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11290
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC1 0x11294
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC1 0x11298
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC1 0x1129c
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a0
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112a4
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112a8
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112ac
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b0
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112b4
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112b8
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112bc
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c0
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112c4
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112c8
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112cc
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d0
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112d4
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112d8
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112dc
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e0
+#define A_MPS_VF_RPLCT_MAP4 0x11300
+#define A_MPS_VF_RPLCT_MAP5 0x11304
+#define A_MPS_VF_RPLCT_MAP6 0x11308
+#define A_MPS_VF_RPLCT_MAP7 0x1130c
+#define A_MPS_CLS_DIPIPV4_ID_TABLE 0x12000
+#define A_MPS_CLS_DIPIPV4_MASK_TABLE 0x12004
+#define A_MPS_CLS_DIPIPV6ID_0_TABLE 0x12020
+#define A_MPS_CLS_DIPIPV6ID_1_TABLE 0x12024
+#define A_MPS_CLS_DIPIPV6ID_2_TABLE 0x12028
+#define A_MPS_CLS_DIPIPV6ID_3_TABLE 0x1202c
+#define A_MPS_CLS_DIPIPV6MASK_0_TABLE 0x12030
+#define A_MPS_CLS_DIPIPV6MASK_1_TABLE 0x12034
+#define A_MPS_CLS_DIPIPV6MASK_2_TABLE 0x12038
+#define A_MPS_CLS_DIPIPV6MASK_3_TABLE 0x1203c
+#define A_MPS_RX_HASH_LKP_TABLE 0x12060
+
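The block above adds the tunnel-parser plumbing (A_MPS_RX_GRE_PROT_TYPE, A_MPS_RX_VXLAN_TYPE, A_MPS_RX_GENEVE_TYPE, plus the per-MAC/loopback/TP tunnel counters). A hedged sketch of programming the VXLAN outer UDP port, assuming the t4_write_reg() helper from adapter.h; the port number is just the IANA default, not something this commit configures:

	/* Hedged sketch: enable VXLAN parsing on the standard UDP port. */
	static void
	enable_vxlan_rx(struct adapter *sc)
	{
		t4_write_reg(sc, A_MPS_RX_VXLAN_TYPE,
		    V_VXLAN(4789) | F_VXLAN_EN);	/* IANA VXLAN port */
	}
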
 /* registers for module CPL_SWITCH */
 #define CPL_SWITCH_BASE_ADDR 0x19040
 
@@ -16787,6 +35924,10 @@
 #define V_CIM_ENABLE(x) ((x) << S_CIM_ENABLE)
 #define F_CIM_ENABLE    V_CIM_ENABLE(1U)
 
+#define S_CIM_SPLIT_ENABLE    6
+#define V_CIM_SPLIT_ENABLE(x) ((x) << S_CIM_SPLIT_ENABLE)
+#define F_CIM_SPLIT_ENABLE    V_CIM_SPLIT_ENABLE(1U)
+
 #define A_CPL_SWITCH_TBL_IDX 0x19044
 
 #define S_SWITCH_TBL_IDX    0
@@ -16833,6 +35974,14 @@
 #define V_ZERO_SWITCH_ERROR(x) ((x) << S_ZERO_SWITCH_ERROR)
 #define F_ZERO_SWITCH_ERROR    V_ZERO_SWITCH_ERROR(1U)
 
+#define S_PERR_CPL_128TO128_1    7
+#define V_PERR_CPL_128TO128_1(x) ((x) << S_PERR_CPL_128TO128_1)
+#define F_PERR_CPL_128TO128_1    V_PERR_CPL_128TO128_1(1U)
+
+#define S_PERR_CPL_128TO128_0    6
+#define V_PERR_CPL_128TO128_0(x) ((x) << S_PERR_CPL_128TO128_0)
+#define F_PERR_CPL_128TO128_0    V_PERR_CPL_128TO128_0(1U)
+
 #define A_CPL_INTR_CAUSE 0x19054
 #define A_CPL_MAP_TBL_IDX 0x19058
 
@@ -16841,6 +35990,10 @@
 #define V_MAP_TBL_IDX(x) ((x) << S_MAP_TBL_IDX)
 #define G_MAP_TBL_IDX(x) (((x) >> S_MAP_TBL_IDX) & M_MAP_TBL_IDX)
 
+#define S_CIM_SPLIT_OPCODE_PROGRAM    8
+#define V_CIM_SPLIT_OPCODE_PROGRAM(x) ((x) << S_CIM_SPLIT_OPCODE_PROGRAM)
+#define F_CIM_SPLIT_OPCODE_PROGRAM    V_CIM_SPLIT_OPCODE_PROGRAM(1U)
+
 #define A_CPL_MAP_TBL_DATA 0x1905c
 
 #define S_MAP_TBL_DATA    0
@@ -17194,6 +36347,18 @@
 #define V_SLVFIFOPERREN(x) ((x) << S_SLVFIFOPERREN)
 #define F_SLVFIFOPERREN    V_SLVFIFOPERREN(1U)
 
+#define S_MSTTXFIFO    21
+#define V_MSTTXFIFO(x) ((x) << S_MSTTXFIFO)
+#define F_MSTTXFIFO    V_MSTTXFIFO(1U)
+
+#define S_MSTRXFIFO    19
+#define V_MSTRXFIFO(x) ((x) << S_MSTRXFIFO)
+#define F_MSTRXFIFO    V_MSTRXFIFO(1U)
+
+#define S_SLVFIFO    18
+#define V_SLVFIFO(x) ((x) << S_SLVFIFO)
+#define F_SLVFIFO    V_SLVFIFO(1U)
+
 #define A_SMB_PERR_INJ 0x1909c
 
 #define S_MSTTXINJDATAERR    3
@@ -17381,6 +36546,20 @@
 #define V_MICROCNTCLKCFG(x) ((x) << S_MICROCNTCLKCFG)
 #define G_MICROCNTCLKCFG(x) (((x) >> S_MICROCNTCLKCFG) & M_MICROCNTCLKCFG)
 
+#define A_SMB_CTL_STATUS 0x190e8
+
+#define S_MSTBUSBUSY    2
+#define V_MSTBUSBUSY(x) ((x) << S_MSTBUSBUSY)
+#define F_MSTBUSBUSY    V_MSTBUSBUSY(1U)
+
+#define S_SLVBUSBUSY    1
+#define V_SLVBUSBUSY(x) ((x) << S_SLVBUSBUSY)
+#define F_SLVBUSBUSY    V_SLVBUSBUSY(1U)
+
+#define S_BUSBUSY    0
+#define V_BUSBUSY(x) ((x) << S_BUSBUSY)
+#define F_BUSBUSY    V_BUSBUSY(1U)
+
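A_SMB_CTL_STATUS above exposes live bus-busy status for the SMBus block. A hedged sketch of a poll loop on the aggregate F_BUSBUSY bit, assuming t4_read_reg() from adapter.h; the retry bound and delay are illustrative, not values taken from this commit:

	/* Hedged sketch: spin briefly until the SMBus is idle. */
	static int
	smb_wait_idle(struct adapter *sc)
	{
		int i;

		for (i = 0; i < 1000; i++) {
			if ((t4_read_reg(sc, A_SMB_CTL_STATUS) & F_BUSBUSY) == 0)
				return (0);
			DELAY(10);
		}
		return (ETIMEDOUT);
	}
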
 /* registers for module I2CM */
 #define I2CM_BASE_ADDR 0x190f0
 
@@ -17391,6 +36570,11 @@
 #define V_I2C_CLKDIV(x) ((x) << S_I2C_CLKDIV)
 #define G_I2C_CLKDIV(x) (((x) >> S_I2C_CLKDIV) & M_I2C_CLKDIV)
 
+#define S_I2C_CLKDIV16B    0
+#define M_I2C_CLKDIV16B    0xffffU
+#define V_I2C_CLKDIV16B(x) ((x) << S_I2C_CLKDIV16B)
+#define G_I2C_CLKDIV16B(x) (((x) >> S_I2C_CLKDIV16B) & M_I2C_CLKDIV16B)
+
 #define A_I2CM_DATA 0x190f4
 
 #define S_I2C_DATA    0
@@ -17536,6 +36720,50 @@
 #define V_INITPOWERMODE(x) ((x) << S_INITPOWERMODE)
 #define G_INITPOWERMODE(x) (((x) >> S_INITPOWERMODE) & M_INITPOWERMODE)
 
+#define S_SGE_PART_CGEN    19
+#define V_SGE_PART_CGEN(x) ((x) << S_SGE_PART_CGEN)
+#define F_SGE_PART_CGEN    V_SGE_PART_CGEN(1U)
+
+#define S_PDP_PART_CGEN    18
+#define V_PDP_PART_CGEN(x) ((x) << S_PDP_PART_CGEN)
+#define F_PDP_PART_CGEN    V_PDP_PART_CGEN(1U)
+
+#define S_TP_PART_CGEN    17
+#define V_TP_PART_CGEN(x) ((x) << S_TP_PART_CGEN)
+#define F_TP_PART_CGEN    V_TP_PART_CGEN(1U)
+
+#define S_EDC0_PART_CGEN    16
+#define V_EDC0_PART_CGEN(x) ((x) << S_EDC0_PART_CGEN)
+#define F_EDC0_PART_CGEN    V_EDC0_PART_CGEN(1U)
+
+#define S_EDC1_PART_CGEN    15
+#define V_EDC1_PART_CGEN(x) ((x) << S_EDC1_PART_CGEN)
+#define F_EDC1_PART_CGEN    V_EDC1_PART_CGEN(1U)
+
+#define S_LE_PART_CGEN    14
+#define V_LE_PART_CGEN(x) ((x) << S_LE_PART_CGEN)
+#define F_LE_PART_CGEN    V_LE_PART_CGEN(1U)
+
+#define S_MA_PART_CGEN    13
+#define V_MA_PART_CGEN(x) ((x) << S_MA_PART_CGEN)
+#define F_MA_PART_CGEN    V_MA_PART_CGEN(1U)
+
+#define S_MC0_PART_CGEN    12
+#define V_MC0_PART_CGEN(x) ((x) << S_MC0_PART_CGEN)
+#define F_MC0_PART_CGEN    V_MC0_PART_CGEN(1U)
+
+#define S_MC1_PART_CGEN    11
+#define V_MC1_PART_CGEN(x) ((x) << S_MC1_PART_CGEN)
+#define F_MC1_PART_CGEN    V_MC1_PART_CGEN(1U)
+
+#define S_PCIE_PART_CGEN    10
+#define V_PCIE_PART_CGEN(x) ((x) << S_PCIE_PART_CGEN)
+#define F_PCIE_PART_CGEN    V_PCIE_PART_CGEN(1U)
+
+#define S_PL_DIS_PRTY_CHK    20
+#define V_PL_DIS_PRTY_CHK(x) ((x) << S_PL_DIS_PRTY_CHK)
+#define F_PL_DIS_PRTY_CHK    V_PL_DIS_PRTY_CHK(1U)
+
 #define A_PMU_SLEEPMODE_WAKEUP 0x19124
 
 #define S_HWWAKEUPEN    5
@@ -17562,6 +36790,10 @@
 #define V_WAKEUP(x) ((x) << S_WAKEUP)
 #define F_WAKEUP    V_WAKEUP(1U)
 
+#define S_GLOBALDEEPSLEEPEN    6
+#define V_GLOBALDEEPSLEEPEN(x) ((x) << S_GLOBALDEEPSLEEPEN)
+#define F_GLOBALDEEPSLEEPEN    V_GLOBALDEEPSLEEPEN(1U)
+
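F_GLOBALDEEPSLEEPEN above lands in A_PMU_SLEEPMODE_WAKEUP next to other live control bits, so changing it wants a read-modify-write. A hedged sketch using the common code's t4_set_reg_field(), assuming its usual (addr, mask, val) signature:

	/*
	 * Hedged sketch: toggle one RMW bit; mask selects the bits to
	 * change, val supplies their new value.
	 */
	static void
	set_global_deep_sleep(struct adapter *sc, bool enable)
	{
		t4_set_reg_field(sc, A_PMU_SLEEPMODE_WAKEUP,
		    F_GLOBALDEEPSLEEPEN, enable ? F_GLOBALDEEPSLEEPEN : 0);
	}
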
 /* registers for module ULP_RX */
 #define ULP_RX_BASE_ADDR 0x19150
 
@@ -17656,78 +36888,86 @@
 #define V_ENABLE_AF_0(x) ((x) << S_ENABLE_AF_0)
 #define F_ENABLE_AF_0    V_ENABLE_AF_0(1U)
 
-#define S_ENABLE_PCMDF_1    17
-#define V_ENABLE_PCMDF_1(x) ((x) << S_ENABLE_PCMDF_1)
-#define F_ENABLE_PCMDF_1    V_ENABLE_PCMDF_1(1U)
-
-#define S_ENABLE_MPARC_1    16
-#define V_ENABLE_MPARC_1(x) ((x) << S_ENABLE_MPARC_1)
-#define F_ENABLE_MPARC_1    V_ENABLE_MPARC_1(1U)
-
-#define S_ENABLE_MPARF_1    15
-#define V_ENABLE_MPARF_1(x) ((x) << S_ENABLE_MPARF_1)
-#define F_ENABLE_MPARF_1    V_ENABLE_MPARF_1(1U)
-
-#define S_ENABLE_DDPCF_1    14
-#define V_ENABLE_DDPCF_1(x) ((x) << S_ENABLE_DDPCF_1)
-#define F_ENABLE_DDPCF_1    V_ENABLE_DDPCF_1(1U)
-
-#define S_ENABLE_TPTCF_1    13
-#define V_ENABLE_TPTCF_1(x) ((x) << S_ENABLE_TPTCF_1)
-#define F_ENABLE_TPTCF_1    V_ENABLE_TPTCF_1(1U)
-
-#define S_ENABLE_PCMDF_0    12
-#define V_ENABLE_PCMDF_0(x) ((x) << S_ENABLE_PCMDF_0)
-#define F_ENABLE_PCMDF_0    V_ENABLE_PCMDF_0(1U)
-
-#define S_ENABLE_MPARC_0    11
-#define V_ENABLE_MPARC_0(x) ((x) << S_ENABLE_MPARC_0)
-#define F_ENABLE_MPARC_0    V_ENABLE_MPARC_0(1U)
-
-#define S_ENABLE_MPARF_0    10
-#define V_ENABLE_MPARF_0(x) ((x) << S_ENABLE_MPARF_0)
-#define F_ENABLE_MPARF_0    V_ENABLE_MPARF_0(1U)
-
-#define S_ENABLE_DDPCF_0    9
-#define V_ENABLE_DDPCF_0(x) ((x) << S_ENABLE_DDPCF_0)
-#define F_ENABLE_DDPCF_0    V_ENABLE_DDPCF_0(1U)
-
-#define S_ENABLE_TPTCF_0    8
-#define V_ENABLE_TPTCF_0(x) ((x) << S_ENABLE_TPTCF_0)
-#define F_ENABLE_TPTCF_0    V_ENABLE_TPTCF_0(1U)
-
-#define S_ENABLE_DDPDF_1    7
+#define S_ENABLE_DDPDF_1    17
 #define V_ENABLE_DDPDF_1(x) ((x) << S_ENABLE_DDPDF_1)
 #define F_ENABLE_DDPDF_1    V_ENABLE_DDPDF_1(1U)
 
-#define S_ENABLE_DDPMF_1    6
+#define S_ENABLE_DDPMF_1    16
 #define V_ENABLE_DDPMF_1(x) ((x) << S_ENABLE_DDPMF_1)
 #define F_ENABLE_DDPMF_1    V_ENABLE_DDPMF_1(1U)
 
-#define S_ENABLE_MEMRF_1    5
+#define S_ENABLE_MEMRF_1    15
 #define V_ENABLE_MEMRF_1(x) ((x) << S_ENABLE_MEMRF_1)
 #define F_ENABLE_MEMRF_1    V_ENABLE_MEMRF_1(1U)
 
-#define S_ENABLE_PRSDF_1    4
+#define S_ENABLE_PRSDF_1    14
 #define V_ENABLE_PRSDF_1(x) ((x) << S_ENABLE_PRSDF_1)
 #define F_ENABLE_PRSDF_1    V_ENABLE_PRSDF_1(1U)
 
-#define S_ENABLE_DDPDF_0    3
+#define S_ENABLE_DDPDF_0    13
 #define V_ENABLE_DDPDF_0(x) ((x) << S_ENABLE_DDPDF_0)
 #define F_ENABLE_DDPDF_0    V_ENABLE_DDPDF_0(1U)
 
-#define S_ENABLE_DDPMF_0    2
+#define S_ENABLE_DDPMF_0    12
 #define V_ENABLE_DDPMF_0(x) ((x) << S_ENABLE_DDPMF_0)
 #define F_ENABLE_DDPMF_0    V_ENABLE_DDPMF_0(1U)
 
-#define S_ENABLE_MEMRF_0    1
+#define S_ENABLE_MEMRF_0    11
 #define V_ENABLE_MEMRF_0(x) ((x) << S_ENABLE_MEMRF_0)
 #define F_ENABLE_MEMRF_0    V_ENABLE_MEMRF_0(1U)
 
-#define S_ENABLE_PRSDF_0    0
+#define S_ENABLE_PRSDF_0    10
 #define V_ENABLE_PRSDF_0(x) ((x) << S_ENABLE_PRSDF_0)
 #define F_ENABLE_PRSDF_0    V_ENABLE_PRSDF_0(1U)
 
+#define S_ENABLE_PCMDF_1    9
+#define V_ENABLE_PCMDF_1(x) ((x) << S_ENABLE_PCMDF_1)
+#define F_ENABLE_PCMDF_1    V_ENABLE_PCMDF_1(1U)
+
+#define S_ENABLE_TPTCF_1    8
+#define V_ENABLE_TPTCF_1(x) ((x) << S_ENABLE_TPTCF_1)
+#define F_ENABLE_TPTCF_1    V_ENABLE_TPTCF_1(1U)
+
+#define S_ENABLE_DDPCF_1    7
+#define V_ENABLE_DDPCF_1(x) ((x) << S_ENABLE_DDPCF_1)
+#define F_ENABLE_DDPCF_1    V_ENABLE_DDPCF_1(1U)
+
+#define S_ENABLE_MPARF_1    6
+#define V_ENABLE_MPARF_1(x) ((x) << S_ENABLE_MPARF_1)
+#define F_ENABLE_MPARF_1    V_ENABLE_MPARF_1(1U)
+
+#define S_ENABLE_MPARC_1    5
+#define V_ENABLE_MPARC_1(x) ((x) << S_ENABLE_MPARC_1)
+#define F_ENABLE_MPARC_1    V_ENABLE_MPARC_1(1U)
+
+#define S_ENABLE_PCMDF_0    4
+#define V_ENABLE_PCMDF_0(x) ((x) << S_ENABLE_PCMDF_0)
+#define F_ENABLE_PCMDF_0    V_ENABLE_PCMDF_0(1U)
+
+#define S_ENABLE_TPTCF_0    3
+#define V_ENABLE_TPTCF_0(x) ((x) << S_ENABLE_TPTCF_0)
+#define F_ENABLE_TPTCF_0    V_ENABLE_TPTCF_0(1U)
+
+#define S_ENABLE_DDPCF_0    2
+#define V_ENABLE_DDPCF_0(x) ((x) << S_ENABLE_DDPCF_0)
+#define F_ENABLE_DDPCF_0    V_ENABLE_DDPCF_0(1U)
+
+#define S_ENABLE_MPARF_0    1
+#define V_ENABLE_MPARF_0(x) ((x) << S_ENABLE_MPARF_0)
+#define F_ENABLE_MPARF_0    V_ENABLE_MPARF_0(1U)
+
+#define S_ENABLE_MPARC_0    0
+#define V_ENABLE_MPARC_0(x) ((x) << S_ENABLE_MPARC_0)
+#define F_ENABLE_MPARC_0    V_ENABLE_MPARC_0(1U)
+
+#define S_SE_CNT_MISMATCH_1    26
+#define V_SE_CNT_MISMATCH_1(x) ((x) << S_SE_CNT_MISMATCH_1)
+#define F_SE_CNT_MISMATCH_1    V_SE_CNT_MISMATCH_1(1U)
+
+#define S_SE_CNT_MISMATCH_0    25
+#define V_SE_CNT_MISMATCH_0(x) ((x) << S_SE_CNT_MISMATCH_0)
+#define F_SE_CNT_MISMATCH_0    V_SE_CNT_MISMATCH_0(1U)
+
 #define A_ULP_RX_INT_CAUSE 0x19158
 
 #define S_CAUSE_CTX_1    24
@@ -17758,78 +36998,78 @@
 #define V_CAUSE_AF_0(x) ((x) << S_CAUSE_AF_0)
 #define F_CAUSE_AF_0    V_CAUSE_AF_0(1U)
 
-#define S_CAUSE_PCMDF_1    17
-#define V_CAUSE_PCMDF_1(x) ((x) << S_CAUSE_PCMDF_1)
-#define F_CAUSE_PCMDF_1    V_CAUSE_PCMDF_1(1U)
-
-#define S_CAUSE_MPARC_1    16
-#define V_CAUSE_MPARC_1(x) ((x) << S_CAUSE_MPARC_1)
-#define F_CAUSE_MPARC_1    V_CAUSE_MPARC_1(1U)
-
-#define S_CAUSE_MPARF_1    15
-#define V_CAUSE_MPARF_1(x) ((x) << S_CAUSE_MPARF_1)
-#define F_CAUSE_MPARF_1    V_CAUSE_MPARF_1(1U)
-
-#define S_CAUSE_DDPCF_1    14
-#define V_CAUSE_DDPCF_1(x) ((x) << S_CAUSE_DDPCF_1)
-#define F_CAUSE_DDPCF_1    V_CAUSE_DDPCF_1(1U)
-
-#define S_CAUSE_TPTCF_1    13
-#define V_CAUSE_TPTCF_1(x) ((x) << S_CAUSE_TPTCF_1)
-#define F_CAUSE_TPTCF_1    V_CAUSE_TPTCF_1(1U)
-
-#define S_CAUSE_PCMDF_0    12
-#define V_CAUSE_PCMDF_0(x) ((x) << S_CAUSE_PCMDF_0)
-#define F_CAUSE_PCMDF_0    V_CAUSE_PCMDF_0(1U)
-
-#define S_CAUSE_MPARC_0    11
-#define V_CAUSE_MPARC_0(x) ((x) << S_CAUSE_MPARC_0)
-#define F_CAUSE_MPARC_0    V_CAUSE_MPARC_0(1U)
-
-#define S_CAUSE_MPARF_0    10
-#define V_CAUSE_MPARF_0(x) ((x) << S_CAUSE_MPARF_0)
-#define F_CAUSE_MPARF_0    V_CAUSE_MPARF_0(1U)
-
-#define S_CAUSE_DDPCF_0    9
-#define V_CAUSE_DDPCF_0(x) ((x) << S_CAUSE_DDPCF_0)
-#define F_CAUSE_DDPCF_0    V_CAUSE_DDPCF_0(1U)
-
-#define S_CAUSE_TPTCF_0    8
-#define V_CAUSE_TPTCF_0(x) ((x) << S_CAUSE_TPTCF_0)
-#define F_CAUSE_TPTCF_0    V_CAUSE_TPTCF_0(1U)
-
-#define S_CAUSE_DDPDF_1    7
+#define S_CAUSE_DDPDF_1    17
 #define V_CAUSE_DDPDF_1(x) ((x) << S_CAUSE_DDPDF_1)
 #define F_CAUSE_DDPDF_1    V_CAUSE_DDPDF_1(1U)
 
-#define S_CAUSE_DDPMF_1    6
+#define S_CAUSE_DDPMF_1    16
 #define V_CAUSE_DDPMF_1(x) ((x) << S_CAUSE_DDPMF_1)
 #define F_CAUSE_DDPMF_1    V_CAUSE_DDPMF_1(1U)
 
-#define S_CAUSE_MEMRF_1    5
+#define S_CAUSE_MEMRF_1    15
 #define V_CAUSE_MEMRF_1(x) ((x) << S_CAUSE_MEMRF_1)
 #define F_CAUSE_MEMRF_1    V_CAUSE_MEMRF_1(1U)
 
-#define S_CAUSE_PRSDF_1    4
+#define S_CAUSE_PRSDF_1    14
 #define V_CAUSE_PRSDF_1(x) ((x) << S_CAUSE_PRSDF_1)
 #define F_CAUSE_PRSDF_1    V_CAUSE_PRSDF_1(1U)
 
-#define S_CAUSE_DDPDF_0    3
+#define S_CAUSE_DDPDF_0    13
 #define V_CAUSE_DDPDF_0(x) ((x) << S_CAUSE_DDPDF_0)
 #define F_CAUSE_DDPDF_0    V_CAUSE_DDPDF_0(1U)
 
-#define S_CAUSE_DDPMF_0    2
+#define S_CAUSE_DDPMF_0    12
 #define V_CAUSE_DDPMF_0(x) ((x) << S_CAUSE_DDPMF_0)
 #define F_CAUSE_DDPMF_0    V_CAUSE_DDPMF_0(1U)
 
-#define S_CAUSE_MEMRF_0    1
+#define S_CAUSE_MEMRF_0    11
 #define V_CAUSE_MEMRF_0(x) ((x) << S_CAUSE_MEMRF_0)
 #define F_CAUSE_MEMRF_0    V_CAUSE_MEMRF_0(1U)
 
-#define S_CAUSE_PRSDF_0    0
+#define S_CAUSE_PRSDF_0    10
 #define V_CAUSE_PRSDF_0(x) ((x) << S_CAUSE_PRSDF_0)
 #define F_CAUSE_PRSDF_0    V_CAUSE_PRSDF_0(1U)
 
+#define S_CAUSE_PCMDF_1    9
+#define V_CAUSE_PCMDF_1(x) ((x) << S_CAUSE_PCMDF_1)
+#define F_CAUSE_PCMDF_1    V_CAUSE_PCMDF_1(1U)
+
+#define S_CAUSE_TPTCF_1    8
+#define V_CAUSE_TPTCF_1(x) ((x) << S_CAUSE_TPTCF_1)
+#define F_CAUSE_TPTCF_1    V_CAUSE_TPTCF_1(1U)
+
+#define S_CAUSE_DDPCF_1    7
+#define V_CAUSE_DDPCF_1(x) ((x) << S_CAUSE_DDPCF_1)
+#define F_CAUSE_DDPCF_1    V_CAUSE_DDPCF_1(1U)
+
+#define S_CAUSE_MPARF_1    6
+#define V_CAUSE_MPARF_1(x) ((x) << S_CAUSE_MPARF_1)
+#define F_CAUSE_MPARF_1    V_CAUSE_MPARF_1(1U)
+
+#define S_CAUSE_MPARC_1    5
+#define V_CAUSE_MPARC_1(x) ((x) << S_CAUSE_MPARC_1)
+#define F_CAUSE_MPARC_1    V_CAUSE_MPARC_1(1U)
+
+#define S_CAUSE_PCMDF_0    4
+#define V_CAUSE_PCMDF_0(x) ((x) << S_CAUSE_PCMDF_0)
+#define F_CAUSE_PCMDF_0    V_CAUSE_PCMDF_0(1U)
+
+#define S_CAUSE_TPTCF_0    3
+#define V_CAUSE_TPTCF_0(x) ((x) << S_CAUSE_TPTCF_0)
+#define F_CAUSE_TPTCF_0    V_CAUSE_TPTCF_0(1U)
+
+#define S_CAUSE_DDPCF_0    2
+#define V_CAUSE_DDPCF_0(x) ((x) << S_CAUSE_DDPCF_0)
+#define F_CAUSE_DDPCF_0    V_CAUSE_DDPCF_0(1U)
+
+#define S_CAUSE_MPARF_0    1
+#define V_CAUSE_MPARF_0(x) ((x) << S_CAUSE_MPARF_0)
+#define F_CAUSE_MPARF_0    V_CAUSE_MPARF_0(1U)
+
+#define S_CAUSE_MPARC_0    0
+#define V_CAUSE_MPARC_0(x) ((x) << S_CAUSE_MPARC_0)
+#define F_CAUSE_MPARC_0    V_CAUSE_MPARC_0(1U)
+
 #define A_ULP_RX_ISCSI_LLIMIT 0x1915c
 
 #define S_ISCSILLIMIT    6
@@ -17903,6 +37143,123 @@
 #define A_ULP_RX_PBL_ULIMIT 0x19190
 #define A_ULP_RX_CTX_BASE 0x19194
 #define A_ULP_RX_PERR_ENABLE 0x1919c
+
+#define S_PERR_ENABLE_FF    22
+#define V_PERR_ENABLE_FF(x) ((x) << S_PERR_ENABLE_FF)
+#define F_PERR_ENABLE_FF    V_PERR_ENABLE_FF(1U)
+
+#define S_PERR_ENABLE_APF_1    21
+#define V_PERR_ENABLE_APF_1(x) ((x) << S_PERR_ENABLE_APF_1)
+#define F_PERR_ENABLE_APF_1    V_PERR_ENABLE_APF_1(1U)
+
+#define S_PERR_ENABLE_APF_0    20
+#define V_PERR_ENABLE_APF_0(x) ((x) << S_PERR_ENABLE_APF_0)
+#define F_PERR_ENABLE_APF_0    V_PERR_ENABLE_APF_0(1U)
+
+#define S_PERR_ENABLE_AF_1    19
+#define V_PERR_ENABLE_AF_1(x) ((x) << S_PERR_ENABLE_AF_1)
+#define F_PERR_ENABLE_AF_1    V_PERR_ENABLE_AF_1(1U)
+
+#define S_PERR_ENABLE_AF_0    18
+#define V_PERR_ENABLE_AF_0(x) ((x) << S_PERR_ENABLE_AF_0)
+#define F_PERR_ENABLE_AF_0    V_PERR_ENABLE_AF_0(1U)
+
+#define S_PERR_ENABLE_DDPDF_1    17
+#define V_PERR_ENABLE_DDPDF_1(x) ((x) << S_PERR_ENABLE_DDPDF_1)
+#define F_PERR_ENABLE_DDPDF_1    V_PERR_ENABLE_DDPDF_1(1U)
+
+#define S_PERR_ENABLE_DDPMF_1    16
+#define V_PERR_ENABLE_DDPMF_1(x) ((x) << S_PERR_ENABLE_DDPMF_1)
+#define F_PERR_ENABLE_DDPMF_1    V_PERR_ENABLE_DDPMF_1(1U)
+
+#define S_PERR_ENABLE_MEMRF_1    15
+#define V_PERR_ENABLE_MEMRF_1(x) ((x) << S_PERR_ENABLE_MEMRF_1)
+#define F_PERR_ENABLE_MEMRF_1    V_PERR_ENABLE_MEMRF_1(1U)
+
+#define S_PERR_ENABLE_PRSDF_1    14
+#define V_PERR_ENABLE_PRSDF_1(x) ((x) << S_PERR_ENABLE_PRSDF_1)
+#define F_PERR_ENABLE_PRSDF_1    V_PERR_ENABLE_PRSDF_1(1U)
+
+#define S_PERR_ENABLE_DDPDF_0    13
+#define V_PERR_ENABLE_DDPDF_0(x) ((x) << S_PERR_ENABLE_DDPDF_0)
+#define F_PERR_ENABLE_DDPDF_0    V_PERR_ENABLE_DDPDF_0(1U)
+
+#define S_PERR_ENABLE_DDPMF_0    12
+#define V_PERR_ENABLE_DDPMF_0(x) ((x) << S_PERR_ENABLE_DDPMF_0)
+#define F_PERR_ENABLE_DDPMF_0    V_PERR_ENABLE_DDPMF_0(1U)
+
+#define S_PERR_ENABLE_MEMRF_0    11
+#define V_PERR_ENABLE_MEMRF_0(x) ((x) << S_PERR_ENABLE_MEMRF_0)
+#define F_PERR_ENABLE_MEMRF_0    V_PERR_ENABLE_MEMRF_0(1U)
+
+#define S_PERR_ENABLE_PRSDF_0    10
+#define V_PERR_ENABLE_PRSDF_0(x) ((x) << S_PERR_ENABLE_PRSDF_0)
+#define F_PERR_ENABLE_PRSDF_0    V_PERR_ENABLE_PRSDF_0(1U)
+
+#define S_PERR_ENABLE_PCMDF_1    9
+#define V_PERR_ENABLE_PCMDF_1(x) ((x) << S_PERR_ENABLE_PCMDF_1)
+#define F_PERR_ENABLE_PCMDF_1    V_PERR_ENABLE_PCMDF_1(1U)
+
+#define S_PERR_ENABLE_TPTCF_1    8
+#define V_PERR_ENABLE_TPTCF_1(x) ((x) << S_PERR_ENABLE_TPTCF_1)
+#define F_PERR_ENABLE_TPTCF_1    V_PERR_ENABLE_TPTCF_1(1U)
+
+#define S_PERR_ENABLE_DDPCF_1    7
+#define V_PERR_ENABLE_DDPCF_1(x) ((x) << S_PERR_ENABLE_DDPCF_1)
+#define F_PERR_ENABLE_DDPCF_1    V_PERR_ENABLE_DDPCF_1(1U)
+
+#define S_PERR_ENABLE_MPARF_1    6
+#define V_PERR_ENABLE_MPARF_1(x) ((x) << S_PERR_ENABLE_MPARF_1)
+#define F_PERR_ENABLE_MPARF_1    V_PERR_ENABLE_MPARF_1(1U)
+
+#define S_PERR_ENABLE_MPARC_1    5
+#define V_PERR_ENABLE_MPARC_1(x) ((x) << S_PERR_ENABLE_MPARC_1)
+#define F_PERR_ENABLE_MPARC_1    V_PERR_ENABLE_MPARC_1(1U)
+
+#define S_PERR_ENABLE_PCMDF_0    4
+#define V_PERR_ENABLE_PCMDF_0(x) ((x) << S_PERR_ENABLE_PCMDF_0)
+#define F_PERR_ENABLE_PCMDF_0    V_PERR_ENABLE_PCMDF_0(1U)
+
+#define S_PERR_ENABLE_TPTCF_0    3
+#define V_PERR_ENABLE_TPTCF_0(x) ((x) << S_PERR_ENABLE_TPTCF_0)
+#define F_PERR_ENABLE_TPTCF_0    V_PERR_ENABLE_TPTCF_0(1U)
+
+#define S_PERR_ENABLE_DDPCF_0    2
+#define V_PERR_ENABLE_DDPCF_0(x) ((x) << S_PERR_ENABLE_DDPCF_0)
+#define F_PERR_ENABLE_DDPCF_0    V_PERR_ENABLE_DDPCF_0(1U)
+
+#define S_PERR_ENABLE_MPARF_0    1
+#define V_PERR_ENABLE_MPARF_0(x) ((x) << S_PERR_ENABLE_MPARF_0)
+#define F_PERR_ENABLE_MPARF_0    V_PERR_ENABLE_MPARF_0(1U)
+
+#define S_PERR_ENABLE_MPARC_0    0
+#define V_PERR_ENABLE_MPARC_0(x) ((x) << S_PERR_ENABLE_MPARC_0)
+#define F_PERR_ENABLE_MPARC_0    V_PERR_ENABLE_MPARC_0(1U)
+
+#define S_PERR_SE_CNT_MISMATCH_1    26
+#define V_PERR_SE_CNT_MISMATCH_1(x) ((x) << S_PERR_SE_CNT_MISMATCH_1)
+#define F_PERR_SE_CNT_MISMATCH_1    V_PERR_SE_CNT_MISMATCH_1(1U)
+
+#define S_PERR_SE_CNT_MISMATCH_0    25
+#define V_PERR_SE_CNT_MISMATCH_0(x) ((x) << S_PERR_SE_CNT_MISMATCH_0)
+#define F_PERR_SE_CNT_MISMATCH_0    V_PERR_SE_CNT_MISMATCH_0(1U)
+
+#define S_PERR_RSVD0    24
+#define V_PERR_RSVD0(x) ((x) << S_PERR_RSVD0)
+#define F_PERR_RSVD0    V_PERR_RSVD0(1U)
+
+#define S_PERR_RSVD1    23
+#define V_PERR_RSVD1(x) ((x) << S_PERR_RSVD1)
+#define F_PERR_RSVD1    V_PERR_RSVD1(1U)
+
+#define S_PERR_ENABLE_CTX_1    24
+#define V_PERR_ENABLE_CTX_1(x) ((x) << S_PERR_ENABLE_CTX_1)
+#define F_PERR_ENABLE_CTX_1    V_PERR_ENABLE_CTX_1(1U)
+
+#define S_PERR_ENABLE_CTX_0    23
+#define V_PERR_ENABLE_CTX_0(x) ((x) << S_PERR_ENABLE_CTX_0)
+#define F_PERR_ENABLE_CTX_0    V_PERR_ENABLE_CTX_0(1U)
+
 #define A_ULP_RX_PERR_INJECT 0x191a0
 #define A_ULP_RX_RQUDP_LLIMIT 0x191a4
 #define A_ULP_RX_RQUDP_ULIMIT 0x191a8
@@ -18069,7 +37426,373 @@
 #define G_WR_PTR(x) (((x) >> S_WR_PTR) & M_WR_PTR)
 
 #define A_ULP_RX_LA_RESERVED 0x1924c
+#define A_ULP_RX_CQE_GEN_EN 0x19250
 
+#define S_TERMIMATE_MSG    1
+#define V_TERMIMATE_MSG(x) ((x) << S_TERMIMATE_MSG)
+#define F_TERMIMATE_MSG    V_TERMIMATE_MSG(1U)
+
+#define S_TERMINATE_WITH_ERR    0
+#define V_TERMINATE_WITH_ERR(x) ((x) << S_TERMINATE_WITH_ERR)
+#define F_TERMINATE_WITH_ERR    V_TERMINATE_WITH_ERR(1U)
+
+#define A_ULP_RX_ATOMIC_OPCODES 0x19254
+
+#define S_ATOMIC_REQ_QNO    22
+#define M_ATOMIC_REQ_QNO    0x3U
+#define V_ATOMIC_REQ_QNO(x) ((x) << S_ATOMIC_REQ_QNO)
+#define G_ATOMIC_REQ_QNO(x) (((x) >> S_ATOMIC_REQ_QNO) & M_ATOMIC_REQ_QNO)
+
+#define S_ATOMIC_RSP_QNO    20
+#define M_ATOMIC_RSP_QNO    0x3U
+#define V_ATOMIC_RSP_QNO(x) ((x) << S_ATOMIC_RSP_QNO)
+#define G_ATOMIC_RSP_QNO(x) (((x) >> S_ATOMIC_RSP_QNO) & M_ATOMIC_RSP_QNO)
+
+#define S_IMMEDIATE_QNO    18
+#define M_IMMEDIATE_QNO    0x3U
+#define V_IMMEDIATE_QNO(x) ((x) << S_IMMEDIATE_QNO)
+#define G_IMMEDIATE_QNO(x) (((x) >> S_IMMEDIATE_QNO) & M_IMMEDIATE_QNO)
+
+#define S_IMMEDIATE_WITH_SE_QNO    16
+#define M_IMMEDIATE_WITH_SE_QNO    0x3U
+#define V_IMMEDIATE_WITH_SE_QNO(x) ((x) << S_IMMEDIATE_WITH_SE_QNO)
+#define G_IMMEDIATE_WITH_SE_QNO(x) (((x) >> S_IMMEDIATE_WITH_SE_QNO) & M_IMMEDIATE_WITH_SE_QNO)
+
+#define S_ATOMIC_WR_OPCODE    12
+#define M_ATOMIC_WR_OPCODE    0xfU
+#define V_ATOMIC_WR_OPCODE(x) ((x) << S_ATOMIC_WR_OPCODE)
+#define G_ATOMIC_WR_OPCODE(x) (((x) >> S_ATOMIC_WR_OPCODE) & M_ATOMIC_WR_OPCODE)
+
+#define S_ATOMIC_RD_OPCODE    8
+#define M_ATOMIC_RD_OPCODE    0xfU
+#define V_ATOMIC_RD_OPCODE(x) ((x) << S_ATOMIC_RD_OPCODE)
+#define G_ATOMIC_RD_OPCODE(x) (((x) >> S_ATOMIC_RD_OPCODE) & M_ATOMIC_RD_OPCODE)
+
+#define S_IMMEDIATE_OPCODE    4
+#define M_IMMEDIATE_OPCODE    0xfU
+#define V_IMMEDIATE_OPCODE(x) ((x) << S_IMMEDIATE_OPCODE)
+#define G_IMMEDIATE_OPCODE(x) (((x) >> S_IMMEDIATE_OPCODE) & M_IMMEDIATE_OPCODE)
+
+#define S_IMMEDIATE_WITH_SE_OPCODE    0
+#define M_IMMEDIATE_WITH_SE_OPCODE    0xfU
+#define V_IMMEDIATE_WITH_SE_OPCODE(x) ((x) << S_IMMEDIATE_WITH_SE_OPCODE)
+#define G_IMMEDIATE_WITH_SE_OPCODE(x) (((x) >> S_IMMEDIATE_WITH_SE_OPCODE) & M_IMMEDIATE_WITH_SE_OPCODE)
+
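Multi-bit fields add the M_ mask and the G_ getter, so packing and unpacking are symmetric. A sketch using the two-bit ATOMIC_REQ_QNO field of A_ULP_RX_ATOMIC_OPCODES defined above (host-side bit manipulation only, not driver code):

#include <assert.h>
#include <stdint.h>

#define S_ATOMIC_REQ_QNO    22
#define M_ATOMIC_REQ_QNO    0x3U
#define V_ATOMIC_REQ_QNO(x) ((x) << S_ATOMIC_REQ_QNO)
#define G_ATOMIC_REQ_QNO(x) (((x) >> S_ATOMIC_REQ_QNO) & M_ATOMIC_REQ_QNO)

int
main(void)
{
	uint32_t v = 0;

	v |= V_ATOMIC_REQ_QNO(2U);		/* pack queue 2 into bits 23:22 */
	assert(G_ATOMIC_REQ_QNO(v) == 2);	/* the getter recovers it */
	return (0);
}
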
+#define A_ULP_RX_T10_CRC_ENDIAN_SWITCHING 0x19258
+
+#define S_EN_ORIG_DATA    0
+#define V_EN_ORIG_DATA(x) ((x) << S_EN_ORIG_DATA)
+#define F_EN_ORIG_DATA    V_EN_ORIG_DATA(1U)
+
+#define A_ULP_RX_MISC_FEATURE_ENABLE 0x1925c
+
+#define S_TERMINATE_STATUS_EN    4
+#define V_TERMINATE_STATUS_EN(x) ((x) << S_TERMINATE_STATUS_EN)
+#define F_TERMINATE_STATUS_EN    V_TERMINATE_STATUS_EN(1U)
+
+#define S_MULTIPLE_PREF_ENABLE    3
+#define V_MULTIPLE_PREF_ENABLE(x) ((x) << S_MULTIPLE_PREF_ENABLE)
+#define F_MULTIPLE_PREF_ENABLE    V_MULTIPLE_PREF_ENABLE(1U)
+
+#define S_UMUDP_PBL_PREF_ENABLE    2
+#define V_UMUDP_PBL_PREF_ENABLE(x) ((x) << S_UMUDP_PBL_PREF_ENABLE)
+#define F_UMUDP_PBL_PREF_ENABLE    V_UMUDP_PBL_PREF_ENABLE(1U)
+
+#define S_RDMA_PBL_PREF_EN    1
+#define V_RDMA_PBL_PREF_EN(x) ((x) << S_RDMA_PBL_PREF_EN)
+#define F_RDMA_PBL_PREF_EN    V_RDMA_PBL_PREF_EN(1U)
+
+#define S_SDC_CRC_PROT_EN    0
+#define V_SDC_CRC_PROT_EN(x) ((x) << S_SDC_CRC_PROT_EN)
+#define F_SDC_CRC_PROT_EN    V_SDC_CRC_PROT_EN(1U)
+
+#define S_ISCSI_DCRC_ERROR_CMP_EN    25
+#define V_ISCSI_DCRC_ERROR_CMP_EN(x) ((x) << S_ISCSI_DCRC_ERROR_CMP_EN)
+#define F_ISCSI_DCRC_ERROR_CMP_EN    V_ISCSI_DCRC_ERROR_CMP_EN(1U)
+
+#define S_ISCSITAGPI    24
+#define V_ISCSITAGPI(x) ((x) << S_ISCSITAGPI)
+#define F_ISCSITAGPI    V_ISCSITAGPI(1U)
+
+#define S_DDP_VERSION_1    22
+#define M_DDP_VERSION_1    0x3U
+#define V_DDP_VERSION_1(x) ((x) << S_DDP_VERSION_1)
+#define G_DDP_VERSION_1(x) (((x) >> S_DDP_VERSION_1) & M_DDP_VERSION_1)
+
+#define S_DDP_VERSION_0    20
+#define M_DDP_VERSION_0    0x3U
+#define V_DDP_VERSION_0(x) ((x) << S_DDP_VERSION_0)
+#define G_DDP_VERSION_0(x) (((x) >> S_DDP_VERSION_0) & M_DDP_VERSION_0)
+
+#define S_RDMA_VERSION_1    18
+#define M_RDMA_VERSION_1    0x3U
+#define V_RDMA_VERSION_1(x) ((x) << S_RDMA_VERSION_1)
+#define G_RDMA_VERSION_1(x) (((x) >> S_RDMA_VERSION_1) & M_RDMA_VERSION_1)
+
+#define S_RDMA_VERSION_0    16
+#define M_RDMA_VERSION_0    0x3U
+#define V_RDMA_VERSION_0(x) ((x) << S_RDMA_VERSION_0)
+#define G_RDMA_VERSION_0(x) (((x) >> S_RDMA_VERSION_0) & M_RDMA_VERSION_0)
+
+#define S_PBL_BOUND_CHECK_W_PGLEN    15
+#define V_PBL_BOUND_CHECK_W_PGLEN(x) ((x) << S_PBL_BOUND_CHECK_W_PGLEN)
+#define F_PBL_BOUND_CHECK_W_PGLEN    V_PBL_BOUND_CHECK_W_PGLEN(1U)
+
+#define S_ZBYTE_FIX_DISABLE    14
+#define V_ZBYTE_FIX_DISABLE(x) ((x) << S_ZBYTE_FIX_DISABLE)
+#define F_ZBYTE_FIX_DISABLE    V_ZBYTE_FIX_DISABLE(1U)
+
+#define S_T10_OFFSET_UPDATE_EN    13
+#define V_T10_OFFSET_UPDATE_EN(x) ((x) << S_T10_OFFSET_UPDATE_EN)
+#define F_T10_OFFSET_UPDATE_EN    V_T10_OFFSET_UPDATE_EN(1U)
+
+#define S_ULP_INSERT_PI    12
+#define V_ULP_INSERT_PI(x) ((x) << S_ULP_INSERT_PI)
+#define F_ULP_INSERT_PI    V_ULP_INSERT_PI(1U)
+
+#define S_PDU_DPI    11
+#define V_PDU_DPI(x) ((x) << S_PDU_DPI)
+#define F_PDU_DPI    V_PDU_DPI(1U)
+
+#define S_ISCSI_EFF_OFFSET_EN    10
+#define V_ISCSI_EFF_OFFSET_EN(x) ((x) << S_ISCSI_EFF_OFFSET_EN)
+#define F_ISCSI_EFF_OFFSET_EN    V_ISCSI_EFF_OFFSET_EN(1U)
+
+#define S_ISCSI_ALL_CMP_MODE    9
+#define V_ISCSI_ALL_CMP_MODE(x) ((x) << S_ISCSI_ALL_CMP_MODE)
+#define F_ISCSI_ALL_CMP_MODE    V_ISCSI_ALL_CMP_MODE(1U)
+
+#define S_ISCSI_ENABLE_HDR_CMD    8
+#define V_ISCSI_ENABLE_HDR_CMD(x) ((x) << S_ISCSI_ENABLE_HDR_CMD)
+#define F_ISCSI_ENABLE_HDR_CMD    V_ISCSI_ENABLE_HDR_CMD(1U)
+
+#define S_ISCSI_FORCE_CMP_MODE    7
+#define V_ISCSI_FORCE_CMP_MODE(x) ((x) << S_ISCSI_FORCE_CMP_MODE)
+#define F_ISCSI_FORCE_CMP_MODE    V_ISCSI_FORCE_CMP_MODE(1U)
+
+#define S_ISCSI_ENABLE_CMP_MODE    6
+#define V_ISCSI_ENABLE_CMP_MODE(x) ((x) << S_ISCSI_ENABLE_CMP_MODE)
+#define F_ISCSI_ENABLE_CMP_MODE    V_ISCSI_ENABLE_CMP_MODE(1U)
+
+#define S_PIO_RDMA_SEND_RQE    5
+#define V_PIO_RDMA_SEND_RQE(x) ((x) << S_PIO_RDMA_SEND_RQE)
+#define F_PIO_RDMA_SEND_RQE    V_PIO_RDMA_SEND_RQE(1U)
+
+#define A_ULP_RX_CH0_CGEN 0x19260
+
+#define S_BYPASS_CGEN    7
+#define V_BYPASS_CGEN(x) ((x) << S_BYPASS_CGEN)
+#define F_BYPASS_CGEN    V_BYPASS_CGEN(1U)
+
+#define S_TDDP_CGEN    6
+#define V_TDDP_CGEN(x) ((x) << S_TDDP_CGEN)
+#define F_TDDP_CGEN    V_TDDP_CGEN(1U)
+
+#define S_ISCSI_CGEN    5
+#define V_ISCSI_CGEN(x) ((x) << S_ISCSI_CGEN)
+#define F_ISCSI_CGEN    V_ISCSI_CGEN(1U)
+
+#define S_RDMA_CGEN    4
+#define V_RDMA_CGEN(x) ((x) << S_RDMA_CGEN)
+#define F_RDMA_CGEN    V_RDMA_CGEN(1U)
+
+#define S_CHANNEL_CGEN    3
+#define V_CHANNEL_CGEN(x) ((x) << S_CHANNEL_CGEN)
+#define F_CHANNEL_CGEN    V_CHANNEL_CGEN(1U)
+
+#define S_ALL_DATAPATH_CGEN    2
+#define V_ALL_DATAPATH_CGEN(x) ((x) << S_ALL_DATAPATH_CGEN)
+#define F_ALL_DATAPATH_CGEN    V_ALL_DATAPATH_CGEN(1U)
+
+#define S_T10DIFF_DATAPATH_CGEN    1
+#define V_T10DIFF_DATAPATH_CGEN(x) ((x) << S_T10DIFF_DATAPATH_CGEN)
+#define F_T10DIFF_DATAPATH_CGEN    V_T10DIFF_DATAPATH_CGEN(1U)
+
+#define S_RDMA_DATAPATH_CGEN    0
+#define V_RDMA_DATAPATH_CGEN(x) ((x) << S_RDMA_DATAPATH_CGEN)
+#define F_RDMA_DATAPATH_CGEN    V_RDMA_DATAPATH_CGEN(1U)
+
+#define A_ULP_RX_CH1_CGEN 0x19264
+#define A_ULP_RX_RFE_DISABLE 0x19268
+
+#define S_RQE_LIM_CHECK_RFE_DISABLE    0
+#define V_RQE_LIM_CHECK_RFE_DISABLE(x) ((x) << S_RQE_LIM_CHECK_RFE_DISABLE)
+#define F_RQE_LIM_CHECK_RFE_DISABLE    V_RQE_LIM_CHECK_RFE_DISABLE(1U)
+
+#define A_ULP_RX_INT_ENABLE_2 0x1926c
+
+#define S_ULPRX2MA_INTFPERR    8
+#define V_ULPRX2MA_INTFPERR(x) ((x) << S_ULPRX2MA_INTFPERR)
+#define F_ULPRX2MA_INTFPERR    V_ULPRX2MA_INTFPERR(1U)
+
+#define S_ALN_SDC_ERR_1    7
+#define V_ALN_SDC_ERR_1(x) ((x) << S_ALN_SDC_ERR_1)
+#define F_ALN_SDC_ERR_1    V_ALN_SDC_ERR_1(1U)
+
+#define S_ALN_SDC_ERR_0    6
+#define V_ALN_SDC_ERR_0(x) ((x) << S_ALN_SDC_ERR_0)
+#define F_ALN_SDC_ERR_0    V_ALN_SDC_ERR_0(1U)
+
+#define S_PF_UNTAGGED_TPT_1    5
+#define V_PF_UNTAGGED_TPT_1(x) ((x) << S_PF_UNTAGGED_TPT_1)
+#define F_PF_UNTAGGED_TPT_1    V_PF_UNTAGGED_TPT_1(1U)
+
+#define S_PF_UNTAGGED_TPT_0    4
+#define V_PF_UNTAGGED_TPT_0(x) ((x) << S_PF_UNTAGGED_TPT_0)
+#define F_PF_UNTAGGED_TPT_0    V_PF_UNTAGGED_TPT_0(1U)
+
+#define S_PF_PBL_1    3
+#define V_PF_PBL_1(x) ((x) << S_PF_PBL_1)
+#define F_PF_PBL_1    V_PF_PBL_1(1U)
+
+#define S_PF_PBL_0    2
+#define V_PF_PBL_0(x) ((x) << S_PF_PBL_0)
+#define F_PF_PBL_0    V_PF_PBL_0(1U)
+
+#define S_DDP_HINT_1    1
+#define V_DDP_HINT_1(x) ((x) << S_DDP_HINT_1)
+#define F_DDP_HINT_1    V_DDP_HINT_1(1U)
+
+#define S_DDP_HINT_0    0
+#define V_DDP_HINT_0(x) ((x) << S_DDP_HINT_0)
+#define F_DDP_HINT_0    V_DDP_HINT_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_2 0x19270
+#define A_ULP_RX_PERR_ENABLE_2 0x19274
+
+#define S_ENABLE_ULPRX2MA_INTFPERR    8
+#define V_ENABLE_ULPRX2MA_INTFPERR(x) ((x) << S_ENABLE_ULPRX2MA_INTFPERR)
+#define F_ENABLE_ULPRX2MA_INTFPERR    V_ENABLE_ULPRX2MA_INTFPERR(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_1    7
+#define V_ENABLE_ALN_SDC_ERR_1(x) ((x) << S_ENABLE_ALN_SDC_ERR_1)
+#define F_ENABLE_ALN_SDC_ERR_1    V_ENABLE_ALN_SDC_ERR_1(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_0    6
+#define V_ENABLE_ALN_SDC_ERR_0(x) ((x) << S_ENABLE_ALN_SDC_ERR_0)
+#define F_ENABLE_ALN_SDC_ERR_0    V_ENABLE_ALN_SDC_ERR_0(1U)
+
+#define S_ENABLE_PF_UNTAGGED_TPT_1    5
+#define V_ENABLE_PF_UNTAGGED_TPT_1(x) ((x) << S_ENABLE_PF_UNTAGGED_TPT_1)
+#define F_ENABLE_PF_UNTAGGED_TPT_1    V_ENABLE_PF_UNTAGGED_TPT_1(1U)
+
+#define S_ENABLE_PF_UNTAGGED_TPT_0    4
+#define V_ENABLE_PF_UNTAGGED_TPT_0(x) ((x) << S_ENABLE_PF_UNTAGGED_TPT_0)
+#define F_ENABLE_PF_UNTAGGED_TPT_0    V_ENABLE_PF_UNTAGGED_TPT_0(1U)
+
+#define S_ENABLE_PF_PBL_1    3
+#define V_ENABLE_PF_PBL_1(x) ((x) << S_ENABLE_PF_PBL_1)
+#define F_ENABLE_PF_PBL_1    V_ENABLE_PF_PBL_1(1U)
+
+#define S_ENABLE_PF_PBL_0    2
+#define V_ENABLE_PF_PBL_0(x) ((x) << S_ENABLE_PF_PBL_0)
+#define F_ENABLE_PF_PBL_0    V_ENABLE_PF_PBL_0(1U)
+
+#define S_ENABLE_DDP_HINT_1    1
+#define V_ENABLE_DDP_HINT_1(x) ((x) << S_ENABLE_DDP_HINT_1)
+#define F_ENABLE_DDP_HINT_1    V_ENABLE_DDP_HINT_1(1U)
+
+#define S_ENABLE_DDP_HINT_0    0
+#define V_ENABLE_DDP_HINT_0(x) ((x) << S_ENABLE_DDP_HINT_0)
+#define F_ENABLE_DDP_HINT_0    V_ENABLE_DDP_HINT_0(1U)
+
+#define A_ULP_RX_RQE_PBL_MULTIPLE_OUTSTANDING_CNT 0x19278
+
+#define S_PIO_RQE_PBL_MULTIPLE_CNT    0
+#define M_PIO_RQE_PBL_MULTIPLE_CNT    0xfU
+#define V_PIO_RQE_PBL_MULTIPLE_CNT(x) ((x) << S_PIO_RQE_PBL_MULTIPLE_CNT)
+#define G_PIO_RQE_PBL_MULTIPLE_CNT(x) (((x) >> S_PIO_RQE_PBL_MULTIPLE_CNT) & M_PIO_RQE_PBL_MULTIPLE_CNT)
+
+#define A_ULP_RX_ATOMIC_LEN 0x1927c
+
+#define S_ATOMIC_RPL_LEN    16
+#define M_ATOMIC_RPL_LEN    0xffU
+#define V_ATOMIC_RPL_LEN(x) ((x) << S_ATOMIC_RPL_LEN)
+#define G_ATOMIC_RPL_LEN(x) (((x) >> S_ATOMIC_RPL_LEN) & M_ATOMIC_RPL_LEN)
+
+#define S_ATOMIC_REQ_LEN    8
+#define M_ATOMIC_REQ_LEN    0xffU
+#define V_ATOMIC_REQ_LEN(x) ((x) << S_ATOMIC_REQ_LEN)
+#define G_ATOMIC_REQ_LEN(x) (((x) >> S_ATOMIC_REQ_LEN) & M_ATOMIC_REQ_LEN)
+
+#define S_ATOMIC_IMMEDIATE_LEN    0
+#define M_ATOMIC_IMMEDIATE_LEN    0xffU
+#define V_ATOMIC_IMMEDIATE_LEN(x) ((x) << S_ATOMIC_IMMEDIATE_LEN)
+#define G_ATOMIC_IMMEDIATE_LEN(x) (((x) >> S_ATOMIC_IMMEDIATE_LEN) & M_ATOMIC_IMMEDIATE_LEN)
+
+#define A_ULP_RX_CGEN_GLOBAL 0x19280
+#define A_ULP_RX_CTX_SKIP_MA_REQ 0x19284
+
+#define S_CLEAR_CTX_ERR_CNT1    3
+#define V_CLEAR_CTX_ERR_CNT1(x) ((x) << S_CLEAR_CTX_ERR_CNT1)
+#define F_CLEAR_CTX_ERR_CNT1    V_CLEAR_CTX_ERR_CNT1(1U)
+
+#define S_CLEAR_CTX_ERR_CNT0    2
+#define V_CLEAR_CTX_ERR_CNT0(x) ((x) << S_CLEAR_CTX_ERR_CNT0)
+#define F_CLEAR_CTX_ERR_CNT0    V_CLEAR_CTX_ERR_CNT0(1U)
+
+#define S_SKIP_MA_REQ_EN1    1
+#define V_SKIP_MA_REQ_EN1(x) ((x) << S_SKIP_MA_REQ_EN1)
+#define F_SKIP_MA_REQ_EN1    V_SKIP_MA_REQ_EN1(1U)
+
+#define S_SKIP_MA_REQ_EN0    0
+#define V_SKIP_MA_REQ_EN0(x) ((x) << S_SKIP_MA_REQ_EN0)
+#define F_SKIP_MA_REQ_EN0    V_SKIP_MA_REQ_EN0(1U)
+
+#define A_ULP_RX_CHNL0_CTX_ERROR_COUNT_PER_TID 0x19288
+#define A_ULP_RX_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1928c
+#define A_ULP_RX_MSN_CHECK_ENABLE 0x19290
+
+#define S_RD_OR_TERM_MSN_CHECK_ENABLE    2
+#define V_RD_OR_TERM_MSN_CHECK_ENABLE(x) ((x) << S_RD_OR_TERM_MSN_CHECK_ENABLE)
+#define F_RD_OR_TERM_MSN_CHECK_ENABLE    V_RD_OR_TERM_MSN_CHECK_ENABLE(1U)
+
+#define S_ATOMIC_OP_MSN_CHECK_ENABLE    1
+#define V_ATOMIC_OP_MSN_CHECK_ENABLE(x) ((x) << S_ATOMIC_OP_MSN_CHECK_ENABLE)
+#define F_ATOMIC_OP_MSN_CHECK_ENABLE    V_ATOMIC_OP_MSN_CHECK_ENABLE(1U)
+
+#define S_SEND_MSN_CHECK_ENABLE    0
+#define V_SEND_MSN_CHECK_ENABLE(x) ((x) << S_SEND_MSN_CHECK_ENABLE)
+#define F_SEND_MSN_CHECK_ENABLE    V_SEND_MSN_CHECK_ENABLE(1U)
+
+#define A_ULP_RX_TLS_PP_LLIMIT 0x192a4
+
+#define S_TLSPPLLIMIT    6
+#define M_TLSPPLLIMIT    0x3ffffffU
+#define V_TLSPPLLIMIT(x) ((x) << S_TLSPPLLIMIT)
+#define G_TLSPPLLIMIT(x) (((x) >> S_TLSPPLLIMIT) & M_TLSPPLLIMIT)
+
+#define A_ULP_RX_TLS_PP_ULIMIT 0x192a8
+
+#define S_TLSPPULIMIT    6
+#define M_TLSPPULIMIT    0x3ffffffU
+#define V_TLSPPULIMIT(x) ((x) << S_TLSPPULIMIT)
+#define G_TLSPPULIMIT(x) (((x) >> S_TLSPPULIMIT) & M_TLSPPULIMIT)
+
+#define A_ULP_RX_TLS_KEY_LLIMIT 0x192ac
+
+#define S_TLSKEYLLIMIT    8
+#define M_TLSKEYLLIMIT    0xffffffU
+#define V_TLSKEYLLIMIT(x) ((x) << S_TLSKEYLLIMIT)
+#define G_TLSKEYLLIMIT(x) (((x) >> S_TLSKEYLLIMIT) & M_TLSKEYLLIMIT)
+
+#define A_ULP_RX_TLS_KEY_ULIMIT 0x192b0
+
+#define S_TLSKEYULIMIT    8
+#define M_TLSKEYULIMIT    0xffffffU
+#define V_TLSKEYULIMIT(x) ((x) << S_TLSKEYULIMIT)
+#define G_TLSKEYULIMIT(x) (((x) >> S_TLSKEYULIMIT) & M_TLSKEYULIMIT)
+
+#define A_ULP_RX_TLS_CTL 0x192bc
+#define A_ULP_RX_TLS_IND_CMD 0x19348
+
+#define S_TLS_RX_REG_OFF_ADDR    0
+#define M_TLS_RX_REG_OFF_ADDR    0x3ffU
+#define V_TLS_RX_REG_OFF_ADDR(x) ((x) << S_TLS_RX_REG_OFF_ADDR)
+#define G_TLS_RX_REG_OFF_ADDR(x) (((x) >> S_TLS_RX_REG_OFF_ADDR) & M_TLS_RX_REG_OFF_ADDR)
+
+#define A_ULP_RX_TLS_IND_DATA 0x1934c
+
 /* registers for module SF */
 #define SF_BASE_ADDR 0x193f8
 
@@ -18118,6 +37841,28 @@
 #define V_VFID(x) ((x) << S_VFID)
 #define G_VFID(x) (((x) >> S_VFID) & M_VFID)
 
+#define S_T6_SOURCEPF    9
+#define M_T6_SOURCEPF    0x7U
+#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
+#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
+
+#define S_T6_ISVF    8
+#define V_T6_ISVF(x) ((x) << S_T6_ISVF)
+#define F_T6_ISVF    V_T6_ISVF(1U)
+
+#define S_T6_VFID    0
+#define M_T6_VFID    0xffU
+#define V_T6_VFID(x) ((x) << S_T6_VFID)
+#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)
+
+#define A_PL_VF_REV 0x4
+
+#define S_CHIPID    4
+#define M_CHIPID    0xfU
+#define V_CHIPID(x) ((x) << S_CHIPID)
+#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+
+#define A_PL_VF_REVISION 0x8
 #define A_PL_PF_INT_CAUSE 0x3c0
 
 #define S_PFSW    3
@@ -18144,6 +37889,21 @@
 #define F_SWINT    V_SWINT(1U)
 
 #define A_PL_WHOAMI 0x19400
+
+#define S_T6_SOURCEPF    9
+#define M_T6_SOURCEPF    0x7U
+#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
+#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
+
+#define S_T6_ISVF    8
+#define V_T6_ISVF(x) ((x) << S_T6_ISVF)
+#define F_T6_ISVF    V_T6_ISVF(1U)
+
+#define S_T6_VFID    0
+#define M_T6_VFID    0xffU
+#define V_T6_VFID(x) ((x) << S_T6_VFID)
+#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)
+
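The T6_-prefixed variants exist because T6 rearranged the PL_WHOAMI layout: the source-PF number moves up to bits 11:9, ISVF to bit 8, and the VF ID widens to the low 8 bits. A hedged sketch of decoding a raw value with the T6 layout (the word would come from a 32-bit read of A_PL_WHOAMI; decode_whoami_t6 and the sample value are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define S_T6_SOURCEPF    9
#define M_T6_SOURCEPF    0x7U
#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)

#define S_T6_ISVF    8
#define F_T6_ISVF    (1U << S_T6_ISVF)

#define S_T6_VFID    0
#define M_T6_VFID    0xffU
#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)

static void
decode_whoami_t6(uint32_t whoami)
{
	if (whoami & F_T6_ISVF)
		printf("VF %u of PF %u\n", (unsigned)G_T6_VFID(whoami),
		    (unsigned)G_T6_SOURCEPF(whoami));
	else
		printf("PF %u\n", (unsigned)G_T6_SOURCEPF(whoami));
}

int
main(void)
{
	decode_whoami_t6(0x305);	/* sample raw value: VF 5 of PF 1 */
	return (0);
}
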
 #define A_PL_PERR_CAUSE 0x19404
 
 #define S_UART    28
@@ -18262,6 +38022,18 @@
 #define V_CIM(x) ((x) << S_CIM)
 #define F_CIM    V_CIM(1U)
 
+#define S_MC1    31
+#define V_MC1(x) ((x) << S_MC1)
+#define F_MC1    V_MC1(1U)
+
+#define S_MC0    15
+#define V_MC0(x) ((x) << S_MC0)
+#define F_MC0    V_MC0(1U)
+
+#define S_ANYMAC    9
+#define V_ANYMAC(x) ((x) << S_ANYMAC)
+#define F_ANYMAC    V_ANYMAC(1U)
+
 #define A_PL_PERR_ENABLE 0x19408
 #define A_PL_INT_CAUSE 0x1940c
 
@@ -18273,6 +38045,22 @@
 #define V_SW_CIM(x) ((x) << S_SW_CIM)
 #define F_SW_CIM    V_SW_CIM(1U)
 
+#define S_MAC3    12
+#define V_MAC3(x) ((x) << S_MAC3)
+#define F_MAC3    V_MAC3(1U)
+
+#define S_MAC2    11
+#define V_MAC2(x) ((x) << S_MAC2)
+#define F_MAC2    V_MAC2(1U)
+
+#define S_MAC1    10
+#define V_MAC1(x) ((x) << S_MAC1)
+#define F_MAC1    V_MAC1(1U)
+
+#define S_MAC0    9
+#define V_MAC0(x) ((x) << S_MAC0)
+#define F_MAC0    V_MAC0(1U)
+
 #define A_PL_INT_ENABLE 0x19410
 #define A_PL_INT_MAP0 0x19414
 
@@ -18298,6 +38086,16 @@
 #define V_MAPXGMAC0(x) ((x) << S_MAPXGMAC0)
 #define G_MAPXGMAC0(x) (((x) >> S_MAPXGMAC0) & M_MAPXGMAC0)
 
+#define S_MAPMAC1    16
+#define M_MAPMAC1    0x1ffU
+#define V_MAPMAC1(x) ((x) << S_MAPMAC1)
+#define G_MAPMAC1(x) (((x) >> S_MAPMAC1) & M_MAPMAC1)
+
+#define S_MAPMAC0    0
+#define M_MAPMAC0    0x1ffU
+#define V_MAPMAC0(x) ((x) << S_MAPMAC0)
+#define G_MAPMAC0(x) (((x) >> S_MAPMAC0) & M_MAPMAC0)
+
 #define A_PL_INT_MAP2 0x1941c
 
 #define S_MAPXGMAC_KR1    16
@@ -18310,6 +38108,16 @@
 #define V_MAPXGMAC_KR0(x) ((x) << S_MAPXGMAC_KR0)
 #define G_MAPXGMAC_KR0(x) (((x) >> S_MAPXGMAC_KR0) & M_MAPXGMAC_KR0)
 
+#define S_MAPMAC3    16
+#define M_MAPMAC3    0x1ffU
+#define V_MAPMAC3(x) ((x) << S_MAPMAC3)
+#define G_MAPMAC3(x) (((x) >> S_MAPMAC3) & M_MAPMAC3)
+
+#define S_MAPMAC2    0
+#define M_MAPMAC2    0x1ffU
+#define V_MAPMAC2(x) ((x) << S_MAPMAC2)
+#define G_MAPMAC2(x) (((x) >> S_MAPMAC2) & M_MAPMAC2)
+
 #define A_PL_INT_MAP3 0x19420
 
 #define S_MAPMI    16
@@ -18352,6 +38160,10 @@
 #define V_PIORSTMODE(x) ((x) << S_PIORSTMODE)
 #define F_PIORSTMODE    V_PIORSTMODE(1U)
 
+#define S_AUTOPCIEPAUSE    4
+#define V_AUTOPCIEPAUSE(x) ((x) << S_AUTOPCIEPAUSE)
+#define F_AUTOPCIEPAUSE    V_AUTOPCIEPAUSE(1U)
+
 #define A_PL_PL_PERR_INJECT 0x1942c
 
 #define S_PL_MEMSEL    1
@@ -18384,6 +38196,10 @@
 #define V_PERRVFID(x) ((x) << S_PERRVFID)
 #define F_PERRVFID    V_PERRVFID(1U)
 
+#define S_PL_BUSPERR    6
+#define V_PL_BUSPERR(x) ((x) << S_PL_BUSPERR)
+#define F_PL_BUSPERR    V_PL_BUSPERR(1U)
+
 #define A_PL_PL_INT_ENABLE 0x19434
 #define A_PL_PL_PERR_ENABLE 0x19438
 #define A_PL_REV 0x1943c
@@ -18393,6 +38209,95 @@
 #define V_REV(x) ((x) << S_REV)
 #define G_REV(x) (((x) >> S_REV) & M_REV)
 
+#define A_PL_PCIE_LINK 0x19440
+
+#define S_LN0_AESTAT    26
+#define M_LN0_AESTAT    0x7U
+#define V_LN0_AESTAT(x) ((x) << S_LN0_AESTAT)
+#define G_LN0_AESTAT(x) (((x) >> S_LN0_AESTAT) & M_LN0_AESTAT)
+
+#define S_LN0_AECMD    23
+#define M_LN0_AECMD    0x7U
+#define V_LN0_AECMD(x) ((x) << S_LN0_AECMD)
+#define G_LN0_AECMD(x) (((x) >> S_LN0_AECMD) & M_LN0_AECMD)
+
+#define S_T5_STATECFGINITF    16
+#define M_T5_STATECFGINITF    0x7fU
+#define V_T5_STATECFGINITF(x) ((x) << S_T5_STATECFGINITF)
+#define G_T5_STATECFGINITF(x) (((x) >> S_T5_STATECFGINITF) & M_T5_STATECFGINITF)
+
+#define S_T5_STATECFGINIT    12
+#define M_T5_STATECFGINIT    0xfU
+#define V_T5_STATECFGINIT(x) ((x) << S_T5_STATECFGINIT)
+#define G_T5_STATECFGINIT(x) (((x) >> S_T5_STATECFGINIT) & M_T5_STATECFGINIT)
+
+#define S_PCIE_SPEED    8
+#define M_PCIE_SPEED    0x3U
+#define V_PCIE_SPEED(x) ((x) << S_PCIE_SPEED)
+#define G_PCIE_SPEED(x) (((x) >> S_PCIE_SPEED) & M_PCIE_SPEED)
+
+#define S_T5_PERSTTIMEOUT    7
+#define V_T5_PERSTTIMEOUT(x) ((x) << S_T5_PERSTTIMEOUT)
+#define F_T5_PERSTTIMEOUT    V_T5_PERSTTIMEOUT(1U)
+
+#define S_T5_LTSSMENABLE    6
+#define V_T5_LTSSMENABLE(x) ((x) << S_T5_LTSSMENABLE)
+#define F_T5_LTSSMENABLE    V_T5_LTSSMENABLE(1U)
+
+#define S_LTSSM    0
+#define M_LTSSM    0x3fU
+#define V_LTSSM(x) ((x) << S_LTSSM)
+#define G_LTSSM(x) (((x) >> S_LTSSM) & M_LTSSM)
+
+#define S_T6_LN0_AESTAT    27
+#define M_T6_LN0_AESTAT    0x7U
+#define V_T6_LN0_AESTAT(x) ((x) << S_T6_LN0_AESTAT)
+#define G_T6_LN0_AESTAT(x) (((x) >> S_T6_LN0_AESTAT) & M_T6_LN0_AESTAT)
+
+#define S_T6_LN0_AECMD    24
+#define M_T6_LN0_AECMD    0x7U
+#define V_T6_LN0_AECMD(x) ((x) << S_T6_LN0_AECMD)
+#define G_T6_LN0_AECMD(x) (((x) >> S_T6_LN0_AECMD) & M_T6_LN0_AECMD)
+
+#define S_T6_STATECFGINITF    16
+#define M_T6_STATECFGINITF    0xffU
+#define V_T6_STATECFGINITF(x) ((x) << S_T6_STATECFGINITF)
+#define G_T6_STATECFGINITF(x) (((x) >> S_T6_STATECFGINITF) & M_T6_STATECFGINITF)
+
+#define S_T6_STATECFGINIT    12
+#define M_T6_STATECFGINIT    0xfU
+#define V_T6_STATECFGINIT(x) ((x) << S_T6_STATECFGINIT)
+#define G_T6_STATECFGINIT(x) (((x) >> S_T6_STATECFGINIT) & M_T6_STATECFGINIT)
+
+#define S_PHY_STATUS    10
+#define V_PHY_STATUS(x) ((x) << S_PHY_STATUS)
+#define F_PHY_STATUS    V_PHY_STATUS(1U)
+
+#define S_SPEED_PL    8
+#define M_SPEED_PL    0x3U
+#define V_SPEED_PL(x) ((x) << S_SPEED_PL)
+#define G_SPEED_PL(x) (((x) >> S_SPEED_PL) & M_SPEED_PL)
+
+#define S_PERSTTIMEOUT_PL    7
+#define V_PERSTTIMEOUT_PL(x) ((x) << S_PERSTTIMEOUT_PL)
+#define F_PERSTTIMEOUT_PL    V_PERSTTIMEOUT_PL(1U)
+
+#define S_T6_LTSSMENABLE    6
+#define V_T6_LTSSMENABLE(x) ((x) << S_T6_LTSSMENABLE)
+#define F_T6_LTSSMENABLE    V_T6_LTSSMENABLE(1U)
+
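A_PL_PCIE_LINK packs the PCIe LTSSM state and the negotiated speed code into one word, with T5_/T6_ variants where the field widths differ between chips. A small sketch extracting the T5-era fields from a raw value (the meaning of each speed code is not spelled out in this header, so the sketch only prints the raw encodings):

#include <stdint.h>
#include <stdio.h>

#define S_PCIE_SPEED    8
#define M_PCIE_SPEED    0x3U
#define G_PCIE_SPEED(x) (((x) >> S_PCIE_SPEED) & M_PCIE_SPEED)

#define S_LTSSM    0
#define M_LTSSM    0x3fU
#define G_LTSSM(x) (((x) >> S_LTSSM) & M_LTSSM)

int
main(void)
{
	uint32_t v = 0x211;	/* sample: speed code 2, LTSSM state 0x11 */

	printf("LTSSM state %#x, speed code %u\n",
	    (unsigned)G_LTSSM(v), (unsigned)G_PCIE_SPEED(v));
	return (0);
}
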
+#define A_PL_PCIE_CTL_STAT 0x19444
+
+#define S_PCIE_STATUS    16
+#define M_PCIE_STATUS    0xffffU
+#define V_PCIE_STATUS(x) ((x) << S_PCIE_STATUS)
+#define G_PCIE_STATUS(x) (((x) >> S_PCIE_STATUS) & M_PCIE_STATUS)
+
+#define S_PCIE_CONTROL    0
+#define M_PCIE_CONTROL    0xffffU
+#define V_PCIE_CONTROL(x) ((x) << S_PCIE_CONTROL)
+#define G_PCIE_CONTROL(x) (((x) >> S_PCIE_CONTROL) & M_PCIE_CONTROL)
+
 #define A_PL_SEMAPHORE_CTL 0x1944c
 
 #define S_LOCKSTATUS    16
@@ -18513,6 +38418,10 @@
 #define V_PL_TIMEOUT(x) ((x) << S_PL_TIMEOUT)
 #define G_PL_TIMEOUT(x) (((x) >> S_PL_TIMEOUT) & M_PL_TIMEOUT)
 
+#define S_PERRCAPTURE    16
+#define V_PERRCAPTURE(x) ((x) << S_PERRCAPTURE)
+#define F_PERRCAPTURE    V_PERRCAPTURE(1U)
+
 #define A_PL_TIMEOUT_STATUS0 0x194f4
 
 #define S_PL_TOADDR    2
@@ -18549,6 +38458,20 @@
 #define V_PL_TORID(x) ((x) << S_PL_TORID)
 #define G_PL_TORID(x) (((x) >> S_PL_TORID) & M_PL_TORID)
 
+#define S_VALIDPERR    30
+#define V_VALIDPERR(x) ((x) << S_VALIDPERR)
+#define F_VALIDPERR    V_VALIDPERR(1U)
+
+#define S_PL_TOVFID    0
+#define M_PL_TOVFID    0xffU
+#define V_PL_TOVFID(x) ((x) << S_PL_TOVFID)
+#define G_PL_TOVFID(x) (((x) >> S_PL_TOVFID) & M_PL_TOVFID)
+
+#define S_T6_PL_TOVFID    0
+#define M_T6_PL_TOVFID    0x1ffU
+#define V_T6_PL_TOVFID(x) ((x) << S_T6_PL_TOVFID)
+#define G_T6_PL_TOVFID(x) (((x) >> S_T6_PL_TOVFID) & M_T6_PL_TOVFID)
+
 #define A_PL_VFID_MAP 0x19800
 
 #define S_VFID_VLD    7
@@ -18559,6 +38482,7 @@
 #define LE_BASE_ADDR 0x19c00
 
 #define A_LE_BUF_CONFIG 0x19c00
+#define A_LE_DB_ID 0x19c00
 #define A_LE_DB_CONFIG 0x19c04
 
 #define S_TCAMCMDOVLAPEN    21
@@ -18614,6 +38538,103 @@
 #define V_CMDOVERLAPDIS(x) ((x) << S_CMDOVERLAPDIS)
 #define F_CMDOVERLAPDIS    V_CMDOVERLAPDIS(1U)
 
+#define S_MASKCMDOLAPDIS    26
+#define V_MASKCMDOLAPDIS(x) ((x) << S_MASKCMDOLAPDIS)
+#define F_MASKCMDOLAPDIS    V_MASKCMDOLAPDIS(1U)
+
+#define S_IPV4HASHSIZEEN    25
+#define V_IPV4HASHSIZEEN(x) ((x) << S_IPV4HASHSIZEEN)
+#define F_IPV4HASHSIZEEN    V_IPV4HASHSIZEEN(1U)
+
+#define S_PROTOCOLMASKEN    24
+#define V_PROTOCOLMASKEN(x) ((x) << S_PROTOCOLMASKEN)
+#define F_PROTOCOLMASKEN    V_PROTOCOLMASKEN(1U)
+
+#define S_TUPLESIZEEN    23
+#define V_TUPLESIZEEN(x) ((x) << S_TUPLESIZEEN)
+#define F_TUPLESIZEEN    V_TUPLESIZEEN(1U)
+
+#define S_SRVRSRAMEN    22
+#define V_SRVRSRAMEN(x) ((x) << S_SRVRSRAMEN)
+#define F_SRVRSRAMEN    V_SRVRSRAMEN(1U)
+
+#define S_ASBOTHSRCHENPR    19
+#define V_ASBOTHSRCHENPR(x) ((x) << S_ASBOTHSRCHENPR)
+#define F_ASBOTHSRCHENPR    V_ASBOTHSRCHENPR(1U)
+
+#define S_POCLIPTID0    15
+#define V_POCLIPTID0(x) ((x) << S_POCLIPTID0)
+#define F_POCLIPTID0    V_POCLIPTID0(1U)
+
+#define S_TCAMARBOFF    14
+#define V_TCAMARBOFF(x) ((x) << S_TCAMARBOFF)
+#define F_TCAMARBOFF    V_TCAMARBOFF(1U)
+
+#define S_ACCNTFULLEN    13
+#define V_ACCNTFULLEN(x) ((x) << S_ACCNTFULLEN)
+#define F_ACCNTFULLEN    V_ACCNTFULLEN(1U)
+
+#define S_FILTERRWNOCLIP    12
+#define V_FILTERRWNOCLIP(x) ((x) << S_FILTERRWNOCLIP)
+#define F_FILTERRWNOCLIP    V_FILTERRWNOCLIP(1U)
+
+#define S_CRCHASH    10
+#define V_CRCHASH(x) ((x) << S_CRCHASH)
+#define F_CRCHASH    V_CRCHASH(1U)
+
+#define S_COMPTID    9
+#define V_COMPTID(x) ((x) << S_COMPTID)
+#define F_COMPTID    V_COMPTID(1U)
+
+#define S_SINGLETHREAD    6
+#define V_SINGLETHREAD(x) ((x) << S_SINGLETHREAD)
+#define F_SINGLETHREAD    V_SINGLETHREAD(1U)
+
+#define S_CHK_FUL_TUP_ZERO    27
+#define V_CHK_FUL_TUP_ZERO(x) ((x) << S_CHK_FUL_TUP_ZERO)
+#define F_CHK_FUL_TUP_ZERO    V_CHK_FUL_TUP_ZERO(1U)
+
+#define S_PRI_HASH    26
+#define V_PRI_HASH(x) ((x) << S_PRI_HASH)
+#define F_PRI_HASH    V_PRI_HASH(1U)
+
+#define S_EXTN_HASH_IPV4    25
+#define V_EXTN_HASH_IPV4(x) ((x) << S_EXTN_HASH_IPV4)
+#define F_EXTN_HASH_IPV4    V_EXTN_HASH_IPV4(1U)
+
+#define S_ASLIPCOMPEN_IPV4    18
+#define V_ASLIPCOMPEN_IPV4(x) ((x) << S_ASLIPCOMPEN_IPV4)
+#define F_ASLIPCOMPEN_IPV4    V_ASLIPCOMPEN_IPV4(1U)
+
+#define S_IGNR_TUP_ZERO    9
+#define V_IGNR_TUP_ZERO(x) ((x) << S_IGNR_TUP_ZERO)
+#define F_IGNR_TUP_ZERO    V_IGNR_TUP_ZERO(1U)
+
+#define S_IGNR_LIP_ZERO    8
+#define V_IGNR_LIP_ZERO(x) ((x) << S_IGNR_LIP_ZERO)
+#define F_IGNR_LIP_ZERO    V_IGNR_LIP_ZERO(1U)
+
+#define S_CLCAM_INIT_BUSY    7
+#define V_CLCAM_INIT_BUSY(x) ((x) << S_CLCAM_INIT_BUSY)
+#define F_CLCAM_INIT_BUSY    V_CLCAM_INIT_BUSY(1U)
+
+#define S_CLCAM_INIT    6
+#define V_CLCAM_INIT(x) ((x) << S_CLCAM_INIT)
+#define F_CLCAM_INIT    V_CLCAM_INIT(1U)
+
+#define S_MTCAM_INIT_BUSY    5
+#define V_MTCAM_INIT_BUSY(x) ((x) << S_MTCAM_INIT_BUSY)
+#define F_MTCAM_INIT_BUSY    V_MTCAM_INIT_BUSY(1U)
+
+#define S_MTCAM_INIT    4
+#define V_MTCAM_INIT(x) ((x) << S_MTCAM_INIT)
+#define F_MTCAM_INIT    V_MTCAM_INIT(1U)
+
+#define S_REGION_EN    0
+#define M_REGION_EN    0xfU
+#define V_REGION_EN(x) ((x) << S_REGION_EN)
+#define G_REGION_EN(x) (((x) >> S_REGION_EN) & M_REGION_EN)
+
 #define A_LE_MISC 0x19c08
 
 #define S_CMPUNVAIL    0
@@ -18621,6 +38642,75 @@
 #define V_CMPUNVAIL(x) ((x) << S_CMPUNVAIL)
 #define G_CMPUNVAIL(x) (((x) >> S_CMPUNVAIL) & M_CMPUNVAIL)
 
+#define S_SRAMDEEPSLEEP_STAT    11
+#define V_SRAMDEEPSLEEP_STAT(x) ((x) << S_SRAMDEEPSLEEP_STAT)
+#define F_SRAMDEEPSLEEP_STAT    V_SRAMDEEPSLEEP_STAT(1U)
+
+#define S_TCAMDEEPSLEEP1_STAT    10
+#define V_TCAMDEEPSLEEP1_STAT(x) ((x) << S_TCAMDEEPSLEEP1_STAT)
+#define F_TCAMDEEPSLEEP1_STAT    V_TCAMDEEPSLEEP1_STAT(1U)
+
+#define S_TCAMDEEPSLEEP0_STAT    9
+#define V_TCAMDEEPSLEEP0_STAT(x) ((x) << S_TCAMDEEPSLEEP0_STAT)
+#define F_TCAMDEEPSLEEP0_STAT    V_TCAMDEEPSLEEP0_STAT(1U)
+
+#define S_SRAMDEEPSLEEP    8
+#define V_SRAMDEEPSLEEP(x) ((x) << S_SRAMDEEPSLEEP)
+#define F_SRAMDEEPSLEEP    V_SRAMDEEPSLEEP(1U)
+
+#define S_TCAMDEEPSLEEP1    7
+#define V_TCAMDEEPSLEEP1(x) ((x) << S_TCAMDEEPSLEEP1)
+#define F_TCAMDEEPSLEEP1    V_TCAMDEEPSLEEP1(1U)
+
+#define S_TCAMDEEPSLEEP0    6
+#define V_TCAMDEEPSLEEP0(x) ((x) << S_TCAMDEEPSLEEP0)
+#define F_TCAMDEEPSLEEP0    V_TCAMDEEPSLEEP0(1U)
+
+#define S_SRVRAMCLKOFF    5
+#define V_SRVRAMCLKOFF(x) ((x) << S_SRVRAMCLKOFF)
+#define F_SRVRAMCLKOFF    V_SRVRAMCLKOFF(1U)
+
+#define S_HASHCLKOFF    4
+#define V_HASHCLKOFF(x) ((x) << S_HASHCLKOFF)
+#define F_HASHCLKOFF    V_HASHCLKOFF(1U)
+
+#define A_LE_DB_EXEC_CTRL 0x19c08
+
+#define S_TPDB_IF_PAUSE_ACK    10
+#define V_TPDB_IF_PAUSE_ACK(x) ((x) << S_TPDB_IF_PAUSE_ACK)
+#define F_TPDB_IF_PAUSE_ACK    V_TPDB_IF_PAUSE_ACK(1U)
+
+#define S_TPDB_IF_PAUSE_REQ    9
+#define V_TPDB_IF_PAUSE_REQ(x) ((x) << S_TPDB_IF_PAUSE_REQ)
+#define F_TPDB_IF_PAUSE_REQ    V_TPDB_IF_PAUSE_REQ(1U)
+
+#define S_ERRSTOP_EN    8
+#define V_ERRSTOP_EN(x) ((x) << S_ERRSTOP_EN)
+#define F_ERRSTOP_EN    V_ERRSTOP_EN(1U)
+
+#define S_CMDLIMIT    0
+#define M_CMDLIMIT    0xffU
+#define V_CMDLIMIT(x) ((x) << S_CMDLIMIT)
+#define G_CMDLIMIT(x) (((x) >> S_CMDLIMIT) & M_CMDLIMIT)
+
+#define A_LE_DB_PS_CTRL 0x19c0c
+
+#define S_CLTCAMDEEPSLEEP_STAT    10
+#define V_CLTCAMDEEPSLEEP_STAT(x) ((x) << S_CLTCAMDEEPSLEEP_STAT)
+#define F_CLTCAMDEEPSLEEP_STAT    V_CLTCAMDEEPSLEEP_STAT(1U)
+
+#define S_TCAMDEEPSLEEP_STAT    9
+#define V_TCAMDEEPSLEEP_STAT(x) ((x) << S_TCAMDEEPSLEEP_STAT)
+#define F_TCAMDEEPSLEEP_STAT    V_TCAMDEEPSLEEP_STAT(1U)
+
+#define S_CLTCAMDEEPSLEEP    7
+#define V_CLTCAMDEEPSLEEP(x) ((x) << S_CLTCAMDEEPSLEEP)
+#define F_CLTCAMDEEPSLEEP    V_CLTCAMDEEPSLEEP(1U)
+
+#define S_TCAMDEEPSLEEP    6
+#define V_TCAMDEEPSLEEP(x) ((x) << S_TCAMDEEPSLEEP)
+#define F_TCAMDEEPSLEEP    V_TCAMDEEPSLEEP(1U)
+
 #define A_LE_DB_ROUTING_TABLE_INDEX 0x19c10
 
 #define S_RTINDX    7
@@ -18628,6 +38718,13 @@
 #define V_RTINDX(x) ((x) << S_RTINDX)
 #define G_RTINDX(x) (((x) >> S_RTINDX) & M_RTINDX)
 
+#define A_LE_DB_ACTIVE_TABLE_START_INDEX 0x19c10
+
+#define S_ATINDX    0
+#define M_ATINDX    0xfffffU
+#define V_ATINDX(x) ((x) << S_ATINDX)
+#define G_ATINDX(x) (((x) >> S_ATINDX) & M_ATINDX)
+
 #define A_LE_DB_FILTER_TABLE_INDEX 0x19c14
 
 #define S_FTINDX    7
@@ -18635,6 +38732,13 @@
 #define V_FTINDX(x) ((x) << S_FTINDX)
 #define G_FTINDX(x) (((x) >> S_FTINDX) & M_FTINDX)
 
+#define A_LE_DB_NORM_FILT_TABLE_START_INDEX 0x19c14
+
+#define S_NFTINDX    0
+#define M_NFTINDX    0xfffffU
+#define V_NFTINDX(x) ((x) << S_NFTINDX)
+#define G_NFTINDX(x) (((x) >> S_NFTINDX) & M_NFTINDX)
+
 #define A_LE_DB_SERVER_INDEX 0x19c18
 
 #define S_SRINDX    7
@@ -18642,6 +38746,13 @@
 #define V_SRINDX(x) ((x) << S_SRINDX)
 #define G_SRINDX(x) (((x) >> S_SRINDX) & M_SRINDX)
 
+#define A_LE_DB_SRVR_START_INDEX 0x19c18
+
+#define S_T6_SRINDX    0
+#define M_T6_SRINDX    0xfffffU
+#define V_T6_SRINDX(x) ((x) << S_T6_SRINDX)
+#define G_T6_SRINDX(x) (((x) >> S_T6_SRINDX) & M_T6_SRINDX)
+
 #define A_LE_DB_CLIP_TABLE_INDEX 0x19c1c
 
 #define S_CLIPTINDX    7
@@ -18649,6 +38760,13 @@
 #define V_CLIPTINDX(x) ((x) << S_CLIPTINDX)
 #define G_CLIPTINDX(x) (((x) >> S_CLIPTINDX) & M_CLIPTINDX)
 
+#define A_LE_DB_HPRI_FILT_TABLE_START_INDEX 0x19c1c
+
+#define S_HFTINDX    0
+#define M_HFTINDX    0xfffffU
+#define V_HFTINDX(x) ((x) << S_HFTINDX)
+#define G_HFTINDX(x) (((x) >> S_HFTINDX) & M_HFTINDX)
+
 #define A_LE_DB_ACT_CNT_IPV4 0x19c20
 
 #define S_ACTCNTIPV4    0
@@ -18675,9 +38793,40 @@
 #define V_HASHSIZE(x) ((x) << S_HASHSIZE)
 #define G_HASHSIZE(x) (((x) >> S_HASHSIZE) & M_HASHSIZE)
 
+#define S_NUMHASHBKT    20
+#define M_NUMHASHBKT    0x1fU
+#define V_NUMHASHBKT(x) ((x) << S_NUMHASHBKT)
+#define G_NUMHASHBKT(x) (((x) >> S_NUMHASHBKT) & M_NUMHASHBKT)
+
+#define S_HASHTBLSIZE    3
+#define M_HASHTBLSIZE    0x1ffffU
+#define V_HASHTBLSIZE(x) ((x) << S_HASHTBLSIZE)
+#define G_HASHTBLSIZE(x) (((x) >> S_HASHTBLSIZE) & M_HASHTBLSIZE)
+
 #define A_LE_DB_HASH_TABLE_BASE 0x19c2c
+#define A_LE_DB_MIN_NUM_ACTV_TCAM_ENTRIES 0x19c2c
+
+#define S_MIN_ATCAM_ENTS    0
+#define M_MIN_ATCAM_ENTS    0xfffffU
+#define V_MIN_ATCAM_ENTS(x) ((x) << S_MIN_ATCAM_ENTS)
+#define G_MIN_ATCAM_ENTS(x) (((x) >> S_MIN_ATCAM_ENTS) & M_MIN_ATCAM_ENTS)
+
 #define A_LE_DB_HASH_TID_BASE 0x19c30
+#define A_LE_DB_HASH_TBL_BASE_ADDR 0x19c30
+
+#define S_HASHTBLADDR    4
+#define M_HASHTBLADDR    0xfffffffU
+#define V_HASHTBLADDR(x) ((x) << S_HASHTBLADDR)
+#define G_HASHTBLADDR(x) (((x) >> S_HASHTBLADDR) & M_HASHTBLADDR)
+
 #define A_LE_DB_SIZE 0x19c34
+#define A_LE_TCAM_SIZE 0x19c34
+
+#define S_TCAM_SIZE    0
+#define M_TCAM_SIZE    0x3U
+#define V_TCAM_SIZE(x) ((x) << S_TCAM_SIZE)
+#define G_TCAM_SIZE(x) (((x) >> S_TCAM_SIZE) & M_TCAM_SIZE)
+
 #define A_LE_DB_INT_ENABLE 0x19c38
 
 #define S_MSGSEL    27
@@ -18749,7 +38898,184 @@
 #define V_SERVERHIT(x) ((x) << S_SERVERHIT)
 #define F_SERVERHIT    V_SERVERHIT(1U)
 
+#define S_ACTCNTIPV6TZERO    21
+#define V_ACTCNTIPV6TZERO(x) ((x) << S_ACTCNTIPV6TZERO)
+#define F_ACTCNTIPV6TZERO    V_ACTCNTIPV6TZERO(1U)
+
+#define S_ACTCNTIPV4TZERO    20
+#define V_ACTCNTIPV4TZERO(x) ((x) << S_ACTCNTIPV4TZERO)
+#define F_ACTCNTIPV4TZERO    V_ACTCNTIPV4TZERO(1U)
+
+#define S_ACTCNTIPV6ZERO    19
+#define V_ACTCNTIPV6ZERO(x) ((x) << S_ACTCNTIPV6ZERO)
+#define F_ACTCNTIPV6ZERO    V_ACTCNTIPV6ZERO(1U)
+
+#define S_ACTCNTIPV4ZERO    18
+#define V_ACTCNTIPV4ZERO(x) ((x) << S_ACTCNTIPV4ZERO)
+#define F_ACTCNTIPV4ZERO    V_ACTCNTIPV4ZERO(1U)
+
+#define S_MARSPPARERR    17
+#define V_MARSPPARERR(x) ((x) << S_MARSPPARERR)
+#define F_MARSPPARERR    V_MARSPPARERR(1U)
+
+#define S_VFPARERR    14
+#define V_VFPARERR(x) ((x) << S_VFPARERR)
+#define F_VFPARERR    V_VFPARERR(1U)
+
+#define S_CLIPSUBERR    29
+#define V_CLIPSUBERR(x) ((x) << S_CLIPSUBERR)
+#define F_CLIPSUBERR    V_CLIPSUBERR(1U)
+
+#define S_CLCAMFIFOERR    28
+#define V_CLCAMFIFOERR(x) ((x) << S_CLCAMFIFOERR)
+#define F_CLCAMFIFOERR    V_CLCAMFIFOERR(1U)
+
+#define S_HASHTBLMEMCRCERR    27
+#define V_HASHTBLMEMCRCERR(x) ((x) << S_HASHTBLMEMCRCERR)
+#define F_HASHTBLMEMCRCERR    V_HASHTBLMEMCRCERR(1U)
+
+#define S_CTCAMINVLDENT    26
+#define V_CTCAMINVLDENT(x) ((x) << S_CTCAMINVLDENT)
+#define F_CTCAMINVLDENT    V_CTCAMINVLDENT(1U)
+
+#define S_TCAMINVLDENT    25
+#define V_TCAMINVLDENT(x) ((x) << S_TCAMINVLDENT)
+#define F_TCAMINVLDENT    V_TCAMINVLDENT(1U)
+
+#define S_TOTCNTERR    24
+#define V_TOTCNTERR(x) ((x) << S_TOTCNTERR)
+#define F_TOTCNTERR    V_TOTCNTERR(1U)
+
+#define S_CMDPRSRINTERR    23
+#define V_CMDPRSRINTERR(x) ((x) << S_CMDPRSRINTERR)
+#define F_CMDPRSRINTERR    V_CMDPRSRINTERR(1U)
+
+#define S_CMDTIDERR    22
+#define V_CMDTIDERR(x) ((x) << S_CMDTIDERR)
+#define F_CMDTIDERR    V_CMDTIDERR(1U)
+
+#define S_T6_ACTRGNFULL    21
+#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL)
+#define F_T6_ACTRGNFULL    V_T6_ACTRGNFULL(1U)
+
+#define S_T6_ACTCNTIPV6TZERO    20
+#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO)
+#define F_T6_ACTCNTIPV6TZERO    V_T6_ACTCNTIPV6TZERO(1U)
+
+#define S_T6_ACTCNTIPV4TZERO    19
+#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO)
+#define F_T6_ACTCNTIPV4TZERO    V_T6_ACTCNTIPV4TZERO(1U)
+
+#define S_T6_ACTCNTIPV6ZERO    18
+#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO)
+#define F_T6_ACTCNTIPV6ZERO    V_T6_ACTCNTIPV6ZERO(1U)
+
+#define S_T6_ACTCNTIPV4ZERO    17
+#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO)
+#define F_T6_ACTCNTIPV4ZERO    V_T6_ACTCNTIPV4ZERO(1U)
+
+#define S_MAIFWRINTPERR    16
+#define V_MAIFWRINTPERR(x) ((x) << S_MAIFWRINTPERR)
+#define F_MAIFWRINTPERR    V_MAIFWRINTPERR(1U)
+
+#define S_HASHTBLMEMACCERR    15
+#define V_HASHTBLMEMACCERR(x) ((x) << S_HASHTBLMEMACCERR)
+#define F_HASHTBLMEMACCERR    V_HASHTBLMEMACCERR(1U)
+
+#define S_TCAMCRCERR    14
+#define V_TCAMCRCERR(x) ((x) << S_TCAMCRCERR)
+#define F_TCAMCRCERR    V_TCAMCRCERR(1U)
+
+#define S_TCAMINTPERR    13
+#define V_TCAMINTPERR(x) ((x) << S_TCAMINTPERR)
+#define F_TCAMINTPERR    V_TCAMINTPERR(1U)
+
+#define S_VFSRAMPERR    12
+#define V_VFSRAMPERR(x) ((x) << S_VFSRAMPERR)
+#define F_VFSRAMPERR    V_VFSRAMPERR(1U)
+
+#define S_SRVSRAMPERR    11
+#define V_SRVSRAMPERR(x) ((x) << S_SRVSRAMPERR)
+#define F_SRVSRAMPERR    V_SRVSRAMPERR(1U)
+
+#define S_SSRAMINTPERR    10
+#define V_SSRAMINTPERR(x) ((x) << S_SSRAMINTPERR)
+#define F_SSRAMINTPERR    V_SSRAMINTPERR(1U)
+
+#define S_CLCAMINTPERR    9
+#define V_CLCAMINTPERR(x) ((x) << S_CLCAMINTPERR)
+#define F_CLCAMINTPERR    V_CLCAMINTPERR(1U)
+
+#define S_CLCAMCRCPARERR    8
+#define V_CLCAMCRCPARERR(x) ((x) << S_CLCAMCRCPARERR)
+#define F_CLCAMCRCPARERR    V_CLCAMCRCPARERR(1U)
+
+#define S_HASHTBLACCFAIL    7
+#define V_HASHTBLACCFAIL(x) ((x) << S_HASHTBLACCFAIL)
+#define F_HASHTBLACCFAIL    V_HASHTBLACCFAIL(1U)
+
+#define S_TCAMACCFAIL    6
+#define V_TCAMACCFAIL(x) ((x) << S_TCAMACCFAIL)
+#define F_TCAMACCFAIL    V_TCAMACCFAIL(1U)
+
+#define S_SRVSRAMACCFAIL    5
+#define V_SRVSRAMACCFAIL(x) ((x) << S_SRVSRAMACCFAIL)
+#define F_SRVSRAMACCFAIL    V_SRVSRAMACCFAIL(1U)
+
+#define S_CLIPTCAMACCFAIL    4
+#define V_CLIPTCAMACCFAIL(x) ((x) << S_CLIPTCAMACCFAIL)
+#define F_CLIPTCAMACCFAIL    V_CLIPTCAMACCFAIL(1U)
+
+#define S_T6_UNKNOWNCMD    3
+#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD)
+#define F_T6_UNKNOWNCMD    V_T6_UNKNOWNCMD(1U)
+
+#define S_T6_LIP0    2
+#define V_T6_LIP0(x) ((x) << S_T6_LIP0)
+#define F_T6_LIP0    V_T6_LIP0(1U)
+
+#define S_T6_LIPMISS    1
+#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS)
+#define F_T6_LIPMISS    V_T6_LIPMISS(1U)
+
+#define S_PIPELINEERR    0
+#define V_PIPELINEERR(x) ((x) << S_PIPELINEERR)
+#define F_PIPELINEERR    V_PIPELINEERR(1U)
+
 #define A_LE_DB_INT_CAUSE 0x19c3c
+
+#define S_T6_ACTRGNFULL    21
+#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL)
+#define F_T6_ACTRGNFULL    V_T6_ACTRGNFULL(1U)
+
+#define S_T6_ACTCNTIPV6TZERO    20
+#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO)
+#define F_T6_ACTCNTIPV6TZERO    V_T6_ACTCNTIPV6TZERO(1U)
+
+#define S_T6_ACTCNTIPV4TZERO    19
+#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO)
+#define F_T6_ACTCNTIPV4TZERO    V_T6_ACTCNTIPV4TZERO(1U)
+
+#define S_T6_ACTCNTIPV6ZERO    18
+#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO)
+#define F_T6_ACTCNTIPV6ZERO    V_T6_ACTCNTIPV6ZERO(1U)
+
+#define S_T6_ACTCNTIPV4ZERO    17
+#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO)
+#define F_T6_ACTCNTIPV4ZERO    V_T6_ACTCNTIPV4ZERO(1U)
+
+#define S_T6_UNKNOWNCMD    3
+#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD)
+#define F_T6_UNKNOWNCMD    V_T6_UNKNOWNCMD(1U)
+
+#define S_T6_LIP0    2
+#define V_T6_LIP0(x) ((x) << S_T6_LIP0)
+#define F_T6_LIP0    V_T6_LIP0(1U)
+
+#define S_T6_LIPMISS    1
+#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS)
+#define F_T6_LIPMISS    V_T6_LIPMISS(1U)
+
 #define A_LE_DB_INT_TID 0x19c40
 
 #define S_INTTID    0
@@ -18757,6 +39083,18 @@
 #define V_INTTID(x) ((x) << S_INTTID)
 #define G_INTTID(x) (((x) >> S_INTTID) & M_INTTID)
 
+#define A_LE_DB_DBG_MATCH_CMD_IDX_MASK 0x19c40
+
+#define S_CMD_CMP_MASK    20
+#define M_CMD_CMP_MASK    0x1fU
+#define V_CMD_CMP_MASK(x) ((x) << S_CMD_CMP_MASK)
+#define G_CMD_CMP_MASK(x) (((x) >> S_CMD_CMP_MASK) & M_CMD_CMP_MASK)
+
+#define S_TID_CMP_MASK    0
+#define M_TID_CMP_MASK    0xfffffU
+#define V_TID_CMP_MASK(x) ((x) << S_TID_CMP_MASK)
+#define G_TID_CMP_MASK(x) (((x) >> S_TID_CMP_MASK) & M_TID_CMP_MASK)
+
 #define A_LE_DB_INT_PTID 0x19c44
 
 #define S_INTPTID    0
@@ -18764,6 +39102,18 @@
 #define V_INTPTID(x) ((x) << S_INTPTID)
 #define G_INTPTID(x) (((x) >> S_INTPTID) & M_INTPTID)
 
+#define A_LE_DB_DBG_MATCH_CMD_IDX_DATA 0x19c44
+
+#define S_CMD_CMP    20
+#define M_CMD_CMP    0x1fU
+#define V_CMD_CMP(x) ((x) << S_CMD_CMP)
+#define G_CMD_CMP(x) (((x) >> S_CMD_CMP) & M_CMD_CMP)
+
+#define S_TID_CMP    0
+#define M_TID_CMP    0xfffffU
+#define V_TID_CMP(x) ((x) << S_TID_CMP)
+#define G_TID_CMP(x) (((x) >> S_TID_CMP) & M_TID_CMP)
+
 #define A_LE_DB_INT_INDEX 0x19c48
 
 #define S_INTINDEX    0
@@ -18771,6 +39121,23 @@
 #define V_INTINDEX(x) ((x) << S_INTINDEX)
 #define G_INTINDEX(x) (((x) >> S_INTINDEX) & M_INTINDEX)
 
+#define A_LE_DB_ERR_CMD_TID 0x19c48
+
+#define S_ERR_CID    22
+#define M_ERR_CID    0xffU
+#define V_ERR_CID(x) ((x) << S_ERR_CID)
+#define G_ERR_CID(x) (((x) >> S_ERR_CID) & M_ERR_CID)
+
+#define S_ERR_PROT    20
+#define M_ERR_PROT    0x3U
+#define V_ERR_PROT(x) ((x) << S_ERR_PROT)
+#define G_ERR_PROT(x) (((x) >> S_ERR_PROT) & M_ERR_PROT)
+
+#define S_ERR_TID    0
+#define M_ERR_TID    0xfffffU
+#define V_ERR_TID(x) ((x) << S_ERR_TID)
+#define G_ERR_TID(x) (((x) >> S_ERR_TID) & M_ERR_TID)
+
 #define A_LE_DB_INT_CMD 0x19c4c
 
 #define S_INTCMD    0
@@ -18779,8 +39146,166 @@
 #define G_INTCMD(x) (((x) >> S_INTCMD) & M_INTCMD)
 
 #define A_LE_DB_MASK_IPV4 0x19c50
+#define A_LE_T5_DB_MASK_IPV4 0x19c50
+#define A_LE_DB_DBG_MATCH_DATA_MASK 0x19c50
+#define A_LE_DB_MAX_NUM_HASH_ENTRIES 0x19c70
+
+#define S_MAX_HASH_ENTS    0
+#define M_MAX_HASH_ENTS    0xfffffU
+#define V_MAX_HASH_ENTS(x) ((x) << S_MAX_HASH_ENTS)
+#define G_MAX_HASH_ENTS(x) (((x) >> S_MAX_HASH_ENTS) & M_MAX_HASH_ENTS)
+
+#define A_LE_DB_RSP_CODE_0 0x19c74
+
+#define S_SUCCESS    25
+#define M_SUCCESS    0x1fU
+#define V_SUCCESS(x) ((x) << S_SUCCESS)
+#define G_SUCCESS(x) (((x) >> S_SUCCESS) & M_SUCCESS)
+
+#define S_TCAM_ACTV_SUCC    20
+#define M_TCAM_ACTV_SUCC    0x1fU
+#define V_TCAM_ACTV_SUCC(x) ((x) << S_TCAM_ACTV_SUCC)
+#define G_TCAM_ACTV_SUCC(x) (((x) >> S_TCAM_ACTV_SUCC) & M_TCAM_ACTV_SUCC)
+
+#define S_HASH_ACTV_SUCC    15
+#define M_HASH_ACTV_SUCC    0x1fU
+#define V_HASH_ACTV_SUCC(x) ((x) << S_HASH_ACTV_SUCC)
+#define G_HASH_ACTV_SUCC(x) (((x) >> S_HASH_ACTV_SUCC) & M_HASH_ACTV_SUCC)
+
+#define S_TCAM_SRVR_HIT    10
+#define M_TCAM_SRVR_HIT    0x1fU
+#define V_TCAM_SRVR_HIT(x) ((x) << S_TCAM_SRVR_HIT)
+#define G_TCAM_SRVR_HIT(x) (((x) >> S_TCAM_SRVR_HIT) & M_TCAM_SRVR_HIT)
+
+#define S_SRAM_SRVR_HIT    5
+#define M_SRAM_SRVR_HIT    0x1fU
+#define V_SRAM_SRVR_HIT(x) ((x) << S_SRAM_SRVR_HIT)
+#define G_SRAM_SRVR_HIT(x) (((x) >> S_SRAM_SRVR_HIT) & M_SRAM_SRVR_HIT)
+
+#define S_TCAM_ACTV_HIT    0
+#define M_TCAM_ACTV_HIT    0x1fU
+#define V_TCAM_ACTV_HIT(x) ((x) << S_TCAM_ACTV_HIT)
+#define G_TCAM_ACTV_HIT(x) (((x) >> S_TCAM_ACTV_HIT) & M_TCAM_ACTV_HIT)
+
+#define A_LE_DB_RSP_CODE_1 0x19c78
+
+#define S_HASH_ACTV_HIT    25
+#define M_HASH_ACTV_HIT    0x1fU
+#define V_HASH_ACTV_HIT(x) ((x) << S_HASH_ACTV_HIT)
+#define G_HASH_ACTV_HIT(x) (((x) >> S_HASH_ACTV_HIT) & M_HASH_ACTV_HIT)
+
+#define S_T6_MISS    20
+#define M_T6_MISS    0x1fU
+#define V_T6_MISS(x) ((x) << S_T6_MISS)
+#define G_T6_MISS(x) (((x) >> S_T6_MISS) & M_T6_MISS)
+
+#define S_NORM_FILT_HIT    15
+#define M_NORM_FILT_HIT    0x1fU
+#define V_NORM_FILT_HIT(x) ((x) << S_NORM_FILT_HIT)
+#define G_NORM_FILT_HIT(x) (((x) >> S_NORM_FILT_HIT) & M_NORM_FILT_HIT)
+
+#define S_HPRI_FILT_HIT    10
+#define M_HPRI_FILT_HIT    0x1fU
+#define V_HPRI_FILT_HIT(x) ((x) << S_HPRI_FILT_HIT)
+#define G_HPRI_FILT_HIT(x) (((x) >> S_HPRI_FILT_HIT) & M_HPRI_FILT_HIT)
+
+#define S_ACTV_OPEN_ERR    5
+#define M_ACTV_OPEN_ERR    0x1fU
+#define V_ACTV_OPEN_ERR(x) ((x) << S_ACTV_OPEN_ERR)
+#define G_ACTV_OPEN_ERR(x) (((x) >> S_ACTV_OPEN_ERR) & M_ACTV_OPEN_ERR)
+
+#define S_ACTV_FULL_ERR    0
+#define M_ACTV_FULL_ERR    0x1fU
+#define V_ACTV_FULL_ERR(x) ((x) << S_ACTV_FULL_ERR)
+#define G_ACTV_FULL_ERR(x) (((x) >> S_ACTV_FULL_ERR) & M_ACTV_FULL_ERR)
+
+#define A_LE_DB_RSP_CODE_2 0x19c7c
+
+#define S_SRCH_RGN_HIT    25
+#define M_SRCH_RGN_HIT    0x1fU
+#define V_SRCH_RGN_HIT(x) ((x) << S_SRCH_RGN_HIT)
+#define G_SRCH_RGN_HIT(x) (((x) >> S_SRCH_RGN_HIT) & M_SRCH_RGN_HIT)
+
+#define S_CLIP_FAIL    20
+#define M_CLIP_FAIL    0x1fU
+#define V_CLIP_FAIL(x) ((x) << S_CLIP_FAIL)
+#define G_CLIP_FAIL(x) (((x) >> S_CLIP_FAIL) & M_CLIP_FAIL)
+
+#define S_LIP_ZERO_ERR    15
+#define M_LIP_ZERO_ERR    0x1fU
+#define V_LIP_ZERO_ERR(x) ((x) << S_LIP_ZERO_ERR)
+#define G_LIP_ZERO_ERR(x) (((x) >> S_LIP_ZERO_ERR) & M_LIP_ZERO_ERR)
+
+#define S_UNKNOWN_CMD    10
+#define M_UNKNOWN_CMD    0x1fU
+#define V_UNKNOWN_CMD(x) ((x) << S_UNKNOWN_CMD)
+#define G_UNKNOWN_CMD(x) (((x) >> S_UNKNOWN_CMD) & M_UNKNOWN_CMD)
+
+#define S_CMD_TID_ERR    5
+#define M_CMD_TID_ERR    0x1fU
+#define V_CMD_TID_ERR(x) ((x) << S_CMD_TID_ERR)
+#define G_CMD_TID_ERR(x) (((x) >> S_CMD_TID_ERR) & M_CMD_TID_ERR)
+
+#define S_INTERNAL_ERR    0
+#define M_INTERNAL_ERR    0x1fU
+#define V_INTERNAL_ERR(x) ((x) << S_INTERNAL_ERR)
+#define G_INTERNAL_ERR(x) (((x) >> S_INTERNAL_ERR) & M_INTERNAL_ERR)
+
+#define A_LE_DB_RSP_CODE_3 0x19c80
+
+#define S_SRAM_SRVR_HIT_ACTF    25
+#define M_SRAM_SRVR_HIT_ACTF    0x1fU
+#define V_SRAM_SRVR_HIT_ACTF(x) ((x) << S_SRAM_SRVR_HIT_ACTF)
+#define G_SRAM_SRVR_HIT_ACTF(x) (((x) >> S_SRAM_SRVR_HIT_ACTF) & M_SRAM_SRVR_HIT_ACTF)
+
+#define S_TCAM_SRVR_HIT_ACTF    20
+#define M_TCAM_SRVR_HIT_ACTF    0x1fU
+#define V_TCAM_SRVR_HIT_ACTF(x) ((x) << S_TCAM_SRVR_HIT_ACTF)
+#define G_TCAM_SRVR_HIT_ACTF(x) (((x) >> S_TCAM_SRVR_HIT_ACTF) & M_TCAM_SRVR_HIT_ACTF)
+
+#define S_INVLDRD    15
+#define M_INVLDRD    0x1fU
+#define V_INVLDRD(x) ((x) << S_INVLDRD)
+#define G_INVLDRD(x) (((x) >> S_INVLDRD) & M_INVLDRD)
+
+#define S_TUPLZERO    10
+#define M_TUPLZERO    0x1fU
+#define V_TUPLZERO(x) ((x) << S_TUPLZERO)
+#define G_TUPLZERO(x) (((x) >> S_TUPLZERO) & M_TUPLZERO)
+
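Each LE_DB_RSP_CODE register holds up to six 5-bit response codes at bit offsets 0, 5, 10, 15, 20 and 25. A self-contained sketch walking the six codes of A_LE_DB_RSP_CODE_0 (the sample register image is arbitrary and the register read itself is out of scope):

#include <stdint.h>
#include <stdio.h>

#define M_RSP_CODE 0x1fU

int
main(void)
{
	/* Field names of A_LE_DB_RSP_CODE_0, lowest bit offset first. */
	static const char *name[] = {
		"TCAM_ACTV_HIT", "SRAM_SRVR_HIT", "TCAM_SRVR_HIT",
		"HASH_ACTV_SUCC", "TCAM_ACTV_SUCC", "SUCCESS",
	};
	uint32_t v = 0x04411044;	/* arbitrary sample register image */
	int i;

	for (i = 0; i < 6; i++)
		printf("%-14s = %u\n", name[i],
		    (unsigned)((v >> (5 * i)) & M_RSP_CODE));
	return (0);
}
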
+#define A_LE_DB_ACT_CNT_IPV4_TCAM 0x19c94
+#define A_LE_DB_ACT_CNT_IPV6_TCAM 0x19c98
+#define A_LE_ACT_CNT_THRSH 0x19c9c
+
+#define S_ACT_CNT_THRSH    0
+#define M_ACT_CNT_THRSH    0x1fffffU
+#define V_ACT_CNT_THRSH(x) ((x) << S_ACT_CNT_THRSH)
+#define G_ACT_CNT_THRSH(x) (((x) >> S_ACT_CNT_THRSH) & M_ACT_CNT_THRSH)
+
 #define A_LE_DB_MASK_IPV6 0x19ca0
+#define A_LE_DB_DBG_MATCH_DATA 0x19ca0
 #define A_LE_DB_REQ_RSP_CNT 0x19ce4
+
+#define S_T4_RSPCNT    16
+#define M_T4_RSPCNT    0xffffU
+#define V_T4_RSPCNT(x) ((x) << S_T4_RSPCNT)
+#define G_T4_RSPCNT(x) (((x) >> S_T4_RSPCNT) & M_T4_RSPCNT)
+
+#define S_T4_REQCNT    0
+#define M_T4_REQCNT    0xffffU
+#define V_T4_REQCNT(x) ((x) << S_T4_REQCNT)
+#define G_T4_REQCNT(x) (((x) >> S_T4_REQCNT) & M_T4_REQCNT)
+
+#define S_RSPCNTLE    16
+#define M_RSPCNTLE    0xffffU
+#define V_RSPCNTLE(x) ((x) << S_RSPCNTLE)
+#define G_RSPCNTLE(x) (((x) >> S_RSPCNTLE) & M_RSPCNTLE)
+
+#define S_REQCNTLE    0
+#define M_REQCNTLE    0xffffU
+#define V_REQCNTLE(x) ((x) << S_REQCNTLE)
+#define G_REQCNTLE(x) (((x) >> S_REQCNTLE) & M_REQCNTLE)
+
 #define A_LE_DB_DBGI_CONFIG 0x19cf0
 
 #define S_DBGICMDPERR    31
@@ -18838,6 +39363,14 @@
 #define V_DBGICMDMODE(x) ((x) << S_DBGICMDMODE)
 #define G_DBGICMDMODE(x) (((x) >> S_DBGICMDMODE) & M_DBGICMDMODE)
 
+#define S_DBGICMDMSKREAD    21
+#define V_DBGICMDMSKREAD(x) ((x) << S_DBGICMDMSKREAD)
+#define F_DBGICMDMSKREAD    V_DBGICMDMSKREAD(1U)
+
+#define S_DBGICMDWRITE    17
+#define V_DBGICMDWRITE(x) ((x) << S_DBGICMDWRITE)
+#define F_DBGICMDWRITE    V_DBGICMDWRITE(1U)
+
 #define A_LE_DB_DBGI_REQ_TCAM_CMD 0x19cf4
 
 #define S_DBGICMD    20
@@ -18850,6 +39383,13 @@
 #define V_DBGITINDEX(x) ((x) << S_DBGITINDEX)
 #define G_DBGITINDEX(x) (((x) >> S_DBGITINDEX) & M_DBGITINDEX)
 
+#define A_LE_DB_DBGI_REQ_CMD 0x19cf4
+
+#define S_DBGITID    0
+#define M_DBGITID    0xfffffU
+#define V_DBGITID(x) ((x) << S_DBGITID)
+#define G_DBGITID(x) (((x) >> S_DBGITID) & M_DBGITID)
+
 #define A_LE_PERR_ENABLE 0x19cf8
 
 #define S_REQQUEUE    1
@@ -18860,6 +39400,39 @@
 #define V_TCAM(x) ((x) << S_TCAM)
 #define F_TCAM    V_TCAM(1U)
 
+#define S_MARSPPARERRLE    17
+#define V_MARSPPARERRLE(x) ((x) << S_MARSPPARERRLE)
+#define F_MARSPPARERRLE    V_MARSPPARERRLE(1U)
+
+#define S_REQQUEUELE    16
+#define V_REQQUEUELE(x) ((x) << S_REQQUEUELE)
+#define F_REQQUEUELE    V_REQQUEUELE(1U)
+
+#define S_VFPARERRLE    14
+#define V_VFPARERRLE(x) ((x) << S_VFPARERRLE)
+#define F_VFPARERRLE    V_VFPARERRLE(1U)
+
+#define S_TCAMLE    6
+#define V_TCAMLE(x) ((x) << S_TCAMLE)
+#define F_TCAMLE    V_TCAMLE(1U)
+
+#define S_BKCHKPERIOD    22
+#define M_BKCHKPERIOD    0x3ffU
+#define V_BKCHKPERIOD(x) ((x) << S_BKCHKPERIOD)
+#define G_BKCHKPERIOD(x) (((x) >> S_BKCHKPERIOD) & M_BKCHKPERIOD)
+
+#define S_TCAMBKCHKEN    21
+#define V_TCAMBKCHKEN(x) ((x) << S_TCAMBKCHKEN)
+#define F_TCAMBKCHKEN    V_TCAMBKCHKEN(1U)
+
+#define S_T6_CLCAMFIFOERR    2
+#define V_T6_CLCAMFIFOERR(x) ((x) << S_T6_CLCAMFIFOERR)
+#define F_T6_CLCAMFIFOERR    V_T6_CLCAMFIFOERR(1U)
+
+#define S_T6_HASHTBLMEMCRCERR    1
+#define V_T6_HASHTBLMEMCRCERR(x) ((x) << S_T6_HASHTBLMEMCRCERR)
+#define F_T6_HASHTBLMEMCRCERR    V_T6_HASHTBLMEMCRCERR(1U)
+
 #define A_LE_SPARE 0x19cfc
 #define A_LE_DB_DBGI_REQ_DATA 0x19d00
 #define A_LE_DB_DBGI_REQ_MASK 0x19d50
@@ -18891,6 +39464,16 @@
 #define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID)
 #define F_DBGIRSPVALID    V_DBGIRSPVALID(1U)
 
+#define S_DBGIRSPTID    12
+#define M_DBGIRSPTID    0xfffffU
+#define V_DBGIRSPTID(x) ((x) << S_DBGIRSPTID)
+#define G_DBGIRSPTID(x) (((x) >> S_DBGIRSPTID) & M_DBGIRSPTID)
+
+#define S_DBGIRSPLEARN    2
+#define V_DBGIRSPLEARN(x) ((x) << S_DBGIRSPLEARN)
+#define F_DBGIRSPLEARN    V_DBGIRSPLEARN(1U)
+
+#define A_LE_DBG_SEL 0x19d98
 #define A_LE_DB_DBGI_RSP_DATA 0x19da0
 #define A_LE_DB_DBGI_RSP_LAST_CMD 0x19de4
 
@@ -18930,6 +39513,13 @@
 #define V_SVRBASE_ADDR(x) ((x) << S_SVRBASE_ADDR)
 #define G_SVRBASE_ADDR(x) (((x) >> S_SVRBASE_ADDR) & M_SVRBASE_ADDR)
 
+#define A_LE_DB_TCAM_TID_BASE 0x19df0
+
+#define S_TCAM_TID_BASE    0
+#define M_TCAM_TID_BASE    0xfffffU
+#define V_TCAM_TID_BASE(x) ((x) << S_TCAM_TID_BASE)
+#define G_TCAM_TID_BASE(x) (((x) >> S_TCAM_TID_BASE) & M_TCAM_TID_BASE)
+
 #define A_LE_DB_FTID_FLTRBASE 0x19df4
 
 #define S_FLTRBASE_ADDR    2
@@ -18937,6 +39527,13 @@
 #define V_FLTRBASE_ADDR(x) ((x) << S_FLTRBASE_ADDR)
 #define G_FLTRBASE_ADDR(x) (((x) >> S_FLTRBASE_ADDR) & M_FLTRBASE_ADDR)
 
+#define A_LE_DB_CLCAM_TID_BASE 0x19df4
+
+#define S_CLCAM_TID_BASE    0
+#define M_CLCAM_TID_BASE    0xfffffU
+#define V_CLCAM_TID_BASE(x) ((x) << S_CLCAM_TID_BASE)
+#define G_CLCAM_TID_BASE(x) (((x) >> S_CLCAM_TID_BASE) & M_CLCAM_TID_BASE)
+
 #define A_LE_DB_TID_HASHBASE 0x19df8
 
 #define S_HASHBASE_ADDR    2
@@ -18944,6 +39541,13 @@
 #define V_HASHBASE_ADDR(x) ((x) << S_HASHBASE_ADDR)
 #define G_HASHBASE_ADDR(x) (((x) >> S_HASHBASE_ADDR) & M_HASHBASE_ADDR)
 
+#define A_T6_LE_DB_HASH_TID_BASE 0x19df8
+
+#define S_HASH_TID_BASE    0
+#define M_HASH_TID_BASE    0xfffffU
+#define V_HASH_TID_BASE(x) ((x) << S_HASH_TID_BASE)
+#define G_HASH_TID_BASE(x) (((x) >> S_HASH_TID_BASE) & M_HASH_TID_BASE)
+
 #define A_LE_PERR_INJECT 0x19dfc
 
 #define S_LEMEMSEL    1
@@ -18951,18 +39555,135 @@
 #define V_LEMEMSEL(x) ((x) << S_LEMEMSEL)
 #define G_LEMEMSEL(x) (((x) >> S_LEMEMSEL) & M_LEMEMSEL)
 
+#define A_LE_DB_SSRAM_TID_BASE 0x19dfc
+
+#define S_SSRAM_TID_BASE    0
+#define M_SSRAM_TID_BASE    0xfffffU
+#define V_SSRAM_TID_BASE(x) ((x) << S_SSRAM_TID_BASE)
+#define G_SSRAM_TID_BASE(x) (((x) >> S_SSRAM_TID_BASE) & M_SSRAM_TID_BASE)
+
 #define A_LE_DB_ACTIVE_MASK_IPV4 0x19e00
+#define A_LE_T5_DB_ACTIVE_MASK_IPV4 0x19e00
 #define A_LE_DB_ACTIVE_MASK_IPV6 0x19e50
 #define A_LE_HASH_MASK_GEN_IPV4 0x19ea0
+#define A_LE_HASH_MASK_GEN_IPV4T5 0x19ea0
 #define A_LE_HASH_MASK_GEN_IPV6 0x19eb0
+#define A_LE_HASH_MASK_GEN_IPV6T5 0x19eb4
+#define A_T6_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
 #define A_LE_HASH_MASK_CMP_IPV4 0x19ee0
+#define A_LE_HASH_MASK_CMP_IPV4T5 0x19ee4
+#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 0x19ee4
 #define A_LE_HASH_MASK_CMP_IPV6 0x19ef0
+#define A_LE_DB_PSV_FILTER_MASK_FLT_IPV4 0x19ef0
+#define A_LE_HASH_MASK_CMP_IPV6T5 0x19ef8
+#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV6 0x19f04
 #define A_LE_DEBUG_LA_CONFIG 0x19f20
 #define A_LE_REQ_DEBUG_LA_DATA 0x19f24
 #define A_LE_REQ_DEBUG_LA_WRPTR 0x19f28
+#define A_LE_DB_PSV_FILTER_MASK_FLT_IPV6 0x19f28
 #define A_LE_RSP_DEBUG_LA_DATA 0x19f2c
 #define A_LE_RSP_DEBUG_LA_WRPTR 0x19f30
+#define A_LE_DEBUG_LA_SELECTOR 0x19f34
+#define A_LE_SRVR_SRAM_INIT 0x19f34
 
+#define S_SRVRSRAMBASE    2
+#define M_SRVRSRAMBASE    0xfffffU
+#define V_SRVRSRAMBASE(x) ((x) << S_SRVRSRAMBASE)
+#define G_SRVRSRAMBASE(x) (((x) >> S_SRVRSRAMBASE) & M_SRVRSRAMBASE)
+
+#define S_SRVRINITBUSY    1
+#define V_SRVRINITBUSY(x) ((x) << S_SRVRINITBUSY)
+#define F_SRVRINITBUSY    V_SRVRINITBUSY(1U)
+
+#define S_SRVRINIT    0
+#define V_SRVRINIT(x) ((x) << S_SRVRINIT)
+#define F_SRVRINIT    V_SRVRINIT(1U)
+
+#define A_LE_DB_SRVR_SRAM_CONFIG 0x19f34
+
+#define S_PRI_HFILT    4
+#define V_PRI_HFILT(x) ((x) << S_PRI_HFILT)
+#define F_PRI_HFILT    V_PRI_HFILT(1U)
+
+#define S_PRI_SRVR    3
+#define V_PRI_SRVR(x) ((x) << S_PRI_SRVR)
+#define F_PRI_SRVR    V_PRI_SRVR(1U)
+
+#define S_PRI_FILT    2
+#define V_PRI_FILT(x) ((x) << S_PRI_FILT)
+#define F_PRI_FILT    V_PRI_FILT(1U)
+
+#define A_LE_DEBUG_LA_CAPTURED_DATA 0x19f38
+#define A_LE_SRVR_VF_SRCH_TABLE 0x19f38
+
+#define S_RDWR    21
+#define V_RDWR(x) ((x) << S_RDWR)
+#define F_RDWR    V_RDWR(1U)
+
+#define S_VFINDEX    14
+#define M_VFINDEX    0x7fU
+#define V_VFINDEX(x) ((x) << S_VFINDEX)
+#define G_VFINDEX(x) (((x) >> S_VFINDEX) & M_VFINDEX)
+
+#define S_SRCHHADDR    7
+#define M_SRCHHADDR    0x7fU
+#define V_SRCHHADDR(x) ((x) << S_SRCHHADDR)
+#define G_SRCHHADDR(x) (((x) >> S_SRCHHADDR) & M_SRCHHADDR)
+
+#define S_SRCHLADDR    0
+#define M_SRCHLADDR    0x7fU
+#define V_SRCHLADDR(x) ((x) << S_SRCHLADDR)
+#define G_SRCHLADDR(x) (((x) >> S_SRCHLADDR) & M_SRCHLADDR)
+
+#define A_LE_DB_SRVR_VF_SRCH_TABLE_CTRL 0x19f38
+
+#define S_VFLUTBUSY    10
+#define V_VFLUTBUSY(x) ((x) << S_VFLUTBUSY)
+#define F_VFLUTBUSY    V_VFLUTBUSY(1U)
+
+#define S_VFLUTSTART    9
+#define V_VFLUTSTART(x) ((x) << S_VFLUTSTART)
+#define F_VFLUTSTART    V_VFLUTSTART(1U)
+
+#define S_T6_RDWR    8
+#define V_T6_RDWR(x) ((x) << S_T6_RDWR)
+#define F_T6_RDWR    V_T6_RDWR(1U)
+
+#define S_T6_VFINDEX    0
+#define M_T6_VFINDEX    0xffU
+#define V_T6_VFINDEX(x) ((x) << S_T6_VFINDEX)
+#define G_T6_VFINDEX(x) (((x) >> S_T6_VFINDEX) & M_T6_VFINDEX)
+
+#define A_LE_MA_DEBUG_LA_DATA 0x19f3c
+#define A_LE_DB_SRVR_VF_SRCH_TABLE_DATA 0x19f3c
+
+#define S_T6_SRCHHADDR    12
+#define M_T6_SRCHHADDR    0xfffU
+#define V_T6_SRCHHADDR(x) ((x) << S_T6_SRCHHADDR)
+#define G_T6_SRCHHADDR(x) (((x) >> S_T6_SRCHHADDR) & M_T6_SRCHHADDR)
+
+#define S_T6_SRCHLADDR    0
+#define M_T6_SRCHLADDR    0xfffU
+#define V_T6_SRCHLADDR(x) ((x) << S_T6_SRCHLADDR)
+#define G_T6_SRCHLADDR(x) (((x) >> S_T6_SRCHLADDR) & M_T6_SRCHLADDR)
+
+#define A_LE_RSP_DEBUG_LA_HASH_WRPTR 0x19f40
+#define A_LE_DB_SECOND_ACTIVE_MASK_IPV4 0x19f40
+#define A_LE_HASH_DEBUG_LA_DATA 0x19f44
+#define A_LE_RSP_DEBUG_LA_TCAM_WRPTR 0x19f48
+#define A_LE_TCAM_DEBUG_LA_DATA 0x19f4c
+#define A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 0x19f90
+#define A_LE_DB_SECOND_CMP_HASH_MASK_IPV4 0x19fa4
+#define A_LE_HASH_COLLISION 0x19fc4
+#define A_LE_GLOBAL_COLLISION 0x19fc8
+#define A_LE_FULL_CNT_COLLISION 0x19fcc
+#define A_LE_DEBUG_LA_CONFIGT5 0x19fd0
+#define A_LE_REQ_DEBUG_LA_DATAT5 0x19fd4
+#define A_LE_REQ_DEBUG_LA_WRPTRT5 0x19fd8
+#define A_LE_RSP_DEBUG_LA_DATAT5 0x19fdc
+#define A_LE_RSP_DEBUG_LA_WRPTRT5 0x19fe0
+#define A_LE_DEBUG_LA_SEL_DATA 0x19fe4
+
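/*
 * Several of the LE addresses above alias: A_LE_DB_MASK_IPV6 and
 * A_LE_DB_DBG_MATCH_DATA are both 0x19ca0, because later chip revisions
 * repurpose the same offset under a new name.  Callers pick the name that
 * matches the chip; a rough sketch (is_t6() and t4_read_reg() as used
 * elsewhere in the driver; the exact T6 split shown is an assumption):
 *
 *	uint32_t v = t4_read_reg(sc, is_t6(sc) ?
 *	    A_LE_DB_DBG_MATCH_DATA : A_LE_DB_MASK_IPV6);
 */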
 /* registers for module NCSI */
 #define NCSI_BASE_ADDR 0x1a000
 
@@ -19304,6 +40025,19 @@
 #define V_DEBUGSEL(x) ((x) << S_DEBUGSEL)
 #define G_DEBUGSEL(x) (((x) >> S_DEBUGSEL) & M_DEBUGSEL)
 
+#define S_TXFIFO_EMPTY    4
+#define V_TXFIFO_EMPTY(x) ((x) << S_TXFIFO_EMPTY)
+#define F_TXFIFO_EMPTY    V_TXFIFO_EMPTY(1U)
+
+#define S_TXFIFO_FULL    3
+#define V_TXFIFO_FULL(x) ((x) << S_TXFIFO_FULL)
+#define F_TXFIFO_FULL    V_TXFIFO_FULL(1U)
+
+#define S_PKG_ID    0
+#define M_PKG_ID    0x7U
+#define V_PKG_ID(x) ((x) << S_PKG_ID)
+#define G_PKG_ID(x) (((x) >> S_PKG_ID) & M_PKG_ID)
+
 #define A_NCSI_PERR_INJECT 0x1a0f4
 
 #define S_MCSIMELSEL    1
@@ -19310,6 +40044,7 @@
 #define V_MCSIMELSEL(x) ((x) << S_MCSIMELSEL)
 #define F_MCSIMELSEL    V_MCSIMELSEL(1U)
 
+#define A_NCSI_PERR_ENABLE 0x1a0f8
 #define A_NCSI_MACB_NETWORK_CTRL 0x1a100
 
 #define S_TXSNDZEROPAUSE    12
@@ -20091,6 +40826,11 @@
 #define V_RXSOP(x) ((x) << S_RXSOP)
 #define G_RXSOP(x) (((x) >> S_RXSOP) & M_RXSOP)
 
+#define S_T4_RXEOP    0
+#define M_T4_RXEOP    0xffU
+#define V_T4_RXEOP(x) ((x) << S_T4_RXEOP)
+#define G_T4_RXEOP(x) (((x) >> S_T4_RXEOP) & M_T4_RXEOP)
+
 #define A_XGMAC_PORT_LINK_STATUS 0x1034
 
 #define S_REMFLT    3
@@ -23323,6 +44063,16 @@
 #define V_OBQFULL(x) ((x) << S_OBQFULL)
 #define G_OBQFULL(x) (((x) >> S_OBQFULL) & M_OBQFULL)
 
+#define S_T5_OBQGEN    8
+#define M_T5_OBQGEN    0xffffffU
+#define V_T5_OBQGEN(x) ((x) << S_T5_OBQGEN)
+#define G_T5_OBQGEN(x) (((x) >> S_T5_OBQGEN) & M_T5_OBQGEN)
+
+#define S_T5_OBQFULL    0
+#define M_T5_OBQFULL    0xffU
+#define V_T5_OBQFULL(x) ((x) << S_T5_OBQFULL)
+#define G_T5_OBQFULL(x) (((x) >> S_T5_OBQFULL) & M_T5_OBQFULL)
+
 #define A_UP_IBQ_0_RDADDR 0x10
 
 #define S_QUEID    13
@@ -23446,6 +44196,10 @@
 #define V_QUEBAREADDR(x) ((x) << S_QUEBAREADDR)
 #define F_QUEBAREADDR    V_QUEBAREADDR(1U)
 
+#define S_QUE1KEN    6
+#define V_QUE1KEN(x) ((x) << S_QUE1KEN)
+#define F_QUE1KEN    V_QUE1KEN(1U)
+
 #define A_UP_IBQ_0_REALADDR 0xd4
 
 #define S_QUERDADDRWRAP    31
@@ -23541,6 +44295,10 @@
 #define V_UPDBGLAEN(x) ((x) << S_UPDBGLAEN)
 #define F_UPDBGLAEN    V_UPDBGLAEN(1U)
 
+#define S_UPDBGLABUSY    14
+#define V_UPDBGLABUSY(x) ((x) << S_UPDBGLABUSY)
+#define F_UPDBGLABUSY    V_UPDBGLABUSY(1U)
+
 #define A_UP_UP_DBG_LA_DATA 0x144
 #define A_UP_PIO_MST_CONFIG 0x148
 
@@ -23572,6 +44330,20 @@
 #define V_UPRID(x) ((x) << S_UPRID)
 #define G_UPRID(x) (((x) >> S_UPRID) & M_UPRID)
 
+#define S_REQVFVLD    27
+#define V_REQVFVLD(x) ((x) << S_REQVFVLD)
+#define F_REQVFVLD    V_REQVFVLD(1U)
+
+#define S_T5_UPRID    0
+#define M_T5_UPRID    0xffU
+#define V_T5_UPRID(x) ((x) << S_T5_UPRID)
+#define G_T5_UPRID(x) (((x) >> S_T5_UPRID) & M_T5_UPRID)
+
+#define S_T6_UPRID    0
+#define M_T6_UPRID    0x1ffU
+#define V_T6_UPRID(x) ((x) << S_T6_UPRID)
+#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID)
+
 #define A_UP_UP_SELF_CONTROL 0x14c
 
 #define S_UPSELFRESET    0
@@ -23587,6 +44359,20 @@
 #define A_UP_MAILBOX_PF6_CTL 0x1e0
 #define A_UP_MAILBOX_PF7_CTL 0x1f0
 #define A_UP_TSCH_CHNLN_CLASS_RDY 0x200
+
+#define S_ECO_15444_SGE_DB_BUSY    31
+#define V_ECO_15444_SGE_DB_BUSY(x) ((x) << S_ECO_15444_SGE_DB_BUSY)
+#define F_ECO_15444_SGE_DB_BUSY    V_ECO_15444_SGE_DB_BUSY(1U)
+
+#define S_ECO_15444_PL_INTF_BUSY    30
+#define V_ECO_15444_PL_INTF_BUSY(x) ((x) << S_ECO_15444_PL_INTF_BUSY)
+#define F_ECO_15444_PL_INTF_BUSY    V_ECO_15444_PL_INTF_BUSY(1U)
+
+#define S_TSCHCHNLCRDY    0
+#define M_TSCHCHNLCRDY    0x3fffffffU
+#define V_TSCHCHNLCRDY(x) ((x) << S_TSCHCHNLCRDY)
+#define G_TSCHCHNLCRDY(x) (((x) >> S_TSCHCHNLCRDY) & M_TSCHCHNLCRDY)
+
 #define A_UP_TSCH_CHNLN_CLASS_WATCH_RDY 0x204
 
 #define S_TSCHWRRLIMIT    16
@@ -23623,6 +44409,18 @@
 #define V_TSCHCHNLCCNT(x) ((x) << S_TSCHCHNLCCNT)
 #define G_TSCHCHNLCCNT(x) (((x) >> S_TSCHCHNLCCNT) & M_TSCHCHNLCCNT)
 
+#define S_TSCHCHNLCHDIS    31
+#define V_TSCHCHNLCHDIS(x) ((x) << S_TSCHCHNLCHDIS)
+#define F_TSCHCHNLCHDIS    V_TSCHCHNLCHDIS(1U)
+
+#define S_TSCHCHNLWDIS    30
+#define V_TSCHCHNLWDIS(x) ((x) << S_TSCHCHNLWDIS)
+#define F_TSCHCHNLWDIS    V_TSCHCHNLWDIS(1U)
+
+#define S_TSCHCHNLCLDIS    29
+#define V_TSCHCHNLCLDIS(x) ((x) << S_TSCHCHNLCLDIS)
+#define F_TSCHCHNLCLDIS    V_TSCHCHNLCLDIS(1U)
+
 #define A_UP_UPLADBGPCCHKDATA_0 0x240
 #define A_UP_UPLADBGPCCHKMASK_0 0x244
 #define A_UP_UPLADBGPCCHKDATA_1 0x250
@@ -23631,6 +44429,90 @@
 #define A_UP_UPLADBGPCCHKMASK_2 0x264
 #define A_UP_UPLADBGPCCHKDATA_3 0x270
 #define A_UP_UPLADBGPCCHKMASK_3 0x274
+#define A_UP_IBQ_0_SHADOW_RDADDR 0x280
+#define A_UP_IBQ_0_SHADOW_WRADDR 0x284
+#define A_UP_IBQ_0_SHADOW_STATUS 0x288
+#define A_UP_IBQ_0_SHADOW_PKTCNT 0x28c
+#define A_UP_IBQ_1_SHADOW_RDADDR 0x290
+#define A_UP_IBQ_1_SHADOW_WRADDR 0x294
+#define A_UP_IBQ_1_SHADOW_STATUS 0x298
+#define A_UP_IBQ_1_SHADOW_PKTCNT 0x29c
+#define A_UP_IBQ_2_SHADOW_RDADDR 0x2a0
+#define A_UP_IBQ_2_SHADOW_WRADDR 0x2a4
+#define A_UP_IBQ_2_SHADOW_STATUS 0x2a8
+#define A_UP_IBQ_2_SHADOW_PKTCNT 0x2ac
+#define A_UP_IBQ_3_SHADOW_RDADDR 0x2b0
+#define A_UP_IBQ_3_SHADOW_WRADDR 0x2b4
+#define A_UP_IBQ_3_SHADOW_STATUS 0x2b8
+#define A_UP_IBQ_3_SHADOW_PKTCNT 0x2bc
+#define A_UP_IBQ_4_SHADOW_RDADDR 0x2c0
+#define A_UP_IBQ_4_SHADOW_WRADDR 0x2c4
+#define A_UP_IBQ_4_SHADOW_STATUS 0x2c8
+#define A_UP_IBQ_4_SHADOW_PKTCNT 0x2cc
+#define A_UP_IBQ_5_SHADOW_RDADDR 0x2d0
+#define A_UP_IBQ_5_SHADOW_WRADDR 0x2d4
+#define A_UP_IBQ_5_SHADOW_STATUS 0x2d8
+#define A_UP_IBQ_5_SHADOW_PKTCNT 0x2dc
+#define A_UP_OBQ_0_SHADOW_RDADDR 0x2e0
+#define A_UP_OBQ_0_SHADOW_WRADDR 0x2e4
+#define A_UP_OBQ_0_SHADOW_STATUS 0x2e8
+#define A_UP_OBQ_0_SHADOW_PKTCNT 0x2ec
+#define A_UP_OBQ_1_SHADOW_RDADDR 0x2f0
+#define A_UP_OBQ_1_SHADOW_WRADDR 0x2f4
+#define A_UP_OBQ_1_SHADOW_STATUS 0x2f8
+#define A_UP_OBQ_1_SHADOW_PKTCNT 0x2fc
+#define A_UP_OBQ_2_SHADOW_RDADDR 0x300
+#define A_UP_OBQ_2_SHADOW_WRADDR 0x304
+#define A_UP_OBQ_2_SHADOW_STATUS 0x308
+#define A_UP_OBQ_2_SHADOW_PKTCNT 0x30c
+#define A_UP_OBQ_3_SHADOW_RDADDR 0x310
+#define A_UP_OBQ_3_SHADOW_WRADDR 0x314
+#define A_UP_OBQ_3_SHADOW_STATUS 0x318
+#define A_UP_OBQ_3_SHADOW_PKTCNT 0x31c
+#define A_UP_OBQ_4_SHADOW_RDADDR 0x320
+#define A_UP_OBQ_4_SHADOW_WRADDR 0x324
+#define A_UP_OBQ_4_SHADOW_STATUS 0x328
+#define A_UP_OBQ_4_SHADOW_PKTCNT 0x32c
+#define A_UP_OBQ_5_SHADOW_RDADDR 0x330
+#define A_UP_OBQ_5_SHADOW_WRADDR 0x334
+#define A_UP_OBQ_5_SHADOW_STATUS 0x338
+#define A_UP_OBQ_5_SHADOW_PKTCNT 0x33c
+#define A_UP_OBQ_6_SHADOW_RDADDR 0x340
+#define A_UP_OBQ_6_SHADOW_WRADDR 0x344
+#define A_UP_OBQ_6_SHADOW_STATUS 0x348
+#define A_UP_OBQ_6_SHADOW_PKTCNT 0x34c
+#define A_UP_OBQ_7_SHADOW_RDADDR 0x350
+#define A_UP_OBQ_7_SHADOW_WRADDR 0x354
+#define A_UP_OBQ_7_SHADOW_STATUS 0x358
+#define A_UP_OBQ_7_SHADOW_PKTCNT 0x35c
+#define A_UP_IBQ_0_SHADOW_CONFIG 0x360
+#define A_UP_IBQ_0_SHADOW_REALADDR 0x364
+#define A_UP_IBQ_1_SHADOW_CONFIG 0x368
+#define A_UP_IBQ_1_SHADOW_REALADDR 0x36c
+#define A_UP_IBQ_2_SHADOW_CONFIG 0x370
+#define A_UP_IBQ_2_SHADOW_REALADDR 0x374
+#define A_UP_IBQ_3_SHADOW_CONFIG 0x378
+#define A_UP_IBQ_3_SHADOW_REALADDR 0x37c
+#define A_UP_IBQ_4_SHADOW_CONFIG 0x380
+#define A_UP_IBQ_4_SHADOW_REALADDR 0x384
+#define A_UP_IBQ_5_SHADOW_CONFIG 0x388
+#define A_UP_IBQ_5_SHADOW_REALADDR 0x38c
+#define A_UP_OBQ_0_SHADOW_CONFIG 0x390
+#define A_UP_OBQ_0_SHADOW_REALADDR 0x394
+#define A_UP_OBQ_1_SHADOW_CONFIG 0x398
+#define A_UP_OBQ_1_SHADOW_REALADDR 0x39c
+#define A_UP_OBQ_2_SHADOW_CONFIG 0x3a0
+#define A_UP_OBQ_2_SHADOW_REALADDR 0x3a4
+#define A_UP_OBQ_3_SHADOW_CONFIG 0x3a8
+#define A_UP_OBQ_3_SHADOW_REALADDR 0x3ac
+#define A_UP_OBQ_4_SHADOW_CONFIG 0x3b0
+#define A_UP_OBQ_4_SHADOW_REALADDR 0x3b4
+#define A_UP_OBQ_5_SHADOW_CONFIG 0x3b8
+#define A_UP_OBQ_5_SHADOW_REALADDR 0x3bc
+#define A_UP_OBQ_6_SHADOW_CONFIG 0x3c0
+#define A_UP_OBQ_6_SHADOW_REALADDR 0x3c4
+#define A_UP_OBQ_7_SHADOW_CONFIG 0x3c8
+#define A_UP_OBQ_7_SHADOW_REALADDR 0x3cc
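/*
 * The shadow queue registers above are laid out at a regular stride: the
 * RDADDR/WRADDR/STATUS/PKTCNT quads step by 0x10 per queue (IBQ 0-5 from
 * 0x280, OBQ 0-7 from 0x2e0) and the CONFIG/REALADDR pairs by 0x8 (from
 * 0x360 and 0x390).  Hypothetical indexed helpers, equivalent to the flat
 * names:
 *
 *	#define UP_IBQ_SHADOW_RDADDR(i)	(A_UP_IBQ_0_SHADOW_RDADDR + (i) * 0x10)
 *	#define UP_OBQ_SHADOW_RDADDR(i)	(A_UP_OBQ_0_SHADOW_RDADDR + (i) * 0x10)
 */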
 
 /* registers for module CIM_CTL */
 #define CIM_CTL_BASE_ADDR 0x0
@@ -23670,6 +44552,14 @@
 #define V_PREFEN(x) ((x) << S_PREFEN)
 #define F_PREFEN    V_PREFEN(1U)
 
+#define S_DISSLOWTIMEOUT    14
+#define V_DISSLOWTIMEOUT(x) ((x) << S_DISSLOWTIMEOUT)
+#define F_DISSLOWTIMEOUT    V_DISSLOWTIMEOUT(1U)
+
+#define S_INTLRSPEN    9
+#define V_INTLRSPEN(x) ((x) << S_INTLRSPEN)
+#define F_INTLRSPEN    V_INTLRSPEN(1U)
+
 #define A_CIM_CTL_PREFADDR 0x4
 #define A_CIM_CTL_ALLOCADDR 0x8
 #define A_CIM_CTL_INVLDTADDR 0xc
@@ -23767,6 +44657,10 @@
 #define V_TSCHNRESET(x) ((x) << S_TSCHNRESET)
 #define F_TSCHNRESET    V_TSCHNRESET(1U)
 
+#define S_T6_MIN_MAX_EN    29
+#define V_T6_MIN_MAX_EN(x) ((x) << S_T6_MIN_MAX_EN)
+#define F_T6_MIN_MAX_EN    V_T6_MIN_MAX_EN(1U)
+
 #define A_CIM_CTL_TSCH_CHNLN_TICK 0x904
 
 #define S_TSCHNLTICK    0
@@ -23774,6 +44668,72 @@
 #define V_TSCHNLTICK(x) ((x) << S_TSCHNLTICK)
 #define G_TSCHNLTICK(x) (((x) >> S_TSCHNLTICK) & M_TSCHNLTICK)
 
+#define A_CIM_CTL_TSCH_CHNLN_CLASS_RATECTL 0x904
+
+#define S_TSC15RATECTL    15
+#define V_TSC15RATECTL(x) ((x) << S_TSC15RATECTL)
+#define F_TSC15RATECTL    V_TSC15RATECTL(1U)
+
+#define S_TSC14RATECTL    14
+#define V_TSC14RATECTL(x) ((x) << S_TSC14RATECTL)
+#define F_TSC14RATECTL    V_TSC14RATECTL(1U)
+
+#define S_TSC13RATECTL    13
+#define V_TSC13RATECTL(x) ((x) << S_TSC13RATECTL)
+#define F_TSC13RATECTL    V_TSC13RATECTL(1U)
+
+#define S_TSC12RATECTL    12
+#define V_TSC12RATECTL(x) ((x) << S_TSC12RATECTL)
+#define F_TSC12RATECTL    V_TSC12RATECTL(1U)
+
+#define S_TSC11RATECTL    11
+#define V_TSC11RATECTL(x) ((x) << S_TSC11RATECTL)
+#define F_TSC11RATECTL    V_TSC11RATECTL(1U)
+
+#define S_TSC10RATECTL    10
+#define V_TSC10RATECTL(x) ((x) << S_TSC10RATECTL)
+#define F_TSC10RATECTL    V_TSC10RATECTL(1U)
+
+#define S_TSC9RATECTL    9
+#define V_TSC9RATECTL(x) ((x) << S_TSC9RATECTL)
+#define F_TSC9RATECTL    V_TSC9RATECTL(1U)
+
+#define S_TSC8RATECTL    8
+#define V_TSC8RATECTL(x) ((x) << S_TSC8RATECTL)
+#define F_TSC8RATECTL    V_TSC8RATECTL(1U)
+
+#define S_TSC7RATECTL    7
+#define V_TSC7RATECTL(x) ((x) << S_TSC7RATECTL)
+#define F_TSC7RATECTL    V_TSC7RATECTL(1U)
+
+#define S_TSC6RATECTL    6
+#define V_TSC6RATECTL(x) ((x) << S_TSC6RATECTL)
+#define F_TSC6RATECTL    V_TSC6RATECTL(1U)
+
+#define S_TSC5RATECTL    5
+#define V_TSC5RATECTL(x) ((x) << S_TSC5RATECTL)
+#define F_TSC5RATECTL    V_TSC5RATECTL(1U)
+
+#define S_TSC4RATECTL    4
+#define V_TSC4RATECTL(x) ((x) << S_TSC4RATECTL)
+#define F_TSC4RATECTL    V_TSC4RATECTL(1U)
+
+#define S_TSC3RATECTL    3
+#define V_TSC3RATECTL(x) ((x) << S_TSC3RATECTL)
+#define F_TSC3RATECTL    V_TSC3RATECTL(1U)
+
+#define S_TSC2RATECTL    2
+#define V_TSC2RATECTL(x) ((x) << S_TSC2RATECTL)
+#define F_TSC2RATECTL    V_TSC2RATECTL(1U)
+
+#define S_TSC1RATECTL    1
+#define V_TSC1RATECTL(x) ((x) << S_TSC1RATECTL)
+#define F_TSC1RATECTL    V_TSC1RATECTL(1U)
+
+#define S_TSC0RATECTL    0
+#define V_TSC0RATECTL(x) ((x) << S_TSC0RATECTL)
+#define F_TSC0RATECTL    V_TSC0RATECTL(1U)
+
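/*
 * The sixteen TSCnRATECTL flags above place scheduling class n at bit n,
 * so an indexed form is equivalent and often handier; a hypothetical
 * helper (n in 0..15):
 *
 *	#define F_TSC_RATECTL(n)	(1U << (n))
 */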
 #define A_CIM_CTL_TSCH_CHNLN_CLASS_ENABLE_A 0x908
 
 #define S_TSC15WRREN    31
@@ -23921,6 +44881,15 @@
 #define V_TSCHNLRATEL(x) ((x) << S_TSCHNLRATEL)
 #define G_TSCHNLRATEL(x) (((x) >> S_TSCHNLRATEL) & M_TSCHNLRATEL)
 
+#define S_TSCHNLRATEPROT    30
+#define V_TSCHNLRATEPROT(x) ((x) << S_TSCHNLRATEPROT)
+#define F_TSCHNLRATEPROT    V_TSCHNLRATEPROT(1U)
+
+#define S_T6_TSCHNLRATEL    0
+#define M_T6_TSCHNLRATEL    0x3fffffffU
+#define V_T6_TSCHNLRATEL(x) ((x) << S_T6_TSCHNLRATEL)
+#define G_T6_TSCHNLRATEL(x) (((x) >> S_T6_TSCHNLRATEL) & M_T6_TSCHNLRATEL)
+
 #define A_CIM_CTL_TSCH_CHNLN_RATE_PROPERTIES 0x914
 
 #define S_TSCHNLRMAX    16
@@ -23933,6 +44902,16 @@
 #define V_TSCHNLRINCR(x) ((x) << S_TSCHNLRINCR)
 #define G_TSCHNLRINCR(x) (((x) >> S_TSCHNLRINCR) & M_TSCHNLRINCR)
 
+#define S_TSCHNLRTSEL    14
+#define M_TSCHNLRTSEL    0x3U
+#define V_TSCHNLRTSEL(x) ((x) << S_TSCHNLRTSEL)
+#define G_TSCHNLRTSEL(x) (((x) >> S_TSCHNLRTSEL) & M_TSCHNLRTSEL)
+
+#define S_T6_TSCHNLRINCR    0
+#define M_T6_TSCHNLRINCR    0x3fffU
+#define V_T6_TSCHNLRINCR(x) ((x) << S_T6_TSCHNLRINCR)
+#define G_T6_TSCHNLRINCR(x) (((x) >> S_T6_TSCHNLRINCR) & M_T6_TSCHNLRINCR)
+
 #define A_CIM_CTL_TSCH_CHNLN_WRR 0x918
 #define A_CIM_CTL_TSCH_CHNLN_WEIGHT 0x91c
 
@@ -23941,6 +44920,21 @@
 #define V_TSCHNLWEIGHT(x) ((x) << S_TSCHNLWEIGHT)
 #define G_TSCHNLWEIGHT(x) (((x) >> S_TSCHNLWEIGHT) & M_TSCHNLWEIGHT)
 
+#define A_CIM_CTL_TSCH_CHNLN_CLASSM_RATE_LIMITER 0x920
+
+#define S_TSCCLRATENEG    31
+#define V_TSCCLRATENEG(x) ((x) << S_TSCCLRATENEG)
+#define F_TSCCLRATENEG    V_TSCCLRATENEG(1U)
+
+#define S_TSCCLRATEL    0
+#define M_TSCCLRATEL    0xffffffU
+#define V_TSCCLRATEL(x) ((x) << S_TSCCLRATEL)
+#define G_TSCCLRATEL(x) (((x) >> S_TSCCLRATEL) & M_TSCCLRATEL)
+
+#define S_TSCCLRATEPROT    30
+#define V_TSCCLRATEPROT(x) ((x) << S_TSCCLRATEPROT)
+#define F_TSCCLRATEPROT    V_TSCCLRATEPROT(1U)
+
 #define A_CIM_CTL_TSCH_CHNLN_CLASSM_RATE_PROPERTIES 0x924
 
 #define S_TSCCLRMAX    16
@@ -23953,6 +44947,16 @@
 #define V_TSCCLRINCR(x) ((x) << S_TSCCLRINCR)
 #define G_TSCCLRINCR(x) (((x) >> S_TSCCLRINCR) & M_TSCCLRINCR)
 
+#define S_TSCCLRTSEL    14
+#define M_TSCCLRTSEL    0x3U
+#define V_TSCCLRTSEL(x) ((x) << S_TSCCLRTSEL)
+#define G_TSCCLRTSEL(x) (((x) >> S_TSCCLRTSEL) & M_TSCCLRTSEL)
+
+#define S_T6_TSCCLRINCR    0
+#define M_T6_TSCCLRINCR    0x3fffU
+#define V_T6_TSCCLRINCR(x) ((x) << S_T6_TSCCLRINCR)
+#define G_T6_TSCCLRINCR(x) (((x) >> S_T6_TSCCLRINCR) & M_T6_TSCCLRINCR)
+
 #define A_CIM_CTL_TSCH_CHNLN_CLASSM_WRR 0x928
 
 #define S_TSCCLWRRNEG    31
@@ -23964,6 +44968,10 @@
 #define V_TSCCLWRR(x) ((x) << S_TSCCLWRR)
 #define G_TSCCLWRR(x) (((x) >> S_TSCCLWRR) & M_TSCCLWRR)
 
+#define S_TSCCLWRRPROT    30
+#define V_TSCCLWRRPROT(x) ((x) << S_TSCCLWRRPROT)
+#define F_TSCCLWRRPROT    V_TSCCLWRRPROT(1U)
+
 #define A_CIM_CTL_TSCH_CHNLN_CLASSM_WEIGHT 0x92c
 
 #define S_TSCCLWEIGHT    0
@@ -23970,3 +44978,17926 @@
 #define M_TSCCLWEIGHT    0xffffU
 #define V_TSCCLWEIGHT(x) ((x) << S_TSCCLWEIGHT)
 #define G_TSCCLWEIGHT(x) (((x) >> S_TSCCLWEIGHT) & M_TSCCLWEIGHT)
+
+#define S_PAUSEVECSEL    28
+#define M_PAUSEVECSEL    0x3U
+#define V_PAUSEVECSEL(x) ((x) << S_PAUSEVECSEL)
+#define G_PAUSEVECSEL(x) (((x) >> S_PAUSEVECSEL) & M_PAUSEVECSEL)
+
+#define S_MPSPAUSEMASK    20
+#define M_MPSPAUSEMASK    0xffU
+#define V_MPSPAUSEMASK(x) ((x) << S_MPSPAUSEMASK)
+#define G_MPSPAUSEMASK(x) (((x) >> S_MPSPAUSEMASK) & M_MPSPAUSEMASK)
+
+#define A_CIM_CTL_TSCH_TICK0 0xd80
+#define A_CIM_CTL_MAILBOX_PF0_CTL 0xd84
+#define A_CIM_CTL_TSCH_TICK1 0xd84
+#define A_CIM_CTL_MAILBOX_PF1_CTL 0xd88
+#define A_CIM_CTL_TSCH_TICK2 0xd88
+#define A_CIM_CTL_MAILBOX_PF2_CTL 0xd8c
+#define A_CIM_CTL_TSCH_TICK3 0xd8c
+#define A_CIM_CTL_MAILBOX_PF3_CTL 0xd90
+#define A_T6_CIM_CTL_MAILBOX_PF0_CTL 0xd90
+#define A_CIM_CTL_MAILBOX_PF4_CTL 0xd94
+#define A_T6_CIM_CTL_MAILBOX_PF1_CTL 0xd94
+#define A_CIM_CTL_MAILBOX_PF5_CTL 0xd98
+#define A_T6_CIM_CTL_MAILBOX_PF2_CTL 0xd98
+#define A_CIM_CTL_MAILBOX_PF6_CTL 0xd9c
+#define A_T6_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
+#define A_CIM_CTL_MAILBOX_PF7_CTL 0xda0
+#define A_T6_CIM_CTL_MAILBOX_PF4_CTL 0xda0
+#define A_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xda4
+
+#define S_PF7_OWNER_PL    15
+#define V_PF7_OWNER_PL(x) ((x) << S_PF7_OWNER_PL)
+#define F_PF7_OWNER_PL    V_PF7_OWNER_PL(1U)
+
+#define S_PF6_OWNER_PL    14
+#define V_PF6_OWNER_PL(x) ((x) << S_PF6_OWNER_PL)
+#define F_PF6_OWNER_PL    V_PF6_OWNER_PL(1U)
+
+#define S_PF5_OWNER_PL    13
+#define V_PF5_OWNER_PL(x) ((x) << S_PF5_OWNER_PL)
+#define F_PF5_OWNER_PL    V_PF5_OWNER_PL(1U)
+
+#define S_PF4_OWNER_PL    12
+#define V_PF4_OWNER_PL(x) ((x) << S_PF4_OWNER_PL)
+#define F_PF4_OWNER_PL    V_PF4_OWNER_PL(1U)
+
+#define S_PF3_OWNER_PL    11
+#define V_PF3_OWNER_PL(x) ((x) << S_PF3_OWNER_PL)
+#define F_PF3_OWNER_PL    V_PF3_OWNER_PL(1U)
+
+#define S_PF2_OWNER_PL    10
+#define V_PF2_OWNER_PL(x) ((x) << S_PF2_OWNER_PL)
+#define F_PF2_OWNER_PL    V_PF2_OWNER_PL(1U)
+
+#define S_PF1_OWNER_PL    9
+#define V_PF1_OWNER_PL(x) ((x) << S_PF1_OWNER_PL)
+#define F_PF1_OWNER_PL    V_PF1_OWNER_PL(1U)
+
+#define S_PF0_OWNER_PL    8
+#define V_PF0_OWNER_PL(x) ((x) << S_PF0_OWNER_PL)
+#define F_PF0_OWNER_PL    V_PF0_OWNER_PL(1U)
+
+#define S_PF7_OWNER_UP    7
+#define V_PF7_OWNER_UP(x) ((x) << S_PF7_OWNER_UP)
+#define F_PF7_OWNER_UP    V_PF7_OWNER_UP(1U)
+
+#define S_PF6_OWNER_UP    6
+#define V_PF6_OWNER_UP(x) ((x) << S_PF6_OWNER_UP)
+#define F_PF6_OWNER_UP    V_PF6_OWNER_UP(1U)
+
+#define S_PF5_OWNER_UP    5
+#define V_PF5_OWNER_UP(x) ((x) << S_PF5_OWNER_UP)
+#define F_PF5_OWNER_UP    V_PF5_OWNER_UP(1U)
+
+#define S_PF4_OWNER_UP    4
+#define V_PF4_OWNER_UP(x) ((x) << S_PF4_OWNER_UP)
+#define F_PF4_OWNER_UP    V_PF4_OWNER_UP(1U)
+
+#define S_PF3_OWNER_UP    3
+#define V_PF3_OWNER_UP(x) ((x) << S_PF3_OWNER_UP)
+#define F_PF3_OWNER_UP    V_PF3_OWNER_UP(1U)
+
+#define S_PF2_OWNER_UP    2
+#define V_PF2_OWNER_UP(x) ((x) << S_PF2_OWNER_UP)
+#define F_PF2_OWNER_UP    V_PF2_OWNER_UP(1U)
+
+#define S_PF1_OWNER_UP    1
+#define V_PF1_OWNER_UP(x) ((x) << S_PF1_OWNER_UP)
+#define F_PF1_OWNER_UP    V_PF1_OWNER_UP(1U)
+
+#define S_PF0_OWNER_UP    0
+#define V_PF0_OWNER_UP(x) ((x) << S_PF0_OWNER_UP)
+#define F_PF0_OWNER_UP    V_PF0_OWNER_UP(1U)
+
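/*
 * The mailbox owner-copy bits are likewise regular: PF<n>_OWNER_PL is
 * bit 8+n and PF<n>_OWNER_UP is bit n.  Hypothetical indexed forms:
 *
 *	#define F_PF_OWNER_PL(n)	(1U << (8 + (n)))
 *	#define F_PF_OWNER_UP(n)	(1U << (n))
 */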
+#define A_T6_CIM_CTL_MAILBOX_PF5_CTL 0xda4
+#define A_CIM_CTL_PIO_MST_CONFIG 0xda8
+
+#define S_T5_CTLRID    0
+#define M_T5_CTLRID    0xffU
+#define V_T5_CTLRID(x) ((x) << S_T5_CTLRID)
+#define G_T5_CTLRID(x) (((x) >> S_T5_CTLRID) & M_T5_CTLRID)
+
+#define A_T6_CIM_CTL_MAILBOX_PF6_CTL 0xda8
+#define A_T6_CIM_CTL_MAILBOX_PF7_CTL 0xdac
+#define A_T6_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
+#define A_T6_CIM_CTL_PIO_MST_CONFIG 0xdb4
+
+#define S_T6_UPRID    0
+#define M_T6_UPRID    0x1ffU
+#define V_T6_UPRID(x) ((x) << S_T6_UPRID)
+#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID)
+
+#define A_CIM_CTL_ULP_OBQ0_PAUSE_MASK 0xe00
+#define A_CIM_CTL_ULP_OBQ1_PAUSE_MASK 0xe04
+#define A_CIM_CTL_ULP_OBQ2_PAUSE_MASK 0xe08
+#define A_CIM_CTL_ULP_OBQ3_PAUSE_MASK 0xe0c
+#define A_CIM_CTL_ULP_OBQ_CONFIG 0xe10
+
+#define S_CH1_PRIO_EN    1
+#define V_CH1_PRIO_EN(x) ((x) << S_CH1_PRIO_EN)
+#define F_CH1_PRIO_EN    V_CH1_PRIO_EN(1U)
+
+#define S_CH0_PRIO_EN    0
+#define V_CH0_PRIO_EN(x) ((x) << S_CH0_PRIO_EN)
+#define F_CH0_PRIO_EN    V_CH0_PRIO_EN(1U)
+
+#define A_CIM_CTL_PIF_TIMEOUT 0xe40
+
+#define S_SLOW_TIMEOUT    16
+#define M_SLOW_TIMEOUT    0xffffU
+#define V_SLOW_TIMEOUT(x) ((x) << S_SLOW_TIMEOUT)
+#define G_SLOW_TIMEOUT(x) (((x) >> S_SLOW_TIMEOUT) & M_SLOW_TIMEOUT)
+
+#define S_MA_TIMEOUT    0
+#define M_MA_TIMEOUT    0xffffU
+#define V_MA_TIMEOUT(x) ((x) << S_MA_TIMEOUT)
+#define G_MA_TIMEOUT(x) (((x) >> S_MA_TIMEOUT) & M_MA_TIMEOUT)
+
+/* registers for module MAC */
+#define MAC_BASE_ADDR 0x0
+
+#define A_MAC_PORT_CFG 0x800
+
+#define S_MAC_CLK_SEL    29
+#define M_MAC_CLK_SEL    0x7U
+#define V_MAC_CLK_SEL(x) ((x) << S_MAC_CLK_SEL)
+#define G_MAC_CLK_SEL(x) (((x) >> S_MAC_CLK_SEL) & M_MAC_CLK_SEL)
+
+#define S_SMUXTXSEL    9
+#define V_SMUXTXSEL(x) ((x) << S_SMUXTXSEL)
+#define F_SMUXTXSEL    V_SMUXTXSEL(1U)
+
+#define S_SMUXRXSEL    8
+#define V_SMUXRXSEL(x) ((x) << S_SMUXRXSEL)
+#define F_SMUXRXSEL    V_SMUXRXSEL(1U)
+
+#define S_PORTSPEED    4
+#define M_PORTSPEED    0x3U
+#define V_PORTSPEED(x) ((x) << S_PORTSPEED)
+#define G_PORTSPEED(x) (((x) >> S_PORTSPEED) & M_PORTSPEED)
+
+#define S_ENA_ERR_RSP    28
+#define V_ENA_ERR_RSP(x) ((x) << S_ENA_ERR_RSP)
+#define F_ENA_ERR_RSP    V_ENA_ERR_RSP(1U)
+
+#define S_DEBUG_CLR    25
+#define V_DEBUG_CLR(x) ((x) << S_DEBUG_CLR)
+#define F_DEBUG_CLR    V_DEBUG_CLR(1U)
+
+#define S_PLL_SEL    23
+#define V_PLL_SEL(x) ((x) << S_PLL_SEL)
+#define F_PLL_SEL    V_PLL_SEL(1U)
+
+#define S_PORT_MAP    20
+#define M_PORT_MAP    0x7U
+#define V_PORT_MAP(x) ((x) << S_PORT_MAP)
+#define G_PORT_MAP(x) (((x) >> S_PORT_MAP) & M_PORT_MAP)
+
+#define S_AEC_PAT_DATA    15
+#define V_AEC_PAT_DATA(x) ((x) << S_AEC_PAT_DATA)
+#define F_AEC_PAT_DATA    V_AEC_PAT_DATA(1U)
+
+#define S_MACCLK_SEL    13
+#define V_MACCLK_SEL(x) ((x) << S_MACCLK_SEL)
+#define F_MACCLK_SEL    V_MACCLK_SEL(1U)
+
+#define S_XGMII_SEL    12
+#define V_XGMII_SEL(x) ((x) << S_XGMII_SEL)
+#define F_XGMII_SEL    V_XGMII_SEL(1U)
+
+#define S_DEBUG_PORT_SEL    10
+#define M_DEBUG_PORT_SEL    0x3U
+#define V_DEBUG_PORT_SEL(x) ((x) << S_DEBUG_PORT_SEL)
+#define G_DEBUG_PORT_SEL(x) (((x) >> S_DEBUG_PORT_SEL) & M_DEBUG_PORT_SEL)
+
+#define S_ENABLE_25G    7
+#define V_ENABLE_25G(x) ((x) << S_ENABLE_25G)
+#define F_ENABLE_25G    V_ENABLE_25G(1U)
+
+#define S_ENABLE_50G    6
+#define V_ENABLE_50G(x) ((x) << S_ENABLE_50G)
+#define F_ENABLE_50G    V_ENABLE_50G(1U)
+
+#define S_DEBUG_TX_RX_SEL    1
+#define V_DEBUG_TX_RX_SEL(x) ((x) << S_DEBUG_TX_RX_SEL)
+#define F_DEBUG_TX_RX_SEL    V_DEBUG_TX_RX_SEL(1U)
+
+#define A_MAC_PORT_RESET_CTRL 0x804
+
+#define S_TWGDSK_HSSC16B    31
+#define V_TWGDSK_HSSC16B(x) ((x) << S_TWGDSK_HSSC16B)
+#define F_TWGDSK_HSSC16B    V_TWGDSK_HSSC16B(1U)
+
+#define S_EEE_RESET    30
+#define V_EEE_RESET(x) ((x) << S_EEE_RESET)
+#define F_EEE_RESET    V_EEE_RESET(1U)
+
+#define S_PTP_TIMER    29
+#define V_PTP_TIMER(x) ((x) << S_PTP_TIMER)
+#define F_PTP_TIMER    V_PTP_TIMER(1U)
+
+#define S_MTIPREFRESET    28
+#define V_MTIPREFRESET(x) ((x) << S_MTIPREFRESET)
+#define F_MTIPREFRESET    V_MTIPREFRESET(1U)
+
+#define S_MTIPTXFFRESET    27
+#define V_MTIPTXFFRESET(x) ((x) << S_MTIPTXFFRESET)
+#define F_MTIPTXFFRESET    V_MTIPTXFFRESET(1U)
+
+#define S_MTIPRXFFRESET    26
+#define V_MTIPRXFFRESET(x) ((x) << S_MTIPRXFFRESET)
+#define F_MTIPRXFFRESET    V_MTIPRXFFRESET(1U)
+
+#define S_MTIPREGRESET    25
+#define V_MTIPREGRESET(x) ((x) << S_MTIPREGRESET)
+#define F_MTIPREGRESET    V_MTIPREGRESET(1U)
+
+#define S_AEC3RESET    23
+#define V_AEC3RESET(x) ((x) << S_AEC3RESET)
+#define F_AEC3RESET    V_AEC3RESET(1U)
+
+#define S_AEC2RESET    22
+#define V_AEC2RESET(x) ((x) << S_AEC2RESET)
+#define F_AEC2RESET    V_AEC2RESET(1U)
+
+#define S_AEC1RESET    21
+#define V_AEC1RESET(x) ((x) << S_AEC1RESET)
+#define F_AEC1RESET    V_AEC1RESET(1U)
+
+#define S_AEC0RESET    20
+#define V_AEC0RESET(x) ((x) << S_AEC0RESET)
+#define F_AEC0RESET    V_AEC0RESET(1U)
+
+#define S_AET3RESET    19
+#define V_AET3RESET(x) ((x) << S_AET3RESET)
+#define F_AET3RESET    V_AET3RESET(1U)
+
+#define S_AET2RESET    18
+#define V_AET2RESET(x) ((x) << S_AET2RESET)
+#define F_AET2RESET    V_AET2RESET(1U)
+
+#define S_AET1RESET    17
+#define V_AET1RESET(x) ((x) << S_AET1RESET)
+#define F_AET1RESET    V_AET1RESET(1U)
+
+#define S_AET0RESET    16
+#define V_AET0RESET(x) ((x) << S_AET0RESET)
+#define F_AET0RESET    V_AET0RESET(1U)
+
+#define S_TXIF_RESET    12
+#define V_TXIF_RESET(x) ((x) << S_TXIF_RESET)
+#define F_TXIF_RESET    V_TXIF_RESET(1U)
+
+#define S_RXIF_RESET    11
+#define V_RXIF_RESET(x) ((x) << S_RXIF_RESET)
+#define F_RXIF_RESET    V_RXIF_RESET(1U)
+
+#define S_MTIPSD3TXRST    9
+#define V_MTIPSD3TXRST(x) ((x) << S_MTIPSD3TXRST)
+#define F_MTIPSD3TXRST    V_MTIPSD3TXRST(1U)
+
+#define S_MTIPSD2TXRST    8
+#define V_MTIPSD2TXRST(x) ((x) << S_MTIPSD2TXRST)
+#define F_MTIPSD2TXRST    V_MTIPSD2TXRST(1U)
+
+#define S_MTIPSD1TXRST    7
+#define V_MTIPSD1TXRST(x) ((x) << S_MTIPSD1TXRST)
+#define F_MTIPSD1TXRST    V_MTIPSD1TXRST(1U)
+
+#define S_MTIPSD0TXRST    6
+#define V_MTIPSD0TXRST(x) ((x) << S_MTIPSD0TXRST)
+#define F_MTIPSD0TXRST    V_MTIPSD0TXRST(1U)
+
+#define S_MTIPSD3RXRST    5
+#define V_MTIPSD3RXRST(x) ((x) << S_MTIPSD3RXRST)
+#define F_MTIPSD3RXRST    V_MTIPSD3RXRST(1U)
+
+#define S_MTIPSD2RXRST    4
+#define V_MTIPSD2RXRST(x) ((x) << S_MTIPSD2RXRST)
+#define F_MTIPSD2RXRST    V_MTIPSD2RXRST(1U)
+
+#define S_MTIPSD1RXRST    3
+#define V_MTIPSD1RXRST(x) ((x) << S_MTIPSD1RXRST)
+#define F_MTIPSD1RXRST    V_MTIPSD1RXRST(1U)
+
+#define S_MTIPSD0RXRST    1
+#define V_MTIPSD0RXRST(x) ((x) << S_MTIPSD0RXRST)
+#define F_MTIPSD0RXRST    V_MTIPSD0RXRST(1U)
+
+#define S_MAC100G40G_RESET    27
+#define V_MAC100G40G_RESET(x) ((x) << S_MAC100G40G_RESET)
+#define F_MAC100G40G_RESET    V_MAC100G40G_RESET(1U)
+
+#define S_MAC10G1G_RESET    26
+#define V_MAC10G1G_RESET(x) ((x) << S_MAC10G1G_RESET)
+#define F_MAC10G1G_RESET    V_MAC10G1G_RESET(1U)
+
+#define S_PCS1G_RESET    24
+#define V_PCS1G_RESET(x) ((x) << S_PCS1G_RESET)
+#define F_PCS1G_RESET    V_PCS1G_RESET(1U)
+
+#define S_PCS10G_RESET    15
+#define V_PCS10G_RESET(x) ((x) << S_PCS10G_RESET)
+#define F_PCS10G_RESET    V_PCS10G_RESET(1U)
+
+#define S_PCS40G_RESET    14
+#define V_PCS40G_RESET(x) ((x) << S_PCS40G_RESET)
+#define F_PCS40G_RESET    V_PCS40G_RESET(1U)
+
+#define S_PCS100G_RESET    13
+#define V_PCS100G_RESET(x) ((x) << S_PCS100G_RESET)
+#define F_PCS100G_RESET    V_PCS100G_RESET(1U)
+
+#define A_MAC_PORT_LED_CFG 0x808
+
+#define S_LED1_CFG1    14
+#define M_LED1_CFG1    0x3U
+#define V_LED1_CFG1(x) ((x) << S_LED1_CFG1)
+#define G_LED1_CFG1(x) (((x) >> S_LED1_CFG1) & M_LED1_CFG1)
+
+#define S_LED0_CFG1    12
+#define M_LED0_CFG1    0x3U
+#define V_LED0_CFG1(x) ((x) << S_LED0_CFG1)
+#define G_LED0_CFG1(x) (((x) >> S_LED0_CFG1) & M_LED0_CFG1)
+
+#define S_LED1_TLO    11
+#define V_LED1_TLO(x) ((x) << S_LED1_TLO)
+#define F_LED1_TLO    V_LED1_TLO(1U)
+
+#define S_LED1_THI    10
+#define V_LED1_THI(x) ((x) << S_LED1_THI)
+#define F_LED1_THI    V_LED1_THI(1U)
+
+#define S_LED0_TLO    9
+#define V_LED0_TLO(x) ((x) << S_LED0_TLO)
+#define F_LED0_TLO    V_LED0_TLO(1U)
+
+#define S_LED0_THI    8
+#define V_LED0_THI(x) ((x) << S_LED0_THI)
+#define F_LED0_THI    V_LED0_THI(1U)
+
+#define A_MAC_PORT_LED_COUNTHI 0x80c
+#define A_MAC_PORT_LED_COUNTLO 0x810
+#define A_MAC_PORT_CFG3 0x814
+
+#define S_T5_FPGA_PTP_PORT    26
+#define M_T5_FPGA_PTP_PORT    0x3U
+#define V_T5_FPGA_PTP_PORT(x) ((x) << S_T5_FPGA_PTP_PORT)
+#define G_T5_FPGA_PTP_PORT(x) (((x) >> S_T5_FPGA_PTP_PORT) & M_T5_FPGA_PTP_PORT)
+
+#define S_FCSDISCTRL    25
+#define V_FCSDISCTRL(x) ((x) << S_FCSDISCTRL)
+#define F_FCSDISCTRL    V_FCSDISCTRL(1U)
+
+#define S_SIGDETCTRL    24
+#define V_SIGDETCTRL(x) ((x) << S_SIGDETCTRL)
+#define F_SIGDETCTRL    V_SIGDETCTRL(1U)
+
+#define S_TX_LANE    23
+#define V_TX_LANE(x) ((x) << S_TX_LANE)
+#define F_TX_LANE    V_TX_LANE(1U)
+
+#define S_RX_LANE    22
+#define V_RX_LANE(x) ((x) << S_RX_LANE)
+#define F_RX_LANE    V_RX_LANE(1U)
+
+#define S_SE_CLR    21
+#define V_SE_CLR(x) ((x) << S_SE_CLR)
+#define F_SE_CLR    V_SE_CLR(1U)
+
+#define S_AN_ENA    17
+#define M_AN_ENA    0xfU
+#define V_AN_ENA(x) ((x) << S_AN_ENA)
+#define G_AN_ENA(x) (((x) >> S_AN_ENA) & M_AN_ENA)
+
+#define S_SD_RX_CLK_ENA    13
+#define M_SD_RX_CLK_ENA    0xfU
+#define V_SD_RX_CLK_ENA(x) ((x) << S_SD_RX_CLK_ENA)
+#define G_SD_RX_CLK_ENA(x) (((x) >> S_SD_RX_CLK_ENA) & M_SD_RX_CLK_ENA)
+
+#define S_SD_TX_CLK_ENA    9
+#define M_SD_TX_CLK_ENA    0xfU
+#define V_SD_TX_CLK_ENA(x) ((x) << S_SD_TX_CLK_ENA)
+#define G_SD_TX_CLK_ENA(x) (((x) >> S_SD_TX_CLK_ENA) & M_SD_TX_CLK_ENA)
+
+#define S_SGMIISEL    8
+#define V_SGMIISEL(x) ((x) << S_SGMIISEL)
+#define F_SGMIISEL    V_SGMIISEL(1U)
+
+#define S_HSSPLLSEL    4
+#define M_HSSPLLSEL    0xfU
+#define V_HSSPLLSEL(x) ((x) << S_HSSPLLSEL)
+#define G_HSSPLLSEL(x) (((x) >> S_HSSPLLSEL) & M_HSSPLLSEL)
+
+#define S_HSSC16C20SEL    0
+#define M_HSSC16C20SEL    0xfU
+#define V_HSSC16C20SEL(x) ((x) << S_HSSC16C20SEL)
+#define G_HSSC16C20SEL(x) (((x) >> S_HSSC16C20SEL) & M_HSSC16C20SEL)
+
+#define S_REF_CLK_SEL    30
+#define M_REF_CLK_SEL    0x3U
+#define V_REF_CLK_SEL(x) ((x) << S_REF_CLK_SEL)
+#define G_REF_CLK_SEL(x) (((x) >> S_REF_CLK_SEL) & M_REF_CLK_SEL)
+
+#define S_SGMII_SD_SIG_DET    29
+#define V_SGMII_SD_SIG_DET(x) ((x) << S_SGMII_SD_SIG_DET)
+#define F_SGMII_SD_SIG_DET    V_SGMII_SD_SIG_DET(1U)
+
+#define S_SGMII_SGPCS_ENA    28
+#define V_SGMII_SGPCS_ENA(x) ((x) << S_SGMII_SGPCS_ENA)
+#define F_SGMII_SGPCS_ENA    V_SGMII_SGPCS_ENA(1U)
+
+#define S_MAC_FPGA_PTP_PORT    26
+#define M_MAC_FPGA_PTP_PORT    0x3U
+#define V_MAC_FPGA_PTP_PORT(x) ((x) << S_MAC_FPGA_PTP_PORT)
+#define G_MAC_FPGA_PTP_PORT(x) (((x) >> S_MAC_FPGA_PTP_PORT) & M_MAC_FPGA_PTP_PORT)
+
+#define A_MAC_PORT_CFG2 0x818
+
+#define S_T5_AEC_PMA_TX_READY    4
+#define M_T5_AEC_PMA_TX_READY    0xfU
+#define V_T5_AEC_PMA_TX_READY(x) ((x) << S_T5_AEC_PMA_TX_READY)
+#define G_T5_AEC_PMA_TX_READY(x) (((x) >> S_T5_AEC_PMA_TX_READY) & M_T5_AEC_PMA_TX_READY)
+
+#define S_T5_AEC_PMA_RX_READY    0
+#define M_T5_AEC_PMA_RX_READY    0xfU
+#define V_T5_AEC_PMA_RX_READY(x) ((x) << S_T5_AEC_PMA_RX_READY)
+#define G_T5_AEC_PMA_RX_READY(x) (((x) >> S_T5_AEC_PMA_RX_READY) & M_T5_AEC_PMA_RX_READY)
+
+#define S_AN_DATA_CTL    19
+#define V_AN_DATA_CTL(x) ((x) << S_AN_DATA_CTL)
+#define F_AN_DATA_CTL    V_AN_DATA_CTL(1U)
+
+#define A_MAC_PORT_PKT_COUNT 0x81c
+#define A_MAC_PORT_CFG4 0x820
+
+#define S_AEC3_RX_WIDTH    14
+#define M_AEC3_RX_WIDTH    0x3U
+#define V_AEC3_RX_WIDTH(x) ((x) << S_AEC3_RX_WIDTH)
+#define G_AEC3_RX_WIDTH(x) (((x) >> S_AEC3_RX_WIDTH) & M_AEC3_RX_WIDTH)
+
+#define S_AEC2_RX_WIDTH    12
+#define M_AEC2_RX_WIDTH    0x3U
+#define V_AEC2_RX_WIDTH(x) ((x) << S_AEC2_RX_WIDTH)
+#define G_AEC2_RX_WIDTH(x) (((x) >> S_AEC2_RX_WIDTH) & M_AEC2_RX_WIDTH)
+
+#define S_AEC1_RX_WIDTH    10
+#define M_AEC1_RX_WIDTH    0x3U
+#define V_AEC1_RX_WIDTH(x) ((x) << S_AEC1_RX_WIDTH)
+#define G_AEC1_RX_WIDTH(x) (((x) >> S_AEC1_RX_WIDTH) & M_AEC1_RX_WIDTH)
+
+#define S_AEC0_RX_WIDTH    8
+#define M_AEC0_RX_WIDTH    0x3U
+#define V_AEC0_RX_WIDTH(x) ((x) << S_AEC0_RX_WIDTH)
+#define G_AEC0_RX_WIDTH(x) (((x) >> S_AEC0_RX_WIDTH) & M_AEC0_RX_WIDTH)
+
+#define S_AEC3_TX_WIDTH    6
+#define M_AEC3_TX_WIDTH    0x3U
+#define V_AEC3_TX_WIDTH(x) ((x) << S_AEC3_TX_WIDTH)
+#define G_AEC3_TX_WIDTH(x) (((x) >> S_AEC3_TX_WIDTH) & M_AEC3_TX_WIDTH)
+
+#define S_AEC2_TX_WIDTH    4
+#define M_AEC2_TX_WIDTH    0x3U
+#define V_AEC2_TX_WIDTH(x) ((x) << S_AEC2_TX_WIDTH)
+#define G_AEC2_TX_WIDTH(x) (((x) >> S_AEC2_TX_WIDTH) & M_AEC2_TX_WIDTH)
+
+#define S_AEC1_TX_WIDTH    2
+#define M_AEC1_TX_WIDTH    0x3U
+#define V_AEC1_TX_WIDTH(x) ((x) << S_AEC1_TX_WIDTH)
+#define G_AEC1_TX_WIDTH(x) (((x) >> S_AEC1_TX_WIDTH) & M_AEC1_TX_WIDTH)
+
+#define S_AEC0_TX_WIDTH    0
+#define M_AEC0_TX_WIDTH    0x3U
+#define V_AEC0_TX_WIDTH(x) ((x) << S_AEC0_TX_WIDTH)
+#define G_AEC0_TX_WIDTH(x) (((x) >> S_AEC0_TX_WIDTH) & M_AEC0_TX_WIDTH)
+
+#define A_MAC_PORT_MAGIC_MACID_LO 0x824
+#define A_MAC_PORT_MAGIC_MACID_HI 0x828
+#define A_MAC_PORT_MTIP_RESET_CTRL 0x82c
+
+#define S_AN_RESET_SD_TX_CLK    31
+#define V_AN_RESET_SD_TX_CLK(x) ((x) << S_AN_RESET_SD_TX_CLK)
+#define F_AN_RESET_SD_TX_CLK    V_AN_RESET_SD_TX_CLK(1U)
+
+#define S_AN_RESET_SD_RX_CLK    30
+#define V_AN_RESET_SD_RX_CLK(x) ((x) << S_AN_RESET_SD_RX_CLK)
+#define F_AN_RESET_SD_RX_CLK    V_AN_RESET_SD_RX_CLK(1U)
+
+#define S_SGMII_RESET_TX_CLK    29
+#define V_SGMII_RESET_TX_CLK(x) ((x) << S_SGMII_RESET_TX_CLK)
+#define F_SGMII_RESET_TX_CLK    V_SGMII_RESET_TX_CLK(1U)
+
+#define S_SGMII_RESET_RX_CLK    28
+#define V_SGMII_RESET_RX_CLK(x) ((x) << S_SGMII_RESET_RX_CLK)
+#define F_SGMII_RESET_RX_CLK    V_SGMII_RESET_RX_CLK(1U)
+
+#define S_SGMII_RESET_REF_CLK    27
+#define V_SGMII_RESET_REF_CLK(x) ((x) << S_SGMII_RESET_REF_CLK)
+#define F_SGMII_RESET_REF_CLK    V_SGMII_RESET_REF_CLK(1U)
+
+#define S_PCS10G_RESET_XFI_RXCLK    26
+#define V_PCS10G_RESET_XFI_RXCLK(x) ((x) << S_PCS10G_RESET_XFI_RXCLK)
+#define F_PCS10G_RESET_XFI_RXCLK    V_PCS10G_RESET_XFI_RXCLK(1U)
+
+#define S_PCS10G_RESET_XFI_TXCLK    25
+#define V_PCS10G_RESET_XFI_TXCLK(x) ((x) << S_PCS10G_RESET_XFI_TXCLK)
+#define F_PCS10G_RESET_XFI_TXCLK    V_PCS10G_RESET_XFI_TXCLK(1U)
+
+#define S_PCS10G_RESET_SD_TX_CLK    24
+#define V_PCS10G_RESET_SD_TX_CLK(x) ((x) << S_PCS10G_RESET_SD_TX_CLK)
+#define F_PCS10G_RESET_SD_TX_CLK    V_PCS10G_RESET_SD_TX_CLK(1U)
+
+#define S_PCS10G_RESET_SD_RX_CLK    23
+#define V_PCS10G_RESET_SD_RX_CLK(x) ((x) << S_PCS10G_RESET_SD_RX_CLK)
+#define F_PCS10G_RESET_SD_RX_CLK    V_PCS10G_RESET_SD_RX_CLK(1U)
+
+#define S_PCS40G_RESET_RXCLK    22
+#define V_PCS40G_RESET_RXCLK(x) ((x) << S_PCS40G_RESET_RXCLK)
+#define F_PCS40G_RESET_RXCLK    V_PCS40G_RESET_RXCLK(1U)
+
+#define S_PCS40G_RESET_SD_TX_CLK    21
+#define V_PCS40G_RESET_SD_TX_CLK(x) ((x) << S_PCS40G_RESET_SD_TX_CLK)
+#define F_PCS40G_RESET_SD_TX_CLK    V_PCS40G_RESET_SD_TX_CLK(1U)
+
+#define S_PCS40G_RESET_SD0_RX_CLK    20
+#define V_PCS40G_RESET_SD0_RX_CLK(x) ((x) << S_PCS40G_RESET_SD0_RX_CLK)
+#define F_PCS40G_RESET_SD0_RX_CLK    V_PCS40G_RESET_SD0_RX_CLK(1U)
+
+#define S_PCS40G_RESET_SD1_RX_CLK    19
+#define V_PCS40G_RESET_SD1_RX_CLK(x) ((x) << S_PCS40G_RESET_SD1_RX_CLK)
+#define F_PCS40G_RESET_SD1_RX_CLK    V_PCS40G_RESET_SD1_RX_CLK(1U)
+
+#define S_PCS40G_RESET_SD2_RX_CLK    18
+#define V_PCS40G_RESET_SD2_RX_CLK(x) ((x) << S_PCS40G_RESET_SD2_RX_CLK)
+#define F_PCS40G_RESET_SD2_RX_CLK    V_PCS40G_RESET_SD2_RX_CLK(1U)
+
+#define S_PCS40G_RESET_SD3_RX_CLK    17
+#define V_PCS40G_RESET_SD3_RX_CLK(x) ((x) << S_PCS40G_RESET_SD3_RX_CLK)
+#define F_PCS40G_RESET_SD3_RX_CLK    V_PCS40G_RESET_SD3_RX_CLK(1U)
+
+#define S_PCS100G_RESET_CGMII_RXCLK    16
+#define V_PCS100G_RESET_CGMII_RXCLK(x) ((x) << S_PCS100G_RESET_CGMII_RXCLK)
+#define F_PCS100G_RESET_CGMII_RXCLK    V_PCS100G_RESET_CGMII_RXCLK(1U)
+
+#define S_PCS100G_RESET_CGMII_TXCLK    15
+#define V_PCS100G_RESET_CGMII_TXCLK(x) ((x) << S_PCS100G_RESET_CGMII_TXCLK)
+#define F_PCS100G_RESET_CGMII_TXCLK    V_PCS100G_RESET_CGMII_TXCLK(1U)
+
+#define S_PCS100G_RESET_TX_CLK    14
+#define V_PCS100G_RESET_TX_CLK(x) ((x) << S_PCS100G_RESET_TX_CLK)
+#define F_PCS100G_RESET_TX_CLK    V_PCS100G_RESET_TX_CLK(1U)
+
+#define S_PCS100G_RESET_SD0_RX_CLK    13
+#define V_PCS100G_RESET_SD0_RX_CLK(x) ((x) << S_PCS100G_RESET_SD0_RX_CLK)
+#define F_PCS100G_RESET_SD0_RX_CLK    V_PCS100G_RESET_SD0_RX_CLK(1U)
+
+#define S_PCS100G_RESET_SD1_RX_CLK    12
+#define V_PCS100G_RESET_SD1_RX_CLK(x) ((x) << S_PCS100G_RESET_SD1_RX_CLK)
+#define F_PCS100G_RESET_SD1_RX_CLK    V_PCS100G_RESET_SD1_RX_CLK(1U)
+
+#define S_PCS100G_RESET_SD2_RX_CLK    11
+#define V_PCS100G_RESET_SD2_RX_CLK(x) ((x) << S_PCS100G_RESET_SD2_RX_CLK)
+#define F_PCS100G_RESET_SD2_RX_CLK    V_PCS100G_RESET_SD2_RX_CLK(1U)
+
+#define S_PCS100G_RESET_SD3_RX_CLK    10
+#define V_PCS100G_RESET_SD3_RX_CLK(x) ((x) << S_PCS100G_RESET_SD3_RX_CLK)
+#define F_PCS100G_RESET_SD3_RX_CLK    V_PCS100G_RESET_SD3_RX_CLK(1U)
+
+#define S_MAC40G100G_RESET_TXCLK    9
+#define V_MAC40G100G_RESET_TXCLK(x) ((x) << S_MAC40G100G_RESET_TXCLK)
+#define F_MAC40G100G_RESET_TXCLK    V_MAC40G100G_RESET_TXCLK(1U)
+
+#define S_MAC40G100G_RESET_RXCLK    8
+#define V_MAC40G100G_RESET_RXCLK(x) ((x) << S_MAC40G100G_RESET_RXCLK)
+#define F_MAC40G100G_RESET_RXCLK    V_MAC40G100G_RESET_RXCLK(1U)
+
+#define S_MAC40G100G_RESET_FF_TX_CLK    7
+#define V_MAC40G100G_RESET_FF_TX_CLK(x) ((x) << S_MAC40G100G_RESET_FF_TX_CLK)
+#define F_MAC40G100G_RESET_FF_TX_CLK    V_MAC40G100G_RESET_FF_TX_CLK(1U)
+
+#define S_MAC40G100G_RESET_FF_RX_CLK    6
+#define V_MAC40G100G_RESET_FF_RX_CLK(x) ((x) << S_MAC40G100G_RESET_FF_RX_CLK)
+#define F_MAC40G100G_RESET_FF_RX_CLK    V_MAC40G100G_RESET_FF_RX_CLK(1U)
+
+#define S_MAC40G100G_RESET_TS_CLK    5
+#define V_MAC40G100G_RESET_TS_CLK(x) ((x) << S_MAC40G100G_RESET_TS_CLK)
+#define F_MAC40G100G_RESET_TS_CLK    V_MAC40G100G_RESET_TS_CLK(1U)
+
+#define S_MAC1G10G_RESET_RXCLK    4
+#define V_MAC1G10G_RESET_RXCLK(x) ((x) << S_MAC1G10G_RESET_RXCLK)
+#define F_MAC1G10G_RESET_RXCLK    V_MAC1G10G_RESET_RXCLK(1U)
+
+#define S_MAC1G10G_RESET_TXCLK    3
+#define V_MAC1G10G_RESET_TXCLK(x) ((x) << S_MAC1G10G_RESET_TXCLK)
+#define F_MAC1G10G_RESET_TXCLK    V_MAC1G10G_RESET_TXCLK(1U)
+
+#define S_MAC1G10G_RESET_FF_RX_CLK    2
+#define V_MAC1G10G_RESET_FF_RX_CLK(x) ((x) << S_MAC1G10G_RESET_FF_RX_CLK)
+#define F_MAC1G10G_RESET_FF_RX_CLK    V_MAC1G10G_RESET_FF_RX_CLK(1U)
+
+#define S_MAC1G10G_RESET_FF_TX_CLK    1
+#define V_MAC1G10G_RESET_FF_TX_CLK(x) ((x) << S_MAC1G10G_RESET_FF_TX_CLK)
+#define F_MAC1G10G_RESET_FF_TX_CLK    V_MAC1G10G_RESET_FF_TX_CLK(1U)
+
+#define S_XGMII_CLK_RESET    0
+#define V_XGMII_CLK_RESET(x) ((x) << S_XGMII_CLK_RESET)
+#define F_XGMII_CLK_RESET    V_XGMII_CLK_RESET(1U)
+
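/*
 * The F_ flags compose with | for read-modify-write sequences; pulsing the
 * 10G PCS serdes resets in A_MAC_PORT_MTIP_RESET_CTRL might look like this
 * sketch (per-port register addressing and any required settle delay are
 * elided; t4_read_reg()/t4_write_reg() as used elsewhere in the driver):
 *
 *	uint32_t v = t4_read_reg(sc, A_MAC_PORT_MTIP_RESET_CTRL);
 *	t4_write_reg(sc, A_MAC_PORT_MTIP_RESET_CTRL,
 *	    v | F_PCS10G_RESET_SD_TX_CLK | F_PCS10G_RESET_SD_RX_CLK);
 *	t4_write_reg(sc, A_MAC_PORT_MTIP_RESET_CTRL,
 *	    v & ~(F_PCS10G_RESET_SD_TX_CLK | F_PCS10G_RESET_SD_RX_CLK));
 */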
+#define A_MAC_PORT_MTIP_GATE_CTRL 0x830
+
+#define S_AN_GATE_SD_TX_CLK    31
+#define V_AN_GATE_SD_TX_CLK(x) ((x) << S_AN_GATE_SD_TX_CLK)
+#define F_AN_GATE_SD_TX_CLK    V_AN_GATE_SD_TX_CLK(1U)
+
+#define S_AN_GATE_SD_RX_CLK    30
+#define V_AN_GATE_SD_RX_CLK(x) ((x) << S_AN_GATE_SD_RX_CLK)
+#define F_AN_GATE_SD_RX_CLK    V_AN_GATE_SD_RX_CLK(1U)
+
+#define S_SGMII_GATE_TX_CLK    29
+#define V_SGMII_GATE_TX_CLK(x) ((x) << S_SGMII_GATE_TX_CLK)
+#define F_SGMII_GATE_TX_CLK    V_SGMII_GATE_TX_CLK(1U)
+
+#define S_SGMII_GATE_RX_CLK    28
+#define V_SGMII_GATE_RX_CLK(x) ((x) << S_SGMII_GATE_RX_CLK)
+#define F_SGMII_GATE_RX_CLK    V_SGMII_GATE_RX_CLK(1U)
+
+#define S_SGMII_GATE_REF_CLK    27
+#define V_SGMII_GATE_REF_CLK(x) ((x) << S_SGMII_GATE_REF_CLK)
+#define F_SGMII_GATE_REF_CLK    V_SGMII_GATE_REF_CLK(1U)
+
+#define S_PCS10G_GATE_XFI_RXCLK    26
+#define V_PCS10G_GATE_XFI_RXCLK(x) ((x) << S_PCS10G_GATE_XFI_RXCLK)
+#define F_PCS10G_GATE_XFI_RXCLK    V_PCS10G_GATE_XFI_RXCLK(1U)
+
+#define S_PCS10G_GATE_XFI_TXCLK    25
+#define V_PCS10G_GATE_XFI_TXCLK(x) ((x) << S_PCS10G_GATE_XFI_TXCLK)
+#define F_PCS10G_GATE_XFI_TXCLK    V_PCS10G_GATE_XFI_TXCLK(1U)
+
+#define S_PCS10G_GATE_SD_TX_CLK    24
+#define V_PCS10G_GATE_SD_TX_CLK(x) ((x) << S_PCS10G_GATE_SD_TX_CLK)
+#define F_PCS10G_GATE_SD_TX_CLK    V_PCS10G_GATE_SD_TX_CLK(1U)
+
+#define S_PCS10G_GATE_SD_RX_CLK    23
+#define V_PCS10G_GATE_SD_RX_CLK(x) ((x) << S_PCS10G_GATE_SD_RX_CLK)
+#define F_PCS10G_GATE_SD_RX_CLK    V_PCS10G_GATE_SD_RX_CLK(1U)
+
+#define S_PCS40G_GATE_RXCLK    22
+#define V_PCS40G_GATE_RXCLK(x) ((x) << S_PCS40G_GATE_RXCLK)
+#define F_PCS40G_GATE_RXCLK    V_PCS40G_GATE_RXCLK(1U)
+
+#define S_PCS40G_GATE_SD_TX_CLK    21
+#define V_PCS40G_GATE_SD_TX_CLK(x) ((x) << S_PCS40G_GATE_SD_TX_CLK)
+#define F_PCS40G_GATE_SD_TX_CLK    V_PCS40G_GATE_SD_TX_CLK(1U)
+
+#define S_PCS40G_GATE_SD_RX_CLK    20
+#define V_PCS40G_GATE_SD_RX_CLK(x) ((x) << S_PCS40G_GATE_SD_RX_CLK)
+#define F_PCS40G_GATE_SD_RX_CLK    V_PCS40G_GATE_SD_RX_CLK(1U)
+
+#define S_PCS100G_GATE_CGMII_RXCLK    19
+#define V_PCS100G_GATE_CGMII_RXCLK(x) ((x) << S_PCS100G_GATE_CGMII_RXCLK)
+#define F_PCS100G_GATE_CGMII_RXCLK    V_PCS100G_GATE_CGMII_RXCLK(1U)
+
+#define S_PCS100G_GATE_CGMII_TXCLK    18
+#define V_PCS100G_GATE_CGMII_TXCLK(x) ((x) << S_PCS100G_GATE_CGMII_TXCLK)
+#define F_PCS100G_GATE_CGMII_TXCLK    V_PCS100G_GATE_CGMII_TXCLK(1U)
+
+#define S_PCS100G_GATE_TX_CLK    17
+#define V_PCS100G_GATE_TX_CLK(x) ((x) << S_PCS100G_GATE_TX_CLK)
+#define F_PCS100G_GATE_TX_CLK    V_PCS100G_GATE_TX_CLK(1U)
+
+#define S_PCS100G_GATE_SD_RX_CLK    16
+#define V_PCS100G_GATE_SD_RX_CLK(x) ((x) << S_PCS100G_GATE_SD_RX_CLK)
+#define F_PCS100G_GATE_SD_RX_CLK    V_PCS100G_GATE_SD_RX_CLK(1U)
+
+#define S_MAC40G100G_GATE_TXCLK    15
+#define V_MAC40G100G_GATE_TXCLK(x) ((x) << S_MAC40G100G_GATE_TXCLK)
+#define F_MAC40G100G_GATE_TXCLK    V_MAC40G100G_GATE_TXCLK(1U)
+
+#define S_MAC40G100G_GATE_RXCLK    14
+#define V_MAC40G100G_GATE_RXCLK(x) ((x) << S_MAC40G100G_GATE_RXCLK)
+#define F_MAC40G100G_GATE_RXCLK    V_MAC40G100G_GATE_RXCLK(1U)
+
+#define S_MAC40G100G_GATE_FF_TX_CLK    13
+#define V_MAC40G100G_GATE_FF_TX_CLK(x) ((x) << S_MAC40G100G_GATE_FF_TX_CLK)
+#define F_MAC40G100G_GATE_FF_TX_CLK    V_MAC40G100G_GATE_FF_TX_CLK(1U)
+
+#define S_MAC40G100G_GATE_FF_RX_CLK    12
+#define V_MAC40G100G_GATE_FF_RX_CLK(x) ((x) << S_MAC40G100G_GATE_FF_RX_CLK)
+#define F_MAC40G100G_GATE_FF_RX_CLK    V_MAC40G100G_GATE_FF_RX_CLK(1U)
+
+#define S_MAC40G100G_TS_CLK    11
+#define V_MAC40G100G_TS_CLK(x) ((x) << S_MAC40G100G_TS_CLK)
+#define F_MAC40G100G_TS_CLK    V_MAC40G100G_TS_CLK(1U)
+
+#define S_MAC1G10G_GATE_RXCLK    10
+#define V_MAC1G10G_GATE_RXCLK(x) ((x) << S_MAC1G10G_GATE_RXCLK)
+#define F_MAC1G10G_GATE_RXCLK    V_MAC1G10G_GATE_RXCLK(1U)
+
+#define S_MAC1G10G_GATE_TXCLK    9
+#define V_MAC1G10G_GATE_TXCLK(x) ((x) << S_MAC1G10G_GATE_TXCLK)
+#define F_MAC1G10G_GATE_TXCLK    V_MAC1G10G_GATE_TXCLK(1U)
+
+#define S_MAC1G10G_GATE_FF_RX_CLK    8
+#define V_MAC1G10G_GATE_FF_RX_CLK(x) ((x) << S_MAC1G10G_GATE_FF_RX_CLK)
+#define F_MAC1G10G_GATE_FF_RX_CLK    V_MAC1G10G_GATE_FF_RX_CLK(1U)
+
+#define S_MAC1G10G_GATE_FF_TX_CLK    7
+#define V_MAC1G10G_GATE_FF_TX_CLK(x) ((x) << S_MAC1G10G_GATE_FF_TX_CLK)
+#define F_MAC1G10G_GATE_FF_TX_CLK    V_MAC1G10G_GATE_FF_TX_CLK(1U)
+
+#define S_AEC_RX    6
+#define V_AEC_RX(x) ((x) << S_AEC_RX)
+#define F_AEC_RX    V_AEC_RX(1U)
+
+#define S_AEC_TX    5
+#define V_AEC_TX(x) ((x) << S_AEC_TX)
+#define F_AEC_TX    V_AEC_TX(1U)
+
+#define S_PCS100G_CLK_ENABLE    4
+#define V_PCS100G_CLK_ENABLE(x) ((x) << S_PCS100G_CLK_ENABLE)
+#define F_PCS100G_CLK_ENABLE    V_PCS100G_CLK_ENABLE(1U)
+
+#define S_PCS40G_CLK_ENABLE    3
+#define V_PCS40G_CLK_ENABLE(x) ((x) << S_PCS40G_CLK_ENABLE)
+#define F_PCS40G_CLK_ENABLE    V_PCS40G_CLK_ENABLE(1U)
+
+#define S_PCS10G_CLK_ENABLE    2
+#define V_PCS10G_CLK_ENABLE(x) ((x) << S_PCS10G_CLK_ENABLE)
+#define F_PCS10G_CLK_ENABLE    V_PCS10G_CLK_ENABLE(1U)
+
+#define S_PCS1G_CLK_ENABLE    1
+#define V_PCS1G_CLK_ENABLE(x) ((x) << S_PCS1G_CLK_ENABLE)
+#define F_PCS1G_CLK_ENABLE    V_PCS1G_CLK_ENABLE(1U)
+
+#define S_AN_CLK_ENABLE    0
+#define V_AN_CLK_ENABLE(x) ((x) << S_AN_CLK_ENABLE)
+#define F_AN_CLK_ENABLE    V_AN_CLK_ENABLE(1U)
+
+#define A_MAC_PORT_LINK_STATUS 0x834
+
+#define S_AN_DONE    6
+#define V_AN_DONE(x) ((x) << S_AN_DONE)
+#define F_AN_DONE    V_AN_DONE(1U)
+
+#define S_ALIGN_DONE    5
+#define V_ALIGN_DONE(x) ((x) << S_ALIGN_DONE)
+#define F_ALIGN_DONE    V_ALIGN_DONE(1U)
+
+#define S_BLOCK_LOCK    4
+#define V_BLOCK_LOCK(x) ((x) << S_BLOCK_LOCK)
+#define F_BLOCK_LOCK    V_BLOCK_LOCK(1U)
+
+#define S_HI_BER_ST    7
+#define V_HI_BER_ST(x) ((x) << S_HI_BER_ST)
+#define F_HI_BER_ST    V_HI_BER_ST(1U)
+
+#define S_AN_DONE_ST    6
+#define V_AN_DONE_ST(x) ((x) << S_AN_DONE_ST)
+#define F_AN_DONE_ST    V_AN_DONE_ST(1U)
+
+#define A_MAC_PORT_AEC_ADD_CTL_STAT_0 0x838
+
+#define S_AEC_SYS_LANE_TYPE_3    11
+#define V_AEC_SYS_LANE_TYPE_3(x) ((x) << S_AEC_SYS_LANE_TYPE_3)
+#define F_AEC_SYS_LANE_TYPE_3    V_AEC_SYS_LANE_TYPE_3(1U)
+
+#define S_AEC_SYS_LANE_TYPE_2    10
+#define V_AEC_SYS_LANE_TYPE_2(x) ((x) << S_AEC_SYS_LANE_TYPE_2)
+#define F_AEC_SYS_LANE_TYPE_2    V_AEC_SYS_LANE_TYPE_2(1U)
+
+#define S_AEC_SYS_LANE_TYPE_1    9
+#define V_AEC_SYS_LANE_TYPE_1(x) ((x) << S_AEC_SYS_LANE_TYPE_1)
+#define F_AEC_SYS_LANE_TYPE_1    V_AEC_SYS_LANE_TYPE_1(1U)
+
+#define S_AEC_SYS_LANE_TYPE_0    8
+#define V_AEC_SYS_LANE_TYPE_0(x) ((x) << S_AEC_SYS_LANE_TYPE_0)
+#define F_AEC_SYS_LANE_TYPE_0    V_AEC_SYS_LANE_TYPE_0(1U)
+
+#define S_AEC_SYS_LANE_SELECT_3    6
+#define M_AEC_SYS_LANE_SELECT_3    0x3U
+#define V_AEC_SYS_LANE_SELECT_3(x) ((x) << S_AEC_SYS_LANE_SELECT_3)
+#define G_AEC_SYS_LANE_SELECT_3(x) (((x) >> S_AEC_SYS_LANE_SELECT_3) & M_AEC_SYS_LANE_SELECT_3)
+
+#define S_AEC_SYS_LANE_SELECT_2    4
+#define M_AEC_SYS_LANE_SELECT_2    0x3U
+#define V_AEC_SYS_LANE_SELECT_2(x) ((x) << S_AEC_SYS_LANE_SELECT_2)
+#define G_AEC_SYS_LANE_SELECT_2(x) (((x) >> S_AEC_SYS_LANE_SELECT_2) & M_AEC_SYS_LANE_SELECT_2)
+
+#define S_AEC_SYS_LANE_SELECT_1    2
+#define M_AEC_SYS_LANE_SELECT_1    0x3U
+#define V_AEC_SYS_LANE_SELECT_1(x) ((x) << S_AEC_SYS_LANE_SELECT_1)
+#define G_AEC_SYS_LANE_SELECT_1(x) (((x) >> S_AEC_SYS_LANE_SELECT_1) & M_AEC_SYS_LANE_SELECT_1)
+
+#define S_AEC_SYS_LANE_SELECT_O    0
+#define M_AEC_SYS_LANE_SELECT_O    0x3U
+#define V_AEC_SYS_LANE_SELECT_O(x) ((x) << S_AEC_SYS_LANE_SELECT_O)
+#define G_AEC_SYS_LANE_SELECT_O(x) (((x) >> S_AEC_SYS_LANE_SELECT_O) & M_AEC_SYS_LANE_SELECT_O)
+
+#define A_MAC_PORT_AEC_ADD_CTL_STAT_1 0x83c
+
+#define S_AEC_RX_UNKNOWN_LANE_3    11
+#define V_AEC_RX_UNKNOWN_LANE_3(x) ((x) << S_AEC_RX_UNKNOWN_LANE_3)
+#define F_AEC_RX_UNKNOWN_LANE_3    V_AEC_RX_UNKNOWN_LANE_3(1U)
+
+#define S_AEC_RX_UNKNOWN_LANE_2    10
+#define V_AEC_RX_UNKNOWN_LANE_2(x) ((x) << S_AEC_RX_UNKNOWN_LANE_2)
+#define F_AEC_RX_UNKNOWN_LANE_2    V_AEC_RX_UNKNOWN_LANE_2(1U)
+
+#define S_AEC_RX_UNKNOWN_LANE_1    9
+#define V_AEC_RX_UNKNOWN_LANE_1(x) ((x) << S_AEC_RX_UNKNOWN_LANE_1)
+#define F_AEC_RX_UNKNOWN_LANE_1    V_AEC_RX_UNKNOWN_LANE_1(1U)
+
+#define S_AEC_RX_UNKNOWN_LANE_0    8
+#define V_AEC_RX_UNKNOWN_LANE_0(x) ((x) << S_AEC_RX_UNKNOWN_LANE_0)
+#define F_AEC_RX_UNKNOWN_LANE_0    V_AEC_RX_UNKNOWN_LANE_0(1U)
+
+#define S_AEC_RX_LANE_ID_3    6
+#define M_AEC_RX_LANE_ID_3    0x3U
+#define V_AEC_RX_LANE_ID_3(x) ((x) << S_AEC_RX_LANE_ID_3)
+#define G_AEC_RX_LANE_ID_3(x) (((x) >> S_AEC_RX_LANE_ID_3) & M_AEC_RX_LANE_ID_3)
+
+#define S_AEC_RX_LANE_ID_2    4
+#define M_AEC_RX_LANE_ID_2    0x3U
+#define V_AEC_RX_LANE_ID_2(x) ((x) << S_AEC_RX_LANE_ID_2)
+#define G_AEC_RX_LANE_ID_2(x) (((x) >> S_AEC_RX_LANE_ID_2) & M_AEC_RX_LANE_ID_2)
+
+#define S_AEC_RX_LANE_ID_1    2
+#define M_AEC_RX_LANE_ID_1    0x3U
+#define V_AEC_RX_LANE_ID_1(x) ((x) << S_AEC_RX_LANE_ID_1)
+#define G_AEC_RX_LANE_ID_1(x) (((x) >> S_AEC_RX_LANE_ID_1) & M_AEC_RX_LANE_ID_1)
+
+#define S_AEC_RX_LANE_ID_O    0
+#define M_AEC_RX_LANE_ID_O    0x3U
+#define V_AEC_RX_LANE_ID_O(x) ((x) << S_AEC_RX_LANE_ID_O)
+#define G_AEC_RX_LANE_ID_O(x) (((x) >> S_AEC_RX_LANE_ID_O) & M_AEC_RX_LANE_ID_O)
+
+#define A_MAC_PORT_AEC_XGMII_TIMER_LO_40G 0x840
+
+#define S_XGMII_CLK_IN_1MS_LO_40G    0
+#define M_XGMII_CLK_IN_1MS_LO_40G    0xffffU
+#define V_XGMII_CLK_IN_1MS_LO_40G(x) ((x) << S_XGMII_CLK_IN_1MS_LO_40G)
+#define G_XGMII_CLK_IN_1MS_LO_40G(x) (((x) >> S_XGMII_CLK_IN_1MS_LO_40G) & M_XGMII_CLK_IN_1MS_LO_40G)
+
+#define A_MAC_PORT_AEC_XGMII_TIMER_HI_40G 0x844
+
+#define S_XGMII_CLK_IN_1MS_HI_40G    0
+#define M_XGMII_CLK_IN_1MS_HI_40G    0xfU
+#define V_XGMII_CLK_IN_1MS_HI_40G(x) ((x) << S_XGMII_CLK_IN_1MS_HI_40G)
+#define G_XGMII_CLK_IN_1MS_HI_40G(x) (((x) >> S_XGMII_CLK_IN_1MS_HI_40G) & M_XGMII_CLK_IN_1MS_HI_40G)
+
+#define A_MAC_PORT_AEC_XGMII_TIMER_LO_100G 0x848
+
+#define S_XGMII_CLK_IN_1MS_LO_100G    0
+#define M_XGMII_CLK_IN_1MS_LO_100G    0xffffU
+#define V_XGMII_CLK_IN_1MS_LO_100G(x) ((x) << S_XGMII_CLK_IN_1MS_LO_100G)
+#define G_XGMII_CLK_IN_1MS_LO_100G(x) (((x) >> S_XGMII_CLK_IN_1MS_LO_100G) & M_XGMII_CLK_IN_1MS_LO_100G)
+
+#define A_MAC_PORT_AEC_XGMII_TIMER_HI_100G 0x84c
+
+#define S_XGMII_CLK_IN_1MS_HI_100G    0
+#define M_XGMII_CLK_IN_1MS_HI_100G    0xfU
+#define V_XGMII_CLK_IN_1MS_HI_100G(x) ((x) << S_XGMII_CLK_IN_1MS_HI_100G)
+#define G_XGMII_CLK_IN_1MS_HI_100G(x) (((x) >> S_XGMII_CLK_IN_1MS_HI_100G) & M_XGMII_CLK_IN_1MS_HI_100G)
+
+#define A_MAC_PORT_AEC_DEBUG_LO_0 0x850
+
+#define S_CTL_FSM_CUR_STATE    28
+#define M_CTL_FSM_CUR_STATE    0x7U
+#define V_CTL_FSM_CUR_STATE(x) ((x) << S_CTL_FSM_CUR_STATE)
+#define G_CTL_FSM_CUR_STATE(x) (((x) >> S_CTL_FSM_CUR_STATE) & M_CTL_FSM_CUR_STATE)
+
+#define S_CIN_FSM_CUR_STATE    26
+#define M_CIN_FSM_CUR_STATE    0x3U
+#define V_CIN_FSM_CUR_STATE(x) ((x) << S_CIN_FSM_CUR_STATE)
+#define G_CIN_FSM_CUR_STATE(x) (((x) >> S_CIN_FSM_CUR_STATE) & M_CIN_FSM_CUR_STATE)
+
+#define S_CRI_FSM_CUR_STATE    23
+#define M_CRI_FSM_CUR_STATE    0x7U
+#define V_CRI_FSM_CUR_STATE(x) ((x) << S_CRI_FSM_CUR_STATE)
+#define G_CRI_FSM_CUR_STATE(x) (((x) >> S_CRI_FSM_CUR_STATE) & M_CRI_FSM_CUR_STATE)
+
+#define S_CU_C3_ACK_VALUE    21
+#define M_CU_C3_ACK_VALUE    0x3U
+#define V_CU_C3_ACK_VALUE(x) ((x) << S_CU_C3_ACK_VALUE)
+#define G_CU_C3_ACK_VALUE(x) (((x) >> S_CU_C3_ACK_VALUE) & M_CU_C3_ACK_VALUE)
+
+#define S_CU_C2_ACK_VALUE    19
+#define M_CU_C2_ACK_VALUE    0x3U
+#define V_CU_C2_ACK_VALUE(x) ((x) << S_CU_C2_ACK_VALUE)
+#define G_CU_C2_ACK_VALUE(x) (((x) >> S_CU_C2_ACK_VALUE) & M_CU_C2_ACK_VALUE)
+
+#define S_CU_C1_ACK_VALUE    17
+#define M_CU_C1_ACK_VALUE    0x3U
+#define V_CU_C1_ACK_VALUE(x) ((x) << S_CU_C1_ACK_VALUE)
+#define G_CU_C1_ACK_VALUE(x) (((x) >> S_CU_C1_ACK_VALUE) & M_CU_C1_ACK_VALUE)
+
+#define S_CU_C0_ACK_VALUE    15
+#define M_CU_C0_ACK_VALUE    0x3U
+#define V_CU_C0_ACK_VALUE(x) ((x) << S_CU_C0_ACK_VALUE)
+#define G_CU_C0_ACK_VALUE(x) (((x) >> S_CU_C0_ACK_VALUE) & M_CU_C0_ACK_VALUE)
+
+#define S_CX_INIT    13
+#define V_CX_INIT(x) ((x) << S_CX_INIT)
+#define F_CX_INIT    V_CX_INIT(1U)
+
+#define S_CX_PRESET    12
+#define V_CX_PRESET(x) ((x) << S_CX_PRESET)
+#define F_CX_PRESET    V_CX_PRESET(1U)
+
+#define S_CUF_C3_UPDATE    9
+#define M_CUF_C3_UPDATE    0x3U
+#define V_CUF_C3_UPDATE(x) ((x) << S_CUF_C3_UPDATE)
+#define G_CUF_C3_UPDATE(x) (((x) >> S_CUF_C3_UPDATE) & M_CUF_C3_UPDATE)
+
+#define S_CUF_C2_UPDATE    7
+#define M_CUF_C2_UPDATE    0x3U
+#define V_CUF_C2_UPDATE(x) ((x) << S_CUF_C2_UPDATE)
+#define G_CUF_C2_UPDATE(x) (((x) >> S_CUF_C2_UPDATE) & M_CUF_C2_UPDATE)
+
+#define S_CUF_C1_UPDATE    5
+#define M_CUF_C1_UPDATE    0x3U
+#define V_CUF_C1_UPDATE(x) ((x) << S_CUF_C1_UPDATE)
+#define G_CUF_C1_UPDATE(x) (((x) >> S_CUF_C1_UPDATE) & M_CUF_C1_UPDATE)
+
+#define S_CUF_C0_UPDATE    3
+#define M_CUF_C0_UPDATE    0x3U
+#define V_CUF_C0_UPDATE(x) ((x) << S_CUF_C0_UPDATE)
+#define G_CUF_C0_UPDATE(x) (((x) >> S_CUF_C0_UPDATE) & M_CUF_C0_UPDATE)
+
+#define S_REG_FPH_ATTR_TXUPDAT_VALID    2
+#define V_REG_FPH_ATTR_TXUPDAT_VALID(x) ((x) << S_REG_FPH_ATTR_TXUPDAT_VALID)
+#define F_REG_FPH_ATTR_TXUPDAT_VALID    V_REG_FPH_ATTR_TXUPDAT_VALID(1U)
+
+#define S_REG_FPH_ATTR_TXSTAT_VALID    1
+#define V_REG_FPH_ATTR_TXSTAT_VALID(x) ((x) << S_REG_FPH_ATTR_TXSTAT_VALID)
+#define F_REG_FPH_ATTR_TXSTAT_VALID    V_REG_FPH_ATTR_TXSTAT_VALID(1U)
+
+#define S_REG_MAN_DEC_REQ    0
+#define V_REG_MAN_DEC_REQ(x) ((x) << S_REG_MAN_DEC_REQ)
+#define F_REG_MAN_DEC_REQ    V_REG_MAN_DEC_REQ(1U)
+
+#define A_MAC_PORT_AEC_DEBUG_HI_0 0x854
+
+#define S_FC_LSNA_    12
+#define V_FC_LSNA_(x) ((x) << S_FC_LSNA_)
+#define F_FC_LSNA_    V_FC_LSNA_(1U)
+
+#define S_CUF_C0_FSM_DEBUG    9
+#define M_CUF_C0_FSM_DEBUG    0x7U
+#define V_CUF_C0_FSM_DEBUG(x) ((x) << S_CUF_C0_FSM_DEBUG)
+#define G_CUF_C0_FSM_DEBUG(x) (((x) >> S_CUF_C0_FSM_DEBUG) & M_CUF_C0_FSM_DEBUG)
+
+#define S_CUF_C1_FSM_DEBUG    6
+#define M_CUF_C1_FSM_DEBUG    0x7U
+#define V_CUF_C1_FSM_DEBUG(x) ((x) << S_CUF_C1_FSM_DEBUG)
+#define G_CUF_C1_FSM_DEBUG(x) (((x) >> S_CUF_C1_FSM_DEBUG) & M_CUF_C1_FSM_DEBUG)
+
+#define S_CUF_C2_FSM_DEBUG    3
+#define M_CUF_C2_FSM_DEBUG    0x7U
+#define V_CUF_C2_FSM_DEBUG(x) ((x) << S_CUF_C2_FSM_DEBUG)
+#define G_CUF_C2_FSM_DEBUG(x) (((x) >> S_CUF_C2_FSM_DEBUG) & M_CUF_C2_FSM_DEBUG)
+
+#define S_LCK_FSM_CUR_STATE    0
+#define M_LCK_FSM_CUR_STATE    0x7U
+#define V_LCK_FSM_CUR_STATE(x) ((x) << S_LCK_FSM_CUR_STATE)
+#define G_LCK_FSM_CUR_STATE(x) (((x) >> S_LCK_FSM_CUR_STATE) & M_LCK_FSM_CUR_STATE)
+
+#define A_MAC_PORT_AEC_DEBUG_LO_1 0x858
+#define A_MAC_PORT_AEC_DEBUG_HI_1 0x85c
+#define A_MAC_PORT_AEC_DEBUG_LO_2 0x860
+#define A_MAC_PORT_AEC_DEBUG_HI_2 0x864
+#define A_MAC_PORT_AEC_DEBUG_LO_3 0x868
+#define A_MAC_PORT_AEC_DEBUG_HI_3 0x86c
+#define A_MAC_PORT_MAC_DEBUG_RO 0x870
+
+#define S_MAC40G100G_TX_UNDERFLOW    13
+#define V_MAC40G100G_TX_UNDERFLOW(x) ((x) << S_MAC40G100G_TX_UNDERFLOW)
+#define F_MAC40G100G_TX_UNDERFLOW    V_MAC40G100G_TX_UNDERFLOW(1U)
+
+#define S_MAC1G10G_MAGIC_IND    12
+#define V_MAC1G10G_MAGIC_IND(x) ((x) << S_MAC1G10G_MAGIC_IND)
+#define F_MAC1G10G_MAGIC_IND    V_MAC1G10G_MAGIC_IND(1U)
+
+#define S_MAC1G10G_FF_RX_EMPTY    11
+#define V_MAC1G10G_FF_RX_EMPTY(x) ((x) << S_MAC1G10G_FF_RX_EMPTY)
+#define F_MAC1G10G_FF_RX_EMPTY    V_MAC1G10G_FF_RX_EMPTY(1U)
+
+#define S_MAC1G10G_FF_TX_OVR_ERR    10
+#define V_MAC1G10G_FF_TX_OVR_ERR(x) ((x) << S_MAC1G10G_FF_TX_OVR_ERR)
+#define F_MAC1G10G_FF_TX_OVR_ERR    V_MAC1G10G_FF_TX_OVR_ERR(1U)
+
+#define S_MAC1G10G_IF_MODE_ENA    8
+#define M_MAC1G10G_IF_MODE_ENA    0x3U
+#define V_MAC1G10G_IF_MODE_ENA(x) ((x) << S_MAC1G10G_IF_MODE_ENA)
+#define G_MAC1G10G_IF_MODE_ENA(x) (((x) >> S_MAC1G10G_IF_MODE_ENA) & M_MAC1G10G_IF_MODE_ENA)
+
+#define S_MAC1G10G_MII_ENA_10    7
+#define V_MAC1G10G_MII_ENA_10(x) ((x) << S_MAC1G10G_MII_ENA_10)
+#define F_MAC1G10G_MII_ENA_10    V_MAC1G10G_MII_ENA_10(1U)
+
+#define S_MAC1G10G_PAUSE_ON    6
+#define V_MAC1G10G_PAUSE_ON(x) ((x) << S_MAC1G10G_PAUSE_ON)
+#define F_MAC1G10G_PAUSE_ON    V_MAC1G10G_PAUSE_ON(1U)
+
+#define S_MAC1G10G_PFC_MODE    5
+#define V_MAC1G10G_PFC_MODE(x) ((x) << S_MAC1G10G_PFC_MODE)
+#define F_MAC1G10G_PFC_MODE    V_MAC1G10G_PFC_MODE(1U)
+
+#define S_MAC1G10G_RX_SFD_O    4
+#define V_MAC1G10G_RX_SFD_O(x) ((x) << S_MAC1G10G_RX_SFD_O)
+#define F_MAC1G10G_RX_SFD_O    V_MAC1G10G_RX_SFD_O(1U)
+
+#define S_MAC1G10G_TX_EMPTY    3
+#define V_MAC1G10G_TX_EMPTY(x) ((x) << S_MAC1G10G_TX_EMPTY)
+#define F_MAC1G10G_TX_EMPTY    V_MAC1G10G_TX_EMPTY(1U)
+
+#define S_MAC1G10G_TX_SFD_O    2
+#define V_MAC1G10G_TX_SFD_O(x) ((x) << S_MAC1G10G_TX_SFD_O)
+#define F_MAC1G10G_TX_SFD_O    V_MAC1G10G_TX_SFD_O(1U)
+
+#define S_MAC1G10G_TX_TS_FRM_OUT    1
+#define V_MAC1G10G_TX_TS_FRM_OUT(x) ((x) << S_MAC1G10G_TX_TS_FRM_OUT)
+#define F_MAC1G10G_TX_TS_FRM_OUT    V_MAC1G10G_TX_TS_FRM_OUT(1U)
+
+#define S_MAC1G10G_TX_UNDERFLOW    0
+#define V_MAC1G10G_TX_UNDERFLOW(x) ((x) << S_MAC1G10G_TX_UNDERFLOW)
+#define F_MAC1G10G_TX_UNDERFLOW    V_MAC1G10G_TX_UNDERFLOW(1U)
+
+#define A_MAC_PORT_MAC_CTRL_RW 0x874
+
+#define S_MAC40G100G_FF_TX_PFC_XOFF    17
+#define M_MAC40G100G_FF_TX_PFC_XOFF    0xffU
+#define V_MAC40G100G_FF_TX_PFC_XOFF(x) ((x) << S_MAC40G100G_FF_TX_PFC_XOFF)
+#define G_MAC40G100G_FF_TX_PFC_XOFF(x) (((x) >> S_MAC40G100G_FF_TX_PFC_XOFF) & M_MAC40G100G_FF_TX_PFC_XOFF)
+
+#define S_MAC40G100G_TX_LOC_FAULT    16
+#define V_MAC40G100G_TX_LOC_FAULT(x) ((x) << S_MAC40G100G_TX_LOC_FAULT)
+#define F_MAC40G100G_TX_LOC_FAULT    V_MAC40G100G_TX_LOC_FAULT(1U)
+
+#define S_MAC40G100G_TX_REM_FAULT    15
+#define V_MAC40G100G_TX_REM_FAULT(x) ((x) << S_MAC40G100G_TX_REM_FAULT)
+#define F_MAC40G100G_TX_REM_FAULT    V_MAC40G100G_TX_REM_FAULT(1U)
+
+#define S_MAC40G_LOOP_BCK    14
+#define V_MAC40G_LOOP_BCK(x) ((x) << S_MAC40G_LOOP_BCK)
+#define F_MAC40G_LOOP_BCK    V_MAC40G_LOOP_BCK(1U)
+
+#define S_MAC1G10G_MAGIC_ENA    13
+#define V_MAC1G10G_MAGIC_ENA(x) ((x) << S_MAC1G10G_MAGIC_ENA)
+#define F_MAC1G10G_MAGIC_ENA    V_MAC1G10G_MAGIC_ENA(1U)
+
+#define S_MAC1G10G_IF_MODE_SET    11
+#define M_MAC1G10G_IF_MODE_SET    0x3U
+#define V_MAC1G10G_IF_MODE_SET(x) ((x) << S_MAC1G10G_IF_MODE_SET)
+#define G_MAC1G10G_IF_MODE_SET(x) (((x) >> S_MAC1G10G_IF_MODE_SET) & M_MAC1G10G_IF_MODE_SET)
+
+#define S_MAC1G10G_TX_LOC_FAULT    10
+#define V_MAC1G10G_TX_LOC_FAULT(x) ((x) << S_MAC1G10G_TX_LOC_FAULT)
+#define F_MAC1G10G_TX_LOC_FAULT    V_MAC1G10G_TX_LOC_FAULT(1U)
+
+#define S_MAC1G10G_TX_REM_FAULT    9
+#define V_MAC1G10G_TX_REM_FAULT(x) ((x) << S_MAC1G10G_TX_REM_FAULT)
+#define F_MAC1G10G_TX_REM_FAULT    V_MAC1G10G_TX_REM_FAULT(1U)
+
+#define S_MAC1G10G_XOFF_GEN    1
+#define M_MAC1G10G_XOFF_GEN    0xffU
+#define V_MAC1G10G_XOFF_GEN(x) ((x) << S_MAC1G10G_XOFF_GEN)
+#define G_MAC1G10G_XOFF_GEN(x) (((x) >> S_MAC1G10G_XOFF_GEN) & M_MAC1G10G_XOFF_GEN)
+
+#define S_MAC1G_LOOP_BCK    0
+#define V_MAC1G_LOOP_BCK(x) ((x) << S_MAC1G_LOOP_BCK)
+#define F_MAC1G_LOOP_BCK    V_MAC1G_LOOP_BCK(1U)
+
+#define A_MAC_PORT_PCS_DEBUG0_RO 0x878
+
+#define S_FPGA_LOCK    26
+#define M_FPGA_LOCK    0xfU
+#define V_FPGA_LOCK(x) ((x) << S_FPGA_LOCK)
+#define G_FPGA_LOCK(x) (((x) >> S_FPGA_LOCK) & M_FPGA_LOCK)
+
+#define S_T6_AN_DONE    25
+#define V_T6_AN_DONE(x) ((x) << S_T6_AN_DONE)
+#define F_T6_AN_DONE    V_T6_AN_DONE(1U)
+
+#define S_AN_INT    24
+#define V_AN_INT(x) ((x) << S_AN_INT)
+#define F_AN_INT    V_AN_INT(1U)
+
+#define S_AN_PCS_RX_CLK_ENA    23
+#define V_AN_PCS_RX_CLK_ENA(x) ((x) << S_AN_PCS_RX_CLK_ENA)
+#define F_AN_PCS_RX_CLK_ENA    V_AN_PCS_RX_CLK_ENA(1U)
+
+#define S_AN_PCS_TX_CLK_ENA    22
+#define V_AN_PCS_TX_CLK_ENA(x) ((x) << S_AN_PCS_TX_CLK_ENA)
+#define F_AN_PCS_TX_CLK_ENA    V_AN_PCS_TX_CLK_ENA(1U)
+
+#define S_AN_SELECT    17
+#define M_AN_SELECT    0x1fU
+#define V_AN_SELECT(x) ((x) << S_AN_SELECT)
+#define G_AN_SELECT(x) (((x) >> S_AN_SELECT) & M_AN_SELECT)
+
+#define S_AN_PROG    16
+#define V_AN_PROG(x) ((x) << S_AN_PROG)
+#define F_AN_PROG    V_AN_PROG(1U)
+
+#define S_PCS40G_BLOCK_LOCK    12
+#define M_PCS40G_BLOCK_LOCK    0xfU
+#define V_PCS40G_BLOCK_LOCK(x) ((x) << S_PCS40G_BLOCK_LOCK)
+#define G_PCS40G_BLOCK_LOCK(x) (((x) >> S_PCS40G_BLOCK_LOCK) & M_PCS40G_BLOCK_LOCK)
+
+#define S_PCS40G_BER_TIMER_DONE    11
+#define V_PCS40G_BER_TIMER_DONE(x) ((x) << S_PCS40G_BER_TIMER_DONE)
+#define F_PCS40G_BER_TIMER_DONE    V_PCS40G_BER_TIMER_DONE(1U)
+
+#define S_PCS10G_FEC_LOCKED    10
+#define V_PCS10G_FEC_LOCKED(x) ((x) << S_PCS10G_FEC_LOCKED)
+#define F_PCS10G_FEC_LOCKED    V_PCS10G_FEC_LOCKED(1U)
+
+#define S_PCS10G_BLOCK_LOCK    9
+#define V_PCS10G_BLOCK_LOCK(x) ((x) << S_PCS10G_BLOCK_LOCK)
+#define F_PCS10G_BLOCK_LOCK    V_PCS10G_BLOCK_LOCK(1U)
+
+#define S_SGMII_GMII_COL    8
+#define V_SGMII_GMII_COL(x) ((x) << S_SGMII_GMII_COL)
+#define F_SGMII_GMII_COL    V_SGMII_GMII_COL(1U)
+
+#define S_SGMII_GMII_CRS    7
+#define V_SGMII_GMII_CRS(x) ((x) << S_SGMII_GMII_CRS)
+#define F_SGMII_GMII_CRS    V_SGMII_GMII_CRS(1U)
+
+#define S_SGMII_SD_LOOPBACK    6
+#define V_SGMII_SD_LOOPBACK(x) ((x) << S_SGMII_SD_LOOPBACK)
+#define F_SGMII_SD_LOOPBACK    V_SGMII_SD_LOOPBACK(1U)
+
+#define S_SGMII_SG_AN_DONE    5
+#define V_SGMII_SG_AN_DONE(x) ((x) << S_SGMII_SG_AN_DONE)
+#define F_SGMII_SG_AN_DONE    V_SGMII_SG_AN_DONE(1U)
+
+#define S_SGMII_SG_HD    4
+#define V_SGMII_SG_HD(x) ((x) << S_SGMII_SG_HD)
+#define F_SGMII_SG_HD    V_SGMII_SG_HD(1U)
+
+#define S_SGMII_SG_PAGE_RX    3
+#define V_SGMII_SG_PAGE_RX(x) ((x) << S_SGMII_SG_PAGE_RX)
+#define F_SGMII_SG_PAGE_RX    V_SGMII_SG_PAGE_RX(1U)
+
+#define S_SGMII_SG_RX_SYNC    2
+#define V_SGMII_SG_RX_SYNC(x) ((x) << S_SGMII_SG_RX_SYNC)
+#define F_SGMII_SG_RX_SYNC    V_SGMII_SG_RX_SYNC(1U)
+
+#define S_SGMII_SG_SPEED    0
+#define M_SGMII_SG_SPEED    0x3U
+#define V_SGMII_SG_SPEED(x) ((x) << S_SGMII_SG_SPEED)
+#define G_SGMII_SG_SPEED(x) (((x) >> S_SGMII_SG_SPEED) & M_SGMII_SG_SPEED)
+
+#define A_MAC_PORT_PCS_CTRL_RW 0x87c
+
+#define S_TX_LI_FAULT    31
+#define V_TX_LI_FAULT(x) ((x) << S_TX_LI_FAULT)
+#define F_TX_LI_FAULT    V_TX_LI_FAULT(1U)
+
+#define S_T6_PAD    30
+#define V_T6_PAD(x) ((x) << S_T6_PAD)
+#define F_T6_PAD    V_T6_PAD(1U)
+
+#define S_BLK_STB_VAL    22
+#define M_BLK_STB_VAL    0xffU
+#define V_BLK_STB_VAL(x) ((x) << S_BLK_STB_VAL)
+#define G_BLK_STB_VAL(x) (((x) >> S_BLK_STB_VAL) & M_BLK_STB_VAL)
+
+#define S_DEBUG_SEL    18
+#define M_DEBUG_SEL    0xfU
+#define V_DEBUG_SEL(x) ((x) << S_DEBUG_SEL)
+#define G_DEBUG_SEL(x) (((x) >> S_DEBUG_SEL) & M_DEBUG_SEL)
+
+#define S_SGMII_LOOP    15
+#define M_SGMII_LOOP    0x7U
+#define V_SGMII_LOOP(x) ((x) << S_SGMII_LOOP)
+#define G_SGMII_LOOP(x) (((x) >> S_SGMII_LOOP) & M_SGMII_LOOP)
+
+#define S_AN_DIS_TIMER    14
+#define V_AN_DIS_TIMER(x) ((x) << S_AN_DIS_TIMER)
+#define F_AN_DIS_TIMER    V_AN_DIS_TIMER(1U)
+
+#define S_PCS100G_BER_TIMER_SHORT    13
+#define V_PCS100G_BER_TIMER_SHORT(x) ((x) << S_PCS100G_BER_TIMER_SHORT)
+#define F_PCS100G_BER_TIMER_SHORT    V_PCS100G_BER_TIMER_SHORT(1U)
+
+#define S_PCS100G_TX_LANE_THRESH    9
+#define M_PCS100G_TX_LANE_THRESH    0xfU
+#define V_PCS100G_TX_LANE_THRESH(x) ((x) << S_PCS100G_TX_LANE_THRESH)
+#define G_PCS100G_TX_LANE_THRESH(x) (((x) >> S_PCS100G_TX_LANE_THRESH) & M_PCS100G_TX_LANE_THRESH)
+
+#define S_PCS100G_VL_INTVL    8
+#define V_PCS100G_VL_INTVL(x) ((x) << S_PCS100G_VL_INTVL)
+#define F_PCS100G_VL_INTVL    V_PCS100G_VL_INTVL(1U)
+
+#define S_SGMII_TX_LANE_CKMULT    4
+#define M_SGMII_TX_LANE_CKMULT    0x7U
+#define V_SGMII_TX_LANE_CKMULT(x) ((x) << S_SGMII_TX_LANE_CKMULT)
+#define G_SGMII_TX_LANE_CKMULT(x) (((x) >> S_SGMII_TX_LANE_CKMULT) & M_SGMII_TX_LANE_CKMULT)
+
+#define S_SGMII_TX_LANE_THRESH    0
+#define M_SGMII_TX_LANE_THRESH    0xfU
+#define V_SGMII_TX_LANE_THRESH(x) ((x) << S_SGMII_TX_LANE_THRESH)
+#define G_SGMII_TX_LANE_THRESH(x) (((x) >> S_SGMII_TX_LANE_THRESH) & M_SGMII_TX_LANE_THRESH)
+
+#define A_MAC_PORT_PCS_DEBUG1_RO 0x880
+
+#define S_PCS100G_ALIGN_LOCK    21
+#define V_PCS100G_ALIGN_LOCK(x) ((x) << S_PCS100G_ALIGN_LOCK)
+#define F_PCS100G_ALIGN_LOCK    V_PCS100G_ALIGN_LOCK(1U)
+
+#define S_PCS100G_BER_TIMER_DONE    20
+#define V_PCS100G_BER_TIMER_DONE(x) ((x) << S_PCS100G_BER_TIMER_DONE)
+#define F_PCS100G_BER_TIMER_DONE    V_PCS100G_BER_TIMER_DONE(1U)
+
+#define S_PCS100G_BLOCK_LOCK    0
+#define M_PCS100G_BLOCK_LOCK    0xfffffU
+#define V_PCS100G_BLOCK_LOCK(x) ((x) << S_PCS100G_BLOCK_LOCK)
+#define G_PCS100G_BLOCK_LOCK(x) (((x) >> S_PCS100G_BLOCK_LOCK) & M_PCS100G_BLOCK_LOCK)
+
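+/*
+ * Parity-error interrupt bits for the 100G PCS/FEC.  The per-lane PERR_RX*
+ * and PERR_TX* flags below are shared by the interrupt-enable register at
+ * 0x884 and its INT_CAUSE/ENABLE companions at 0x888/0x88c (editor's
+ * gloss from the register names).
+ */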
+#define A_MAC_PORT_PERR_INT_EN_100G 0x884
+
+#define S_PERR_RX_FEC100G_DLY    29
+#define V_PERR_RX_FEC100G_DLY(x) ((x) << S_PERR_RX_FEC100G_DLY)
+#define F_PERR_RX_FEC100G_DLY    V_PERR_RX_FEC100G_DLY(1U)
+
+#define S_PERR_RX_FEC100G    28
+#define V_PERR_RX_FEC100G(x) ((x) << S_PERR_RX_FEC100G)
+#define F_PERR_RX_FEC100G    V_PERR_RX_FEC100G(1U)
+
+#define S_PERR_RX3_FEC100G_DK    27
+#define V_PERR_RX3_FEC100G_DK(x) ((x) << S_PERR_RX3_FEC100G_DK)
+#define F_PERR_RX3_FEC100G_DK    V_PERR_RX3_FEC100G_DK(1U)
+
+#define S_PERR_RX2_FEC100G_DK    26
+#define V_PERR_RX2_FEC100G_DK(x) ((x) << S_PERR_RX2_FEC100G_DK)
+#define F_PERR_RX2_FEC100G_DK    V_PERR_RX2_FEC100G_DK(1U)
+
+#define S_PERR_RX1_FEC100G_DK    25
+#define V_PERR_RX1_FEC100G_DK(x) ((x) << S_PERR_RX1_FEC100G_DK)
+#define F_PERR_RX1_FEC100G_DK    V_PERR_RX1_FEC100G_DK(1U)
+
+#define S_PERR_RX0_FEC100G_DK    24
+#define V_PERR_RX0_FEC100G_DK(x) ((x) << S_PERR_RX0_FEC100G_DK)
+#define F_PERR_RX0_FEC100G_DK    V_PERR_RX0_FEC100G_DK(1U)
+
+#define S_PERR_TX3_PCS100G    23
+#define V_PERR_TX3_PCS100G(x) ((x) << S_PERR_TX3_PCS100G)
+#define F_PERR_TX3_PCS100G    V_PERR_TX3_PCS100G(1U)
+
+#define S_PERR_TX2_PCS100G    22
+#define V_PERR_TX2_PCS100G(x) ((x) << S_PERR_TX2_PCS100G)
+#define F_PERR_TX2_PCS100G    V_PERR_TX2_PCS100G(1U)
+
+#define S_PERR_TX1_PCS100G    21
+#define V_PERR_TX1_PCS100G(x) ((x) << S_PERR_TX1_PCS100G)
+#define F_PERR_TX1_PCS100G    V_PERR_TX1_PCS100G(1U)
+
+#define S_PERR_TX0_PCS100G    20
+#define V_PERR_TX0_PCS100G(x) ((x) << S_PERR_TX0_PCS100G)
+#define F_PERR_TX0_PCS100G    V_PERR_TX0_PCS100G(1U)
+
+#define S_PERR_RX19_PCS100G    19
+#define V_PERR_RX19_PCS100G(x) ((x) << S_PERR_RX19_PCS100G)
+#define F_PERR_RX19_PCS100G    V_PERR_RX19_PCS100G(1U)
+
+#define S_PERR_RX18_PCS100G    18
+#define V_PERR_RX18_PCS100G(x) ((x) << S_PERR_RX18_PCS100G)
+#define F_PERR_RX18_PCS100G    V_PERR_RX18_PCS100G(1U)
+
+#define S_PERR_RX17_PCS100G    17
+#define V_PERR_RX17_PCS100G(x) ((x) << S_PERR_RX17_PCS100G)
+#define F_PERR_RX17_PCS100G    V_PERR_RX17_PCS100G(1U)
+
+#define S_PERR_RX16_PCS100G    16
+#define V_PERR_RX16_PCS100G(x) ((x) << S_PERR_RX16_PCS100G)
+#define F_PERR_RX16_PCS100G    V_PERR_RX16_PCS100G(1U)
+
+#define S_PERR_RX15_PCS100G    15
+#define V_PERR_RX15_PCS100G(x) ((x) << S_PERR_RX15_PCS100G)
+#define F_PERR_RX15_PCS100G    V_PERR_RX15_PCS100G(1U)
+
+#define S_PERR_RX14_PCS100G    14
+#define V_PERR_RX14_PCS100G(x) ((x) << S_PERR_RX14_PCS100G)
+#define F_PERR_RX14_PCS100G    V_PERR_RX14_PCS100G(1U)
+
+#define S_PERR_RX13_PCS100G    13
+#define V_PERR_RX13_PCS100G(x) ((x) << S_PERR_RX13_PCS100G)
+#define F_PERR_RX13_PCS100G    V_PERR_RX13_PCS100G(1U)
+
+#define S_PERR_RX12_PCS100G    12
+#define V_PERR_RX12_PCS100G(x) ((x) << S_PERR_RX12_PCS100G)
+#define F_PERR_RX12_PCS100G    V_PERR_RX12_PCS100G(1U)
+
+#define S_PERR_RX11_PCS100G    11
+#define V_PERR_RX11_PCS100G(x) ((x) << S_PERR_RX11_PCS100G)
+#define F_PERR_RX11_PCS100G    V_PERR_RX11_PCS100G(1U)
+
+#define S_PERR_RX10_PCS100G    10
+#define V_PERR_RX10_PCS100G(x) ((x) << S_PERR_RX10_PCS100G)
+#define F_PERR_RX10_PCS100G    V_PERR_RX10_PCS100G(1U)
+
+#define S_PERR_RX9_PCS100G    9
+#define V_PERR_RX9_PCS100G(x) ((x) << S_PERR_RX9_PCS100G)
+#define F_PERR_RX9_PCS100G    V_PERR_RX9_PCS100G(1U)
+
+#define S_PERR_RX8_PCS100G    8
+#define V_PERR_RX8_PCS100G(x) ((x) << S_PERR_RX8_PCS100G)
+#define F_PERR_RX8_PCS100G    V_PERR_RX8_PCS100G(1U)
+
+#define S_PERR_RX7_PCS100G    7
+#define V_PERR_RX7_PCS100G(x) ((x) << S_PERR_RX7_PCS100G)
+#define F_PERR_RX7_PCS100G    V_PERR_RX7_PCS100G(1U)
+
+#define S_PERR_RX6_PCS100G    6
+#define V_PERR_RX6_PCS100G(x) ((x) << S_PERR_RX6_PCS100G)
+#define F_PERR_RX6_PCS100G    V_PERR_RX6_PCS100G(1U)
+
+#define S_PERR_RX5_PCS100G    5
+#define V_PERR_RX5_PCS100G(x) ((x) << S_PERR_RX5_PCS100G)
+#define F_PERR_RX5_PCS100G    V_PERR_RX5_PCS100G(1U)
+
+#define S_PERR_RX4_PCS100G    4
+#define V_PERR_RX4_PCS100G(x) ((x) << S_PERR_RX4_PCS100G)
+#define F_PERR_RX4_PCS100G    V_PERR_RX4_PCS100G(1U)
+
+#define S_PERR_RX3_PCS100G    3
+#define V_PERR_RX3_PCS100G(x) ((x) << S_PERR_RX3_PCS100G)
+#define F_PERR_RX3_PCS100G    V_PERR_RX3_PCS100G(1U)
+
+#define S_PERR_RX2_PCS100G    2
+#define V_PERR_RX2_PCS100G(x) ((x) << S_PERR_RX2_PCS100G)
+#define F_PERR_RX2_PCS100G    V_PERR_RX2_PCS100G(1U)
+
+#define S_PERR_RX1_PCS100G    1
+#define V_PERR_RX1_PCS100G(x) ((x) << S_PERR_RX1_PCS100G)
+#define F_PERR_RX1_PCS100G    V_PERR_RX1_PCS100G(1U)
+
+#define S_PERR_RX0_PCS100G    0
+#define V_PERR_RX0_PCS100G(x) ((x) << S_PERR_RX0_PCS100G)
+#define F_PERR_RX0_PCS100G    V_PERR_RX0_PCS100G(1U)
+
+#define A_MAC_PORT_PERR_INT_CAUSE_100G 0x888
+#define A_MAC_PORT_PERR_ENABLE_100G 0x88c
+#define A_MAC_PORT_MAC_STAT_DEBUG 0x890
+#define A_MAC_PORT_MAC_25G_50G_AM0 0x894
+#define A_MAC_PORT_MAC_25G_50G_AM1 0x898
+#define A_MAC_PORT_MAC_25G_50G_AM2 0x89c
+#define A_MAC_PORT_MAC_25G_50G_AM3 0x8a0
+#define A_MAC_PORT_MAC_AN_STATE_STATUS 0x8a4
+#define A_MAC_PORT_EPIO_DATA0 0x8c0
+#define A_MAC_PORT_EPIO_DATA1 0x8c4
+#define A_MAC_PORT_EPIO_DATA2 0x8c8
+#define A_MAC_PORT_EPIO_DATA3 0x8cc
+#define A_MAC_PORT_EPIO_OP 0x8d0
+#define A_MAC_PORT_WOL_STATUS 0x8d4
+#define A_MAC_PORT_INT_EN 0x8d8
+
+#define S_TX_TS_AVAIL    29
+#define V_TX_TS_AVAIL(x) ((x) << S_TX_TS_AVAIL)
+#define F_TX_TS_AVAIL    V_TX_TS_AVAIL(1U)
+
+#define S_AN_PAGE_RCVD    2
+#define V_AN_PAGE_RCVD(x) ((x) << S_AN_PAGE_RCVD)
+#define F_AN_PAGE_RCVD    V_AN_PAGE_RCVD(1U)
+
+#define S_PPS    30
+#define V_PPS(x) ((x) << S_PPS)
+#define F_PPS    V_PPS(1U)
+
+#define S_SINGLE_ALARM    28
+#define V_SINGLE_ALARM(x) ((x) << S_SINGLE_ALARM)
+#define F_SINGLE_ALARM    V_SINGLE_ALARM(1U)
+
+#define S_PERIODIC_ALARM    27
+#define V_PERIODIC_ALARM(x) ((x) << S_PERIODIC_ALARM)
+#define F_PERIODIC_ALARM    V_PERIODIC_ALARM(1U)
+
+#define A_MAC_PORT_INT_CAUSE 0x8dc
+#define A_MAC_PORT_PERR_INT_EN 0x8e0
+
+#define S_PERR_PKT_RAM    24
+#define V_PERR_PKT_RAM(x) ((x) << S_PERR_PKT_RAM)
+#define F_PERR_PKT_RAM    V_PERR_PKT_RAM(1U)
+
+#define S_PERR_MASK_RAM    23
+#define V_PERR_MASK_RAM(x) ((x) << S_PERR_MASK_RAM)
+#define F_PERR_MASK_RAM    V_PERR_MASK_RAM(1U)
+
+#define S_PERR_CRC_RAM    22
+#define V_PERR_CRC_RAM(x) ((x) << S_PERR_CRC_RAM)
+#define F_PERR_CRC_RAM    V_PERR_CRC_RAM(1U)
+
+#define S_RX_DFF_SEG0    21
+#define V_RX_DFF_SEG0(x) ((x) << S_RX_DFF_SEG0)
+#define F_RX_DFF_SEG0    V_RX_DFF_SEG0(1U)
+
+#define S_RX_SFF_SEG0    20
+#define V_RX_SFF_SEG0(x) ((x) << S_RX_SFF_SEG0)
+#define F_RX_SFF_SEG0    V_RX_SFF_SEG0(1U)
+
+#define S_RX_DFF_MAC10    19
+#define V_RX_DFF_MAC10(x) ((x) << S_RX_DFF_MAC10)
+#define F_RX_DFF_MAC10    V_RX_DFF_MAC10(1U)
+
+#define S_RX_SFF_MAC10    18
+#define V_RX_SFF_MAC10(x) ((x) << S_RX_SFF_MAC10)
+#define F_RX_SFF_MAC10    V_RX_SFF_MAC10(1U)
+
+#define S_TX_DFF_SEG0    17
+#define V_TX_DFF_SEG0(x) ((x) << S_TX_DFF_SEG0)
+#define F_TX_DFF_SEG0    V_TX_DFF_SEG0(1U)
+
+#define S_TX_SFF_SEG0    16
+#define V_TX_SFF_SEG0(x) ((x) << S_TX_SFF_SEG0)
+#define F_TX_SFF_SEG0    V_TX_SFF_SEG0(1U)
+
+#define S_TX_DFF_MAC10    15
+#define V_TX_DFF_MAC10(x) ((x) << S_TX_DFF_MAC10)
+#define F_TX_DFF_MAC10    V_TX_DFF_MAC10(1U)
+
+#define S_TX_SFF_MAC10    14
+#define V_TX_SFF_MAC10(x) ((x) << S_TX_SFF_MAC10)
+#define F_TX_SFF_MAC10    V_TX_SFF_MAC10(1U)
+
+#define S_RX_STATS    13
+#define V_RX_STATS(x) ((x) << S_RX_STATS)
+#define F_RX_STATS    V_RX_STATS(1U)
+
+#define S_TX_STATS    12
+#define V_TX_STATS(x) ((x) << S_TX_STATS)
+#define F_TX_STATS    V_TX_STATS(1U)
+
+#define S_PERR3_RX_MIX    11
+#define V_PERR3_RX_MIX(x) ((x) << S_PERR3_RX_MIX)
+#define F_PERR3_RX_MIX    V_PERR3_RX_MIX(1U)
+
+#define S_PERR3_RX_SD    10
+#define V_PERR3_RX_SD(x) ((x) << S_PERR3_RX_SD)
+#define F_PERR3_RX_SD    V_PERR3_RX_SD(1U)
+
+#define S_PERR3_TX    9
+#define V_PERR3_TX(x) ((x) << S_PERR3_TX)
+#define F_PERR3_TX    V_PERR3_TX(1U)
+
+#define S_PERR2_RX_MIX    8
+#define V_PERR2_RX_MIX(x) ((x) << S_PERR2_RX_MIX)
+#define F_PERR2_RX_MIX    V_PERR2_RX_MIX(1U)
+
+#define S_PERR2_RX_SD    7
+#define V_PERR2_RX_SD(x) ((x) << S_PERR2_RX_SD)
+#define F_PERR2_RX_SD    V_PERR2_RX_SD(1U)
+
+#define S_PERR2_TX    6
+#define V_PERR2_TX(x) ((x) << S_PERR2_TX)
+#define F_PERR2_TX    V_PERR2_TX(1U)
+
+#define S_PERR1_RX_MIX    5
+#define V_PERR1_RX_MIX(x) ((x) << S_PERR1_RX_MIX)
+#define F_PERR1_RX_MIX    V_PERR1_RX_MIX(1U)
+
+#define S_PERR1_RX_SD    4
+#define V_PERR1_RX_SD(x) ((x) << S_PERR1_RX_SD)
+#define F_PERR1_RX_SD    V_PERR1_RX_SD(1U)
+
+#define S_PERR1_TX    3
+#define V_PERR1_TX(x) ((x) << S_PERR1_TX)
+#define F_PERR1_TX    V_PERR1_TX(1U)
+
+#define S_PERR0_RX_MIX    2
+#define V_PERR0_RX_MIX(x) ((x) << S_PERR0_RX_MIX)
+#define F_PERR0_RX_MIX    V_PERR0_RX_MIX(1U)
+
+#define S_PERR0_RX_SD    1
+#define V_PERR0_RX_SD(x) ((x) << S_PERR0_RX_SD)
+#define F_PERR0_RX_SD    V_PERR0_RX_SD(1U)
+
+#define S_PERR0_TX    0
+#define V_PERR0_TX(x) ((x) << S_PERR0_TX)
+#define F_PERR0_TX    V_PERR0_TX(1U)
+
+#define S_T6_PERR_PKT_RAM    31
+#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
+#define F_T6_PERR_PKT_RAM    V_T6_PERR_PKT_RAM(1U)
+
+#define S_T6_PERR_MASK_RAM    30
+#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
+#define F_T6_PERR_MASK_RAM    V_T6_PERR_MASK_RAM(1U)
+
+#define S_T6_PERR_CRC_RAM    29
+#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
+#define F_T6_PERR_CRC_RAM    V_T6_PERR_CRC_RAM(1U)
+
+#define S_RX_MAC40G    28
+#define V_RX_MAC40G(x) ((x) << S_RX_MAC40G)
+#define F_RX_MAC40G    V_RX_MAC40G(1U)
+
+#define S_TX_MAC40G    27
+#define V_TX_MAC40G(x) ((x) << S_TX_MAC40G)
+#define F_TX_MAC40G    V_TX_MAC40G(1U)
+
+#define S_RX_ST_MAC40G    26
+#define V_RX_ST_MAC40G(x) ((x) << S_RX_ST_MAC40G)
+#define F_RX_ST_MAC40G    V_RX_ST_MAC40G(1U)
+
+#define S_TX_ST_MAC40G    25
+#define V_TX_ST_MAC40G(x) ((x) << S_TX_ST_MAC40G)
+#define F_TX_ST_MAC40G    V_TX_ST_MAC40G(1U)
+
+#define S_TX_MAC1G10G    24
+#define V_TX_MAC1G10G(x) ((x) << S_TX_MAC1G10G)
+#define F_TX_MAC1G10G    V_TX_MAC1G10G(1U)
+
+#define S_RX_MAC1G10G    23
+#define V_RX_MAC1G10G(x) ((x) << S_RX_MAC1G10G)
+#define F_RX_MAC1G10G    V_RX_MAC1G10G(1U)
+
+#define S_RX_STATUS_MAC1G10G    22
+#define V_RX_STATUS_MAC1G10G(x) ((x) << S_RX_STATUS_MAC1G10G)
+#define F_RX_STATUS_MAC1G10G    V_RX_STATUS_MAC1G10G(1U)
+
+#define S_RX_ST_MAC1G10G    21
+#define V_RX_ST_MAC1G10G(x) ((x) << S_RX_ST_MAC1G10G)
+#define F_RX_ST_MAC1G10G    V_RX_ST_MAC1G10G(1U)
+
+#define S_TX_ST_MAC1G10G    20
+#define V_TX_ST_MAC1G10G(x) ((x) << S_TX_ST_MAC1G10G)
+#define F_TX_ST_MAC1G10G    V_TX_ST_MAC1G10G(1U)
+
+#define S_PERR_TX0_PCS40G    19
+#define V_PERR_TX0_PCS40G(x) ((x) << S_PERR_TX0_PCS40G)
+#define F_PERR_TX0_PCS40G    V_PERR_TX0_PCS40G(1U)
+
+#define S_PERR_TX1_PCS40G    18
+#define V_PERR_TX1_PCS40G(x) ((x) << S_PERR_TX1_PCS40G)
+#define F_PERR_TX1_PCS40G    V_PERR_TX1_PCS40G(1U)
+
+#define S_PERR_TX2_PCS40G    17
+#define V_PERR_TX2_PCS40G(x) ((x) << S_PERR_TX2_PCS40G)
+#define F_PERR_TX2_PCS40G    V_PERR_TX2_PCS40G(1U)
+
+#define S_PERR_TX3_PCS40G    16
+#define V_PERR_TX3_PCS40G(x) ((x) << S_PERR_TX3_PCS40G)
+#define F_PERR_TX3_PCS40G    V_PERR_TX3_PCS40G(1U)
+
+#define S_PERR_TX0_FEC40G    15
+#define V_PERR_TX0_FEC40G(x) ((x) << S_PERR_TX0_FEC40G)
+#define F_PERR_TX0_FEC40G    V_PERR_TX0_FEC40G(1U)
+
+#define S_PERR_TX1_FEC40G    14
+#define V_PERR_TX1_FEC40G(x) ((x) << S_PERR_TX1_FEC40G)
+#define F_PERR_TX1_FEC40G    V_PERR_TX1_FEC40G(1U)
+
+#define S_PERR_TX2_FEC40G    13
+#define V_PERR_TX2_FEC40G(x) ((x) << S_PERR_TX2_FEC40G)
+#define F_PERR_TX2_FEC40G    V_PERR_TX2_FEC40G(1U)
+
+#define S_PERR_TX3_FEC40G    12
+#define V_PERR_TX3_FEC40G(x) ((x) << S_PERR_TX3_FEC40G)
+#define F_PERR_TX3_FEC40G    V_PERR_TX3_FEC40G(1U)
+
+#define S_PERR_RX0_PCS40G    11
+#define V_PERR_RX0_PCS40G(x) ((x) << S_PERR_RX0_PCS40G)
+#define F_PERR_RX0_PCS40G    V_PERR_RX0_PCS40G(1U)
+
+#define S_PERR_RX1_PCS40G    10
+#define V_PERR_RX1_PCS40G(x) ((x) << S_PERR_RX1_PCS40G)
+#define F_PERR_RX1_PCS40G    V_PERR_RX1_PCS40G(1U)
+
+#define S_PERR_RX2_PCS40G    9
+#define V_PERR_RX2_PCS40G(x) ((x) << S_PERR_RX2_PCS40G)
+#define F_PERR_RX2_PCS40G    V_PERR_RX2_PCS40G(1U)
+
+#define S_PERR_RX3_PCS40G    8
+#define V_PERR_RX3_PCS40G(x) ((x) << S_PERR_RX3_PCS40G)
+#define F_PERR_RX3_PCS40G    V_PERR_RX3_PCS40G(1U)
+
+#define S_PERR_RX0_FEC40G    7
+#define V_PERR_RX0_FEC40G(x) ((x) << S_PERR_RX0_FEC40G)
+#define F_PERR_RX0_FEC40G    V_PERR_RX0_FEC40G(1U)
+
+#define S_PERR_RX1_FEC40G    6
+#define V_PERR_RX1_FEC40G(x) ((x) << S_PERR_RX1_FEC40G)
+#define F_PERR_RX1_FEC40G    V_PERR_RX1_FEC40G(1U)
+
+#define S_PERR_RX2_FEC40G    5
+#define V_PERR_RX2_FEC40G(x) ((x) << S_PERR_RX2_FEC40G)
+#define F_PERR_RX2_FEC40G    V_PERR_RX2_FEC40G(1U)
+
+#define S_PERR_RX3_FEC40G    4
+#define V_PERR_RX3_FEC40G(x) ((x) << S_PERR_RX3_FEC40G)
+#define F_PERR_RX3_FEC40G    V_PERR_RX3_FEC40G(1U)
+
+#define S_PERR_RX_PCS10G_LPBK    3
+#define V_PERR_RX_PCS10G_LPBK(x) ((x) << S_PERR_RX_PCS10G_LPBK)
+#define F_PERR_RX_PCS10G_LPBK    V_PERR_RX_PCS10G_LPBK(1U)
+
+#define S_PERR_RX_PCS10G    2
+#define V_PERR_RX_PCS10G(x) ((x) << S_PERR_RX_PCS10G)
+#define F_PERR_RX_PCS10G    V_PERR_RX_PCS10G(1U)
+
+#define S_PERR_RX_PCS1G    1
+#define V_PERR_RX_PCS1G(x) ((x) << S_PERR_RX_PCS1G)
+#define F_PERR_RX_PCS1G    V_PERR_RX_PCS1G(1U)
+
+#define S_PERR_TX_PCS1G    0
+#define V_PERR_TX_PCS1G(x) ((x) << S_PERR_TX_PCS1G)
+#define F_PERR_TX_PCS1G    V_PERR_TX_PCS1G(1U)
+
+#define A_MAC_PORT_PERR_INT_CAUSE 0x8e4
+
+#define S_T6_PERR_PKT_RAM    31
+#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
+#define F_T6_PERR_PKT_RAM    V_T6_PERR_PKT_RAM(1U)
+
+#define S_T6_PERR_MASK_RAM    30
+#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
+#define F_T6_PERR_MASK_RAM    V_T6_PERR_MASK_RAM(1U)
+
+#define S_T6_PERR_CRC_RAM    29
+#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
+#define F_T6_PERR_CRC_RAM    V_T6_PERR_CRC_RAM(1U)
+
+#define A_MAC_PORT_PERR_ENABLE 0x8e8
+
+#define S_T6_PERR_PKT_RAM    31
+#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
+#define F_T6_PERR_PKT_RAM    V_T6_PERR_PKT_RAM(1U)
+
+#define S_T6_PERR_MASK_RAM    30
+#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
+#define F_T6_PERR_MASK_RAM    V_T6_PERR_MASK_RAM(1U)
+
+#define S_T6_PERR_CRC_RAM    29
+#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
+#define F_T6_PERR_CRC_RAM    V_T6_PERR_CRC_RAM(1U)
+
+#define A_MAC_PORT_PERR_INJECT 0x8ec
+
+#define S_MEMSEL_PERR    1
+#define M_MEMSEL_PERR    0x3fU
+#define V_MEMSEL_PERR(x) ((x) << S_MEMSEL_PERR)
+#define G_MEMSEL_PERR(x) (((x) >> S_MEMSEL_PERR) & M_MEMSEL_PERR)
+
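+/*
+ * High-speed serdes (HSS) macro controls.  The A/B suffixes on the fields
+ * below select the two PLLs feeding the lanes: refclk-valid and resync
+ * strobes, recalibration, PLL bypass/power-down, and VCO selection
+ * (editor's summary from the field names).
+ */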
+#define A_MAC_PORT_HSS_CFG0 0x8f0
+
+#define S_HSSREFCLKVALIDA    20
+#define V_HSSREFCLKVALIDA(x) ((x) << S_HSSREFCLKVALIDA)
+#define F_HSSREFCLKVALIDA    V_HSSREFCLKVALIDA(1U)
+
+#define S_HSSREFCLKVALIDB    19
+#define V_HSSREFCLKVALIDB(x) ((x) << S_HSSREFCLKVALIDB)
+#define F_HSSREFCLKVALIDB    V_HSSREFCLKVALIDB(1U)
+
+#define S_HSSRESYNCA    18
+#define V_HSSRESYNCA(x) ((x) << S_HSSRESYNCA)
+#define F_HSSRESYNCA    V_HSSRESYNCA(1U)
+
+#define S_HSSRESYNCB    16
+#define V_HSSRESYNCB(x) ((x) << S_HSSRESYNCB)
+#define F_HSSRESYNCB    V_HSSRESYNCB(1U)
+
+#define S_HSSRECCALA    15
+#define V_HSSRECCALA(x) ((x) << S_HSSRECCALA)
+#define F_HSSRECCALA    V_HSSRECCALA(1U)
+
+#define S_HSSRECCALB    13
+#define V_HSSRECCALB(x) ((x) << S_HSSRECCALB)
+#define F_HSSRECCALB    V_HSSRECCALB(1U)
+
+#define S_HSSPLLBYPA    12
+#define V_HSSPLLBYPA(x) ((x) << S_HSSPLLBYPA)
+#define F_HSSPLLBYPA    V_HSSPLLBYPA(1U)
+
+#define S_HSSPLLBYPB    11
+#define V_HSSPLLBYPB(x) ((x) << S_HSSPLLBYPB)
+#define F_HSSPLLBYPB    V_HSSPLLBYPB(1U)
+
+#define S_HSSPDWNPLLA    10
+#define V_HSSPDWNPLLA(x) ((x) << S_HSSPDWNPLLA)
+#define F_HSSPDWNPLLA    V_HSSPDWNPLLA(1U)
+
+#define S_HSSPDWNPLLB    9
+#define V_HSSPDWNPLLB(x) ((x) << S_HSSPDWNPLLB)
+#define F_HSSPDWNPLLB    V_HSSPDWNPLLB(1U)
+
+#define S_HSSVCOSELA    8
+#define V_HSSVCOSELA(x) ((x) << S_HSSVCOSELA)
+#define F_HSSVCOSELA    V_HSSVCOSELA(1U)
+
+#define S_HSSVCOSELB    7
+#define V_HSSVCOSELB(x) ((x) << S_HSSVCOSELB)
+#define F_HSSVCOSELB    V_HSSVCOSELB(1U)
+
+#define S_HSSCALCOMP    6
+#define V_HSSCALCOMP(x) ((x) << S_HSSCALCOMP)
+#define F_HSSCALCOMP    V_HSSCALCOMP(1U)
+
+#define S_HSSCALENAB    5
+#define V_HSSCALENAB(x) ((x) << S_HSSCALENAB)
+#define F_HSSCALENAB    V_HSSCALENAB(1U)
+
+#define A_MAC_PORT_HSS_CFG1 0x8f4
+
+#define S_RXACONFIGSEL    30
+#define M_RXACONFIGSEL    0x3U
+#define V_RXACONFIGSEL(x) ((x) << S_RXACONFIGSEL)
+#define G_RXACONFIGSEL(x) (((x) >> S_RXACONFIGSEL) & M_RXACONFIGSEL)
+
+#define S_RXAQUIET    29
+#define V_RXAQUIET(x) ((x) << S_RXAQUIET)
+#define F_RXAQUIET    V_RXAQUIET(1U)
+
+#define S_RXAREFRESH    28
+#define V_RXAREFRESH(x) ((x) << S_RXAREFRESH)
+#define F_RXAREFRESH    V_RXAREFRESH(1U)
+
+#define S_RXBCONFIGSEL    26
+#define M_RXBCONFIGSEL    0x3U
+#define V_RXBCONFIGSEL(x) ((x) << S_RXBCONFIGSEL)
+#define G_RXBCONFIGSEL(x) (((x) >> S_RXBCONFIGSEL) & M_RXBCONFIGSEL)
+
+#define S_RXBQUIET    25
+#define V_RXBQUIET(x) ((x) << S_RXBQUIET)
+#define F_RXBQUIET    V_RXBQUIET(1U)
+
+#define S_RXBREFRESH    24
+#define V_RXBREFRESH(x) ((x) << S_RXBREFRESH)
+#define F_RXBREFRESH    V_RXBREFRESH(1U)
+
+#define S_RXCCONFIGSEL    22
+#define M_RXCCONFIGSEL    0x3U
+#define V_RXCCONFIGSEL(x) ((x) << S_RXCCONFIGSEL)
+#define G_RXCCONFIGSEL(x) (((x) >> S_RXCCONFIGSEL) & M_RXCCONFIGSEL)
+
+#define S_RXCQUIET    21
+#define V_RXCQUIET(x) ((x) << S_RXCQUIET)
+#define F_RXCQUIET    V_RXCQUIET(1U)
+
+#define S_RXCREFRESH    20
+#define V_RXCREFRESH(x) ((x) << S_RXCREFRESH)
+#define F_RXCREFRESH    V_RXCREFRESH(1U)
+
+#define S_RXDCONFIGSEL    18
+#define M_RXDCONFIGSEL    0x3U
+#define V_RXDCONFIGSEL(x) ((x) << S_RXDCONFIGSEL)
+#define G_RXDCONFIGSEL(x) (((x) >> S_RXDCONFIGSEL) & M_RXDCONFIGSEL)
+
+#define S_RXDQUIET    17
+#define V_RXDQUIET(x) ((x) << S_RXDQUIET)
+#define F_RXDQUIET    V_RXDQUIET(1U)
+
+#define S_RXDREFRESH    16
+#define V_RXDREFRESH(x) ((x) << S_RXDREFRESH)
+#define F_RXDREFRESH    V_RXDREFRESH(1U)
+
+#define S_TXACONFIGSEL    14
+#define M_TXACONFIGSEL    0x3U
+#define V_TXACONFIGSEL(x) ((x) << S_TXACONFIGSEL)
+#define G_TXACONFIGSEL(x) (((x) >> S_TXACONFIGSEL) & M_TXACONFIGSEL)
+
+#define S_TXAQUIET    13
+#define V_TXAQUIET(x) ((x) << S_TXAQUIET)
+#define F_TXAQUIET    V_TXAQUIET(1U)
+
+#define S_TXAREFRESH    12
+#define V_TXAREFRESH(x) ((x) << S_TXAREFRESH)
+#define F_TXAREFRESH    V_TXAREFRESH(1U)
+
+#define S_TXBCONFIGSEL    10
+#define M_TXBCONFIGSEL    0x3U
+#define V_TXBCONFIGSEL(x) ((x) << S_TXBCONFIGSEL)
+#define G_TXBCONFIGSEL(x) (((x) >> S_TXBCONFIGSEL) & M_TXBCONFIGSEL)
+
+#define S_TXBQUIET    9
+#define V_TXBQUIET(x) ((x) << S_TXBQUIET)
+#define F_TXBQUIET    V_TXBQUIET(1U)
+
+#define S_TXBREFRESH    8
+#define V_TXBREFRESH(x) ((x) << S_TXBREFRESH)
+#define F_TXBREFRESH    V_TXBREFRESH(1U)
+
+#define S_TXCCONFIGSEL    6
+#define M_TXCCONFIGSEL    0x3U
+#define V_TXCCONFIGSEL(x) ((x) << S_TXCCONFIGSEL)
+#define G_TXCCONFIGSEL(x) (((x) >> S_TXCCONFIGSEL) & M_TXCCONFIGSEL)
+
+#define S_TXCQUIET    5
+#define V_TXCQUIET(x) ((x) << S_TXCQUIET)
+#define F_TXCQUIET    V_TXCQUIET(1U)
+
+#define S_TXCREFRESH    4
+#define V_TXCREFRESH(x) ((x) << S_TXCREFRESH)
+#define F_TXCREFRESH    V_TXCREFRESH(1U)
+
+#define S_TXDCONFIGSEL    2
+#define M_TXDCONFIGSEL    0x3U
+#define V_TXDCONFIGSEL(x) ((x) << S_TXDCONFIGSEL)
+#define G_TXDCONFIGSEL(x) (((x) >> S_TXDCONFIGSEL) & M_TXDCONFIGSEL)
+
+#define S_TXDQUIET    1
+#define V_TXDQUIET(x) ((x) << S_TXDQUIET)
+#define F_TXDQUIET    V_TXDQUIET(1U)
+
+#define S_TXDREFRESH    0
+#define V_TXDREFRESH(x) ((x) << S_TXDREFRESH)
+#define F_TXDREFRESH    V_TXDREFRESH(1U)
+
+#define A_MAC_PORT_HSS_CFG2 0x8f8
+
+#define S_RXAASSTCLK    31
+#define V_RXAASSTCLK(x) ((x) << S_RXAASSTCLK)
+#define F_RXAASSTCLK    V_RXAASSTCLK(1U)
+
+#define S_T5RXAPRBSRST    30
+#define V_T5RXAPRBSRST(x) ((x) << S_T5RXAPRBSRST)
+#define F_T5RXAPRBSRST    V_T5RXAPRBSRST(1U)
+
+#define S_RXBASSTCLK    29
+#define V_RXBASSTCLK(x) ((x) << S_RXBASSTCLK)
+#define F_RXBASSTCLK    V_RXBASSTCLK(1U)
+
+#define S_T5RXBPRBSRST    28
+#define V_T5RXBPRBSRST(x) ((x) << S_T5RXBPRBSRST)
+#define F_T5RXBPRBSRST    V_T5RXBPRBSRST(1U)
+
+#define S_RXCASSTCLK    27
+#define V_RXCASSTCLK(x) ((x) << S_RXCASSTCLK)
+#define F_RXCASSTCLK    V_RXCASSTCLK(1U)
+
+#define S_T5RXCPRBSRST    26
+#define V_T5RXCPRBSRST(x) ((x) << S_T5RXCPRBSRST)
+#define F_T5RXCPRBSRST    V_T5RXCPRBSRST(1U)
+
+#define S_RXDASSTCLK    25
+#define V_RXDASSTCLK(x) ((x) << S_RXDASSTCLK)
+#define F_RXDASSTCLK    V_RXDASSTCLK(1U)
+
+#define S_T5RXDPRBSRST    24
+#define V_T5RXDPRBSRST(x) ((x) << S_T5RXDPRBSRST)
+#define F_T5RXDPRBSRST    V_T5RXDPRBSRST(1U)
+
+#define A_MAC_PORT_HSS_CFG3 0x8fc
+
+#define S_HSSCALSSTN    25
+#define M_HSSCALSSTN    0x7U
+#define V_HSSCALSSTN(x) ((x) << S_HSSCALSSTN)
+#define G_HSSCALSSTN(x) (((x) >> S_HSSCALSSTN) & M_HSSCALSSTN)
+
+#define S_HSSCALSSTP    22
+#define M_HSSCALSSTP    0x7U
+#define V_HSSCALSSTP(x) ((x) << S_HSSCALSSTP)
+#define G_HSSCALSSTP(x) (((x) >> S_HSSCALSSTP) & M_HSSCALSSTP)
+
+#define S_HSSVBOOSTDIVB    19
+#define M_HSSVBOOSTDIVB    0x7U
+#define V_HSSVBOOSTDIVB(x) ((x) << S_HSSVBOOSTDIVB)
+#define G_HSSVBOOSTDIVB(x) (((x) >> S_HSSVBOOSTDIVB) & M_HSSVBOOSTDIVB)
+
+#define S_HSSVBOOSTDIVA    16
+#define M_HSSVBOOSTDIVA    0x7U
+#define V_HSSVBOOSTDIVA(x) ((x) << S_HSSVBOOSTDIVA)
+#define G_HSSVBOOSTDIVA(x) (((x) >> S_HSSVBOOSTDIVA) & M_HSSVBOOSTDIVA)
+
+#define S_HSSPLLCONFIGB    8
+#define M_HSSPLLCONFIGB    0xffU
+#define V_HSSPLLCONFIGB(x) ((x) << S_HSSPLLCONFIGB)
+#define G_HSSPLLCONFIGB(x) (((x) >> S_HSSPLLCONFIGB) & M_HSSPLLCONFIGB)
+
+#define S_HSSPLLCONFIGA    0
+#define M_HSSPLLCONFIGA    0xffU
+#define V_HSSPLLCONFIGA(x) ((x) << S_HSSPLLCONFIGA)
+#define G_HSSPLLCONFIGA(x) (((x) >> S_HSSPLLCONFIGA) & M_HSSPLLCONFIGA)
+
+#define S_T6_HSSCALSSTN    22
+#define M_T6_HSSCALSSTN    0x3fU
+#define V_T6_HSSCALSSTN(x) ((x) << S_T6_HSSCALSSTN)
+#define G_T6_HSSCALSSTN(x) (((x) >> S_T6_HSSCALSSTN) & M_T6_HSSCALSSTN)
+
+#define S_T6_HSSCALSSTP    16
+#define M_T6_HSSCALSSTP    0x3fU
+#define V_T6_HSSCALSSTP(x) ((x) << S_T6_HSSCALSSTP)
+#define G_T6_HSSCALSSTP(x) (((x) >> S_T6_HSSCALSSTP) & M_T6_HSSCALSSTP)
+
+#define A_MAC_PORT_HSS_CFG4 0x900
+
+#define S_HSSDIVSELA    9
+#define M_HSSDIVSELA    0x1ffU
+#define V_HSSDIVSELA(x) ((x) << S_HSSDIVSELA)
+#define G_HSSDIVSELA(x) (((x) >> S_HSSDIVSELA) & M_HSSDIVSELA)
+
+#define S_HSSDIVSELB    0
+#define M_HSSDIVSELB    0x1ffU
+#define V_HSSDIVSELB(x) ((x) << S_HSSDIVSELB)
+#define G_HSSDIVSELB(x) (((x) >> S_HSSDIVSELB) & M_HSSDIVSELB)
+
+#define S_HSSREFDIVA    24
+#define M_HSSREFDIVA    0xfU
+#define V_HSSREFDIVA(x) ((x) << S_HSSREFDIVA)
+#define G_HSSREFDIVA(x) (((x) >> S_HSSREFDIVA) & M_HSSREFDIVA)
+
+#define S_HSSREFDIVB    20
+#define M_HSSREFDIVB    0xfU
+#define V_HSSREFDIVB(x) ((x) << S_HSSREFDIVB)
+#define G_HSSREFDIVB(x) (((x) >> S_HSSREFDIVB) & M_HSSREFDIVB)
+
+#define S_HSSPLLDIV2B    19
+#define V_HSSPLLDIV2B(x) ((x) << S_HSSPLLDIV2B)
+#define F_HSSPLLDIV2B    V_HSSPLLDIV2B(1U)
+
+#define S_HSSPLLDIV2A    18
+#define V_HSSPLLDIV2A(x) ((x) << S_HSSPLLDIV2A)
+#define F_HSSPLLDIV2A    V_HSSPLLDIV2A(1U)
+
+#define A_MAC_PORT_HSS_STATUS 0x904
+
+#define S_HSSPLLLOCKB    3
+#define V_HSSPLLLOCKB(x) ((x) << S_HSSPLLLOCKB)
+#define F_HSSPLLLOCKB    V_HSSPLLLOCKB(1U)
+
+#define S_HSSPLLLOCKA    2
+#define V_HSSPLLLOCKA(x) ((x) << S_HSSPLLLOCKA)
+#define F_HSSPLLLOCKA    V_HSSPLLLOCKA(1U)
+
+#define S_HSSPRTREADYB    1
+#define V_HSSPRTREADYB(x) ((x) << S_HSSPRTREADYB)
+#define F_HSSPRTREADYB    V_HSSPRTREADYB(1U)
+
+#define S_HSSPRTREADYA    0
+#define V_HSSPRTREADYA(x) ((x) << S_HSSPRTREADYA)
+#define F_HSSPRTREADYA    V_HSSPRTREADYA(1U)
+
+#define S_RXDERROFLOW    19
+#define V_RXDERROFLOW(x) ((x) << S_RXDERROFLOW)
+#define F_RXDERROFLOW    V_RXDERROFLOW(1U)
+
+#define S_RXCERROFLOW    18
+#define V_RXCERROFLOW(x) ((x) << S_RXCERROFLOW)
+#define F_RXCERROFLOW    V_RXCERROFLOW(1U)
+
+#define S_RXBERROFLOW    17
+#define V_RXBERROFLOW(x) ((x) << S_RXBERROFLOW)
+#define F_RXBERROFLOW    V_RXBERROFLOW(1U)
+
+#define S_RXAERROFLOW    16
+#define V_RXAERROFLOW(x) ((x) << S_RXAERROFLOW)
+#define F_RXAERROFLOW    V_RXAERROFLOW(1U)
+
+#define A_MAC_PORT_HSS_EEE_STATUS 0x908
+
+#define S_RXAQUIET_STATUS    15
+#define V_RXAQUIET_STATUS(x) ((x) << S_RXAQUIET_STATUS)
+#define F_RXAQUIET_STATUS    V_RXAQUIET_STATUS(1U)
+
+#define S_RXAREFRESH_STATUS    14
+#define V_RXAREFRESH_STATUS(x) ((x) << S_RXAREFRESH_STATUS)
+#define F_RXAREFRESH_STATUS    V_RXAREFRESH_STATUS(1U)
+
+#define S_RXBQUIET_STATUS    13
+#define V_RXBQUIET_STATUS(x) ((x) << S_RXBQUIET_STATUS)
+#define F_RXBQUIET_STATUS    V_RXBQUIET_STATUS(1U)
+
+#define S_RXBREFRESH_STATUS    12
+#define V_RXBREFRESH_STATUS(x) ((x) << S_RXBREFRESH_STATUS)
+#define F_RXBREFRESH_STATUS    V_RXBREFRESH_STATUS(1U)
+
+#define S_RXCQUIET_STATUS    11
+#define V_RXCQUIET_STATUS(x) ((x) << S_RXCQUIET_STATUS)
+#define F_RXCQUIET_STATUS    V_RXCQUIET_STATUS(1U)
+
+#define S_RXCREFRESH_STATUS    10
+#define V_RXCREFRESH_STATUS(x) ((x) << S_RXCREFRESH_STATUS)
+#define F_RXCREFRESH_STATUS    V_RXCREFRESH_STATUS(1U)
+
+#define S_RXDQUIET_STATUS    9
+#define V_RXDQUIET_STATUS(x) ((x) << S_RXDQUIET_STATUS)
+#define F_RXDQUIET_STATUS    V_RXDQUIET_STATUS(1U)
+
+#define S_RXDREFRESH_STATUS    8
+#define V_RXDREFRESH_STATUS(x) ((x) << S_RXDREFRESH_STATUS)
+#define F_RXDREFRESH_STATUS    V_RXDREFRESH_STATUS(1U)
+
+#define S_TXAQUIET_STATUS    7
+#define V_TXAQUIET_STATUS(x) ((x) << S_TXAQUIET_STATUS)
+#define F_TXAQUIET_STATUS    V_TXAQUIET_STATUS(1U)
+
+#define S_TXAREFRESH_STATUS    6
+#define V_TXAREFRESH_STATUS(x) ((x) << S_TXAREFRESH_STATUS)
+#define F_TXAREFRESH_STATUS    V_TXAREFRESH_STATUS(1U)
+
+#define S_TXBQUIET_STATUS    5
+#define V_TXBQUIET_STATUS(x) ((x) << S_TXBQUIET_STATUS)
+#define F_TXBQUIET_STATUS    V_TXBQUIET_STATUS(1U)
+
+#define S_TXBREFRESH_STATUS    4
+#define V_TXBREFRESH_STATUS(x) ((x) << S_TXBREFRESH_STATUS)
+#define F_TXBREFRESH_STATUS    V_TXBREFRESH_STATUS(1U)
+
+#define S_TXCQUIET_STATUS    3
+#define V_TXCQUIET_STATUS(x) ((x) << S_TXCQUIET_STATUS)
+#define F_TXCQUIET_STATUS    V_TXCQUIET_STATUS(1U)
+
+#define S_TXCREFRESH_STATUS    2
+#define V_TXCREFRESH_STATUS(x) ((x) << S_TXCREFRESH_STATUS)
+#define F_TXCREFRESH_STATUS    V_TXCREFRESH_STATUS(1U)
+
+#define S_TXDQUIET_STATUS    1
+#define V_TXDQUIET_STATUS(x) ((x) << S_TXDQUIET_STATUS)
+#define F_TXDQUIET_STATUS    V_TXDQUIET_STATUS(1U)
+
+#define S_TXDREFRESH_STATUS    0
+#define V_TXDREFRESH_STATUS(x) ((x) << S_TXDREFRESH_STATUS)
+#define F_TXDREFRESH_STATUS    V_TXDREFRESH_STATUS(1U)
+
+#define A_MAC_PORT_HSS_SIGDET_STATUS 0x90c
+#define A_MAC_PORT_HSS_PL_CTL 0x910
+
+#define S_TOV    16
+#define M_TOV    0xffU
+#define V_TOV(x) ((x) << S_TOV)
+#define G_TOV(x) (((x) >> S_TOV) & M_TOV)
+
+#define S_TSU    8
+#define M_TSU    0xffU
+#define V_TSU(x) ((x) << S_TSU)
+#define G_TSU(x) (((x) >> S_TSU) & M_TSU)
+
+#define S_IPW    0
+#define M_IPW    0xffU
+#define V_IPW(x) ((x) << S_IPW)
+#define G_IPW(x) (((x) >> S_IPW) & M_IPW)
+
+#define A_MAC_PORT_RUNT_FRAME 0x914
+
+#define S_RUNTCLEAR    16
+#define V_RUNTCLEAR(x) ((x) << S_RUNTCLEAR)
+#define F_RUNTCLEAR    V_RUNTCLEAR(1U)
+
+#define S_RUNT    0
+#define M_RUNT    0xffffU
+#define V_RUNT(x) ((x) << S_RUNT)
+#define G_RUNT(x) (((x) >> S_RUNT) & M_RUNT)
+
+#define A_MAC_PORT_EEE_STATUS 0x918
+
+#define S_EEE_TX_10G_STATE    10
+#define M_EEE_TX_10G_STATE    0x3U
+#define V_EEE_TX_10G_STATE(x) ((x) << S_EEE_TX_10G_STATE)
+#define G_EEE_TX_10G_STATE(x) (((x) >> S_EEE_TX_10G_STATE) & M_EEE_TX_10G_STATE)
+
+#define S_EEE_RX_10G_STATE    8
+#define M_EEE_RX_10G_STATE    0x3U
+#define V_EEE_RX_10G_STATE(x) ((x) << S_EEE_RX_10G_STATE)
+#define G_EEE_RX_10G_STATE(x) (((x) >> S_EEE_RX_10G_STATE) & M_EEE_RX_10G_STATE)
+
+#define S_EEE_TX_1G_STATE    6
+#define M_EEE_TX_1G_STATE    0x3U
+#define V_EEE_TX_1G_STATE(x) ((x) << S_EEE_TX_1G_STATE)
+#define G_EEE_TX_1G_STATE(x) (((x) >> S_EEE_TX_1G_STATE) & M_EEE_TX_1G_STATE)
+
+#define S_EEE_RX_1G_STATE    4
+#define M_EEE_RX_1G_STATE    0x3U
+#define V_EEE_RX_1G_STATE(x) ((x) << S_EEE_RX_1G_STATE)
+#define G_EEE_RX_1G_STATE(x) (((x) >> S_EEE_RX_1G_STATE) & M_EEE_RX_1G_STATE)
+
+#define S_PMA_RX_REFRESH    3
+#define V_PMA_RX_REFRESH(x) ((x) << S_PMA_RX_REFRESH)
+#define F_PMA_RX_REFRESH    V_PMA_RX_REFRESH(1U)
+
+#define S_PMA_RX_QUIET    2
+#define V_PMA_RX_QUIET(x) ((x) << S_PMA_RX_QUIET)
+#define F_PMA_RX_QUIET    V_PMA_RX_QUIET(1U)
+
+#define S_PMA_TX_REFRESH    1
+#define V_PMA_TX_REFRESH(x) ((x) << S_PMA_TX_REFRESH)
+#define F_PMA_TX_REFRESH    V_PMA_TX_REFRESH(1U)
+
+#define S_PMA_TX_QUIET    0
+#define V_PMA_TX_QUIET(x) ((x) << S_PMA_TX_QUIET)
+#define F_PMA_TX_QUIET    V_PMA_TX_QUIET(1U)
+
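+/*
+ * Clock-gating enables: CGEN appears to gate the port as a whole and
+ * SD0_CGEN..SD7_CGEN the individual serdes lanes (editor's reading of
+ * the names; treat as unverified).
+ */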
+#define A_MAC_PORT_CGEN 0x91c
+
+#define S_CGEN    8
+#define V_CGEN(x) ((x) << S_CGEN)
+#define F_CGEN    V_CGEN(1U)
+
+#define S_SD7_CGEN    7
+#define V_SD7_CGEN(x) ((x) << S_SD7_CGEN)
+#define F_SD7_CGEN    V_SD7_CGEN(1U)
+
+#define S_SD6_CGEN    6
+#define V_SD6_CGEN(x) ((x) << S_SD6_CGEN)
+#define F_SD6_CGEN    V_SD6_CGEN(1U)
+
+#define S_SD5_CGEN    5
+#define V_SD5_CGEN(x) ((x) << S_SD5_CGEN)
+#define F_SD5_CGEN    V_SD5_CGEN(1U)
+
+#define S_SD4_CGEN    4
+#define V_SD4_CGEN(x) ((x) << S_SD4_CGEN)
+#define F_SD4_CGEN    V_SD4_CGEN(1U)
+
+#define S_SD3_CGEN    3
+#define V_SD3_CGEN(x) ((x) << S_SD3_CGEN)
+#define F_SD3_CGEN    V_SD3_CGEN(1U)
+
+#define S_SD2_CGEN    2
+#define V_SD2_CGEN(x) ((x) << S_SD2_CGEN)
+#define F_SD2_CGEN    V_SD2_CGEN(1U)
+
+#define S_SD1_CGEN    1
+#define V_SD1_CGEN(x) ((x) << S_SD1_CGEN)
+#define F_SD1_CGEN    V_SD1_CGEN(1U)
+
+#define S_SD0_CGEN    0
+#define V_SD0_CGEN(x) ((x) << S_SD0_CGEN)
+#define F_SD0_CGEN    V_SD0_CGEN(1U)
+
+#define A_MAC_PORT_CGEN_MTIP 0x920
+
+#define S_MACSEG5_CGEN    11
+#define V_MACSEG5_CGEN(x) ((x) << S_MACSEG5_CGEN)
+#define F_MACSEG5_CGEN    V_MACSEG5_CGEN(1U)
+
+#define S_PCSSEG5_CGEN    10
+#define V_PCSSEG5_CGEN(x) ((x) << S_PCSSEG5_CGEN)
+#define F_PCSSEG5_CGEN    V_PCSSEG5_CGEN(1U)
+
+#define S_MACSEG4_CGEN    9
+#define V_MACSEG4_CGEN(x) ((x) << S_MACSEG4_CGEN)
+#define F_MACSEG4_CGEN    V_MACSEG4_CGEN(1U)
+
+#define S_PCSSEG4_CGEN    8
+#define V_PCSSEG4_CGEN(x) ((x) << S_PCSSEG4_CGEN)
+#define F_PCSSEG4_CGEN    V_PCSSEG4_CGEN(1U)
+
+#define S_MACSEG3_CGEN    7
+#define V_MACSEG3_CGEN(x) ((x) << S_MACSEG3_CGEN)
+#define F_MACSEG3_CGEN    V_MACSEG3_CGEN(1U)
+
+#define S_PCSSEG3_CGEN    6
+#define V_PCSSEG3_CGEN(x) ((x) << S_PCSSEG3_CGEN)
+#define F_PCSSEG3_CGEN    V_PCSSEG3_CGEN(1U)
+
+#define S_MACSEG2_CGEN    5
+#define V_MACSEG2_CGEN(x) ((x) << S_MACSEG2_CGEN)
+#define F_MACSEG2_CGEN    V_MACSEG2_CGEN(1U)
+
+#define S_PCSSEG2_CGEN    4
+#define V_PCSSEG2_CGEN(x) ((x) << S_PCSSEG2_CGEN)
+#define F_PCSSEG2_CGEN    V_PCSSEG2_CGEN(1U)
+
+#define S_MACSEG1_CGEN    3
+#define V_MACSEG1_CGEN(x) ((x) << S_MACSEG1_CGEN)
+#define F_MACSEG1_CGEN    V_MACSEG1_CGEN(1U)
+
+#define S_PCSSEG1_CGEN    2
+#define V_PCSSEG1_CGEN(x) ((x) << S_PCSSEG1_CGEN)
+#define F_PCSSEG1_CGEN    V_PCSSEG1_CGEN(1U)
+
+#define S_MACSEG0_CGEN    1
+#define V_MACSEG0_CGEN(x) ((x) << S_MACSEG0_CGEN)
+#define F_MACSEG0_CGEN    V_MACSEG0_CGEN(1U)
+
+#define S_PCSSEG0_CGEN    0
+#define V_PCSSEG0_CGEN(x) ((x) << S_PCSSEG0_CGEN)
+#define F_PCSSEG0_CGEN    V_PCSSEG0_CGEN(1U)
+
+#define A_MAC_PORT_TX_TS_ID 0x924
+
+#define S_TS_ID    0
+#define M_TS_ID    0x7U
+#define V_TS_ID(x) ((x) << S_TS_ID)
+#define G_TS_ID(x) (((x) >> S_TS_ID) & M_TS_ID)
+
+#define A_MAC_PORT_TX_TS_VAL_LO 0x928
+#define A_MAC_PORT_TX_TS_VAL_HI 0x92c
+#define A_MAC_PORT_EEE_CTL 0x930
+
+#define S_EEE_CTRL    2
+#define M_EEE_CTRL    0x3fffffffU
+#define V_EEE_CTRL(x) ((x) << S_EEE_CTRL)
+#define G_EEE_CTRL(x) (((x) >> S_EEE_CTRL) & M_EEE_CTRL)
+
+#define S_TICK_START    1
+#define V_TICK_START(x) ((x) << S_TICK_START)
+#define F_TICK_START    V_TICK_START(1U)
+
+#define S_EEE_ENABLE    0
+#define V_EEE_ENABLE(x) ((x) << S_EEE_ENABLE)
+#define F_EEE_ENABLE    V_EEE_ENABLE(1U)
+
+#define A_MAC_PORT_EEE_TX_CTL 0x934
+
+#define S_WAKE_TIMER    16
+#define M_WAKE_TIMER    0xffffU
+#define V_WAKE_TIMER(x) ((x) << S_WAKE_TIMER)
+#define G_WAKE_TIMER(x) (((x) >> S_WAKE_TIMER) & M_WAKE_TIMER)
+
+#define S_HSS_TIMER    5
+#define M_HSS_TIMER    0xfU
+#define V_HSS_TIMER(x) ((x) << S_HSS_TIMER)
+#define G_HSS_TIMER(x) (((x) >> S_HSS_TIMER) & M_HSS_TIMER)
+
+#define S_HSS_CTL    4
+#define V_HSS_CTL(x) ((x) << S_HSS_CTL)
+#define F_HSS_CTL    V_HSS_CTL(1U)
+
+#define S_LPI_ACTIVE    3
+#define V_LPI_ACTIVE(x) ((x) << S_LPI_ACTIVE)
+#define F_LPI_ACTIVE    V_LPI_ACTIVE(1U)
+
+#define S_LPI_TXHOLD    2
+#define V_LPI_TXHOLD(x) ((x) << S_LPI_TXHOLD)
+#define F_LPI_TXHOLD    V_LPI_TXHOLD(1U)
+
+#define S_LPI_REQ    1
+#define V_LPI_REQ(x) ((x) << S_LPI_REQ)
+#define F_LPI_REQ    V_LPI_REQ(1U)
+
+#define S_EEE_TX_RESET    0
+#define V_EEE_TX_RESET(x) ((x) << S_EEE_TX_RESET)
+#define F_EEE_TX_RESET    V_EEE_TX_RESET(1U)
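+
+/*
+ * Illustrative sketch (editor's example, not part of the generated file):
+ * a read-modify-write of the WAKE_TIMER field in A_MAC_PORT_EEE_TX_CTL
+ * using the accessor macros above.  t4_read_reg()/t4_write_reg() are the
+ * driver's register accessors; in practice the per-port address is formed
+ * with a port-offset macro such as T5_PORT_REG(port, reg), and the timer
+ * value 0x100 is an arbitrary stand-in.
+ *
+ *	uint32_t v;
+ *
+ *	v = t4_read_reg(sc, T5_PORT_REG(port, A_MAC_PORT_EEE_TX_CTL));
+ *	v &= ~V_WAKE_TIMER(M_WAKE_TIMER);	   clear the old field
+ *	v |= V_WAKE_TIMER(0x100);		   install the new count
+ *	t4_write_reg(sc, T5_PORT_REG(port, A_MAC_PORT_EEE_TX_CTL), v);
+ */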
+
+#define A_MAC_PORT_EEE_RX_CTL 0x938
+
+#define S_LPI_IND    1
+#define V_LPI_IND(x) ((x) << S_LPI_IND)
+#define F_LPI_IND    V_LPI_IND(1U)
+
+#define S_EEE_RX_RESET    0
+#define V_EEE_RX_RESET(x) ((x) << S_EEE_RX_RESET)
+#define F_EEE_RX_RESET    V_EEE_RX_RESET(1U)
+
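+/*
+ * Energy Efficient Ethernet (IEEE 802.3az) low-power-idle timers.  The
+ * sleep/quiet/refresh/wake values below bound how long the 10G and 1G
+ * MACs dwell in each LPI state, matching the LPI_REQ/LPI_IND controls
+ * above.
+ */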
+#define A_MAC_PORT_EEE_TX_10G_SLEEP_TIMER 0x93c
+#define A_MAC_PORT_EEE_TX_10G_QUIET_TIMER 0x940
+#define A_MAC_PORT_EEE_TX_10G_WAKE_TIMER 0x944
+#define A_MAC_PORT_EEE_TX_1G_SLEEP_TIMER 0x948
+#define A_MAC_PORT_EEE_TX_1G_QUIET_TIMER 0x94c
+#define A_MAC_PORT_EEE_TX_1G_REFRESH_TIMER 0x950
+#define A_MAC_PORT_EEE_RX_10G_QUIET_TIMER 0x954
+#define A_MAC_PORT_EEE_RX_10G_WAKE_TIMER 0x958
+#define A_MAC_PORT_EEE_RX_10G_WF_TIMER 0x95c
+#define A_MAC_PORT_EEE_RX_1G_QUIET_TIMER 0x960
+#define A_MAC_PORT_EEE_RX_1G_WAKE_TIMER 0x964
+#define A_MAC_PORT_EEE_WF_COUNT 0x968
+
+#define S_WAKE_CNT_CLR    16
+#define V_WAKE_CNT_CLR(x) ((x) << S_WAKE_CNT_CLR)
+#define F_WAKE_CNT_CLR    V_WAKE_CNT_CLR(1U)
+
+#define S_WAKE_CNT    0
+#define M_WAKE_CNT    0xffffU
+#define V_WAKE_CNT(x) ((x) << S_WAKE_CNT)
+#define G_WAKE_CNT(x) (((x) >> S_WAKE_CNT) & M_WAKE_CNT)
+
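+/*
+ * IEEE 1588 (PTP) hardware clock.  The RD0/RD1 pairs latch the running
+ * timer, WR loads it, and the INCR0/INCR1 registers program the per-tick
+ * increment, which is how the clock rate is trimmed (editor's gloss from
+ * the register names).
+ */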
+#define A_MAC_PORT_PTP_TIMER_RD0_LO 0x96c
+#define A_MAC_PORT_PTP_TIMER_RD0_HI 0x970
+#define A_MAC_PORT_PTP_TIMER_RD1_LO 0x974
+#define A_MAC_PORT_PTP_TIMER_RD1_HI 0x978
+#define A_MAC_PORT_PTP_TIMER_WR_LO 0x97c
+#define A_MAC_PORT_PTP_TIMER_WR_HI 0x980
+#define A_MAC_PORT_PTP_TIMER_OFFSET_0 0x984
+#define A_MAC_PORT_PTP_TIMER_OFFSET_1 0x988
+#define A_MAC_PORT_PTP_TIMER_OFFSET_2 0x98c
+
+#define S_PTP_OFFSET    0
+#define M_PTP_OFFSET    0xffU
+#define V_PTP_OFFSET(x) ((x) << S_PTP_OFFSET)
+#define G_PTP_OFFSET(x) (((x) >> S_PTP_OFFSET) & M_PTP_OFFSET)
+
+#define A_MAC_PORT_PTP_SUM_LO 0x990
+#define A_MAC_PORT_PTP_SUM_HI 0x994
+#define A_MAC_PORT_PTP_TIMER_INCR0 0x998
+
+#define S_Y    16
+#define M_Y    0xffffU
+#define V_Y(x) ((x) << S_Y)
+#define G_Y(x) (((x) >> S_Y) & M_Y)
+
+#define S_X    0
+#define M_X    0xffffU
+#define V_X(x) ((x) << S_X)
+#define G_X(x) (((x) >> S_X) & M_X)
+
+#define A_MAC_PORT_PTP_TIMER_INCR1 0x99c
+
+#define S_Y_TICK    16
+#define M_Y_TICK    0xffffU
+#define V_Y_TICK(x) ((x) << S_Y_TICK)
+#define G_Y_TICK(x) (((x) >> S_Y_TICK) & M_Y_TICK)
+
+#define S_X_TICK    0
+#define M_X_TICK    0xffffU
+#define V_X_TICK(x) ((x) << S_X_TICK)
+#define G_X_TICK(x) (((x) >> S_X_TICK) & M_X_TICK)
+
+#define A_MAC_PORT_PTP_DRIFT_ADJUST_COUNT 0x9a0
+#define A_MAC_PORT_PTP_OFFSET_ADJUST_FINE 0x9a4
+
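+/*
+ * Editor's note: the mask below is named CXGBE_M_B rather than the
+ * conventional M_B, presumably renamed to dodge a collision with an M_B
+ * macro defined elsewhere in the tree.
+ */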
+#define S_B    16
+#define CXGBE_M_B    0xffffU
+#define V_B(x) ((x) << S_B)
+#define G_B(x) (((x) >> S_B) & CXGBE_M_B)
+
+#define S_A    0
+#define M_A    0xffffU
+#define V_A(x) ((x) << S_A)
+#define G_A(x) (((x) >> S_A) & M_A)
+
+#define A_MAC_PORT_PTP_OFFSET_ADJUST_TOTAL 0x9a8
+#define A_MAC_PORT_PTP_CFG 0x9ac
+
+#define S_FRZ    18
+#define V_FRZ(x) ((x) << S_FRZ)
+#define F_FRZ    V_FRZ(1U)
+
+#define S_OFFSER_ADJUST_SIGN    17
+#define V_OFFSER_ADJUST_SIGN(x) ((x) << S_OFFSER_ADJUST_SIGN)
+#define F_OFFSER_ADJUST_SIGN    V_OFFSER_ADJUST_SIGN(1U)
+
+#define S_ADD_OFFSET    16
+#define V_ADD_OFFSET(x) ((x) << S_ADD_OFFSET)
+#define F_ADD_OFFSET    V_ADD_OFFSET(1U)
+
+#define S_CYCLE1    8
+#define M_CYCLE1    0xffU
+#define V_CYCLE1(x) ((x) << S_CYCLE1)
+#define G_CYCLE1(x) (((x) >> S_CYCLE1) & M_CYCLE1)
+
+#define S_Q    0
+#define M_Q    0xffU
+#define V_Q(x) ((x) << S_Q)
+#define G_Q(x) (((x) >> S_Q) & M_Q)
+
+#define S_ALARM_EN    21
+#define V_ALARM_EN(x) ((x) << S_ALARM_EN)
+#define F_ALARM_EN    V_ALARM_EN(1U)
+
+#define S_ALARM_START    20
+#define V_ALARM_START(x) ((x) << S_ALARM_START)
+#define F_ALARM_START    V_ALARM_START(1U)
+
+#define S_PPS_EN    19
+#define V_PPS_EN(x) ((x) << S_PPS_EN)
+#define F_PPS_EN    V_PPS_EN(1U)
+
+#define A_MAC_PORT_PTP_PPS 0x9b0
+#define A_MAC_PORT_PTP_SINGLE_ALARM 0x9b4
+#define A_MAC_PORT_PTP_PERIODIC_ALARM 0x9b8
+#define A_MAC_PORT_PTP_STATUS 0x9bc
+
+#define S_ALARM_DONE    0
+#define V_ALARM_DONE(x) ((x) << S_ALARM_DONE)
+#define F_ALARM_DONE    V_ALARM_DONE(1U)
+
+#define A_MAC_PORT_MTIP_REVISION 0xa00
+
+#define S_CUSTREV    16
+#define M_CUSTREV    0xffffU
+#define V_CUSTREV(x) ((x) << S_CUSTREV)
+#define G_CUSTREV(x) (((x) >> S_CUSTREV) & M_CUSTREV)
+
+#define S_VER    8
+#define M_VER    0xffU
+#define V_VER(x) ((x) << S_VER)
+#define G_VER(x) (((x) >> S_VER) & M_VER)
+
+#define S_MTIP_REV    0
+#define M_MTIP_REV    0xffU
+#define V_MTIP_REV(x) ((x) << S_MTIP_REV)
+#define G_MTIP_REV(x) (((x) >> S_MTIP_REV) & M_MTIP_REV)
+
+#define A_MAC_PORT_MTIP_SCRATCH 0xa04
+#define A_MAC_PORT_MTIP_COMMAND_CONFIG 0xa08
+
+#define S_TX_FLUSH_ENABLE    22
+#define V_TX_FLUSH_ENABLE(x) ((x) << S_TX_FLUSH_ENABLE)
+#define F_TX_FLUSH_ENABLE    V_TX_FLUSH_ENABLE(1U)
+
+#define S_RX_SFD_ANY    21
+#define V_RX_SFD_ANY(x) ((x) << S_RX_SFD_ANY)
+#define F_RX_SFD_ANY    V_RX_SFD_ANY(1U)
+
+#define S_PAUSE_PFC_COMP    20
+#define V_PAUSE_PFC_COMP(x) ((x) << S_PAUSE_PFC_COMP)
+#define F_PAUSE_PFC_COMP    V_PAUSE_PFC_COMP(1U)
+
+#define S_PFC_MODE    19
+#define V_PFC_MODE(x) ((x) << S_PFC_MODE)
+#define F_PFC_MODE    V_PFC_MODE(1U)
+
+#define S_RS_COL_CNT_EXT    18
+#define V_RS_COL_CNT_EXT(x) ((x) << S_RS_COL_CNT_EXT)
+#define F_RS_COL_CNT_EXT    V_RS_COL_CNT_EXT(1U)
+
+#define S_NO_LGTH_CHECK    17
+#define V_NO_LGTH_CHECK(x) ((x) << S_NO_LGTH_CHECK)
+#define F_NO_LGTH_CHECK    V_NO_LGTH_CHECK(1U)
+
+#define S_SEND_IDLE    16
+#define V_SEND_IDLE(x) ((x) << S_SEND_IDLE)
+#define F_SEND_IDLE    V_SEND_IDLE(1U)
+
+#define S_PHY_TXENA    15
+#define V_PHY_TXENA(x) ((x) << S_PHY_TXENA)
+#define F_PHY_TXENA    V_PHY_TXENA(1U)
+
+#define S_RX_ERR_DISC    14
+#define V_RX_ERR_DISC(x) ((x) << S_RX_ERR_DISC)
+#define F_RX_ERR_DISC    V_RX_ERR_DISC(1U)
+
+#define S_CMD_FRAME_ENA    13
+#define V_CMD_FRAME_ENA(x) ((x) << S_CMD_FRAME_ENA)
+#define F_CMD_FRAME_ENA    V_CMD_FRAME_ENA(1U)
+
+#define S_SW_RESET    12
+#define V_SW_RESET(x) ((x) << S_SW_RESET)
+#define F_SW_RESET    V_SW_RESET(1U)
+
+#define S_TX_PAD_EN    11
+#define V_TX_PAD_EN(x) ((x) << S_TX_PAD_EN)
+#define F_TX_PAD_EN    V_TX_PAD_EN(1U)
+
+#define S_PHY_LOOPBACK_EN    10
+#define V_PHY_LOOPBACK_EN(x) ((x) << S_PHY_LOOPBACK_EN)
+#define F_PHY_LOOPBACK_EN    V_PHY_LOOPBACK_EN(1U)
+
+#define S_TX_ADDR_INS    9
+#define V_TX_ADDR_INS(x) ((x) << S_TX_ADDR_INS)
+#define F_TX_ADDR_INS    V_TX_ADDR_INS(1U)
+
+#define S_PAUSE_IGNORE    8
+#define V_PAUSE_IGNORE(x) ((x) << S_PAUSE_IGNORE)
+#define F_PAUSE_IGNORE    V_PAUSE_IGNORE(1U)
+
+#define S_PAUSE_FWD    7
+#define V_PAUSE_FWD(x) ((x) << S_PAUSE_FWD)
+#define F_PAUSE_FWD    V_PAUSE_FWD(1U)
+
+#define S_CRC_FWD    6
+#define V_CRC_FWD(x) ((x) << S_CRC_FWD)
+#define F_CRC_FWD    V_CRC_FWD(1U)
+
+#define S_PAD_EN    5
+#define V_PAD_EN(x) ((x) << S_PAD_EN)
+#define F_PAD_EN    V_PAD_EN(1U)
+
+#define S_PROMIS_EN    4
+#define V_PROMIS_EN(x) ((x) << S_PROMIS_EN)
+#define F_PROMIS_EN    V_PROMIS_EN(1U)
+
+#define S_WAN_MODE    3
+#define V_WAN_MODE(x) ((x) << S_WAN_MODE)
+#define F_WAN_MODE    V_WAN_MODE(1U)
+
+#define S_RX_ENA    1
+#define V_RX_ENA(x) ((x) << S_RX_ENA)
+#define F_RX_ENA    V_RX_ENA(1U)
+
+#define S_TX_ENA    0
+#define V_TX_ENA(x) ((x) << S_TX_ENA)
+#define F_TX_ENA    V_TX_ENA(1U)
+
+#define A_MAC_PORT_MTIP_MAC_ADDR_0 0xa0c
+#define A_MAC_PORT_MTIP_MAC_ADDR_1 0xa10
+
+#define S_MACADDRHI    0
+#define M_MACADDRHI    0xffffU
+#define V_MACADDRHI(x) ((x) << S_MACADDRHI)
+#define G_MACADDRHI(x) (((x) >> S_MACADDRHI) & M_MACADDRHI)
+
+#define A_MAC_PORT_MTIP_FRM_LENGTH 0xa14
+
+#define S_LEN    0
+#define M_LEN    0xffffU
+#define V_LEN(x) ((x) << S_LEN)
+#define G_LEN(x) (((x) >> S_LEN) & M_LEN)
+
+#define A_MAC_PORT_MTIP_RX_FIFO_SECTIONS 0xa1c
+
+#define S_AVAIL    16
+#define M_AVAIL    0xffffU
+#define V_AVAIL(x) ((x) << S_AVAIL)
+#define G_AVAIL(x) (((x) >> S_AVAIL) & M_AVAIL)
+
+#define S_EMPTY    0
+#define M_EMPTY    0xffffU
+#define V_EMPTY(x) ((x) << S_EMPTY)
+#define G_EMPTY(x) (((x) >> S_EMPTY) & M_EMPTY)
+
+#define A_MAC_PORT_MTIP_TX_FIFO_SECTIONS 0xa20
+#define A_MAC_PORT_MTIP_RX_FIFO_ALMOST_F_E 0xa24
+
+#define S_ALMSTFULL    16
+#define M_ALMSTFULL    0xffffU
+#define V_ALMSTFULL(x) ((x) << S_ALMSTFULL)
+#define G_ALMSTFULL(x) (((x) >> S_ALMSTFULL) & M_ALMSTFULL)
+
+#define S_ALMSTEMPTY    0
+#define M_ALMSTEMPTY    0xffffU
+#define V_ALMSTEMPTY(x) ((x) << S_ALMSTEMPTY)
+#define G_ALMSTEMPTY(x) (((x) >> S_ALMSTEMPTY) & M_ALMSTEMPTY)
+
+#define A_MAC_PORT_MTIP_TX_FIFO_ALMOST_F_E 0xa28
+#define A_MAC_PORT_MTIP_HASHTABLE_LOAD 0xa2c
+
+#define S_ENABLE_MCAST_RX    8
+#define V_ENABLE_MCAST_RX(x) ((x) << S_ENABLE_MCAST_RX)
+#define F_ENABLE_MCAST_RX    V_ENABLE_MCAST_RX(1U)
+
+#define S_HASHTABLE_ADDR    0
+#define M_HASHTABLE_ADDR    0x3fU
+#define V_HASHTABLE_ADDR(x) ((x) << S_HASHTABLE_ADDR)
+#define G_HASHTABLE_ADDR(x) (((x) >> S_HASHTABLE_ADDR) & M_HASHTABLE_ADDR)
+
+#define A_MAC_PORT_MTIP_MAC_STATUS 0xa40
+
+#define S_TS_AVAIL    3
+#define V_TS_AVAIL(x) ((x) << S_TS_AVAIL)
+#define F_TS_AVAIL    V_TS_AVAIL(1U)
+
+#define S_PHY_LOS    2
+#define V_PHY_LOS(x) ((x) << S_PHY_LOS)
+#define F_PHY_LOS    V_PHY_LOS(1U)
+
+#define S_RX_REM_FAULT    1
+#define V_RX_REM_FAULT(x) ((x) << S_RX_REM_FAULT)
+#define F_RX_REM_FAULT    V_RX_REM_FAULT(1U)
+
+#define S_RX_LOC_FAULT    0
+#define V_RX_LOC_FAULT(x) ((x) << S_RX_LOC_FAULT)
+#define F_RX_LOC_FAULT    V_RX_LOC_FAULT(1U)
+
+#define A_MAC_PORT_MTIP_TX_IPG_LENGTH 0xa44
+
+#define S_IPG    0
+#define M_IPG    0x7fU
+#define V_IPG(x) ((x) << S_IPG)
+#define G_IPG(x) (((x) >> S_IPG) & M_IPG)
+
+#define A_MAC_PORT_MTIP_MAC_CREDIT_TRIGGER 0xa48
+
+#define S_RXFIFORST    0
+#define V_RXFIFORST(x) ((x) << S_RXFIFORST)
+#define F_RXFIFORST    V_RXFIFORST(1U)
+
+#define A_MAC_PORT_MTIP_INIT_CREDIT 0xa4c
+
+#define S_MACCRDRST    0
+#define M_MACCRDRST    0xffU
+#define V_MACCRDRST(x) ((x) << S_MACCRDRST)
+#define G_MACCRDRST(x) (((x) >> S_MACCRDRST) & M_MACCRDRST)
+
+#define A_MAC_PORT_MTIP_CURRENT_CREDIT 0xa50
+
+#define S_INITCREDIT    0
+#define M_INITCREDIT    0xffU
+#define V_INITCREDIT(x) ((x) << S_INITCREDIT)
+#define G_INITCREDIT(x) (((x) >> S_INITCREDIT) & M_INITCREDIT)
+
+#define A_MAC_PORT_RX_PAUSE_STATUS 0xa74
+
+#define S_STATUS    0
+#define M_STATUS    0xffU
+#define V_STATUS(x) ((x) << S_STATUS)
+#define G_STATUS(x) (((x) >> S_STATUS) & M_STATUS)
+
+#define A_MAC_PORT_MTIP_TS_TIMESTAMP 0xa7c
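+/*
+ * MAC statistics counters.  Each statistic below is exposed as a pair of
+ * 32-bit registers, with the ...HI define 4 bytes above its partner
+ * holding the upper half of the 64-bit count (editor's note; both halves
+ * must be read to get a coherent value).
+ */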
+#define A_MAC_PORT_AFRAMESTRANSMITTEDOK 0xa80
+#define A_MAC_PORT_AFRAMESTRANSMITTEDOKHI 0xa84
+#define A_MAC_PORT_AFRAMESRECEIVEDOK 0xa88
+#define A_MAC_PORT_AFRAMESRECEIVEDOKHI 0xa8c
+#define A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS 0xa90
+#define A_MAC_PORT_AFRAMECHECKSEQUENCEERRORSHI 0xa94
+#define A_MAC_PORT_AALIGNMENTERRORS 0xa98
+#define A_MAC_PORT_AALIGNMENTERRORSHI 0xa9c
+#define A_MAC_PORT_APAUSEMACCTRLFRAMESTRANSMITTED 0xaa0
+#define A_MAC_PORT_APAUSEMACCTRLFRAMESTRANSMITTEDHI 0xaa4
+#define A_MAC_PORT_APAUSEMACCTRLFRAMESRECEIVED 0xaa8
+#define A_MAC_PORT_APAUSEMACCTRLFRAMESRECEIVEDHI 0xaac
+#define A_MAC_PORT_AFRAMETOOLONGERRORS 0xab0
+#define A_MAC_PORT_AFRAMETOOLONGERRORSHI 0xab4
+#define A_MAC_PORT_AINRANGELENGTHERRORS 0xab8
+#define A_MAC_PORT_AINRANGELENGTHERRORSHI 0xabc
+#define A_MAC_PORT_VLANTRANSMITTEDOK 0xac0
+#define A_MAC_PORT_VLANTRANSMITTEDOKHI 0xac4
+#define A_MAC_PORT_VLANRECEIVEDOK 0xac8
+#define A_MAC_PORT_VLANRECEIVEDOKHI 0xacc
+#define A_MAC_PORT_AOCTETSTRANSMITTEDOK 0xad0
+#define A_MAC_PORT_AOCTETSTRANSMITTEDOKHI 0xad4
+#define A_MAC_PORT_AOCTETSRECEIVEDOK 0xad8
+#define A_MAC_PORT_AOCTETSRECEIVEDOKHI 0xadc
+#define A_MAC_PORT_IFINUCASTPKTS 0xae0
+#define A_MAC_PORT_IFINUCASTPKTSHI 0xae4
+#define A_MAC_PORT_IFINMULTICASTPKTS 0xae8
+#define A_MAC_PORT_IFINMULTICASTPKTSHI 0xaec
+#define A_MAC_PORT_IFINBROADCASTPKTS 0xaf0
+#define A_MAC_PORT_IFINBROADCASTPKTSHI 0xaf4
+#define A_MAC_PORT_IFOUTERRORS 0xaf8
+#define A_MAC_PORT_IFOUTERRORSHI 0xafc
+#define A_MAC_PORT_IFOUTUCASTPKTS 0xb08
+#define A_MAC_PORT_IFOUTUCASTPKTSHI 0xb0c
+#define A_MAC_PORT_IFOUTMULTICASTPKTS 0xb10
+#define A_MAC_PORT_IFOUTMULTICASTPKTSHI 0xb14
+#define A_MAC_PORT_IFOUTBROADCASTPKTS 0xb18
+#define A_MAC_PORT_IFOUTBROADCASTPKTSHI 0xb1c
+#define A_MAC_PORT_ETHERSTATSDROPEVENTS 0xb20
+#define A_MAC_PORT_ETHERSTATSDROPEVENTSHI 0xb24
+#define A_MAC_PORT_ETHERSTATSOCTETS 0xb28
+#define A_MAC_PORT_ETHERSTATSOCTETSHI 0xb2c
+#define A_MAC_PORT_ETHERSTATSPKTS 0xb30
+#define A_MAC_PORT_ETHERSTATSPKTSHI 0xb34
+#define A_MAC_PORT_ETHERSTATSUNDERSIZEPKTS 0xb38
+#define A_MAC_PORT_ETHERSTATSUNDERSIZEPKTSHI 0xb3c
+#define A_MAC_PORT_ETHERSTATSPKTS64OCTETS 0xb40
+#define A_MAC_PORT_ETHERSTATSPKTS64OCTETSHI 0xb44
+#define A_MAC_PORT_ETHERSTATSPKTS65TO127OCTETS 0xb48
+#define A_MAC_PORT_ETHERSTATSPKTS65TO127OCTETSHI 0xb4c
+#define A_MAC_PORT_ETHERSTATSPKTS128TO255OCTETS 0xb50
+#define A_MAC_PORT_ETHERSTATSPKTS128TO255OCTETSHI 0xb54
+#define A_MAC_PORT_ETHERSTATSPKTS256TO511OCTETS 0xb58
+#define A_MAC_PORT_ETHERSTATSPKTS256TO511OCTETSHI 0xb5c
+#define A_MAC_PORT_ETHERSTATSPKTS512TO1023OCTETS 0xb60
+#define A_MAC_PORT_ETHERSTATSPKTS512TO1023OCTETSHI 0xb64
+#define A_MAC_PORT_ETHERSTATSPKTS1024TO1518OCTETS 0xb68
+#define A_MAC_PORT_ETHERSTATSPKTS1024TO1518OCTETSHI 0xb6c
+#define A_MAC_PORT_ETHERSTATSPKTS1519TOMAXOCTETS 0xb70
+#define A_MAC_PORT_ETHERSTATSPKTS1519TOMAXOCTETSHI 0xb74
+#define A_MAC_PORT_ETHERSTATSOVERSIZEPKTS 0xb78
+#define A_MAC_PORT_ETHERSTATSOVERSIZEPKTSHI 0xb7c
+#define A_MAC_PORT_ETHERSTATSJABBERS 0xb80
+#define A_MAC_PORT_ETHERSTATSJABBERSHI 0xb84
+#define A_MAC_PORT_ETHERSTATSFRAGMENTS 0xb88
+#define A_MAC_PORT_ETHERSTATSFRAGMENTSHI 0xb8c
+#define A_MAC_PORT_IFINERRORS 0xb90
+#define A_MAC_PORT_IFINERRORSHI 0xb94
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_0 0xb98
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_0HI 0xb9c
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_1 0xba0
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_1HI 0xba4
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_2 0xba8
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_2HI 0xbac
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_3 0xbb0
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_3HI 0xbb4
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_4 0xbb8
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_4HI 0xbbc
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_5 0xbc0
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_5HI 0xbc4
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_6 0xbc8
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_6HI 0xbcc
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_7 0xbd0
+#define A_MAC_PORT_ACBFCPAUSEFRAMESTRANSMITTED_7HI 0xbd4
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_0 0xbd8
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_0HI 0xbdc
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_1 0xbe0
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_1HI 0xbe4
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_2 0xbe8
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_2HI 0xbec
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_3 0xbf0
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_3HI 0xbf4
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_4 0xbf8
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_4HI 0xbfc
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_5 0xc00
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_5HI 0xc04
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_6 0xc08
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_6HI 0xc0c
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_7 0xc10
+#define A_MAC_PORT_ACBFCPAUSEFRAMESRECEIVED_7HI 0xc14
+#define A_MAC_PORT_AMACCONTROLFRAMESTRANSMITTED 0xc18
+#define A_MAC_PORT_AMACCONTROLFRAMESTRANSMITTEDHI 0xc1c
+#define A_MAC_PORT_AMACCONTROLFRAMESRECEIVED 0xc20
+#define A_MAC_PORT_AMACCONTROLFRAMESRECEIVEDHI 0xc24
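/*
 * The statistics registers above come in LO/HI pairs (e.g.
 * A_MAC_PORT_AOCTETSTRANSMITTEDOK and ...OKHI), i.e. each counter is a
 * 64-bit value read as two 32-bit words.  A sketch of reassembling one
 * counter, assuming t4_read_reg() as the driver's 32-bit register read
 * helper and that any per-port base offset has already been folded into
 * the addresses.  Rereading the high word guards against the low word
 * carrying into it between the two accesses.
 */
static uint64_t
read_mac_stat64(struct adapter *sc, uint32_t lo_reg, uint32_t hi_reg)
{
	uint32_t lo, hi, hi2;

	do {
		hi = t4_read_reg(sc, hi_reg);
		lo = t4_read_reg(sc, lo_reg);
		hi2 = t4_read_reg(sc, hi_reg);
	} while (hi != hi2);

	return (((uint64_t)hi << 32) | lo);
}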
+#define A_MAC_PORT_MTIP_SGMII_CONTROL 0xd00
+
+#define S_RESET    15
+#define V_RESET(x) ((x) << S_RESET)
+#define F_RESET    V_RESET(1U)
+
+#define S_LOOPBACK    14
+#define V_LOOPBACK(x) ((x) << S_LOOPBACK)
+#define F_LOOPBACK    V_LOOPBACK(1U)
+
+#define S_SPPEDSEL1    13
+#define V_SPPEDSEL1(x) ((x) << S_SPPEDSEL1)
+#define F_SPPEDSEL1    V_SPPEDSEL1(1U)
+
+#define S_AN_EN    12
+#define V_AN_EN(x) ((x) << S_AN_EN)
+#define F_AN_EN    V_AN_EN(1U)
+
+#define S_PWRDWN    11
+#define V_PWRDWN(x) ((x) << S_PWRDWN)
+#define F_PWRDWN    V_PWRDWN(1U)
+
+#define S_ISOLATE    10
+#define V_ISOLATE(x) ((x) << S_ISOLATE)
+#define F_ISOLATE    V_ISOLATE(1U)
+
+#define S_AN_RESTART    9
+#define V_AN_RESTART(x) ((x) << S_AN_RESTART)
+#define F_AN_RESTART    V_AN_RESTART(1U)
+
+#define S_DPLX    8
+#define V_DPLX(x) ((x) << S_DPLX)
+#define F_DPLX    V_DPLX(1U)
+
+#define S_COLLISIONTEST    7
+#define V_COLLISIONTEST(x) ((x) << S_COLLISIONTEST)
+#define F_COLLISIONTEST    V_COLLISIONTEST(1U)
+
+#define S_SPEEDSEL0    6
+#define V_SPEEDSEL0(x) ((x) << S_SPEEDSEL0)
+#define F_SPEEDSEL0    V_SPEEDSEL0(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_REVISION 0xd00
+
+#define S_VER_1G10G    8
+#define M_VER_1G10G    0xffU
+#define V_VER_1G10G(x) ((x) << S_VER_1G10G)
+#define G_VER_1G10G(x) (((x) >> S_VER_1G10G) & M_VER_1G10G)
+
+#define S_REV_1G10G    0
+#define M_REV_1G10G    0xffU
+#define V_REV_1G10G(x) ((x) << S_REV_1G10G)
+#define G_REV_1G10G(x) (((x) >> S_REV_1G10G) & M_REV_1G10G)
+
+#define A_MAC_PORT_MTIP_SGMII_STATUS 0xd04
+
+#define S_100BASET4    15
+#define V_100BASET4(x) ((x) << S_100BASET4)
+#define F_100BASET4    V_100BASET4(1U)
+
+#define S_100BASEXFULLDPLX    14
+#define V_100BASEXFULLDPLX(x) ((x) << S_100BASEXFULLDPLX)
+#define F_100BASEXFULLDPLX    V_100BASEXFULLDPLX(1U)
+
+#define S_100BASEXHALFDPLX    13
+#define V_100BASEXHALFDPLX(x) ((x) << S_100BASEXHALFDPLX)
+#define F_100BASEXHALFDPLX    V_100BASEXHALFDPLX(1U)
+
+#define S_10MBPSFULLDPLX    12
+#define V_10MBPSFULLDPLX(x) ((x) << S_10MBPSFULLDPLX)
+#define F_10MBPSFULLDPLX    V_10MBPSFULLDPLX(1U)
+
+#define S_10MBPSHALFDPLX    11
+#define V_10MBPSHALFDPLX(x) ((x) << S_10MBPSHALFDPLX)
+#define F_10MBPSHALFDPLX    V_10MBPSHALFDPLX(1U)
+
+#define S_100BASET2FULLDPLX    10
+#define V_100BASET2FULLDPLX(x) ((x) << S_100BASET2FULLDPLX)
+#define F_100BASET2FULLDPLX    V_100BASET2FULLDPLX(1U)
+
+#define S_100BASET2HALFDPLX    9
+#define V_100BASET2HALFDPLX(x) ((x) << S_100BASET2HALFDPLX)
+#define F_100BASET2HALFDPLX    V_100BASET2HALFDPLX(1U)
+
+#define S_EXTDSTATUS    8
+#define V_EXTDSTATUS(x) ((x) << S_EXTDSTATUS)
+#define F_EXTDSTATUS    V_EXTDSTATUS(1U)
+
+#define S_SGMII_REM_FAULT    4
+#define V_SGMII_REM_FAULT(x) ((x) << S_SGMII_REM_FAULT)
+#define F_SGMII_REM_FAULT    V_SGMII_REM_FAULT(1U)
+
+#define S_JABBERDETECT    1
+#define V_JABBERDETECT(x) ((x) << S_JABBERDETECT)
+#define F_JABBERDETECT    V_JABBERDETECT(1U)
+
+#define S_EXTDCAPABILITY    0
+#define V_EXTDCAPABILITY(x) ((x) << S_EXTDCAPABILITY)
+#define F_EXTDCAPABILITY    V_EXTDCAPABILITY(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_SCRATCH 0xd04
+#define A_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_0 0xd08
+#define A_MAC_PORT_MTIP_1G10G_COMMAND_CONFIG 0xd08
+
+#define S_SHORT_DISCARD    25
+#define V_SHORT_DISCARD(x) ((x) << S_SHORT_DISCARD)
+#define F_SHORT_DISCARD    V_SHORT_DISCARD(1U)
+
+#define S_REG_LOWP_RXEMPTY    24
+#define V_REG_LOWP_RXEMPTY(x) ((x) << S_REG_LOWP_RXEMPTY)
+#define F_REG_LOWP_RXEMPTY    V_REG_LOWP_RXEMPTY(1U)
+
+#define S_TX_LOWP_ENA    23
+#define V_TX_LOWP_ENA(x) ((x) << S_TX_LOWP_ENA)
+#define F_TX_LOWP_ENA    V_TX_LOWP_ENA(1U)
+
+#define S_TX_FLUSH_EN    22
+#define V_TX_FLUSH_EN(x) ((x) << S_TX_FLUSH_EN)
+#define F_TX_FLUSH_EN    V_TX_FLUSH_EN(1U)
+
+#define S_SFD_ANY    21
+#define V_SFD_ANY(x) ((x) << S_SFD_ANY)
+#define F_SFD_ANY    V_SFD_ANY(1U)
+
+#define S_COL_CNT_EXT    18
+#define V_COL_CNT_EXT(x) ((x) << S_COL_CNT_EXT)
+#define F_COL_CNT_EXT    V_COL_CNT_EXT(1U)
+
+#define S_FORCE_SEND_IDLE    16
+#define V_FORCE_SEND_IDLE(x) ((x) << S_FORCE_SEND_IDLE)
+#define F_FORCE_SEND_IDLE    V_FORCE_SEND_IDLE(1U)
+
+#define S_CNTL_FRM_ENA    13
+#define V_CNTL_FRM_ENA(x) ((x) << S_CNTL_FRM_ENA)
+#define F_CNTL_FRM_ENA    V_CNTL_FRM_ENA(1U)
+
+#define S_RX_ENAMAC    1
+#define V_RX_ENAMAC(x) ((x) << S_RX_ENAMAC)
+#define F_RX_ENAMAC    V_RX_ENAMAC(1U)
+
+#define S_TX_ENAMAC    0
+#define V_TX_ENAMAC(x) ((x) << S_TX_ENAMAC)
+#define F_TX_ENAMAC    V_TX_ENAMAC(1U)
+
+#define A_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_1 0xd0c
+#define A_MAC_PORT_MTIP_1G10G_MAC_ADDR_0 0xd0c
+#define A_MAC_PORT_MTIP_SGMII_DEV_ABILITY 0xd10
+
+#define S_RF2    13
+#define V_RF2(x) ((x) << S_RF2)
+#define F_RF2    V_RF2(1U)
+
+#define S_RF1    12
+#define V_RF1(x) ((x) << S_RF1)
+#define F_RF1    V_RF1(1U)
+
+#define S_PS2    8
+#define V_PS2(x) ((x) << S_PS2)
+#define F_PS2    V_PS2(1U)
+
+#define S_PS1    7
+#define V_PS1(x) ((x) << S_PS1)
+#define F_PS1    V_PS1(1U)
+
+#define S_HD    6
+#define V_HD(x) ((x) << S_HD)
+#define F_HD    V_HD(1U)
+
+#define S_FD    5
+#define V_FD(x) ((x) << S_FD)
+#define F_FD    V_FD(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_MAC_ADDR_1 0xd10
+#define A_MAC_PORT_MTIP_SGMII_PARTNER_ABILITY 0xd14
+
+#define S_CULINKSTATUS    15
+#define V_CULINKSTATUS(x) ((x) << S_CULINKSTATUS)
+#define F_CULINKSTATUS    V_CULINKSTATUS(1U)
+
+#define S_CUDPLXSTATUS    12
+#define V_CUDPLXSTATUS(x) ((x) << S_CUDPLXSTATUS)
+#define F_CUDPLXSTATUS    V_CUDPLXSTATUS(1U)
+
+#define S_CUSPEED    10
+#define M_CUSPEED    0x3U
+#define V_CUSPEED(x) ((x) << S_CUSPEED)
+#define G_CUSPEED(x) (((x) >> S_CUSPEED) & M_CUSPEED)
+
+#define A_MAC_PORT_MTIP_1G10G_FRM_LENGTH_TX_MTU 0xd14
+
+#define S_SET_LEN    16
+#define M_SET_LEN    0xffffU
+#define V_SET_LEN(x) ((x) << S_SET_LEN)
+#define G_SET_LEN(x) (((x) >> S_SET_LEN) & M_SET_LEN)
+
+#define S_FRM_LEN_SET    0
+#define M_FRM_LEN_SET    0xffffU
+#define V_FRM_LEN_SET(x) ((x) << S_FRM_LEN_SET)
+#define G_FRM_LEN_SET(x) (((x) >> S_FRM_LEN_SET) & M_FRM_LEN_SET)
+
+#define A_MAC_PORT_MTIP_SGMII_AN_EXPANSION 0xd18
+
+#define S_PGRCVD    1
+#define V_PGRCVD(x) ((x) << S_PGRCVD)
+#define F_PGRCVD    V_PGRCVD(1U)
+
+#define S_REALTIMEPGRCVD    0
+#define V_REALTIMEPGRCVD(x) ((x) << S_REALTIMEPGRCVD)
+#define F_REALTIMEPGRCVD    V_REALTIMEPGRCVD(1U)
+
+#define A_MAC_PORT_MTIP_SGMII_DEVICE_NP 0xd1c
+#define A_MAC_PORT_MTIP_1G10G_RX_FIFO_SECTIONS 0xd1c
+
+#define S_RX1G10G_EMPTY    16
+#define M_RX1G10G_EMPTY    0xffffU
+#define V_RX1G10G_EMPTY(x) ((x) << S_RX1G10G_EMPTY)
+#define G_RX1G10G_EMPTY(x) (((x) >> S_RX1G10G_EMPTY) & M_RX1G10G_EMPTY)
+
+#define S_RX1G10G_AVAIL    0
+#define M_RX1G10G_AVAIL    0xffffU
+#define V_RX1G10G_AVAIL(x) ((x) << S_RX1G10G_AVAIL)
+#define G_RX1G10G_AVAIL(x) (((x) >> S_RX1G10G_AVAIL) & M_RX1G10G_AVAIL)
+
+#define A_MAC_PORT_MTIP_SGMII_PARTNER_NP 0xd20
+#define A_MAC_PORT_MTIP_1G10G_TX_FIFO_SECTIONS 0xd20
+
+#define S_TX1G10G_EMPTY    16
+#define M_TX1G10G_EMPTY    0xffffU
+#define V_TX1G10G_EMPTY(x) ((x) << S_TX1G10G_EMPTY)
+#define G_TX1G10G_EMPTY(x) (((x) >> S_TX1G10G_EMPTY) & M_TX1G10G_EMPTY)
+
+#define S_TX1G10G_AVAIL    0
+#define M_TX1G10G_AVAIL    0xffffU
+#define V_TX1G10G_AVAIL(x) ((x) << S_TX1G10G_AVAIL)
+#define G_TX1G10G_AVAIL(x) (((x) >> S_TX1G10G_AVAIL) & M_TX1G10G_AVAIL)
+
+#define A_MAC_PORT_MTIP_1G10G_RX_FIFO_ALMOST_F_E 0xd24
+
+#define S_ALMOSTFULL    16
+#define M_ALMOSTFULL    0xffffU
+#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL)
+#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL)
+
+#define S_ALMOSTEMPTY    0
+#define M_ALMOSTEMPTY    0xffffU
+#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY)
+#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY)
+
+#define A_MAC_PORT_MTIP_1G10G_TX_FIFO_ALMOST_F_E 0xd28
+#define A_MAC_PORT_MTIP_1G10G_HASHTABLE_LOAD 0xd2c
+#define A_MAC_PORT_MTIP_1G10G_MDIO_CFG_STATUS 0xd30
+
+#define S_CLK_DIVISOR    7
+#define M_CLK_DIVISOR    0x1ffU
+#define V_CLK_DIVISOR(x) ((x) << S_CLK_DIVISOR)
+#define G_CLK_DIVISOR(x) (((x) >> S_CLK_DIVISOR) & M_CLK_DIVISOR)
+
+#define S_ENA_CLAUSE    6
+#define V_ENA_CLAUSE(x) ((x) << S_ENA_CLAUSE)
+#define F_ENA_CLAUSE    V_ENA_CLAUSE(1U)
+
+#define S_PREAMBLE_DISABLE    5
+#define V_PREAMBLE_DISABLE(x) ((x) << S_PREAMBLE_DISABLE)
+#define F_PREAMBLE_DISABLE    V_PREAMBLE_DISABLE(1U)
+
+#define S_HOLD_TIME_SETTING    2
+#define M_HOLD_TIME_SETTING    0x7U
+#define V_HOLD_TIME_SETTING(x) ((x) << S_HOLD_TIME_SETTING)
+#define G_HOLD_TIME_SETTING(x) (((x) >> S_HOLD_TIME_SETTING) & M_HOLD_TIME_SETTING)
+
+#define S_MDIO_READ_ERROR    1
+#define V_MDIO_READ_ERROR(x) ((x) << S_MDIO_READ_ERROR)
+#define F_MDIO_READ_ERROR    V_MDIO_READ_ERROR(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_MDIO_COMMAND 0xd34
+
+#define S_READ_MODE    15
+#define V_READ_MODE(x) ((x) << S_READ_MODE)
+#define F_READ_MODE    V_READ_MODE(1U)
+
+#define S_POST_INCR_READ    14
+#define V_POST_INCR_READ(x) ((x) << S_POST_INCR_READ)
+#define F_POST_INCR_READ    V_POST_INCR_READ(1U)
+
+#define S_PORT_PHY_ADDR    5
+#define M_PORT_PHY_ADDR    0x1fU
+#define V_PORT_PHY_ADDR(x) ((x) << S_PORT_PHY_ADDR)
+#define G_PORT_PHY_ADDR(x) (((x) >> S_PORT_PHY_ADDR) & M_PORT_PHY_ADDR)
+
+#define S_DEVICE_REG_ADDR    0
+#define M_DEVICE_REG_ADDR    0x1fU
+#define V_DEVICE_REG_ADDR(x) ((x) << S_DEVICE_REG_ADDR)
+#define G_DEVICE_REG_ADDR(x) (((x) >> S_DEVICE_REG_ADDR) & M_DEVICE_REG_ADDR)
+
+#define A_MAC_PORT_MTIP_1G10G_MDIO_DATA 0xd38
+
+#define S_MDIO_DATA    0
+#define M_MDIO_DATA    0xffffU
+#define V_MDIO_DATA(x) ((x) << S_MDIO_DATA)
+#define G_MDIO_DATA(x) (((x) >> S_MDIO_DATA) & M_MDIO_DATA)
+
+#define A_MAC_PORT_MTIP_SGMII_EXTENDED_STATUS 0xd3c
+#define A_MAC_PORT_MTIP_1G10G_MDIO_REGADDR 0xd3c
+#define A_MAC_PORT_MTIP_1G10G_STATUS 0xd40
+
+#define S_RX_LINT_FAULT    7
+#define V_RX_LINT_FAULT(x) ((x) << S_RX_LINT_FAULT)
+#define F_RX_LINT_FAULT    V_RX_LINT_FAULT(1U)
+
+#define S_RX_EMPTY    6
+#define V_RX_EMPTY(x) ((x) << S_RX_EMPTY)
+#define F_RX_EMPTY    V_RX_EMPTY(1U)
+
+#define S_TX_EMPTY    5
+#define V_TX_EMPTY(x) ((x) << S_TX_EMPTY)
+#define F_TX_EMPTY    V_TX_EMPTY(1U)
+
+#define S_RX_LOWP    4
+#define V_RX_LOWP(x) ((x) << S_RX_LOWP)
+#define F_RX_LOWP    V_RX_LOWP(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_TX_IPG_LENGTH 0xd44
+#define A_MAC_PORT_MTIP_SGMII_LINK_TIMER_LO 0xd48
+
+#define S_COUNT_LO    0
+#define M_COUNT_LO    0xffffU
+#define V_COUNT_LO(x) ((x) << S_COUNT_LO)
+#define G_COUNT_LO(x) (((x) >> S_COUNT_LO) & M_COUNT_LO)
+
+#define A_MAC_PORT_MTIP_1G10G_CREDIT_TRIGGER 0xd48
+#define A_MAC_PORT_MTIP_SGMII_LINK_TIMER_HI 0xd4c
+
+#define S_COUNT_HI    0
+#define M_COUNT_HI    0x1fU
+#define V_COUNT_HI(x) ((x) << S_COUNT_HI)
+#define G_COUNT_HI(x) (((x) >> S_COUNT_HI) & M_COUNT_HI)
+
+#define A_MAC_PORT_MTIP_1G10G_INIT_CREDIT 0xd4c
+#define A_MAC_PORT_MTIP_SGMII_IF_MODE 0xd50
+
+#define S_SGMII_PCS_ENABLE    5
+#define V_SGMII_PCS_ENABLE(x) ((x) << S_SGMII_PCS_ENABLE)
+#define F_SGMII_PCS_ENABLE    V_SGMII_PCS_ENABLE(1U)
+
+#define S_SGMII_HDUPLEX    4
+#define V_SGMII_HDUPLEX(x) ((x) << S_SGMII_HDUPLEX)
+#define F_SGMII_HDUPLEX    V_SGMII_HDUPLEX(1U)
+
+#define S_SGMII_SPEED    2
+#define M_SGMII_SPEED    0x3U
+#define V_SGMII_SPEED(x) ((x) << S_SGMII_SPEED)
+#define G_SGMII_SPEED(x) (((x) >> S_SGMII_SPEED) & M_SGMII_SPEED)
+
+#define S_USE_SGMII_AN    1
+#define V_USE_SGMII_AN(x) ((x) << S_USE_SGMII_AN)
+#define F_USE_SGMII_AN    V_USE_SGMII_AN(1U)
+
+#define S_SGMII_ENA    0
+#define V_SGMII_ENA(x) ((x) << S_SGMII_ENA)
+#define F_SGMII_ENA    V_SGMII_ENA(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_CL01_PAUSE_QUANTA 0xd54
+
+#define S_CL1_PAUSE_QUANTA    16
+#define M_CL1_PAUSE_QUANTA    0xffffU
+#define V_CL1_PAUSE_QUANTA(x) ((x) << S_CL1_PAUSE_QUANTA)
+#define G_CL1_PAUSE_QUANTA(x) (((x) >> S_CL1_PAUSE_QUANTA) & M_CL1_PAUSE_QUANTA)
+
+#define S_CL0_PAUSE_QUANTA    0
+#define M_CL0_PAUSE_QUANTA    0xffffU
+#define V_CL0_PAUSE_QUANTA(x) ((x) << S_CL0_PAUSE_QUANTA)
+#define G_CL0_PAUSE_QUANTA(x) (((x) >> S_CL0_PAUSE_QUANTA) & M_CL0_PAUSE_QUANTA)
+
+#define A_MAC_PORT_MTIP_1G10G_CL23_PAUSE_QUANTA 0xd58
+
+#define S_CL3_PAUSE_QUANTA    16
+#define M_CL3_PAUSE_QUANTA    0xffffU
+#define V_CL3_PAUSE_QUANTA(x) ((x) << S_CL3_PAUSE_QUANTA)
+#define G_CL3_PAUSE_QUANTA(x) (((x) >> S_CL3_PAUSE_QUANTA) & M_CL3_PAUSE_QUANTA)
+
+#define S_CL2_PAUSE_QUANTA    0
+#define M_CL2_PAUSE_QUANTA    0xffffU
+#define V_CL2_PAUSE_QUANTA(x) ((x) << S_CL2_PAUSE_QUANTA)
+#define G_CL2_PAUSE_QUANTA(x) (((x) >> S_CL2_PAUSE_QUANTA) & M_CL2_PAUSE_QUANTA)
+
+#define A_MAC_PORT_MTIP_1G10G_CL45_PAUSE_QUANTA 0xd5c
+
+#define S_CL5_PAUSE_QUANTA    16
+#define M_CL5_PAUSE_QUANTA    0xffffU
+#define V_CL5_PAUSE_QUANTA(x) ((x) << S_CL5_PAUSE_QUANTA)
+#define G_CL5_PAUSE_QUANTA(x) (((x) >> S_CL5_PAUSE_QUANTA) & M_CL5_PAUSE_QUANTA)
+
+#define S_CL4_PAUSE_QUANTA    0
+#define M_CL4_PAUSE_QUANTA    0xffffU
+#define V_CL4_PAUSE_QUANTA(x) ((x) << S_CL4_PAUSE_QUANTA)
+#define G_CL4_PAUSE_QUANTA(x) (((x) >> S_CL4_PAUSE_QUANTA) & M_CL4_PAUSE_QUANTA)
+
+#define A_MAC_PORT_MTIP_1G10G_CL67_PAUSE_QUANTA 0xd60
+
+#define S_CL7_PAUSE_QUANTA    16
+#define M_CL7_PAUSE_QUANTA    0xffffU
+#define V_CL7_PAUSE_QUANTA(x) ((x) << S_CL7_PAUSE_QUANTA)
+#define G_CL7_PAUSE_QUANTA(x) (((x) >> S_CL7_PAUSE_QUANTA) & M_CL7_PAUSE_QUANTA)
+
+#define S_CL6_PAUSE_QUANTA    0
+#define M_CL6_PAUSE_QUANTA    0xffffU
+#define V_CL6_PAUSE_QUANTA(x) ((x) << S_CL6_PAUSE_QUANTA)
+#define G_CL6_PAUSE_QUANTA(x) (((x) >> S_CL6_PAUSE_QUANTA) & M_CL6_PAUSE_QUANTA)
+
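/*
 * Each CLnm_PAUSE_QUANTA register above packs two 16-bit per-class
 * values into one 32-bit word.  A sketch of composing the class-0/
 * class-1 word for A_MAC_PORT_MTIP_1G10G_CL01_PAUSE_QUANTA; the V_
 * macros place each value in its half, so no explicit shifting is
 * needed.  cl01_pause_quanta() is an illustrative helper only.
 */
static uint32_t
cl01_pause_quanta(uint16_t q0, uint16_t q1)
{
	return (V_CL0_PAUSE_QUANTA(q0) | V_CL1_PAUSE_QUANTA(q1));
}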
+#define A_MAC_PORT_MTIP_1G10G_CL01_QUANTA_THRESH 0xd64
+
+#define S_CL1_QUANTA_THRESH    16
+#define M_CL1_QUANTA_THRESH    0xffffU
+#define V_CL1_QUANTA_THRESH(x) ((x) << S_CL1_QUANTA_THRESH)
+#define G_CL1_QUANTA_THRESH(x) (((x) >> S_CL1_QUANTA_THRESH) & M_CL1_QUANTA_THRESH)
+
+#define S_CL0_QUANTA_THRESH    0
+#define M_CL0_QUANTA_THRESH    0xffffU
+#define V_CL0_QUANTA_THRESH(x) ((x) << S_CL0_QUANTA_THRESH)
+#define G_CL0_QUANTA_THRESH(x) (((x) >> S_CL0_QUANTA_THRESH) & M_CL0_QUANTA_THRESH)
+
+#define A_MAC_PORT_MTIP_1G10G_CL23_QUANTA_THRESH 0xd68
+
+#define S_CL3_QUANTA_THRESH    16
+#define M_CL3_QUANTA_THRESH    0xffffU
+#define V_CL3_QUANTA_THRESH(x) ((x) << S_CL3_QUANTA_THRESH)
+#define G_CL3_QUANTA_THRESH(x) (((x) >> S_CL3_QUANTA_THRESH) & M_CL3_QUANTA_THRESH)
+
+#define S_CL2_QUANTA_THRESH    0
+#define M_CL2_QUANTA_THRESH    0xffffU
+#define V_CL2_QUANTA_THRESH(x) ((x) << S_CL2_QUANTA_THRESH)
+#define G_CL2_QUANTA_THRESH(x) (((x) >> S_CL2_QUANTA_THRESH) & M_CL2_QUANTA_THRESH)
+
+#define A_MAC_PORT_MTIP_1G10G_CL45_QUANTA_THRESH 0xd6c
+
+#define S_CL5_QUANTA_THRESH    16
+#define M_CL5_QUANTA_THRESH    0xffffU
+#define V_CL5_QUANTA_THRESH(x) ((x) << S_CL5_QUANTA_THRESH)
+#define G_CL5_QUANTA_THRESH(x) (((x) >> S_CL5_QUANTA_THRESH) & M_CL5_QUANTA_THRESH)
+
+#define S_CL4_QUANTA_THRESH    0
+#define M_CL4_QUANTA_THRESH    0xffffU
+#define V_CL4_QUANTA_THRESH(x) ((x) << S_CL4_QUANTA_THRESH)
+#define G_CL4_QUANTA_THRESH(x) (((x) >> S_CL4_QUANTA_THRESH) & M_CL4_QUANTA_THRESH)
+
+#define A_MAC_PORT_MTIP_1G10G_CL67_QUANTA_THRESH 0xd70
+
+#define S_CL7_QUANTA_THRESH    16
+#define M_CL7_QUANTA_THRESH    0xffffU
+#define V_CL7_QUANTA_THRESH(x) ((x) << S_CL7_QUANTA_THRESH)
+#define G_CL7_QUANTA_THRESH(x) (((x) >> S_CL7_QUANTA_THRESH) & M_CL7_QUANTA_THRESH)
+
+#define S_CL6_QUANTA_THRESH    0
+#define M_CL6_QUANTA_THRESH    0xffffU
+#define V_CL6_QUANTA_THRESH(x) ((x) << S_CL6_QUANTA_THRESH)
+#define G_CL6_QUANTA_THRESH(x) (((x) >> S_CL6_QUANTA_THRESH) & M_CL6_QUANTA_THRESH)
+
+#define A_MAC_PORT_MTIP_1G10G_RX_PAUSE_STATUS 0xd74
+
+#define S_STATUS_BIT    0
+#define M_STATUS_BIT    0xffU
+#define V_STATUS_BIT(x) ((x) << S_STATUS_BIT)
+#define G_STATUS_BIT(x) (((x) >> S_STATUS_BIT) & M_STATUS_BIT)
+
+#define A_MAC_PORT_MTIP_1G10G_TS_TIMESTAMP 0xd7c
+#define A_MAC_PORT_MTIP_1G10G_STATN_CONFIG 0xde0
+
+#define S_CLEAR    2
+#define V_CLEAR(x) ((x) << S_CLEAR)
+#define F_CLEAR    V_CLEAR(1U)
+
+#define S_CLEAR_ON_READ    1
+#define V_CLEAR_ON_READ(x) ((x) << S_CLEAR_ON_READ)
+#define F_CLEAR_ON_READ    V_CLEAR_ON_READ(1U)
+
+#define S_SATURATE    0
+#define V_SATURATE(x) ((x) << S_SATURATE)
+#define F_SATURATE    V_SATURATE(1U)
+
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOCTETS 0xe00
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOCTETSHI 0xe04
+#define A_MAC_PORT_MTIP_1G10G_RX_OCTETSOK 0xe08
+#define A_MAC_PORT_MTIP_1G10G_RX_OCTETSOKHI 0xe0c
+#define A_MAC_PORT_MTIP_1G10G_RX_AALIGNMENTERRORS 0xe10
+#define A_MAC_PORT_MTIP_1G10G_RX_AALIGNMENTERRORSHI 0xe14
+#define A_MAC_PORT_MTIP_1G10G_RX_APAUSEMACCTRLFRAMES 0xe18
+#define A_MAC_PORT_MTIP_1G10G_RX_APAUSEMACCTRLFRAMESHI 0xe1c
+#define A_MAC_PORT_MTIP_1G10G_RX_FRAMESOK 0xe20
+#define A_MAC_PORT_MTIP_1G10G_RX_FRAMESOKHI 0xe24
+#define A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS 0xe28
+#define A_MAC_PORT_MTIP_1G10G_RX_CRCERRORSHI 0xe2c
+#define A_MAC_PORT_MTIP_1G10G_RX_VLANOK 0xe30
+#define A_MAC_PORT_MTIP_1G10G_RX_VLANOKHI 0xe34
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINERRORS 0xe38
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINERRORSHI 0xe3c
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINUCASTPKTS 0xe40
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINUCASTPKTSHI 0xe44
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINMULTICASTPKTS 0xe48
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINMULTICASTPKTSHI 0xe4c
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINBROADCASTPKTS 0xe50
+#define A_MAC_PORT_MTIP_1G10G_RX_IFINBROADCASTPKTSHI 0xe54
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSDROPEVENTS 0xe58
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSDROPEVENTSHI 0xe5c
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS 0xe60
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTSHI 0xe64
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSUNDERSIZEPKTS 0xe68
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSUNDERSIZEPKTSHI 0xe6c
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS64OCTETS 0xe70
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS64OCTETSHI 0xe74
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS65TO127OCTETS 0xe78
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS65TO127OCTETSHI 0xe7c
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS128TO255OCTETS 0xe80
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS128TO255OCTETSHI 0xe84
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS256TO511OCTETS 0xe88
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS256TO511OCTETSHI 0xe8c
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS512TO1023OCTETS 0xe90
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS512TO1023OCTETSHI 0xe94
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1024TO1518OCTETS 0xe98
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1024TO1518OCTETSHI 0xe9c
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1519TOMAX 0xea0
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1519TOMAXHI 0xea4
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOVERSIZEPKTS 0xea8
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOVERSIZEPKTSHI 0xeac
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSJABBERS 0xeb0
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSJABBERSHI 0xeb4
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSFRAGMENTS 0xeb8
+#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSFRAGMENTSHI 0xebc
+#define A_MAC_PORT_MTIP_1G10G_AMACCONTROLFRAMESRECEIVED 0xec0
+#define A_MAC_PORT_MTIP_1G10G_AMACCONTROLFRAMESRECEIVEDHI 0xec4
+#define A_MAC_PORT_MTIP_1G10G_RX_AFRAMETOOLONG 0xec8
+#define A_MAC_PORT_MTIP_1G10G_RX_AFRAMETOOLONGHI 0xecc
+#define A_MAC_PORT_MTIP_1G10G_RX_AINRANGELENGTHERRORS 0xed0
+#define A_MAC_PORT_MTIP_1G10G_RX_AINRANGELENGTHERRORSHI 0xed4
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSOCTETS 0xf00
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSOCTETSHI 0xf04
+#define A_MAC_PORT_MTIP_1G10G_TX_OCTETSOK 0xf08
+#define A_MAC_PORT_MTIP_1G10G_TX_OCTETSOKHI 0xf0c
+#define A_MAC_PORT_MTIP_1G10G_TX_AALIGNMENTERRORS 0xf10
+#define A_MAC_PORT_MTIP_1G10G_TX_AALIGNMENTERRORSHI 0xf14
+#define A_MAC_PORT_MTIP_1G10G_TX_APAUSEMACCTRLFRAMES 0xf18
+#define A_MAC_PORT_MTIP_1G10G_TX_APAUSEMACCTRLFRAMESHI 0xf1c
+#define A_MAC_PORT_MTIP_1G10G_TX_FRAMESOK 0xf20
+#define A_MAC_PORT_MTIP_1G10G_TX_FRAMESOKHI 0xf24
+#define A_MAC_PORT_MTIP_1G10G_TX_CRCERRORS 0xf28
+#define A_MAC_PORT_MTIP_1G10G_TX_CRCERRORSHI 0xf2c
+#define A_MAC_PORT_MTIP_1G10G_TX_VLANOK 0xf30
+#define A_MAC_PORT_MTIP_1G10G_TX_VLANOKHI 0xf34
+#define A_MAC_PORT_MTIP_1G10G_TX_IFOUTERRORS 0xf38
+#define A_MAC_PORT_MTIP_1G10G_TX_IFOUTERRORSHI 0xf3c
+#define A_MAC_PORT_MTIP_1G10G_TX_IFUCASTPKTS 0xf40
+#define A_MAC_PORT_MTIP_1G10G_TX_IFUCASTPKTSHI 0xf44
+#define A_MAC_PORT_MTIP_1G10G_TX_IFMULTICASTPKTS 0xf48
+#define A_MAC_PORT_MTIP_1G10G_TX_IFMULTICASTPKTSHI 0xf4c
+#define A_MAC_PORT_MTIP_1G10G_TX_IFBROADCASTPKTS 0xf50
+#define A_MAC_PORT_MTIP_1G10G_TX_IFBROADCASTPKTSHI 0xf54
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSDROPEVENTS 0xf58
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSDROPEVENTSHI 0xf5c
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS 0xf60
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTSHI 0xf64
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSUNDERSIZEPKTS 0xf68
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSUNDERSIZEPKTSHI 0xf6c
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS64OCTETS 0xf70
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS64OCTETSHI 0xf74
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS65TO127OCTETS 0xf78
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS65TO127OCTETSHI 0xf7c
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS128TO255OCTETS 0xf80
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS128TO255OCTETSHI 0xf84
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS256TO511OCTETS 0xf88
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS256TO511OCTETSHI 0xf8c
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS512TO1023OCTETS 0xf90
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS512TO1023OCTETSHI 0xf94
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS1024TO1518OCTETS 0xf98
+#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS1024TO1518OCTETSHI 0xf9c
+#define A_MAC_PORT_MTIP_1G10G_ETHERSTATSPKTS1519TOTX_MTU 0xfa0
+#define A_MAC_PORT_MTIP_1G10G_ETHERSTATSPKTS1519TOTX_MTUHI 0xfa4
+#define A_MAC_PORT_MTIP_1G10G_TX_AMACCONTROLFRAMES 0xfc0
+#define A_MAC_PORT_MTIP_1G10G_TX_AMACCONTROLFRAMESHI 0xfc4
+#define A_MAC_PORT_MTIP_1G10G_IF_MODE 0x1000
+
+#define S_MII_ENA_10    4
+#define V_MII_ENA_10(x) ((x) << S_MII_ENA_10)
+#define F_MII_ENA_10    V_MII_ENA_10(1U)
+
+#define S_IF_MODE    0
+#define M_IF_MODE    0x3U
+#define V_IF_MODE(x) ((x) << S_IF_MODE)
+#define G_IF_MODE(x) (((x) >> S_IF_MODE) & M_IF_MODE)
+
+#define A_MAC_PORT_MTIP_1G10G_IF_STATUS 0x1004
+
+#define S_IF_STATUS_MODE    0
+#define M_IF_STATUS_MODE    0x3U
+#define V_IF_STATUS_MODE(x) ((x) << S_IF_STATUS_MODE)
+#define G_IF_STATUS_MODE(x) (((x) >> S_IF_STATUS_MODE) & M_IF_STATUS_MODE)
+
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_0 0x1080
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_0HI 0x1084
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_1 0x1088
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_1HI 0x108c
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_2 0x1090
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_2HI 0x1094
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_3 0x1098
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_3HI 0x109c
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_4 0x10a0
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_4HI 0x10a4
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_5 0x10a8
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_5HI 0x10ac
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_6 0x10b0
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_6HI 0x10b4
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_7 0x10b8
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_7HI 0x10bc
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_0 0x10c0
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_0HI 0x10c4
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_1 0x10c8
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_1HI 0x10cc
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_2 0x10d0
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_2HI 0x10d4
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_3 0x10d8
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_3HI 0x10dc
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_4 0x10e0
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_4HI 0x10e4
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_5 0x10e8
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_5HI 0x10ec
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_6 0x10f0
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_6HI 0x10f4
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_7 0x10f8
+#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_7HI 0x10fc
+#define A_MAC_PORT_MTIP_ACT_CTL_SEG 0x1200
+
+#define S_ACTIVE    0
+#define M_ACTIVE    0x3fU
+#define V_ACTIVE(x) ((x) << S_ACTIVE)
+#define G_ACTIVE(x) (((x) >> S_ACTIVE) & M_ACTIVE)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_CONTROL 0x1200
+
+#define S_SPEED_SEL    13
+#define V_SPEED_SEL(x) ((x) << S_SPEED_SEL)
+#define F_SPEED_SEL    V_SPEED_SEL(1U)
+
+#define S_PWR_DWN    11
+#define V_PWR_DWN(x) ((x) << S_PWR_DWN)
+#define F_PWR_DWN    V_PWR_DWN(1U)
+
+#define S_DUPLEX_MODE    8
+#define V_DUPLEX_MODE(x) ((x) << S_DUPLEX_MODE)
+#define F_DUPLEX_MODE    V_DUPLEX_MODE(1U)
+
+#define S_COLLISION_TEST    7
+#define V_COLLISION_TEST(x) ((x) << S_COLLISION_TEST)
+#define F_COLLISION_TEST    V_COLLISION_TEST(1U)
+
+#define S_T6_SPEED_SEL1    6
+#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1)
+#define F_T6_SPEED_SEL1    V_T6_SPEED_SEL1(1U)
+
+#define A_MAC_PORT_MTIP_MODE_CTL_SEG 0x1204
+
+#define S_MODE_CTL    0
+#define M_MODE_CTL    0x3U
+#define V_MODE_CTL(x) ((x) << S_MODE_CTL)
+#define G_MODE_CTL(x) (((x) >> S_MODE_CTL) & M_MODE_CTL)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_STATUS 0x1204
+
+#define S_T6_REM_FAULT    4
+#define V_T6_REM_FAULT(x) ((x) << S_T6_REM_FAULT)
+#define F_T6_REM_FAULT    V_T6_REM_FAULT(1U)
+
+#define A_MAC_PORT_MTIP_TXCLK_CTL_SEG 0x1208
+
+#define S_TXCLK_CTL    0
+#define M_TXCLK_CTL    0xffffU
+#define V_TXCLK_CTL(x) ((x) << S_TXCLK_CTL)
+#define G_TXCLK_CTL(x) (((x) >> S_TXCLK_CTL) & M_TXCLK_CTL)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_0 0x1208
+#define A_MAC_PORT_MTIP_TX_PRMBL_CTL_SEG 0x120c
+#define A_T6_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_1 0x120c
+#define A_T6_MAC_PORT_MTIP_SGMII_DEV_ABILITY 0x1210
+#define A_T6_MAC_PORT_MTIP_SGMII_PARTNER_ABILITY 0x1214
+#define A_T6_MAC_PORT_MTIP_SGMII_AN_EXPANSION 0x1218
+
+#define S_NEXT_PAGE_ABLE    2
+#define V_NEXT_PAGE_ABLE(x) ((x) << S_NEXT_PAGE_ABLE)
+#define F_NEXT_PAGE_ABLE    V_NEXT_PAGE_ABLE(1U)
+
+#define S_PAGE_RECEIVE    1
+#define V_PAGE_RECEIVE(x) ((x) << S_PAGE_RECEIVE)
+#define F_PAGE_RECEIVE    V_PAGE_RECEIVE(1U)
+
+#define A_MAC_PORT_MTIP_SGMII_NP_TX 0x121c
+
+#define S_NP_TX    0
+#define M_NP_TX    0xffffU
+#define V_NP_TX(x) ((x) << S_NP_TX)
+#define G_NP_TX(x) (((x) >> S_NP_TX) & M_NP_TX)
+
+#define A_MAC_PORT_MTIP_WAN_RS_COL_CNT 0x1220
+
+#define S_COL_CNT    0
+#define M_COL_CNT    0xffffU
+#define V_COL_CNT(x) ((x) << S_COL_CNT)
+#define G_COL_CNT(x) (((x) >> S_COL_CNT) & M_COL_CNT)
+
+#define A_MAC_PORT_MTIP_SGMII_LP_NP_RX 0x1220
+
+#define S_LP_NP_RX    0
+#define M_LP_NP_RX    0xffffU
+#define V_LP_NP_RX(x) ((x) << S_LP_NP_RX)
+#define G_LP_NP_RX(x) (((x) >> S_LP_NP_RX) & M_LP_NP_RX)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_EXTENDED_STATUS 0x123c
+
+#define S_EXTENDED_STATUS    0
+#define M_EXTENDED_STATUS    0xffffU
+#define V_EXTENDED_STATUS(x) ((x) << S_EXTENDED_STATUS)
+#define G_EXTENDED_STATUS(x) (((x) >> S_EXTENDED_STATUS) & M_EXTENDED_STATUS)
+
+#define A_MAC_PORT_MTIP_VL_INTVL 0x1240
+
+#define S_VL_INTVL    1
+#define V_VL_INTVL(x) ((x) << S_VL_INTVL)
+#define F_VL_INTVL    V_VL_INTVL(1U)
+
+#define A_MAC_PORT_MTIP_SGMII_SCRATCH 0x1240
+
+#define S_SCRATCH    0
+#define M_SCRATCH    0xffffU
+#define V_SCRATCH(x) ((x) << S_SCRATCH)
+#define G_SCRATCH(x) (((x) >> S_SCRATCH) & M_SCRATCH)
+
+#define A_MAC_PORT_MTIP_SGMII_REV 0x1244
+
+#define S_SGMII_VER    8
+#define M_SGMII_VER    0xffU
+#define V_SGMII_VER(x) ((x) << S_SGMII_VER)
+#define G_SGMII_VER(x) (((x) >> S_SGMII_VER) & M_SGMII_VER)
+
+#define S_SGMII_REV    0
+#define M_SGMII_REV    0xffU
+#define V_SGMII_REV(x) ((x) << S_SGMII_REV)
+#define G_SGMII_REV(x) (((x) >> S_SGMII_REV) & M_SGMII_REV)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_LINK_TIMER_LO 0x1248
+
+#define S_LINK_TIMER_LO    0
+#define M_LINK_TIMER_LO    0xffffU
+#define V_LINK_TIMER_LO(x) ((x) << S_LINK_TIMER_LO)
+#define G_LINK_TIMER_LO(x) (((x) >> S_LINK_TIMER_LO) & M_LINK_TIMER_LO)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_LINK_TIMER_HI 0x124c
+
+#define S_LINK_TIMER_HI    0
+#define M_LINK_TIMER_HI    0xffffU
+#define V_LINK_TIMER_HI(x) ((x) << S_LINK_TIMER_HI)
+#define G_LINK_TIMER_HI(x) (((x) >> S_LINK_TIMER_HI) & M_LINK_TIMER_HI)
+
+#define A_T6_MAC_PORT_MTIP_SGMII_IF_MODE 0x1250
+
+#define S_SGMII_DUPLEX    4
+#define V_SGMII_DUPLEX(x) ((x) << S_SGMII_DUPLEX)
+#define F_SGMII_DUPLEX    V_SGMII_DUPLEX(1U)
+
+#define A_MAC_PORT_MTIP_SGMII_DECODE_ERROR 0x1254
+
+#define S_T6_DECODE_ERROR    0
+#define M_T6_DECODE_ERROR    0xffffU
+#define V_T6_DECODE_ERROR(x) ((x) << S_T6_DECODE_ERROR)
+#define G_T6_DECODE_ERROR(x) (((x) >> S_T6_DECODE_ERROR) & M_T6_DECODE_ERROR)
+
+#define A_MAC_PORT_MTIP_KR_PCS_CONTROL_1 0x1300
+
+#define S_LOW_POWER    11
+#define V_LOW_POWER(x) ((x) << S_LOW_POWER)
+#define F_LOW_POWER    V_LOW_POWER(1U)
+
+#define S_T6_SPEED_SEL1    6
+#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1)
+#define F_T6_SPEED_SEL1    V_T6_SPEED_SEL1(1U)
+
+#define S_SPEED_SEL2    2
+#define M_SPEED_SEL2    0xfU
+#define V_SPEED_SEL2(x) ((x) << S_SPEED_SEL2)
+#define G_SPEED_SEL2(x) (((x) >> S_SPEED_SEL2) & M_SPEED_SEL2)
+
+#define A_MAC_PORT_MTIP_KR_PCS_STATUS_1 0x1304
+
+#define S_TX_LPI    11
+#define V_TX_LPI(x) ((x) << S_TX_LPI)
+#define F_TX_LPI    V_TX_LPI(1U)
+
+#define S_RX_LPI    10
+#define V_RX_LPI(x) ((x) << S_RX_LPI)
+#define F_RX_LPI    V_RX_LPI(1U)
+
+#define S_TX_LPI_ACTIVE    9
+#define V_TX_LPI_ACTIVE(x) ((x) << S_TX_LPI_ACTIVE)
+#define F_TX_LPI_ACTIVE    V_TX_LPI_ACTIVE(1U)
+
+#define S_RX_LPI_ACTIVE    8
+#define V_RX_LPI_ACTIVE(x) ((x) << S_RX_LPI_ACTIVE)
+#define F_RX_LPI_ACTIVE    V_RX_LPI_ACTIVE(1U)
+
+#define S_FAULT    7
+#define V_FAULT(x) ((x) << S_FAULT)
+#define F_FAULT    V_FAULT(1U)
+
+#define S_PCS_RX_LINK_STAT    2
+#define V_PCS_RX_LINK_STAT(x) ((x) << S_PCS_RX_LINK_STAT)
+#define F_PCS_RX_LINK_STAT    V_PCS_RX_LINK_STAT(1U)
+
+#define S_LOW_POWER_ABILITY    1
+#define V_LOW_POWER_ABILITY(x) ((x) << S_LOW_POWER_ABILITY)
+#define F_LOW_POWER_ABILITY    V_LOW_POWER_ABILITY(1U)
+
+#define A_MAC_PORT_MTIP_KR_PCS_DEVICE_IDENTIFIER_1 0x1308
+#define A_MAC_PORT_MTIP_KR_PCS_DEVICE_IDENTIFIER_2 0x130c
+#define A_MAC_PORT_MTIP_KR_PCS_SPEED_ABILITY 0x1310
+
+#define S_10G_CAPABLE    0
+#define V_10G_CAPABLE(x) ((x) << S_10G_CAPABLE)
+#define F_10G_CAPABLE    V_10G_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_KR_PCS_DEVICES_IN_PACKAGELO 0x1314
+
+#define S_AUTO_NEGOTIATION_PRESENT    7
+#define V_AUTO_NEGOTIATION_PRESENT(x) ((x) << S_AUTO_NEGOTIATION_PRESENT)
+#define F_AUTO_NEGOTIATION_PRESENT    V_AUTO_NEGOTIATION_PRESENT(1U)
+
+#define S_DTE_XS_PRESENT    5
+#define V_DTE_XS_PRESENT(x) ((x) << S_DTE_XS_PRESENT)
+#define F_DTE_XS_PRESENT    V_DTE_XS_PRESENT(1U)
+
+#define S_PHY_XS_PRESENT    4
+#define V_PHY_XS_PRESENT(x) ((x) << S_PHY_XS_PRESENT)
+#define F_PHY_XS_PRESENT    V_PHY_XS_PRESENT(1U)
+
+#define S_PCS_PRESENT    3
+#define V_PCS_PRESENT(x) ((x) << S_PCS_PRESENT)
+#define F_PCS_PRESENT    V_PCS_PRESENT(1U)
+
+#define S_WIS_PRESENT    2
+#define V_WIS_PRESENT(x) ((x) << S_WIS_PRESENT)
+#define F_WIS_PRESENT    V_WIS_PRESENT(1U)
+
+#define S_PMD_PMA_PRESENT    1
+#define V_PMD_PMA_PRESENT(x) ((x) << S_PMD_PMA_PRESENT)
+#define F_PMD_PMA_PRESENT    V_PMD_PMA_PRESENT(1U)
+
+#define S_CLAUSE_22_REG_PRESENT    0
+#define V_CLAUSE_22_REG_PRESENT(x) ((x) << S_CLAUSE_22_REG_PRESENT)
+#define F_CLAUSE_22_REG_PRESENT    V_CLAUSE_22_REG_PRESENT(1U)
+
+#define A_MAC_PORT_MTIP_KR_PCS_DEVICES_IN_PACKAGEHI 0x1318
+#define A_MAC_PORT_MTIP_KR_PCS_CONTROL_2 0x131c
+
+#define S_PCS_TYPE_SELECTION    0
+#define M_PCS_TYPE_SELECTION    0x3U
+#define V_PCS_TYPE_SELECTION(x) ((x) << S_PCS_TYPE_SELECTION)
+#define G_PCS_TYPE_SELECTION(x) (((x) >> S_PCS_TYPE_SELECTION) & M_PCS_TYPE_SELECTION)
+
+#define A_MAC_PORT_MTIP_KR_PCS_STATUS_2 0x1320
+
+#define S_DEVICE_PRESENT    14
+#define M_DEVICE_PRESENT    0x3U
+#define V_DEVICE_PRESENT(x) ((x) << S_DEVICE_PRESENT)
+#define G_DEVICE_PRESENT(x) (((x) >> S_DEVICE_PRESENT) & M_DEVICE_PRESENT)
+
+#define S_TRANSMIT_FAULT    11
+#define V_TRANSMIT_FAULT(x) ((x) << S_TRANSMIT_FAULT)
+#define F_TRANSMIT_FAULT    V_TRANSMIT_FAULT(1U)
+
+#define S_RECEIVE_FAULT    10
+#define V_RECEIVE_FAULT(x) ((x) << S_RECEIVE_FAULT)
+#define F_RECEIVE_FAULT    V_RECEIVE_FAULT(1U)
+
+#define S_10GBASE_W_CAPABLE    2
+#define V_10GBASE_W_CAPABLE(x) ((x) << S_10GBASE_W_CAPABLE)
+#define F_10GBASE_W_CAPABLE    V_10GBASE_W_CAPABLE(1U)
+
+#define S_10GBASE_X_CAPABLE    1
+#define V_10GBASE_X_CAPABLE(x) ((x) << S_10GBASE_X_CAPABLE)
+#define F_10GBASE_X_CAPABLE    V_10GBASE_X_CAPABLE(1U)
+
+#define S_10GBASE_R_CAPABLE    0
+#define V_10GBASE_R_CAPABLE(x) ((x) << S_10GBASE_R_CAPABLE)
+#define F_10GBASE_R_CAPABLE    V_10GBASE_R_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_PACKAGE_IDENTIFIER_LO 0x1338
+
+#define S_PCS_PACKAGE_IDENTIFIER_LO    0
+#define M_PCS_PACKAGE_IDENTIFIER_LO    0xffffU
+#define V_PCS_PACKAGE_IDENTIFIER_LO(x) ((x) << S_PCS_PACKAGE_IDENTIFIER_LO)
+#define G_PCS_PACKAGE_IDENTIFIER_LO(x) (((x) >> S_PCS_PACKAGE_IDENTIFIER_LO) & M_PCS_PACKAGE_IDENTIFIER_LO)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_PACKAGE_IDENTIFIER_HI 0x133c
+
+#define S_PCS_PACKAGE_IDENTIFIER_HI    0
+#define M_PCS_PACKAGE_IDENTIFIER_HI    0xffffU
+#define V_PCS_PACKAGE_IDENTIFIER_HI(x) ((x) << S_PCS_PACKAGE_IDENTIFIER_HI)
+#define G_PCS_PACKAGE_IDENTIFIER_HI(x) (((x) >> S_PCS_PACKAGE_IDENTIFIER_HI) & M_PCS_PACKAGE_IDENTIFIER_HI)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_STATUS_1 0x1380
+
+#define S_10GBASE_R_RX_LINK_STATUS    12
+#define V_10GBASE_R_RX_LINK_STATUS(x) ((x) << S_10GBASE_R_RX_LINK_STATUS)
+#define F_10GBASE_R_RX_LINK_STATUS    V_10GBASE_R_RX_LINK_STATUS(1U)
+
+#define S_PRBS9_PTTRN_TSTNG_ABILITY    3
+#define V_PRBS9_PTTRN_TSTNG_ABILITY(x) ((x) << S_PRBS9_PTTRN_TSTNG_ABILITY)
+#define F_PRBS9_PTTRN_TSTNG_ABILITY    V_PRBS9_PTTRN_TSTNG_ABILITY(1U)
+
+#define S_PRBS31_PTTRN_TSTNG_ABILITY    2
+#define V_PRBS31_PTTRN_TSTNG_ABILITY(x) ((x) << S_PRBS31_PTTRN_TSTNG_ABILITY)
+#define F_PRBS31_PTTRN_TSTNG_ABILITY    V_PRBS31_PTTRN_TSTNG_ABILITY(1U)
+
+#define S_10GBASE_R_PCS_HIGH_BER    1
+#define V_10GBASE_R_PCS_HIGH_BER(x) ((x) << S_10GBASE_R_PCS_HIGH_BER)
+#define F_10GBASE_R_PCS_HIGH_BER    V_10GBASE_R_PCS_HIGH_BER(1U)
+
+#define S_10GBASE_R_PCS_BLOCK_LOCK    0
+#define V_10GBASE_R_PCS_BLOCK_LOCK(x) ((x) << S_10GBASE_R_PCS_BLOCK_LOCK)
+#define F_10GBASE_R_PCS_BLOCK_LOCK    V_10GBASE_R_PCS_BLOCK_LOCK(1U)
+
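/*
 * Single-bit status fields are tested with the F_ forms.  A sketch of
 * interpreting the 10GBASE-R PCS status word above: the link is treated
 * as usable only with the receive link up, block lock achieved, and no
 * high bit-error-rate indication.  The status1 argument is assumed to
 * have been read from A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_STATUS_1 through
 * the driver's normal register path.
 */
#include <stdbool.h>

static bool
kr_pcs_link_ok(uint32_t status1)
{
	return ((status1 & F_10GBASE_R_RX_LINK_STATUS) != 0 &&
	    (status1 & F_10GBASE_R_PCS_BLOCK_LOCK) != 0 &&
	    (status1 & F_10GBASE_R_PCS_HIGH_BER) == 0);
}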
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_STATUS_2 0x1384
+
+#define S_LATCHED_BLOCK_LOCK    15
+#define V_LATCHED_BLOCK_LOCK(x) ((x) << S_LATCHED_BLOCK_LOCK)
+#define F_LATCHED_BLOCK_LOCK    V_LATCHED_BLOCK_LOCK(1U)
+
+#define S_LATCHED_HIGH_BER    14
+#define V_LATCHED_HIGH_BER(x) ((x) << S_LATCHED_HIGH_BER)
+#define F_LATCHED_HIGH_BER    V_LATCHED_HIGH_BER(1U)
+
+#define S_BERBER_COUNTER    8
+#define M_BERBER_COUNTER    0x3fU
+#define V_BERBER_COUNTER(x) ((x) << S_BERBER_COUNTER)
+#define G_BERBER_COUNTER(x) (((x) >> S_BERBER_COUNTER) & M_BERBER_COUNTER)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_0 0x1388
+
+#define S_TEST_PATTERN_SEED_A0    0
+#define M_TEST_PATTERN_SEED_A0    0xffffU
+#define V_TEST_PATTERN_SEED_A0(x) ((x) << S_TEST_PATTERN_SEED_A0)
+#define G_TEST_PATTERN_SEED_A0(x) (((x) >> S_TEST_PATTERN_SEED_A0) & M_TEST_PATTERN_SEED_A0)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_1 0x138c
+
+#define S_TEST_PATTERN_SEED_A1    0
+#define M_TEST_PATTERN_SEED_A1    0xffffU
+#define V_TEST_PATTERN_SEED_A1(x) ((x) << S_TEST_PATTERN_SEED_A1)
+#define G_TEST_PATTERN_SEED_A1(x) (((x) >> S_TEST_PATTERN_SEED_A1) & M_TEST_PATTERN_SEED_A1)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_2 0x1390
+
+#define S_TEST_PATTERN_SEED_A2    0
+#define M_TEST_PATTERN_SEED_A2    0xffffU
+#define V_TEST_PATTERN_SEED_A2(x) ((x) << S_TEST_PATTERN_SEED_A2)
+#define G_TEST_PATTERN_SEED_A2(x) (((x) >> S_TEST_PATTERN_SEED_A2) & M_TEST_PATTERN_SEED_A2)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_3 0x1394
+
+#define S_TEST_PATTERN_SEED_A3    0
+#define M_TEST_PATTERN_SEED_A3    0x3ffU
+#define V_TEST_PATTERN_SEED_A3(x) ((x) << S_TEST_PATTERN_SEED_A3)
+#define G_TEST_PATTERN_SEED_A3(x) (((x) >> S_TEST_PATTERN_SEED_A3) & M_TEST_PATTERN_SEED_A3)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_0 0x1398
+
+#define S_TEST_PATTERN_SEED_B0    0
+#define M_TEST_PATTERN_SEED_B0    0xffffU
+#define V_TEST_PATTERN_SEED_B0(x) ((x) << S_TEST_PATTERN_SEED_B0)
+#define G_TEST_PATTERN_SEED_B0(x) (((x) >> S_TEST_PATTERN_SEED_B0) & M_TEST_PATTERN_SEED_B0)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_1 0x139c
+
+#define S_TEST_PATTERN_SEED_B1    0
+#define M_TEST_PATTERN_SEED_B1    0xffffU
+#define V_TEST_PATTERN_SEED_B1(x) ((x) << S_TEST_PATTERN_SEED_B1)
+#define G_TEST_PATTERN_SEED_B1(x) (((x) >> S_TEST_PATTERN_SEED_B1) & M_TEST_PATTERN_SEED_B1)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_2 0x13a0
+
+#define S_TEST_PATTERN_SEED_B2    0
+#define M_TEST_PATTERN_SEED_B2    0xffffU
+#define V_TEST_PATTERN_SEED_B2(x) ((x) << S_TEST_PATTERN_SEED_B2)
+#define G_TEST_PATTERN_SEED_B2(x) (((x) >> S_TEST_PATTERN_SEED_B2) & M_TEST_PATTERN_SEED_B2)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_3 0x13a4
+
+#define S_TEST_PATTERN_SEED_B3    0
+#define M_TEST_PATTERN_SEED_B3    0x3ffU
+#define V_TEST_PATTERN_SEED_B3(x) ((x) << S_TEST_PATTERN_SEED_B3)
+#define G_TEST_PATTERN_SEED_B3(x) (((x) >> S_TEST_PATTERN_SEED_B3) & M_TEST_PATTERN_SEED_B3)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_CONTROL 0x13a8
+
+#define S_PRBS9_TX_TST_PTTRN_EN    6
+#define V_PRBS9_TX_TST_PTTRN_EN(x) ((x) << S_PRBS9_TX_TST_PTTRN_EN)
+#define F_PRBS9_TX_TST_PTTRN_EN    V_PRBS9_TX_TST_PTTRN_EN(1U)
+
+#define S_PRBS31_RX_TST_PTTRN_EN    5
+#define V_PRBS31_RX_TST_PTTRN_EN(x) ((x) << S_PRBS31_RX_TST_PTTRN_EN)
+#define F_PRBS31_RX_TST_PTTRN_EN    V_PRBS31_RX_TST_PTTRN_EN(1U)
+
+#define S_PRBS31_TX_TST_PTTRN_EN    4
+#define V_PRBS31_TX_TST_PTTRN_EN(x) ((x) << S_PRBS31_TX_TST_PTTRN_EN)
+#define F_PRBS31_TX_TST_PTTRN_EN    V_PRBS31_TX_TST_PTTRN_EN(1U)
+
+#define S_TX_TEST_PATTERN_EN    3
+#define V_TX_TEST_PATTERN_EN(x) ((x) << S_TX_TEST_PATTERN_EN)
+#define F_TX_TEST_PATTERN_EN    V_TX_TEST_PATTERN_EN(1U)
+
+#define S_RX_TEST_PATTERN_EN    2
+#define V_RX_TEST_PATTERN_EN(x) ((x) << S_RX_TEST_PATTERN_EN)
+#define F_RX_TEST_PATTERN_EN    V_RX_TEST_PATTERN_EN(1U)
+
+#define S_TEST_PATTERN_SELECT    1
+#define V_TEST_PATTERN_SELECT(x) ((x) << S_TEST_PATTERN_SELECT)
+#define F_TEST_PATTERN_SELECT    V_TEST_PATTERN_SELECT(1U)
+
+#define S_DATA_PATTERN_SELECT    0
+#define V_DATA_PATTERN_SELECT(x) ((x) << S_DATA_PATTERN_SELECT)
+#define F_DATA_PATTERN_SELECT    V_DATA_PATTERN_SELECT(1U)
+
+#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_ERROR_COUNTER 0x13ac
+
+#define S_TEST_PATTERN_ERR_CNTR    0
+#define M_TEST_PATTERN_ERR_CNTR    0xffffU
+#define V_TEST_PATTERN_ERR_CNTR(x) ((x) << S_TEST_PATTERN_ERR_CNTR)
+#define G_TEST_PATTERN_ERR_CNTR(x) (((x) >> S_TEST_PATTERN_ERR_CNTR) & M_TEST_PATTERN_ERR_CNTR)
+
+#define A_MAC_PORT_MTIP_KR_VENDOR_SPECIFIC_PCS_STATUS 0x13b4
+
+#define S_TRANSMIT_FIFO_FAULT    1
+#define V_TRANSMIT_FIFO_FAULT(x) ((x) << S_TRANSMIT_FIFO_FAULT)
+#define F_TRANSMIT_FIFO_FAULT    V_TRANSMIT_FIFO_FAULT(1U)
+
+#define S_RECEIVE_FIFO_FAULT    0
+#define V_RECEIVE_FIFO_FAULT(x) ((x) << S_RECEIVE_FIFO_FAULT)
+#define F_RECEIVE_FIFO_FAULT    V_RECEIVE_FIFO_FAULT(1U)
+
+#define A_MAC_PORT_MTIP_KR4_CONTROL_1 0x1400
+
+#define S_SPEED_SELECTION    13
+#define V_SPEED_SELECTION(x) ((x) << S_SPEED_SELECTION)
+#define F_SPEED_SELECTION    V_SPEED_SELECTION(1U)
+
+#define S_SPEED_SELECTION1    6
+#define V_SPEED_SELECTION1(x) ((x) << S_SPEED_SELECTION1)
+#define F_SPEED_SELECTION1    V_SPEED_SELECTION1(1U)
+
+#define S_SPEED_SELECTION2    2
+#define M_SPEED_SELECTION2    0xfU
+#define V_SPEED_SELECTION2(x) ((x) << S_SPEED_SELECTION2)
+#define G_SPEED_SELECTION2(x) (((x) >> S_SPEED_SELECTION2) & M_SPEED_SELECTION2)
+
+#define A_MAC_PORT_MTIP_KR4_STATUS_1 0x1404
+
+#define S_RECEIVE_LINK_STAT    2
+#define V_RECEIVE_LINK_STAT(x) ((x) << S_RECEIVE_LINK_STAT)
+#define F_RECEIVE_LINK_STAT    V_RECEIVE_LINK_STAT(1U)
+
+#define A_MAC_PORT_MTIP_KR4_DEVICE_ID0 0x1408
+#define A_MAC_PORT_MTIP_KR4_DEVICE_ID1 0x140c
+
+#define S_T6_DEVICE_ID1    16
+#define M_T6_DEVICE_ID1    0xffffU
+#define V_T6_DEVICE_ID1(x) ((x) << S_T6_DEVICE_ID1)
+#define G_T6_DEVICE_ID1(x) (((x) >> S_T6_DEVICE_ID1) & M_T6_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_KR4_SPEED_ABILITY 0x1410
+
+#define S_100G_CAPABLE    3
+#define V_100G_CAPABLE(x) ((x) << S_100G_CAPABLE)
+#define F_100G_CAPABLE    V_100G_CAPABLE(1U)
+
+#define S_40G_CAPABLE    2
+#define V_40G_CAPABLE(x) ((x) << S_40G_CAPABLE)
+#define F_40G_CAPABLE    V_40G_CAPABLE(1U)
+
+#define S_10PASS_TS_2BASE_TL_CAPABLE    1
+#define V_10PASS_TS_2BASE_TL_CAPABLE(x) ((x) << S_10PASS_TS_2BASE_TL_CAPABLE)
+#define F_10PASS_TS_2BASE_TL_CAPABLE    V_10PASS_TS_2BASE_TL_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_KR4_DEVICES_IN_PKG1 0x1414
+
+#define S_CLAUSE_22_REG    0
+#define V_CLAUSE_22_REG(x) ((x) << S_CLAUSE_22_REG)
+#define F_CLAUSE_22_REG    V_CLAUSE_22_REG(1U)
+
+#define A_MAC_PORT_MTIP_KR4_DEVICES_IN_PKG2 0x1418
+
+#define S_VENDOR_SPECIFIC_DEVICE    15
+#define V_VENDOR_SPECIFIC_DEVICE(x) ((x) << S_VENDOR_SPECIFIC_DEVICE)
+#define F_VENDOR_SPECIFIC_DEVICE    V_VENDOR_SPECIFIC_DEVICE(1U)
+
+#define S_VENDOR_SPECIFIC_DEVICE1    14
+#define V_VENDOR_SPECIFIC_DEVICE1(x) ((x) << S_VENDOR_SPECIFIC_DEVICE1)
+#define F_VENDOR_SPECIFIC_DEVICE1    V_VENDOR_SPECIFIC_DEVICE1(1U)
+
+#define S_CLAUSE_22_EXT    13
+#define V_CLAUSE_22_EXT(x) ((x) << S_CLAUSE_22_EXT)
+#define F_CLAUSE_22_EXT    V_CLAUSE_22_EXT(1U)
+
+#define A_MAC_PORT_MTIP_KR4_CONTROL_2 0x141c
+
+#define S_PCS_TYPE_SEL    0
+#define M_PCS_TYPE_SEL    0x7U
+#define V_PCS_TYPE_SEL(x) ((x) << S_PCS_TYPE_SEL)
+#define G_PCS_TYPE_SEL(x) (((x) >> S_PCS_TYPE_SEL) & M_PCS_TYPE_SEL)
+
+#define A_MAC_PORT_MTIP_KR4_STATUS_2 0x1420
+
+#define S_100GBASE_R_CAPABLE    5
+#define V_100GBASE_R_CAPABLE(x) ((x) << S_100GBASE_R_CAPABLE)
+#define F_100GBASE_R_CAPABLE    V_100GBASE_R_CAPABLE(1U)
+
+#define S_40GBASE_R_CAPABLE    4
+#define V_40GBASE_R_CAPABLE(x) ((x) << S_40GBASE_R_CAPABLE)
+#define F_40GBASE_R_CAPABLE    V_40GBASE_R_CAPABLE(1U)
+
+#define S_10GBASE_T_CAPABLE    3
+#define V_10GBASE_T_CAPABLE(x) ((x) << S_10GBASE_T_CAPABLE)
+#define F_10GBASE_T_CAPABLE    V_10GBASE_T_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_KR4_PKG_ID0 0x1438
+#define A_MAC_PORT_MTIP_KR4_PKG_ID1 0x143c
+#define A_MAC_PORT_MTIP_KR4_BASE_R_STATUS_1 0x1480
+
+#define S_T6_RX_LINK_STATUS    12
+#define V_T6_RX_LINK_STATUS(x) ((x) << S_T6_RX_LINK_STATUS)
+#define F_T6_RX_LINK_STATUS    V_T6_RX_LINK_STATUS(1U)
+
+#define S_HIGH_BER    1
+#define V_HIGH_BER(x) ((x) << S_HIGH_BER)
+#define F_HIGH_BER    V_HIGH_BER(1U)
+
+#define S_KR4_BLOCK_LOCK    0
+#define V_KR4_BLOCK_LOCK(x) ((x) << S_KR4_BLOCK_LOCK)
+#define F_KR4_BLOCK_LOCK    V_KR4_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_KR4_BASE_R_STATUS_2 0x1484
+
+#define S_LATCHED_BL_LK    15
+#define V_LATCHED_BL_LK(x) ((x) << S_LATCHED_BL_LK)
+#define F_LATCHED_BL_LK    V_LATCHED_BL_LK(1U)
+
+#define S_LATCHED_HG_BR    14
+#define V_LATCHED_HG_BR(x) ((x) << S_LATCHED_HG_BR)
+#define F_LATCHED_HG_BR    V_LATCHED_HG_BR(1U)
+
+#define S_BER_CNT    8
+#define M_BER_CNT    0x3fU
+#define V_BER_CNT(x) ((x) << S_BER_CNT)
+#define G_BER_CNT(x) (((x) >> S_BER_CNT) & M_BER_CNT)
+
+#define S_ERR_BL_CNT    0
+#define M_ERR_BL_CNT    0xffU
+#define V_ERR_BL_CNT(x) ((x) << S_ERR_BL_CNT)
+#define G_ERR_BL_CNT(x) (((x) >> S_ERR_BL_CNT) & M_ERR_BL_CNT)
+
+#define A_MAC_PORT_MTIP_KR4_BASE_R_TEST_CONTROL 0x14a8
+
+#define S_TX_TP_EN    3
+#define V_TX_TP_EN(x) ((x) << S_TX_TP_EN)
+#define F_TX_TP_EN    V_TX_TP_EN(1U)
+
+#define S_RX_TP_EN    2
+#define V_RX_TP_EN(x) ((x) << S_RX_TP_EN)
+#define F_RX_TP_EN    V_RX_TP_EN(1U)
+
+#define A_MAC_PORT_MTIP_KR4_BASE_R_TEST_ERR_CNT 0x14ac
+
+#define S_TP_ERR_CNTR    0
+#define M_TP_ERR_CNTR    0xffffU
+#define V_TP_ERR_CNTR(x) ((x) << S_TP_ERR_CNTR)
+#define G_TP_ERR_CNTR(x) (((x) >> S_TP_ERR_CNTR) & M_TP_ERR_CNTR)
+
+#define A_MAC_PORT_MTIP_KR4_BER_HIGH_ORDER_CNT 0x14b0
+
+#define S_BER_HI_ORDER_CNT    0
+#define M_BER_HI_ORDER_CNT    0xffffU
+#define V_BER_HI_ORDER_CNT(x) ((x) << S_BER_HI_ORDER_CNT)
+#define G_BER_HI_ORDER_CNT(x) (((x) >> S_BER_HI_ORDER_CNT) & M_BER_HI_ORDER_CNT)
+
+#define A_MAC_PORT_MTIP_KR4_ERR_BLK_HIGH_ORDER_CNT 0x14b4
+
+#define S_HI_ORDER_CNT_EN    15
+#define V_HI_ORDER_CNT_EN(x) ((x) << S_HI_ORDER_CNT_EN)
+#define F_HI_ORDER_CNT_EN    V_HI_ORDER_CNT_EN(1U)
+
+#define S_ERR_BLK_CNTR    0
+#define M_ERR_BLK_CNTR    0x3fffU
+#define V_ERR_BLK_CNTR(x) ((x) << S_ERR_BLK_CNTR)
+#define G_ERR_BLK_CNTR(x) (((x) >> S_ERR_BLK_CNTR) & M_ERR_BLK_CNTR)
+
+#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_1 0x14c8
+
+#define S_LANE_ALIGN_STATUS    12
+#define V_LANE_ALIGN_STATUS(x) ((x) << S_LANE_ALIGN_STATUS)
+#define F_LANE_ALIGN_STATUS    V_LANE_ALIGN_STATUS(1U)
+
+#define S_LANE_3_BLK_LCK    3
+#define V_LANE_3_BLK_LCK(x) ((x) << S_LANE_3_BLK_LCK)
+#define F_LANE_3_BLK_LCK    V_LANE_3_BLK_LCK(1U)
+
+#define S_LANE_2_BLK_LC32_6431K    2
+#define V_LANE_2_BLK_LC32_6431K(x) ((x) << S_LANE_2_BLK_LC32_6431K)
+#define F_LANE_2_BLK_LC32_6431K    V_LANE_2_BLK_LC32_6431K(1U)
+
+#define S_LANE_1_BLK_LCK    1
+#define V_LANE_1_BLK_LCK(x) ((x) << S_LANE_1_BLK_LCK)
+#define F_LANE_1_BLK_LCK    V_LANE_1_BLK_LCK(1U)
+
+#define S_LANE_0_BLK_LCK    0
+#define V_LANE_0_BLK_LCK(x) ((x) << S_LANE_0_BLK_LCK)
+#define F_LANE_0_BLK_LCK    V_LANE_0_BLK_LCK(1U)
+
+#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_2 0x14cc
+#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_3 0x14d0
+
+#define S_LANE_3_ALIGN_MRKR_LCK    3
+#define V_LANE_3_ALIGN_MRKR_LCK(x) ((x) << S_LANE_3_ALIGN_MRKR_LCK)
+#define F_LANE_3_ALIGN_MRKR_LCK    V_LANE_3_ALIGN_MRKR_LCK(1U)
+
+#define S_LANE_2_ALIGN_MRKR_LCK    2
+#define V_LANE_2_ALIGN_MRKR_LCK(x) ((x) << S_LANE_2_ALIGN_MRKR_LCK)
+#define F_LANE_2_ALIGN_MRKR_LCK    V_LANE_2_ALIGN_MRKR_LCK(1U)
+
+#define S_LANE_1_ALIGN_MRKR_LCK    1
+#define V_LANE_1_ALIGN_MRKR_LCK(x) ((x) << S_LANE_1_ALIGN_MRKR_LCK)
+#define F_LANE_1_ALIGN_MRKR_LCK    V_LANE_1_ALIGN_MRKR_LCK(1U)
+
+#define S_LANE_0_ALIGN_MRKR_LCK    0
+#define V_LANE_0_ALIGN_MRKR_LCK(x) ((x) << S_LANE_0_ALIGN_MRKR_LCK)
+#define F_LANE_0_ALIGN_MRKR_LCK    V_LANE_0_ALIGN_MRKR_LCK(1U)
+
+#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_4 0x14d4
+#define A_MAC_PORT_MTIP_MDIO_CFG_STATUS 0x1600
+
+#define S_CLK_DIV    7
+#define M_CLK_DIV    0x1ffU
+#define V_CLK_DIV(x) ((x) << S_CLK_DIV)
+#define G_CLK_DIV(x) (((x) >> S_CLK_DIV) & M_CLK_DIV)
+
+#define S_CL45_EN    6
+#define V_CL45_EN(x) ((x) << S_CL45_EN)
+#define F_CL45_EN    V_CL45_EN(1U)
+
+#define S_DISABLE_PREAMBLE    5
+#define V_DISABLE_PREAMBLE(x) ((x) << S_DISABLE_PREAMBLE)
+#define F_DISABLE_PREAMBLE    V_DISABLE_PREAMBLE(1U)
+
+#define S_MDIO_HOLD_TIME    2
+#define M_MDIO_HOLD_TIME    0x7U
+#define V_MDIO_HOLD_TIME(x) ((x) << S_MDIO_HOLD_TIME)
+#define G_MDIO_HOLD_TIME(x) (((x) >> S_MDIO_HOLD_TIME) & M_MDIO_HOLD_TIME)
+
+#define S_MDIO_READ_ERR    1
+#define V_MDIO_READ_ERR(x) ((x) << S_MDIO_READ_ERR)
+#define F_MDIO_READ_ERR    V_MDIO_READ_ERR(1U)
+
+#define S_MDIO_BUSY    0
+#define V_MDIO_BUSY(x) ((x) << S_MDIO_BUSY)
+#define F_MDIO_BUSY    V_MDIO_BUSY(1U)
+
+#define A_MAC_PORT_MTIP_MDIO_COMMAND 0x1604
+
+#define S_MDIO_CMD_READ    15
+#define V_MDIO_CMD_READ(x) ((x) << S_MDIO_CMD_READ)
+#define F_MDIO_CMD_READ    V_MDIO_CMD_READ(1U)
+
+#define S_READ_INCR    14
+#define V_READ_INCR(x) ((x) << S_READ_INCR)
+#define F_READ_INCR    V_READ_INCR(1U)
+
+#define S_PORT_ADDR    5
+#define M_PORT_ADDR    0x1fU
+#define V_PORT_ADDR(x) ((x) << S_PORT_ADDR)
+#define G_PORT_ADDR(x) (((x) >> S_PORT_ADDR) & M_PORT_ADDR)
+
+#define S_DEV_ADDR    0
+#define M_DEV_ADDR    0x1fU
+#define V_DEV_ADDR(x) ((x) << S_DEV_ADDR)
+#define G_DEV_ADDR(x) (((x) >> S_DEV_ADDR) & M_DEV_ADDR)
+
+#define A_MAC_PORT_MTIP_MDIO_DATA 0x1608
+
+#define S_READBUSY    31
+#define V_READBUSY(x) ((x) << S_READBUSY)
+#define F_READBUSY    V_READBUSY(1U)
+
+#define S_DATA_WORD    0
+#define M_DATA_WORD    0xffffU
+#define V_DATA_WORD(x) ((x) << S_DATA_WORD)
+#define G_DATA_WORD(x) (((x) >> S_DATA_WORD) & M_DATA_WORD)
+
+#define A_MAC_PORT_MTIP_MDIO_REGADDR 0x160c
+
+#define S_MDIO_ADDR    0
+#define M_MDIO_ADDR    0xffffU
+#define V_MDIO_ADDR(x) ((x) << S_MDIO_ADDR)
+#define G_MDIO_ADDR(x) (((x) >> S_MDIO_ADDR) & M_MDIO_ADDR)
+
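/*
 * Taken together, the MDIO registers above suggest a clause-45 read
 * sequence: latch the register address, issue a read command for a
 * port/device pair, then poll until the data register's busy bit clears
 * and extract the 16-bit word.  This is a sketch inferred from the
 * register names only, not the driver's actual MDIO routine;
 * t4_read_reg()/t4_write_reg() are the driver's 32-bit accessors and
 * any per-port base offset is assumed to be folded into the addresses.
 */
static int
mtip_mdio_read(struct adapter *sc, u_int port, u_int dev, u_int reg,
    uint16_t *valp)
{
	uint32_t v;
	int i;

	t4_write_reg(sc, A_MAC_PORT_MTIP_MDIO_REGADDR, V_MDIO_ADDR(reg));
	t4_write_reg(sc, A_MAC_PORT_MTIP_MDIO_COMMAND,
	    F_MDIO_CMD_READ | V_PORT_ADDR(port) | V_DEV_ADDR(dev));
	for (i = 0; i < 100; i++) {
		v = t4_read_reg(sc, A_MAC_PORT_MTIP_MDIO_DATA);
		if (!(v & F_READBUSY)) {
			*valp = G_DATA_WORD(v);
			return (0);
		}
		DELAY(10);	/* busy-wait; a caller may prefer sleeping */
	}
	return (ETIMEDOUT);
}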
+#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_0 0x1720
+
+#define S_BIP_ERR_CNT_LANE_0    0
+#define M_BIP_ERR_CNT_LANE_0    0xffffU
+#define V_BIP_ERR_CNT_LANE_0(x) ((x) << S_BIP_ERR_CNT_LANE_0)
+#define G_BIP_ERR_CNT_LANE_0(x) (((x) >> S_BIP_ERR_CNT_LANE_0) & M_BIP_ERR_CNT_LANE_0)
+
+#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_1 0x1724
+
+#define S_BIP_ERR_CNT_LANE_1    0
+#define M_BIP_ERR_CNT_LANE_1    0xffffU
+#define V_BIP_ERR_CNT_LANE_1(x) ((x) << S_BIP_ERR_CNT_LANE_1)
+#define G_BIP_ERR_CNT_LANE_1(x) (((x) >> S_BIP_ERR_CNT_LANE_1) & M_BIP_ERR_CNT_LANE_1)
+
+#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_2 0x1728
+
+#define S_BIP_ERR_CNT_LANE_2    0
+#define M_BIP_ERR_CNT_LANE_2    0xffffU
+#define V_BIP_ERR_CNT_LANE_2(x) ((x) << S_BIP_ERR_CNT_LANE_2)
+#define G_BIP_ERR_CNT_LANE_2(x) (((x) >> S_BIP_ERR_CNT_LANE_2) & M_BIP_ERR_CNT_LANE_2)
+
+#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_3 0x172c
+
+#define S_BIP_ERR_CNT_LANE_3    0
+#define M_BIP_ERR_CNT_LANE_3    0xffffU
+#define V_BIP_ERR_CNT_LANE_3(x) ((x) << S_BIP_ERR_CNT_LANE_3)
+#define G_BIP_ERR_CNT_LANE_3(x) (((x) >> S_BIP_ERR_CNT_LANE_3) & M_BIP_ERR_CNT_LANE_3)
+
+#define A_MAC_PORT_MTIP_VLAN_TPID_0 0x1a00
+
+#define S_VLANTAG    0
+#define CXGBE_M_VLANTAG    0xffffU
+#define V_VLANTAG(x) ((x) << S_VLANTAG)
+#define G_VLANTAG(x) (((x) >> S_VLANTAG) & CXGBE_M_VLANTAG)
+
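/*
 * Note the CXGBE_ prefix on CXGBE_M_VLANTAG, which breaks the usual
 * M_<FIELD> pattern: a bare M_VLANTAG would presumably collide with the
 * mbuf flag of the same name in <sys/mbuf.h>.  G_VLANTAG() masks with
 * the prefixed name but is otherwise the standard extractor.
 */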
+#define A_MAC_PORT_MTIP_VLAN_TPID_1 0x1a04
+#define A_MAC_PORT_MTIP_VLAN_TPID_2 0x1a08
+#define A_MAC_PORT_MTIP_VLAN_TPID_3 0x1a0c
+#define A_MAC_PORT_MTIP_VLAN_TPID_4 0x1a10
+#define A_MAC_PORT_MTIP_VLAN_TPID_5 0x1a14
+#define A_MAC_PORT_MTIP_VLAN_TPID_6 0x1a18
+#define A_MAC_PORT_MTIP_VLAN_TPID_7 0x1a1c
+#define A_MAC_PORT_MTIP_KR4_LANE_0_MAPPING 0x1a40
+
+#define S_KR4_LANE_0_MAPPING    0
+#define M_KR4_LANE_0_MAPPING    0x3U
+#define V_KR4_LANE_0_MAPPING(x) ((x) << S_KR4_LANE_0_MAPPING)
+#define G_KR4_LANE_0_MAPPING(x) (((x) >> S_KR4_LANE_0_MAPPING) & M_KR4_LANE_0_MAPPING)
+
+#define A_MAC_PORT_MTIP_KR4_LANE_1_MAPPING 0x1a44
+
+#define S_KR4_LANE_1_MAPPING    0
+#define M_KR4_LANE_1_MAPPING    0x3U
+#define V_KR4_LANE_1_MAPPING(x) ((x) << S_KR4_LANE_1_MAPPING)
+#define G_KR4_LANE_1_MAPPING(x) (((x) >> S_KR4_LANE_1_MAPPING) & M_KR4_LANE_1_MAPPING)
+
+#define A_MAC_PORT_MTIP_KR4_LANE_2_MAPPING 0x1a48
+
+#define S_KR4_LANE_2_MAPPING    0
+#define M_KR4_LANE_2_MAPPING    0x3U
+#define V_KR4_LANE_2_MAPPING(x) ((x) << S_KR4_LANE_2_MAPPING)
+#define G_KR4_LANE_2_MAPPING(x) (((x) >> S_KR4_LANE_2_MAPPING) & M_KR4_LANE_2_MAPPING)
+
+#define A_MAC_PORT_MTIP_KR4_LANE_3_MAPPING 0x1a4c
+
+#define S_KR4_LANE_3_MAPPING    0
+#define M_KR4_LANE_3_MAPPING    0x3U
+#define V_KR4_LANE_3_MAPPING(x) ((x) << S_KR4_LANE_3_MAPPING)
+#define G_KR4_LANE_3_MAPPING(x) (((x) >> S_KR4_LANE_3_MAPPING) & M_KR4_LANE_3_MAPPING)
+
+#define A_MAC_PORT_MTIP_KR4_SCRATCH 0x1af0
+#define A_MAC_PORT_MTIP_KR4_CORE_REVISION 0x1af4
+#define A_MAC_PORT_MTIP_KR4_VL_INTVL 0x1af8
+
+#define S_SHRT_MRKR_CNFG    0
+#define V_SHRT_MRKR_CNFG(x) ((x) << S_SHRT_MRKR_CNFG)
+#define F_SHRT_MRKR_CNFG    V_SHRT_MRKR_CNFG(1U)
+
+#define A_MAC_PORT_MTIP_KR4_TX_LANE_THRESH 0x1afc
+#define A_MAC_PORT_MTIP_CR4_CONTROL_1 0x1b00
+#define A_MAC_PORT_MTIP_CR4_STATUS_1 0x1b04
+
+#define S_CR4_RX_LINK_STATUS    2
+#define V_CR4_RX_LINK_STATUS(x) ((x) << S_CR4_RX_LINK_STATUS)
+#define F_CR4_RX_LINK_STATUS    V_CR4_RX_LINK_STATUS(1U)
+
+#define A_MAC_PORT_MTIP_CR4_DEVICE_ID0 0x1b08
+
+#define S_CR4_DEVICE_ID0    0
+#define M_CR4_DEVICE_ID0    0xffffU
+#define V_CR4_DEVICE_ID0(x) ((x) << S_CR4_DEVICE_ID0)
+#define G_CR4_DEVICE_ID0(x) (((x) >> S_CR4_DEVICE_ID0) & M_CR4_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_DEVICE_ID1 0x1b0c
+
+#define S_CR4_DEVICE_ID1    0
+#define M_CR4_DEVICE_ID1    0xffffU
+#define V_CR4_DEVICE_ID1(x) ((x) << S_CR4_DEVICE_ID1)
+#define G_CR4_DEVICE_ID1(x) (((x) >> S_CR4_DEVICE_ID1) & M_CR4_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_SPEED_ABILITY 0x1b10
+
+#define S_CR4_100G_CAPABLE    8
+#define V_CR4_100G_CAPABLE(x) ((x) << S_CR4_100G_CAPABLE)
+#define F_CR4_100G_CAPABLE    V_CR4_100G_CAPABLE(1U)
+
+#define S_CR4_40G_CAPABLE    7
+#define V_CR4_40G_CAPABLE(x) ((x) << S_CR4_40G_CAPABLE)
+#define F_CR4_40G_CAPABLE    V_CR4_40G_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_DEVICES_IN_PKG1 0x1b14
+
+#define S_CLAUSE22REG_PRESENT    0
+#define V_CLAUSE22REG_PRESENT(x) ((x) << S_CLAUSE22REG_PRESENT)
+#define F_CLAUSE22REG_PRESENT    V_CLAUSE22REG_PRESENT(1U)
+
+#define A_MAC_PORT_MTIP_CR4_DEVICES_IN_PKG2 0x1b18
+
+#define S_VSD_2_PRESENT    15
+#define V_VSD_2_PRESENT(x) ((x) << S_VSD_2_PRESENT)
+#define F_VSD_2_PRESENT    V_VSD_2_PRESENT(1U)
+
+#define S_VSD_1_PRESENT    14
+#define V_VSD_1_PRESENT(x) ((x) << S_VSD_1_PRESENT)
+#define F_VSD_1_PRESENT    V_VSD_1_PRESENT(1U)
+
+#define S_CLAUSE22_EXT_PRESENT    13
+#define V_CLAUSE22_EXT_PRESENT(x) ((x) << S_CLAUSE22_EXT_PRESENT)
+#define F_CLAUSE22_EXT_PRESENT    V_CLAUSE22_EXT_PRESENT(1U)
+
+#define A_MAC_PORT_MTIP_CR4_CONTROL_2 0x1b1c
+
+#define S_CR4_PCS_TYPE_SELECTION    0
+#define M_CR4_PCS_TYPE_SELECTION    0x7U
+#define V_CR4_PCS_TYPE_SELECTION(x) ((x) << S_CR4_PCS_TYPE_SELECTION)
+#define G_CR4_PCS_TYPE_SELECTION(x) (((x) >> S_CR4_PCS_TYPE_SELECTION) & M_CR4_PCS_TYPE_SELECTION)
+
+#define A_MAC_PORT_MTIP_CR4_STATUS_2 0x1b20
+#define A_MAC_PORT_MTIP_CR4_PKG_ID0 0x1b38
+#define A_MAC_PORT_MTIP_CR4_PKG_ID1 0x1b3c
+#define A_MAC_PORT_MTIP_CR4_BASE_R_STATUS_1 0x1b80
+
+#define S_RX_LINK_STAT    12
+#define V_RX_LINK_STAT(x) ((x) << S_RX_LINK_STAT)
+#define F_RX_LINK_STAT    V_RX_LINK_STAT(1U)
+
+#define S_BR_BLOCK_LOCK    0
+#define V_BR_BLOCK_LOCK(x) ((x) << S_BR_BLOCK_LOCK)
+#define F_BR_BLOCK_LOCK    V_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_BASE_R_STATUS_2 0x1b84
+
+#define S_BER_COUNTER    8
+#define M_BER_COUNTER    0x3fU
+#define V_BER_COUNTER(x) ((x) << S_BER_COUNTER)
+#define G_BER_COUNTER(x) (((x) >> S_BER_COUNTER) & M_BER_COUNTER)
+
+#define S_ERRORED_BLOCKS_CNTR    0
+#define M_ERRORED_BLOCKS_CNTR    0xffU
+#define V_ERRORED_BLOCKS_CNTR(x) ((x) << S_ERRORED_BLOCKS_CNTR)
+#define G_ERRORED_BLOCKS_CNTR(x) (((x) >> S_ERRORED_BLOCKS_CNTR) & M_ERRORED_BLOCKS_CNTR)
+
+#define A_MAC_PORT_MTIP_CR4_BASE_R_TEST_CONTROL 0x1ba8
+
+#define S_SCRAMBLED_ID_TP_EN    7
+#define V_SCRAMBLED_ID_TP_EN(x) ((x) << S_SCRAMBLED_ID_TP_EN)
+#define F_SCRAMBLED_ID_TP_EN    V_SCRAMBLED_ID_TP_EN(1U)
+
+#define A_MAC_PORT_MTIP_CR4_BASE_R_TEST_ERR_CNT 0x1bac
+
+#define S_BASE_R_TEST_ERR_CNT    0
+#define M_BASE_R_TEST_ERR_CNT    0xffffU
+#define V_BASE_R_TEST_ERR_CNT(x) ((x) << S_BASE_R_TEST_ERR_CNT)
+#define G_BASE_R_TEST_ERR_CNT(x) (((x) >> S_BASE_R_TEST_ERR_CNT) & M_BASE_R_TEST_ERR_CNT)
+
+#define A_MAC_PORT_MTIP_CR4_BER_HIGH_ORDER_CNT 0x1bb0
+
+#define S_BER_HIGH_ORDER_CNT    0
+#define M_BER_HIGH_ORDER_CNT    0xffffU
+#define V_BER_HIGH_ORDER_CNT(x) ((x) << S_BER_HIGH_ORDER_CNT)
+#define G_BER_HIGH_ORDER_CNT(x) (((x) >> S_BER_HIGH_ORDER_CNT) & M_BER_HIGH_ORDER_CNT)
+
+#define A_MAC_PORT_MTIP_CR4_ERR_BLK_HIGH_ORDER_CNT 0x1bb4
+
+#define S_HI_ORDER_CNT_PRESENT    15
+#define V_HI_ORDER_CNT_PRESENT(x) ((x) << S_HI_ORDER_CNT_PRESENT)
+#define F_HI_ORDER_CNT_PRESENT    V_HI_ORDER_CNT_PRESENT(1U)
+
+#define S_ERR_BLKS_CNTR    0
+#define M_ERR_BLKS_CNTR    0x3fffU
+#define V_ERR_BLKS_CNTR(x) ((x) << S_ERR_BLKS_CNTR)
+#define G_ERR_BLKS_CNTR(x) (((x) >> S_ERR_BLKS_CNTR) & M_ERR_BLKS_CNTR)
+
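The errored-blocks count is split across two registers in the usual 802.3 style: the 8 low-order bits live in BASE_R_STATUS_2 and, when F_HI_ORDER_CNT_PRESENT is set, 14 high-order bits live in ERR_BLK_HIGH_ORDER_CNT. A sketch of reassembling the full counter (read ordering and latch-on-read behavior are hardware details not shown; cr4_errored_blocks() is a hypothetical helper):

static uint32_t
cr4_errored_blocks(struct adapter *sc)
{
	uint32_t lo, hi, cnt;

	lo = t4_read_reg(sc, A_MAC_PORT_MTIP_CR4_BASE_R_STATUS_2);
	cnt = G_ERRORED_BLOCKS_CNTR(lo);		/* bits 7:0 */
	hi = t4_read_reg(sc, A_MAC_PORT_MTIP_CR4_ERR_BLK_HIGH_ORDER_CNT);
	if (hi & F_HI_ORDER_CNT_PRESENT)
		cnt |= G_ERR_BLKS_CNTR(hi) << 8;	/* bits 21:8 */
	return (cnt);
}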
+#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_1 0x1bc8
+
+#define S_LANE_ALIGN_STAT    12
+#define V_LANE_ALIGN_STAT(x) ((x) << S_LANE_ALIGN_STAT)
+#define F_LANE_ALIGN_STAT    V_LANE_ALIGN_STAT(1U)
+
+#define S_LANE_7_BLCK_LCK    7
+#define V_LANE_7_BLCK_LCK(x) ((x) << S_LANE_7_BLCK_LCK)
+#define F_LANE_7_BLCK_LCK    V_LANE_7_BLCK_LCK(1U)
+
+#define S_LANE_6_BLCK_LCK    6
+#define V_LANE_6_BLCK_LCK(x) ((x) << S_LANE_6_BLCK_LCK)
+#define F_LANE_6_BLCK_LCK    V_LANE_6_BLCK_LCK(1U)
+
+#define S_LANE_5_BLCK_LCK    5
+#define V_LANE_5_BLCK_LCK(x) ((x) << S_LANE_5_BLCK_LCK)
+#define F_LANE_5_BLCK_LCK    V_LANE_5_BLCK_LCK(1U)
+
+#define S_LANE_4_BLCK_LCK    4
+#define V_LANE_4_BLCK_LCK(x) ((x) << S_LANE_4_BLCK_LCK)
+#define F_LANE_4_BLCK_LCK    V_LANE_4_BLCK_LCK(1U)
+
+#define S_LANE_3_BLCK_LCK    3
+#define V_LANE_3_BLCK_LCK(x) ((x) << S_LANE_3_BLCK_LCK)
+#define F_LANE_3_BLCK_LCK    V_LANE_3_BLCK_LCK(1U)
+
+#define S_LANE_2_BLCK_LCK    2
+#define V_LANE_2_BLCK_LCK(x) ((x) << S_LANE_2_BLCK_LCK)
+#define F_LANE_2_BLCK_LCK    V_LANE_2_BLCK_LCK(1U)
+
+#define S_LANE_1_BLCK_LCK    1
+#define V_LANE_1_BLCK_LCK(x) ((x) << S_LANE_1_BLCK_LCK)
+#define F_LANE_1_BLCK_LCK    V_LANE_1_BLCK_LCK(1U)
+
+#define S_LANE_0_BLCK_LCK    0
+#define V_LANE_0_BLCK_LCK(x) ((x) << S_LANE_0_BLCK_LCK)
+#define F_LANE_0_BLCK_LCK    V_LANE_0_BLCK_LCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_2 0x1bcc
+
+#define S_LANE_19_BLCK_LCK    11
+#define V_LANE_19_BLCK_LCK(x) ((x) << S_LANE_19_BLCK_LCK)
+#define F_LANE_19_BLCK_LCK    V_LANE_19_BLCK_LCK(1U)
+
+#define S_LANE_18_BLCK_LCK    10
+#define V_LANE_18_BLCK_LCK(x) ((x) << S_LANE_18_BLCK_LCK)
+#define F_LANE_18_BLCK_LCK    V_LANE_18_BLCK_LCK(1U)
+
+#define S_LANE_17_BLCK_LCK    9
+#define V_LANE_17_BLCK_LCK(x) ((x) << S_LANE_17_BLCK_LCK)
+#define F_LANE_17_BLCK_LCK    V_LANE_17_BLCK_LCK(1U)
+
+#define S_LANE_16_BLCK_LCK    8
+#define V_LANE_16_BLCK_LCK(x) ((x) << S_LANE_16_BLCK_LCK)
+#define F_LANE_16_BLCK_LCK    V_LANE_16_BLCK_LCK(1U)
+
+#define S_LANE_15_BLCK_LCK    7
+#define V_LANE_15_BLCK_LCK(x) ((x) << S_LANE_15_BLCK_LCK)
+#define F_LANE_15_BLCK_LCK    V_LANE_15_BLCK_LCK(1U)
+
+#define S_LANE_14_BLCK_LCK    6
+#define V_LANE_14_BLCK_LCK(x) ((x) << S_LANE_14_BLCK_LCK)
+#define F_LANE_14_BLCK_LCK    V_LANE_14_BLCK_LCK(1U)
+
+#define S_LANE_13_BLCK_LCK    5
+#define V_LANE_13_BLCK_LCK(x) ((x) << S_LANE_13_BLCK_LCK)
+#define F_LANE_13_BLCK_LCK    V_LANE_13_BLCK_LCK(1U)
+
+#define S_LANE_12_BLCK_LCK    4
+#define V_LANE_12_BLCK_LCK(x) ((x) << S_LANE_12_BLCK_LCK)
+#define F_LANE_12_BLCK_LCK    V_LANE_12_BLCK_LCK(1U)
+
+#define S_LANE_11_BLCK_LCK    3
+#define V_LANE_11_BLCK_LCK(x) ((x) << S_LANE_11_BLCK_LCK)
+#define F_LANE_11_BLCK_LCK    V_LANE_11_BLCK_LCK(1U)
+
+#define S_LANE_10_BLCK_LCK    2
+#define V_LANE_10_BLCK_LCK(x) ((x) << S_LANE_10_BLCK_LCK)
+#define F_LANE_10_BLCK_LCK    V_LANE_10_BLCK_LCK(1U)
+
+#define S_LANE_9_BLCK_LCK    1
+#define V_LANE_9_BLCK_LCK(x) ((x) << S_LANE_9_BLCK_LCK)
+#define F_LANE_9_BLCK_LCK    V_LANE_9_BLCK_LCK(1U)
+
+#define S_LANE_8_BLCK_LCK    0
+#define V_LANE_8_BLCK_LCK(x) ((x) << S_LANE_8_BLCK_LCK)
+#define F_LANE_8_BLCK_LCK    V_LANE_8_BLCK_LCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_3 0x1bd0
+
+#define S_LANE7_ALGN_MRKR_LCK    7
+#define V_LANE7_ALGN_MRKR_LCK(x) ((x) << S_LANE7_ALGN_MRKR_LCK)
+#define F_LANE7_ALGN_MRKR_LCK    V_LANE7_ALGN_MRKR_LCK(1U)
+
+#define S_LANE6_ALGN_MRKR_LCK    6
+#define V_LANE6_ALGN_MRKR_LCK(x) ((x) << S_LANE6_ALGN_MRKR_LCK)
+#define F_LANE6_ALGN_MRKR_LCK    V_LANE6_ALGN_MRKR_LCK(1U)
+
+#define S_LANE5_ALGN_MRKR_LCK    5
+#define V_LANE5_ALGN_MRKR_LCK(x) ((x) << S_LANE5_ALGN_MRKR_LCK)
+#define F_LANE5_ALGN_MRKR_LCK    V_LANE5_ALGN_MRKR_LCK(1U)
+
+#define S_LANE4_ALGN_MRKR_LCK    4
+#define V_LANE4_ALGN_MRKR_LCK(x) ((x) << S_LANE4_ALGN_MRKR_LCK)
+#define F_LANE4_ALGN_MRKR_LCK    V_LANE4_ALGN_MRKR_LCK(1U)
+
+#define S_LANE3_ALGN_MRKR_LCK    3
+#define V_LANE3_ALGN_MRKR_LCK(x) ((x) << S_LANE3_ALGN_MRKR_LCK)
+#define F_LANE3_ALGN_MRKR_LCK    V_LANE3_ALGN_MRKR_LCK(1U)
+
+#define S_LANE2_ALGN_MRKR_LCK    2
+#define V_LANE2_ALGN_MRKR_LCK(x) ((x) << S_LANE2_ALGN_MRKR_LCK)
+#define F_LANE2_ALGN_MRKR_LCK    V_LANE2_ALGN_MRKR_LCK(1U)
+
+#define S_LANE1_ALGN_MRKR_LCK    1
+#define V_LANE1_ALGN_MRKR_LCK(x) ((x) << S_LANE1_ALGN_MRKR_LCK)
+#define F_LANE1_ALGN_MRKR_LCK    V_LANE1_ALGN_MRKR_LCK(1U)
+
+#define S_LANE0_ALGN_MRKR_LCK    0
+#define V_LANE0_ALGN_MRKR_LCK(x) ((x) << S_LANE0_ALGN_MRKR_LCK)
+#define F_LANE0_ALGN_MRKR_LCK    V_LANE0_ALGN_MRKR_LCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_4 0x1bd4
+
+#define S_LANE19_ALGN_MRKR_LCK    11
+#define V_LANE19_ALGN_MRKR_LCK(x) ((x) << S_LANE19_ALGN_MRKR_LCK)
+#define F_LANE19_ALGN_MRKR_LCK    V_LANE19_ALGN_MRKR_LCK(1U)
+
+#define S_LANE18_ALGN_MRKR_LCK    10
+#define V_LANE18_ALGN_MRKR_LCK(x) ((x) << S_LANE18_ALGN_MRKR_LCK)
+#define F_LANE18_ALGN_MRKR_LCK    V_LANE18_ALGN_MRKR_LCK(1U)
+
+#define S_LANE17_ALGN_MRKR_LCK    9
+#define V_LANE17_ALGN_MRKR_LCK(x) ((x) << S_LANE17_ALGN_MRKR_LCK)
+#define F_LANE17_ALGN_MRKR_LCK    V_LANE17_ALGN_MRKR_LCK(1U)
+
+#define S_LANE16_ALGN_MRKR_LCK    8
+#define V_LANE16_ALGN_MRKR_LCK(x) ((x) << S_LANE16_ALGN_MRKR_LCK)
+#define F_LANE16_ALGN_MRKR_LCK    V_LANE16_ALGN_MRKR_LCK(1U)
+
+#define S_LANE15_ALGN_MRKR_LCK    7
+#define V_LANE15_ALGN_MRKR_LCK(x) ((x) << S_LANE15_ALGN_MRKR_LCK)
+#define F_LANE15_ALGN_MRKR_LCK    V_LANE15_ALGN_MRKR_LCK(1U)
+
+#define S_LANE14_ALGN_MRKR_LCK    6
+#define V_LANE14_ALGN_MRKR_LCK(x) ((x) << S_LANE14_ALGN_MRKR_LCK)
+#define F_LANE14_ALGN_MRKR_LCK    V_LANE14_ALGN_MRKR_LCK(1U)
+
+#define S_LANE13_ALGN_MRKR_LCK    5
+#define V_LANE13_ALGN_MRKR_LCK(x) ((x) << S_LANE13_ALGN_MRKR_LCK)
+#define F_LANE13_ALGN_MRKR_LCK    V_LANE13_ALGN_MRKR_LCK(1U)
+
+#define S_LANE12_ALGN_MRKR_LCK    4
+#define V_LANE12_ALGN_MRKR_LCK(x) ((x) << S_LANE12_ALGN_MRKR_LCK)
+#define F_LANE12_ALGN_MRKR_LCK    V_LANE12_ALGN_MRKR_LCK(1U)
+
+#define S_LANE11_ALGN_MRKR_LCK    3
+#define V_LANE11_ALGN_MRKR_LCK(x) ((x) << S_LANE11_ALGN_MRKR_LCK)
+#define F_LANE11_ALGN_MRKR_LCK    V_LANE11_ALGN_MRKR_LCK(1U)
+
+#define S_LANE10_ALGN_MRKR_LCK    2
+#define V_LANE10_ALGN_MRKR_LCK(x) ((x) << S_LANE10_ALGN_MRKR_LCK)
+#define F_LANE10_ALGN_MRKR_LCK    V_LANE10_ALGN_MRKR_LCK(1U)
+
+#define S_LANE9_ALGN_MRKR_LCK    1
+#define V_LANE9_ALGN_MRKR_LCK(x) ((x) << S_LANE9_ALGN_MRKR_LCK)
+#define F_LANE9_ALGN_MRKR_LCK    V_LANE9_ALGN_MRKR_LCK(1U)
+
+#define S_LANE8_ALGN_MRKR_LCK    0
+#define V_LANE8_ALGN_MRKR_LCK(x) ((x) << S_LANE8_ALGN_MRKR_LCK)
+#define F_LANE8_ALGN_MRKR_LCK    V_LANE8_ALGN_MRKR_LCK(1U)
+
+#define A_MAC_PORT_MTIP_PCS_CTL 0x1e00
+
+#define S_PCS_LPBK    14
+#define V_PCS_LPBK(x) ((x) << S_PCS_LPBK)
+#define F_PCS_LPBK    V_PCS_LPBK(1U)
+
+#define S_SPEED_SEL1    13
+#define V_SPEED_SEL1(x) ((x) << S_SPEED_SEL1)
+#define F_SPEED_SEL1    V_SPEED_SEL1(1U)
+
+#define S_LP_MODE    11
+#define V_LP_MODE(x) ((x) << S_LP_MODE)
+#define F_LP_MODE    V_LP_MODE(1U)
+
+#define S_SPEED_SEL0    6
+#define V_SPEED_SEL0(x) ((x) << S_SPEED_SEL0)
+#define F_SPEED_SEL0    V_SPEED_SEL0(1U)
+
+#define S_PCS_SPEED    2
+#define M_PCS_SPEED    0xfU
+#define V_PCS_SPEED(x) ((x) << S_PCS_SPEED)
+#define G_PCS_SPEED(x) (((x) >> S_PCS_SPEED) & M_PCS_SPEED)
+
+#define A_MAC_PORT_MTIP_PCS_STATUS1 0x1e04
+
+#define S_FAULTDET    7
+#define V_FAULTDET(x) ((x) << S_FAULTDET)
+#define F_FAULTDET    V_FAULTDET(1U)
+
+#define S_RX_LINK_STATUS    2
+#define V_RX_LINK_STATUS(x) ((x) << S_RX_LINK_STATUS)
+#define F_RX_LINK_STATUS    V_RX_LINK_STATUS(1U)
+
+#define S_LOPWRABL    1
+#define V_LOPWRABL(x) ((x) << S_LOPWRABL)
+#define F_LOPWRABL    V_LOPWRABL(1U)
+
+#define A_MAC_PORT_MTIP_PCS_DEVICE_ID0 0x1e08
+
+#define S_DEVICE_ID0    0
+#define M_DEVICE_ID0    0xffffU
+#define V_DEVICE_ID0(x) ((x) << S_DEVICE_ID0)
+#define G_DEVICE_ID0(x) (((x) >> S_DEVICE_ID0) & M_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_PCS_DEVICE_ID1 0x1e0c
+
+#define S_DEVICE_ID1    0
+#define M_DEVICE_ID1    0xffffU
+#define V_DEVICE_ID1(x) ((x) << S_DEVICE_ID1)
+#define G_DEVICE_ID1(x) (((x) >> S_DEVICE_ID1) & M_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_PCS_SPEED_ABILITY 0x1e10
+
+#define S_100G    8
+#define V_100G(x) ((x) << S_100G)
+#define F_100G    V_100G(1U)
+
+#define S_40G    7
+#define V_40G(x) ((x) << S_40G)
+#define F_40G    V_40G(1U)
+
+#define S_10BASE_TL    1
+#define V_10BASE_TL(x) ((x) << S_10BASE_TL)
+#define F_10BASE_TL    V_10BASE_TL(1U)
+
+#define S_10G    0
+#define V_10G(x) ((x) << S_10G)
+#define F_10G    V_10G(1U)
+
+#define A_MAC_PORT_MTIP_PCS_DEVICE_PKG1 0x1e14
+
+#define S_TC_PRESENT    6
+#define V_TC_PRESENT(x) ((x) << S_TC_PRESENT)
+#define F_TC_PRESENT    V_TC_PRESENT(1U)
+
+#define S_DTEXS    5
+#define V_DTEXS(x) ((x) << S_DTEXS)
+#define F_DTEXS    V_DTEXS(1U)
+
+#define S_PHYXS    4
+#define V_PHYXS(x) ((x) << S_PHYXS)
+#define F_PHYXS    V_PHYXS(1U)
+
+#define S_PCS    3
+#define V_PCS(x) ((x) << S_PCS)
+#define F_PCS    V_PCS(1U)
+
+#define S_WIS    2
+#define V_WIS(x) ((x) << S_WIS)
+#define F_WIS    V_WIS(1U)
+
+#define S_PMD_PMA    1
+#define V_PMD_PMA(x) ((x) << S_PMD_PMA)
+#define F_PMD_PMA    V_PMD_PMA(1U)
+
+#define S_CL22    0
+#define V_CL22(x) ((x) << S_CL22)
+#define F_CL22    V_CL22(1U)
+
+#define A_MAC_PORT_MTIP_PCS_DEVICE_PKG2 0x1e18
+
+#define S_VENDDEV2    15
+#define V_VENDDEV2(x) ((x) << S_VENDDEV2)
+#define F_VENDDEV2    V_VENDDEV2(1U)
+
+#define S_VENDDEV1    14
+#define V_VENDDEV1(x) ((x) << S_VENDDEV1)
+#define F_VENDDEV1    V_VENDDEV1(1U)
+
+#define S_CL22EXT    13
+#define V_CL22EXT(x) ((x) << S_CL22EXT)
+#define F_CL22EXT    V_CL22EXT(1U)
+
+#define A_MAC_PORT_MTIP_PCS_CTL2 0x1e1c
+
+#define S_PCSTYPE    0
+#define M_PCSTYPE    0x7U
+#define V_PCSTYPE(x) ((x) << S_PCSTYPE)
+#define G_PCSTYPE(x) (((x) >> S_PCSTYPE) & M_PCSTYPE)
+
+#define A_MAC_PORT_MTIP_PCS_STATUS2 0x1e20
+
+#define S_PCS_STAT2_DEVICE    15
+#define V_PCS_STAT2_DEVICE(x) ((x) << S_PCS_STAT2_DEVICE)
+#define F_PCS_STAT2_DEVICE    V_PCS_STAT2_DEVICE(1U)
+
+#define S_TXFAULT    7
+#define V_TXFAULT(x) ((x) << S_TXFAULT)
+#define F_TXFAULT    V_TXFAULT(1U)
+
+#define S_RXFAULT    6
+#define V_RXFAULT(x) ((x) << S_RXFAULT)
+#define F_RXFAULT    V_RXFAULT(1U)
+
+#define S_100GBASE_R    5
+#define V_100GBASE_R(x) ((x) << S_100GBASE_R)
+#define F_100GBASE_R    V_100GBASE_R(1U)
+
+#define S_40GBASE_R    4
+#define V_40GBASE_R(x) ((x) << S_40GBASE_R)
+#define F_40GBASE_R    V_40GBASE_R(1U)
+
+#define S_10GBASE_T    3
+#define V_10GBASE_T(x) ((x) << S_10GBASE_T)
+#define F_10GBASE_T    V_10GBASE_T(1U)
+
+#define S_10GBASE_W    2
+#define V_10GBASE_W(x) ((x) << S_10GBASE_W)
+#define F_10GBASE_W    V_10GBASE_W(1U)
+
+#define S_10GBASE_X    1
+#define V_10GBASE_X(x) ((x) << S_10GBASE_X)
+#define F_10GBASE_X    V_10GBASE_X(1U)
+
+#define S_10GBASE_R    0
+#define V_10GBASE_R(x) ((x) << S_10GBASE_R)
+#define F_10GBASE_R    V_10GBASE_R(1U)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_0 0x1e20
+
+#define S_BIP_ERR_CNTLANE_0    0
+#define M_BIP_ERR_CNTLANE_0    0xffffU
+#define V_BIP_ERR_CNTLANE_0(x) ((x) << S_BIP_ERR_CNTLANE_0)
+#define G_BIP_ERR_CNTLANE_0(x) (((x) >> S_BIP_ERR_CNTLANE_0) & M_BIP_ERR_CNTLANE_0)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_1 0x1e24
+
+#define S_BIP_ERR_CNTLANE_1    0
+#define M_BIP_ERR_CNTLANE_1    0xffffU
+#define V_BIP_ERR_CNTLANE_1(x) ((x) << S_BIP_ERR_CNTLANE_1)
+#define G_BIP_ERR_CNTLANE_1(x) (((x) >> S_BIP_ERR_CNTLANE_1) & M_BIP_ERR_CNTLANE_1)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_2 0x1e28
+
+#define S_BIP_ERR_CNTLANE_2    0
+#define M_BIP_ERR_CNTLANE_2    0xffffU
+#define V_BIP_ERR_CNTLANE_2(x) ((x) << S_BIP_ERR_CNTLANE_2)
+#define G_BIP_ERR_CNTLANE_2(x) (((x) >> S_BIP_ERR_CNTLANE_2) & M_BIP_ERR_CNTLANE_2)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_3 0x1e2c
+
+#define S_BIP_ERR_CNTLANE_3    0
+#define M_BIP_ERR_CNTLANE_3    0xffffU
+#define V_BIP_ERR_CNTLANE_3(x) ((x) << S_BIP_ERR_CNTLANE_3)
+#define G_BIP_ERR_CNTLANE_3(x) (((x) >> S_BIP_ERR_CNTLANE_3) & M_BIP_ERR_CNTLANE_3)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_4 0x1e30
+
+#define S_BIP_ERR_CNTLANE_4    0
+#define M_BIP_ERR_CNTLANE_4    0xffffU
+#define V_BIP_ERR_CNTLANE_4(x) ((x) << S_BIP_ERR_CNTLANE_4)
+#define G_BIP_ERR_CNTLANE_4(x) (((x) >> S_BIP_ERR_CNTLANE_4) & M_BIP_ERR_CNTLANE_4)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_5 0x1e34
+
+#define S_BIP_ERR_CNTLANE_5    0
+#define M_BIP_ERR_CNTLANE_5    0xffffU
+#define V_BIP_ERR_CNTLANE_5(x) ((x) << S_BIP_ERR_CNTLANE_5)
+#define G_BIP_ERR_CNTLANE_5(x) (((x) >> S_BIP_ERR_CNTLANE_5) & M_BIP_ERR_CNTLANE_5)
+
+#define A_MAC_PORT_MTIP_PCS_PKG_ID0 0x1e38
+
+#define S_PKG_ID0    0
+#define M_PKG_ID0    0xffffU
+#define V_PKG_ID0(x) ((x) << S_PKG_ID0)
+#define G_PKG_ID0(x) (((x) >> S_PKG_ID0) & M_PKG_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_6 0x1e38
+
+#define S_BIP_ERR_CNTLANE_6    0
+#define M_BIP_ERR_CNTLANE_6    0xffffU
+#define V_BIP_ERR_CNTLANE_6(x) ((x) << S_BIP_ERR_CNTLANE_6)
+#define G_BIP_ERR_CNTLANE_6(x) (((x) >> S_BIP_ERR_CNTLANE_6) & M_BIP_ERR_CNTLANE_6)
+
+#define A_MAC_PORT_MTIP_PCS_PKG_ID1 0x1e3c
+
+#define S_PKG_ID1    0
+#define M_PKG_ID1    0xffffU
+#define V_PKG_ID1(x) ((x) << S_PKG_ID1)
+#define G_PKG_ID1(x) (((x) >> S_PKG_ID1) & M_PKG_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_7 0x1e3c
+
+#define S_BIP_ERR_CNTLANE_7    0
+#define M_BIP_ERR_CNTLANE_7    0xffffU
+#define V_BIP_ERR_CNTLANE_7(x) ((x) << S_BIP_ERR_CNTLANE_7)
+#define G_BIP_ERR_CNTLANE_7(x) (((x) >> S_BIP_ERR_CNTLANE_7) & M_BIP_ERR_CNTLANE_7)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_8 0x1e40
+
+#define S_BIP_ERR_CNTLANE_8    0
+#define M_BIP_ERR_CNTLANE_8    0xffffU
+#define V_BIP_ERR_CNTLANE_8(x) ((x) << S_BIP_ERR_CNTLANE_8)
+#define G_BIP_ERR_CNTLANE_8(x) (((x) >> S_BIP_ERR_CNTLANE_8) & M_BIP_ERR_CNTLANE_8)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_9 0x1e44
+
+#define S_BIP_ERR_CNTLANE_9    0
+#define M_BIP_ERR_CNTLANE_9    0xffffU
+#define V_BIP_ERR_CNTLANE_9(x) ((x) << S_BIP_ERR_CNTLANE_9)
+#define G_BIP_ERR_CNTLANE_9(x) (((x) >> S_BIP_ERR_CNTLANE_9) & M_BIP_ERR_CNTLANE_9)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_10 0x1e48
+
+#define S_BIP_ERR_CNTLANE_10    0
+#define M_BIP_ERR_CNTLANE_10    0xffffU
+#define V_BIP_ERR_CNTLANE_10(x) ((x) << S_BIP_ERR_CNTLANE_10)
+#define G_BIP_ERR_CNTLANE_10(x) (((x) >> S_BIP_ERR_CNTLANE_10) & M_BIP_ERR_CNTLANE_10)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_11 0x1e4c
+
+#define S_BIP_ERR_CNTLANE_11    0
+#define M_BIP_ERR_CNTLANE_11    0xffffU
+#define V_BIP_ERR_CNTLANE_11(x) ((x) << S_BIP_ERR_CNTLANE_11)
+#define G_BIP_ERR_CNTLANE_11(x) (((x) >> S_BIP_ERR_CNTLANE_11) & M_BIP_ERR_CNTLANE_11)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_12 0x1e50
+
+#define S_BIP_ERR_CNTLANE_12    0
+#define M_BIP_ERR_CNTLANE_12    0xffffU
+#define V_BIP_ERR_CNTLANE_12(x) ((x) << S_BIP_ERR_CNTLANE_12)
+#define G_BIP_ERR_CNTLANE_12(x) (((x) >> S_BIP_ERR_CNTLANE_12) & M_BIP_ERR_CNTLANE_12)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_13 0x1e54
+
+#define S_BIP_ERR_CNTLANE_13    0
+#define M_BIP_ERR_CNTLANE_13    0xffffU
+#define V_BIP_ERR_CNTLANE_13(x) ((x) << S_BIP_ERR_CNTLANE_13)
+#define G_BIP_ERR_CNTLANE_13(x) (((x) >> S_BIP_ERR_CNTLANE_13) & M_BIP_ERR_CNTLANE_13)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_14 0x1e58
+
+#define S_BIP_ERR_CNTLANE_14    0
+#define M_BIP_ERR_CNTLANE_14    0xffffU
+#define V_BIP_ERR_CNTLANE_14(x) ((x) << S_BIP_ERR_CNTLANE_14)
+#define G_BIP_ERR_CNTLANE_14(x) (((x) >> S_BIP_ERR_CNTLANE_14) & M_BIP_ERR_CNTLANE_14)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_15 0x1e5c
+
+#define S_BIP_ERR_CNTLANE_15    0
+#define M_BIP_ERR_CNTLANE_15    0xffffU
+#define V_BIP_ERR_CNTLANE_15(x) ((x) << S_BIP_ERR_CNTLANE_15)
+#define G_BIP_ERR_CNTLANE_15(x) (((x) >> S_BIP_ERR_CNTLANE_15) & M_BIP_ERR_CNTLANE_15)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_16 0x1e60
+
+#define S_BIP_ERR_CNTLANE_16    0
+#define M_BIP_ERR_CNTLANE_16    0xffffU
+#define V_BIP_ERR_CNTLANE_16(x) ((x) << S_BIP_ERR_CNTLANE_16)
+#define G_BIP_ERR_CNTLANE_16(x) (((x) >> S_BIP_ERR_CNTLANE_16) & M_BIP_ERR_CNTLANE_16)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_17 0x1e64
+
+#define S_BIP_ERR_CNTLANE_17    0
+#define M_BIP_ERR_CNTLANE_17    0xffffU
+#define V_BIP_ERR_CNTLANE_17(x) ((x) << S_BIP_ERR_CNTLANE_17)
+#define G_BIP_ERR_CNTLANE_17(x) (((x) >> S_BIP_ERR_CNTLANE_17) & M_BIP_ERR_CNTLANE_17)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_18 0x1e68
+
+#define S_BIP_ERR_CNTLANE_18    0
+#define M_BIP_ERR_CNTLANE_18    0xffffU
+#define V_BIP_ERR_CNTLANE_18(x) ((x) << S_BIP_ERR_CNTLANE_18)
+#define G_BIP_ERR_CNTLANE_18(x) (((x) >> S_BIP_ERR_CNTLANE_18) & M_BIP_ERR_CNTLANE_18)
+
+#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_19 0x1e6c
+
+#define S_BIP_ERR_CNTLANE_19    0
+#define M_BIP_ERR_CNTLANE_19    0xffffU
+#define V_BIP_ERR_CNTLANE_19(x) ((x) << S_BIP_ERR_CNTLANE_19)
+#define G_BIP_ERR_CNTLANE_19(x) (((x) >> S_BIP_ERR_CNTLANE_19) & M_BIP_ERR_CNTLANE_19)
+
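The twenty per-lane BIP error counters sit at a fixed 4-byte stride from lane 0, and every lane uses the same 16-bit field layout, so one G_ macro serves for all of them. A sketch that sums the lanes (it assumes the counters clear on read, or that the caller tracks deltas; cr4_bip_errors() is a hypothetical helper):

static uint64_t
cr4_bip_errors(struct adapter *sc)
{
	uint64_t total = 0;
	int lane;

	/* Lanes 0-19 occupy consecutive 32-bit registers from 0x1e20. */
	for (lane = 0; lane < 20; lane++)
		total += G_BIP_ERR_CNTLANE_0(t4_read_reg(sc,
		    A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_0 + 4 * lane));
	return (total);
}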
+#define A_MAC_PORT_MTIP_PCS_BASER_STATUS1 0x1e80
+
+#define S_RXLINKSTATUS    12
+#define V_RXLINKSTATUS(x) ((x) << S_RXLINKSTATUS)
+#define F_RXLINKSTATUS    V_RXLINKSTATUS(1U)
+
+#define S_RESERVED    4
+#define M_RESERVED    0xffU
+#define V_RESERVED(x) ((x) << S_RESERVED)
+#define G_RESERVED(x) (((x) >> S_RESERVED) & M_RESERVED)
+
+#define S_10GPRBS9    3
+#define V_10GPRBS9(x) ((x) << S_10GPRBS9)
+#define F_10GPRBS9    V_10GPRBS9(1U)
+
+#define S_10GPRBS31    2
+#define V_10GPRBS31(x) ((x) << S_10GPRBS31)
+#define F_10GPRBS31    V_10GPRBS31(1U)
+
+#define S_HIBER    1
+#define V_HIBER(x) ((x) << S_HIBER)
+#define F_HIBER    V_HIBER(1U)
+
+#define S_BLOCKLOCK    0
+#define V_BLOCKLOCK(x) ((x) << S_BLOCKLOCK)
+#define F_BLOCKLOCK    V_BLOCKLOCK(1U)
+
+#define A_MAC_PORT_MTIP_PCS_BASER_STATUS2 0x1e84
+
+#define S_BLOCKLOCKLL    15
+#define V_BLOCKLOCKLL(x) ((x) << S_BLOCKLOCKLL)
+#define F_BLOCKLOCKLL    V_BLOCKLOCKLL(1U)
+
+#define S_HIBERLH    14
+#define V_HIBERLH(x) ((x) << S_HIBERLH)
+#define F_HIBERLH    V_HIBERLH(1U)
+
+#define S_HIBERCOUNT    8
+#define M_HIBERCOUNT    0x3fU
+#define V_HIBERCOUNT(x) ((x) << S_HIBERCOUNT)
+#define G_HIBERCOUNT(x) (((x) >> S_HIBERCOUNT) & M_HIBERCOUNT)
+
+#define S_ERRBLKCNT    0
+#define M_ERRBLKCNT    0xffU
+#define V_ERRBLKCNT(x) ((x) << S_ERRBLKCNT)
+#define G_ERRBLKCNT(x) (((x) >> S_ERRBLKCNT) & M_ERRBLKCNT)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_A 0x1e88
+
+#define S_SEEDA    0
+#define M_SEEDA    0xffffU
+#define V_SEEDA(x) ((x) << S_SEEDA)
+#define G_SEEDA(x) (((x) >> S_SEEDA) & M_SEEDA)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_A1 0x1e8c
+
+#define S_SEEDA1    0
+#define M_SEEDA1    0xffffU
+#define V_SEEDA1(x) ((x) << S_SEEDA1)
+#define G_SEEDA1(x) (((x) >> S_SEEDA1) & M_SEEDA1)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_A2 0x1e90
+
+#define S_SEEDA2    0
+#define M_SEEDA2    0xffffU
+#define V_SEEDA2(x) ((x) << S_SEEDA2)
+#define G_SEEDA2(x) (((x) >> S_SEEDA2) & M_SEEDA2)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_A3 0x1e94
+
+#define S_SEEDA3    0
+#define M_SEEDA3    0x3ffU
+#define V_SEEDA3(x) ((x) << S_SEEDA3)
+#define G_SEEDA3(x) (((x) >> S_SEEDA3) & M_SEEDA3)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_B 0x1e98
+
+#define S_SEEDB    0
+#define M_SEEDB    0xffffU
+#define V_SEEDB(x) ((x) << S_SEEDB)
+#define G_SEEDB(x) (((x) >> S_SEEDB) & M_SEEDB)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_B1 0x1e9c
+
+#define S_SEEDB1    0
+#define M_SEEDB1    0xffffU
+#define V_SEEDB1(x) ((x) << S_SEEDB1)
+#define G_SEEDB1(x) (((x) >> S_SEEDB1) & M_SEEDB1)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_B2 0x1ea0
+
+#define S_SEEDB2    0
+#define M_SEEDB2    0xffffU
+#define V_SEEDB2(x) ((x) << S_SEEDB2)
+#define G_SEEDB2(x) (((x) >> S_SEEDB2) & M_SEEDB2)
+
+#define A_MAC_PORT_MTIP_10GBASER_SEED_B3 0x1ea4
+
+#define S_SEEDB3    0
+#define M_SEEDB3    0x3ffU
+#define V_SEEDB3(x) ((x) << S_SEEDB3)
+#define G_SEEDB3(x) (((x) >> S_SEEDB3) & M_SEEDB3)
+
+#define A_MAC_PORT_MTIP_BASER_TEST_CTRL 0x1ea8
+
+#define S_TXPRBS9    6
+#define V_TXPRBS9(x) ((x) << S_TXPRBS9)
+#define F_TXPRBS9    V_TXPRBS9(1U)
+
+#define S_RXPRBS31    5
+#define V_RXPRBS31(x) ((x) << S_RXPRBS31)
+#define F_RXPRBS31    V_RXPRBS31(1U)
+
+#define S_TXPRBS31    4
+#define V_TXPRBS31(x) ((x) << S_TXPRBS31)
+#define F_TXPRBS31    V_TXPRBS31(1U)
+
+#define S_TXTESTPATEN    3
+#define V_TXTESTPATEN(x) ((x) << S_TXTESTPATEN)
+#define F_TXTESTPATEN    V_TXTESTPATEN(1U)
+
+#define S_RXTESTPATEN    2
+#define V_RXTESTPATEN(x) ((x) << S_RXTESTPATEN)
+#define F_RXTESTPATEN    V_RXTESTPATEN(1U)
+
+#define S_TESTPATSEL    1
+#define V_TESTPATSEL(x) ((x) << S_TESTPATSEL)
+#define F_TESTPATSEL    V_TESTPATSEL(1U)
+
+#define S_DATAPATSEL    0
+#define V_DATAPATSEL(x) ((x) << S_DATAPATSEL)
+#define F_DATAPATSEL    V_DATAPATSEL(1U)
+
+#define A_MAC_PORT_MTIP_BASER_TEST_ERR_CNT 0x1eac
+
+#define S_TEST_ERR_CNT    0
+#define M_TEST_ERR_CNT    0xffffU
+#define V_TEST_ERR_CNT(x) ((x) << S_TEST_ERR_CNT)
+#define G_TEST_ERR_CNT(x) (((x) >> S_TEST_ERR_CNT) & M_TEST_ERR_CNT)
+
+#define A_MAC_PORT_MTIP_BER_HIGH_ORDER_CNT 0x1eb0
+
+#define S_BER_CNT_HI    0
+#define M_BER_CNT_HI    0xffffU
+#define V_BER_CNT_HI(x) ((x) << S_BER_CNT_HI)
+#define G_BER_CNT_HI(x) (((x) >> S_BER_CNT_HI) & M_BER_CNT_HI)
+
+#define A_MAC_PORT_MTIP_BLK_HIGH_ORDER_CNT 0x1eb4
+
+#define S_HICOUNTPRSNT    15
+#define V_HICOUNTPRSNT(x) ((x) << S_HICOUNTPRSNT)
+#define F_HICOUNTPRSNT    V_HICOUNTPRSNT(1U)
+
+#define S_BLOCK_CNT_HI    0
+#define M_BLOCK_CNT_HI    0x3fffU
+#define V_BLOCK_CNT_HI(x) ((x) << S_BLOCK_CNT_HI)
+#define G_BLOCK_CNT_HI(x) (((x) >> S_BLOCK_CNT_HI) & M_BLOCK_CNT_HI)
+
+#define A_MAC_PORT_MTIP_PCS_MULTI_LANE_ALIGN_STATUS1 0x1ec8
+
+#define S_ALIGNSTATUS    12
+#define V_ALIGNSTATUS(x) ((x) << S_ALIGNSTATUS)
+#define F_ALIGNSTATUS    V_ALIGNSTATUS(1U)
+
+#define S_LANE7    7
+#define V_LANE7(x) ((x) << S_LANE7)
+#define F_LANE7    V_LANE7(1U)
+
+#define S_LANE6    6
+#define V_LANE6(x) ((x) << S_LANE6)
+#define F_LANE6    V_LANE6(1U)
+
+#define S_LANE5    5
+#define V_LANE5(x) ((x) << S_LANE5)
+#define F_LANE5    V_LANE5(1U)
+
+#define S_LANE4    4
+#define V_LANE4(x) ((x) << S_LANE4)
+#define F_LANE4    V_LANE4(1U)
+
+#define S_LANE3    3
+#define V_LANE3(x) ((x) << S_LANE3)
+#define F_LANE3    V_LANE3(1U)
+
+#define S_LANE2    2
+#define V_LANE2(x) ((x) << S_LANE2)
+#define F_LANE2    V_LANE2(1U)
+
+#define S_LANE1    1
+#define V_LANE1(x) ((x) << S_LANE1)
+#define F_LANE1    V_LANE1(1U)
+
+#define S_LANE0    0
+#define V_LANE0(x) ((x) << S_LANE0)
+#define F_LANE0    V_LANE0(1U)
+
+#define A_MAC_PORT_MTIP_PCS_MULTI_LANE_ALIGN_STATUS2 0x1ecc
+
+#define S_LANE19    11
+#define V_LANE19(x) ((x) << S_LANE19)
+#define F_LANE19    V_LANE19(1U)
+
+#define S_LANE18    10
+#define V_LANE18(x) ((x) << S_LANE18)
+#define F_LANE18    V_LANE18(1U)
+
+#define S_LANE17    9
+#define V_LANE17(x) ((x) << S_LANE17)
+#define F_LANE17    V_LANE17(1U)
+
+#define S_LANE16    8
+#define V_LANE16(x) ((x) << S_LANE16)
+#define F_LANE16    V_LANE16(1U)
+
+#define S_LANE15    7
+#define V_LANE15(x) ((x) << S_LANE15)
+#define F_LANE15    V_LANE15(1U)
+
+#define S_LANE14    6
+#define V_LANE14(x) ((x) << S_LANE14)
+#define F_LANE14    V_LANE14(1U)
+
+#define S_LANE13    5
+#define V_LANE13(x) ((x) << S_LANE13)
+#define F_LANE13    V_LANE13(1U)
+
+#define S_LANE12    4
+#define V_LANE12(x) ((x) << S_LANE12)
+#define F_LANE12    V_LANE12(1U)
+
+#define S_LANE11    3
+#define V_LANE11(x) ((x) << S_LANE11)
+#define F_LANE11    V_LANE11(1U)
+
+#define S_LANE10    2
+#define V_LANE10(x) ((x) << S_LANE10)
+#define F_LANE10    V_LANE10(1U)
+
+#define S_LANE9    1
+#define V_LANE9(x) ((x) << S_LANE9)
+#define F_LANE9    V_LANE9(1U)
+
+#define S_LANE8    0
+#define V_LANE8(x) ((x) << S_LANE8)
+#define F_LANE8    V_LANE8(1U)
+
+#define A_MAC_PORT_MTIP_PCS_MULTI_LANE_ALIGN_STATUS3 0x1ed0
+
+#define S_AMLOCK7    7
+#define V_AMLOCK7(x) ((x) << S_AMLOCK7)
+#define F_AMLOCK7    V_AMLOCK7(1U)
+
+#define S_AMLOCK6    6
+#define V_AMLOCK6(x) ((x) << S_AMLOCK6)
+#define F_AMLOCK6    V_AMLOCK6(1U)
+
+#define S_AMLOCK5    5
+#define V_AMLOCK5(x) ((x) << S_AMLOCK5)
+#define F_AMLOCK5    V_AMLOCK5(1U)
+
+#define S_AMLOCK4    4
+#define V_AMLOCK4(x) ((x) << S_AMLOCK4)
+#define F_AMLOCK4    V_AMLOCK4(1U)
+
+#define S_AMLOCK3    3
+#define V_AMLOCK3(x) ((x) << S_AMLOCK3)
+#define F_AMLOCK3    V_AMLOCK3(1U)
+
+#define S_AMLOCK2    2
+#define V_AMLOCK2(x) ((x) << S_AMLOCK2)
+#define F_AMLOCK2    V_AMLOCK2(1U)
+
+#define S_AMLOCK1    1
+#define V_AMLOCK1(x) ((x) << S_AMLOCK1)
+#define F_AMLOCK1    V_AMLOCK1(1U)
+
+#define S_AMLOCK0    0
+#define V_AMLOCK0(x) ((x) << S_AMLOCK0)
+#define F_AMLOCK0    V_AMLOCK0(1U)
+
+#define A_MAC_PORT_MTIP_PCS_MULTI_LANE_ALIGN_STATUS4 0x1ed4
+
+#define S_AMLOCK19    11
+#define V_AMLOCK19(x) ((x) << S_AMLOCK19)
+#define F_AMLOCK19    V_AMLOCK19(1U)
+
+#define S_AMLOCK18    10
+#define V_AMLOCK18(x) ((x) << S_AMLOCK18)
+#define F_AMLOCK18    V_AMLOCK18(1U)
+
+#define S_AMLOCK17    9
+#define V_AMLOCK17(x) ((x) << S_AMLOCK17)
+#define F_AMLOCK17    V_AMLOCK17(1U)
+
+#define S_AMLOCK16    8
+#define V_AMLOCK16(x) ((x) << S_AMLOCK16)
+#define F_AMLOCK16    V_AMLOCK16(1U)
+
+#define S_AMLOCK15    7
+#define V_AMLOCK15(x) ((x) << S_AMLOCK15)
+#define F_AMLOCK15    V_AMLOCK15(1U)
+
+#define S_AMLOCK14    6
+#define V_AMLOCK14(x) ((x) << S_AMLOCK14)
+#define F_AMLOCK14    V_AMLOCK14(1U)
+
+#define S_AMLOCK13    5
+#define V_AMLOCK13(x) ((x) << S_AMLOCK13)
+#define F_AMLOCK13    V_AMLOCK13(1U)
+
+#define S_AMLOCK12    4
+#define V_AMLOCK12(x) ((x) << S_AMLOCK12)
+#define F_AMLOCK12    V_AMLOCK12(1U)
+
+#define S_AMLOCK11    3
+#define V_AMLOCK11(x) ((x) << S_AMLOCK11)
+#define F_AMLOCK11    V_AMLOCK11(1U)
+
+#define S_AMLOCK10    2
+#define V_AMLOCK10(x) ((x) << S_AMLOCK10)
+#define F_AMLOCK10    V_AMLOCK10(1U)
+
+#define S_AMLOCK9    1
+#define V_AMLOCK9(x) ((x) << S_AMLOCK9)
+#define F_AMLOCK9    V_AMLOCK9(1U)
+
+#define S_AMLOCK8    0
+#define V_AMLOCK8(x) ((x) << S_AMLOCK8)
+#define F_AMLOCK8    V_AMLOCK8(1U)
+
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_0 0x1f68
+
+#define S_BIPERR_CNT    0
+#define M_BIPERR_CNT    0xffffU
+#define V_BIPERR_CNT(x) ((x) << S_BIPERR_CNT)
+#define G_BIPERR_CNT(x) (((x) >> S_BIPERR_CNT) & M_BIPERR_CNT)
+
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_1 0x1f6c
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_2 0x1f70
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_3 0x1f74
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_4 0x1f78
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_5 0x1f7c
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_6 0x1f80
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_7 0x1f84
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_8 0x1f88
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_9 0x1f8c
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_10 0x1f90
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_11 0x1f94
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_12 0x1f98
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_13 0x1f9c
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_14 0x1fa0
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_15 0x1fa4
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_16 0x1fa8
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_17 0x1fac
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_18 0x1fb0
+#define A_MAC_PORT_MTIP_PCS_BIP_ERR_CNT_19 0x1fb4
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_0 0x1fb8
+
+#define S_MAP    0
+#define M_MAP    0x1fU
+#define V_MAP(x) ((x) << S_MAP)
+#define G_MAP(x) (((x) >> S_MAP) & M_MAP)
+
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_1 0x1fbc
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_2 0x1fc0
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_3 0x1fc4
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_4 0x1fc8
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_5 0x1fcc
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_6 0x1fd0
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_7 0x1fd4
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_8 0x1fd8
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_9 0x1fdc
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_10 0x1fe0
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_11 0x1fe4
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_12 0x1fe8
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_13 0x1fec
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_14 0x1ff0
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_15 0x1ff4
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_16 0x1ff8
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_17 0x1ffc
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_18 0x2000
+#define A_MAC_PORT_MTIP_PCS_LANE_MAP_19 0x2004
+#define A_MAC_PORT_MTIP_CR4_LANE_0_MAPPING 0x2140
+
+#define S_LANE_0_MAPPING    0
+#define M_LANE_0_MAPPING    0x3fU
+#define V_LANE_0_MAPPING(x) ((x) << S_LANE_0_MAPPING)
+#define G_LANE_0_MAPPING(x) (((x) >> S_LANE_0_MAPPING) & M_LANE_0_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_1_MAPPING 0x2144
+
+#define S_LANE_1_MAPPING    0
+#define M_LANE_1_MAPPING    0x3fU
+#define V_LANE_1_MAPPING(x) ((x) << S_LANE_1_MAPPING)
+#define G_LANE_1_MAPPING(x) (((x) >> S_LANE_1_MAPPING) & M_LANE_1_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_2_MAPPING 0x2148
+
+#define S_LANE_2_MAPPING    0
+#define M_LANE_2_MAPPING    0x3fU
+#define V_LANE_2_MAPPING(x) ((x) << S_LANE_2_MAPPING)
+#define G_LANE_2_MAPPING(x) (((x) >> S_LANE_2_MAPPING) & M_LANE_2_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_3_MAPPING 0x214c
+
+#define S_LANE_3_MAPPING    0
+#define M_LANE_3_MAPPING    0x3fU
+#define V_LANE_3_MAPPING(x) ((x) << S_LANE_3_MAPPING)
+#define G_LANE_3_MAPPING(x) (((x) >> S_LANE_3_MAPPING) & M_LANE_3_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_4_MAPPING 0x2150
+
+#define S_LANE_4_MAPPING    0
+#define M_LANE_4_MAPPING    0x3fU
+#define V_LANE_4_MAPPING(x) ((x) << S_LANE_4_MAPPING)
+#define G_LANE_4_MAPPING(x) (((x) >> S_LANE_4_MAPPING) & M_LANE_4_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_5_MAPPING 0x2154
+
+#define S_LANE_5_MAPPING    0
+#define M_LANE_5_MAPPING    0x3fU
+#define V_LANE_5_MAPPING(x) ((x) << S_LANE_5_MAPPING)
+#define G_LANE_5_MAPPING(x) (((x) >> S_LANE_5_MAPPING) & M_LANE_5_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_6_MAPPING 0x2158
+
+#define S_LANE_6_MAPPING    0
+#define M_LANE_6_MAPPING    0x3fU
+#define V_LANE_6_MAPPING(x) ((x) << S_LANE_6_MAPPING)
+#define G_LANE_6_MAPPING(x) (((x) >> S_LANE_6_MAPPING) & M_LANE_6_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_7_MAPPING 0x215c
+
+#define S_LANE_7_MAPPING    0
+#define M_LANE_7_MAPPING    0x3fU
+#define V_LANE_7_MAPPING(x) ((x) << S_LANE_7_MAPPING)
+#define G_LANE_7_MAPPING(x) (((x) >> S_LANE_7_MAPPING) & M_LANE_7_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_8_MAPPING 0x2160
+
+#define S_LANE_8_MAPPING    0
+#define M_LANE_8_MAPPING    0x3fU
+#define V_LANE_8_MAPPING(x) ((x) << S_LANE_8_MAPPING)
+#define G_LANE_8_MAPPING(x) (((x) >> S_LANE_8_MAPPING) & M_LANE_8_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_9_MAPPING 0x2164
+
+#define S_LANE_9_MAPPING    0
+#define M_LANE_9_MAPPING    0x3fU
+#define V_LANE_9_MAPPING(x) ((x) << S_LANE_9_MAPPING)
+#define G_LANE_9_MAPPING(x) (((x) >> S_LANE_9_MAPPING) & M_LANE_9_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_10_MAPPING 0x2168
+
+#define S_LANE_10_MAPPING    0
+#define M_LANE_10_MAPPING    0x3fU
+#define V_LANE_10_MAPPING(x) ((x) << S_LANE_10_MAPPING)
+#define G_LANE_10_MAPPING(x) (((x) >> S_LANE_10_MAPPING) & M_LANE_10_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_11_MAPPING 0x216c
+
+#define S_LANE_11_MAPPING    0
+#define M_LANE_11_MAPPING    0x3fU
+#define V_LANE_11_MAPPING(x) ((x) << S_LANE_11_MAPPING)
+#define G_LANE_11_MAPPING(x) (((x) >> S_LANE_11_MAPPING) & M_LANE_11_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_12_MAPPING 0x2170
+
+#define S_LANE_12_MAPPING    0
+#define M_LANE_12_MAPPING    0x3fU
+#define V_LANE_12_MAPPING(x) ((x) << S_LANE_12_MAPPING)
+#define G_LANE_12_MAPPING(x) (((x) >> S_LANE_12_MAPPING) & M_LANE_12_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_13_MAPPING 0x2174
+
+#define S_LANE_13_MAPPING    0
+#define M_LANE_13_MAPPING    0x3fU
+#define V_LANE_13_MAPPING(x) ((x) << S_LANE_13_MAPPING)
+#define G_LANE_13_MAPPING(x) (((x) >> S_LANE_13_MAPPING) & M_LANE_13_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_14_MAPPING 0x2178
+
+#define S_LANE_14_MAPPING    0
+#define M_LANE_14_MAPPING    0x3fU
+#define V_LANE_14_MAPPING(x) ((x) << S_LANE_14_MAPPING)
+#define G_LANE_14_MAPPING(x) (((x) >> S_LANE_14_MAPPING) & M_LANE_14_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_15_MAPPING 0x217c
+
+#define S_LANE_15_MAPPING    0
+#define M_LANE_15_MAPPING    0x3fU
+#define V_LANE_15_MAPPING(x) ((x) << S_LANE_15_MAPPING)
+#define G_LANE_15_MAPPING(x) (((x) >> S_LANE_15_MAPPING) & M_LANE_15_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_16_MAPPING 0x2180
+
+#define S_LANE_16_MAPPING    0
+#define M_LANE_16_MAPPING    0x3fU
+#define V_LANE_16_MAPPING(x) ((x) << S_LANE_16_MAPPING)
+#define G_LANE_16_MAPPING(x) (((x) >> S_LANE_16_MAPPING) & M_LANE_16_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_17_MAPPING 0x2184
+
+#define S_LANE_17_MAPPING    0
+#define M_LANE_17_MAPPING    0x3fU
+#define V_LANE_17_MAPPING(x) ((x) << S_LANE_17_MAPPING)
+#define G_LANE_17_MAPPING(x) (((x) >> S_LANE_17_MAPPING) & M_LANE_17_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_18_MAPPING 0x2188
+
+#define S_LANE_18_MAPPING    0
+#define M_LANE_18_MAPPING    0x3fU
+#define V_LANE_18_MAPPING(x) ((x) << S_LANE_18_MAPPING)
+#define G_LANE_18_MAPPING(x) (((x) >> S_LANE_18_MAPPING) & M_LANE_18_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_LANE_19_MAPPING 0x218c
+
+#define S_LANE_19_MAPPING    0
+#define M_LANE_19_MAPPING    0x3fU
+#define V_LANE_19_MAPPING(x) ((x) << S_LANE_19_MAPPING)
+#define G_LANE_19_MAPPING(x) (((x) >> S_LANE_19_MAPPING) & M_LANE_19_MAPPING)
+
+#define A_MAC_PORT_MTIP_CR4_SCRATCH 0x21f0
+#define A_MAC_PORT_MTIP_CR4_CORE_REVISION 0x21f4
+
+#define S_CORE_REVISION    0
+#define M_CORE_REVISION    0xffffU
+#define V_CORE_REVISION(x) ((x) << S_CORE_REVISION)
+#define G_CORE_REVISION(x) (((x) >> S_CORE_REVISION) & M_CORE_REVISION)
+
+#define A_MAC_PORT_BEAN_CTL 0x2200
+
+#define S_AN_RESET    15
+#define V_AN_RESET(x) ((x) << S_AN_RESET)
+#define F_AN_RESET    V_AN_RESET(1U)
+
+#define S_EXT_NXP_CTRL    13
+#define V_EXT_NXP_CTRL(x) ((x) << S_EXT_NXP_CTRL)
+#define F_EXT_NXP_CTRL    V_EXT_NXP_CTRL(1U)
+
+#define S_BEAN_EN    12
+#define V_BEAN_EN(x) ((x) << S_BEAN_EN)
+#define F_BEAN_EN    V_BEAN_EN(1U)
+
+#define S_RESTART_BEAN    9
+#define V_RESTART_BEAN(x) ((x) << S_RESTART_BEAN)
+#define F_RESTART_BEAN    V_RESTART_BEAN(1U)
+
+#define A_MAC_PORT_MTIP_RS_FEC_CONTROL 0x2200
+
+#define S_RS_FEC_BYPASS_ERROR_INDICATION    1
+#define V_RS_FEC_BYPASS_ERROR_INDICATION(x) ((x) << S_RS_FEC_BYPASS_ERROR_INDICATION)
+#define F_RS_FEC_BYPASS_ERROR_INDICATION    V_RS_FEC_BYPASS_ERROR_INDICATION(1U)
+
+#define S_RS_FEC_BYPASS_CORRECTION    0
+#define V_RS_FEC_BYPASS_CORRECTION(x) ((x) << S_RS_FEC_BYPASS_CORRECTION)
+#define F_RS_FEC_BYPASS_CORRECTION    V_RS_FEC_BYPASS_CORRECTION(1U)
+
+#define A_MAC_PORT_BEAN_STATUS 0x2204
+
+#define S_PDF    9
+#define V_PDF(x) ((x) << S_PDF)
+#define F_PDF    V_PDF(1U)
+
+#define S_EXT_NXP_STATUS    7
+#define V_EXT_NXP_STATUS(x) ((x) << S_EXT_NXP_STATUS)
+#define F_EXT_NXP_STATUS    V_EXT_NXP_STATUS(1U)
+
+#define S_PAGE_RCVD    6
+#define V_PAGE_RCVD(x) ((x) << S_PAGE_RCVD)
+#define F_PAGE_RCVD    V_PAGE_RCVD(1U)
+
+#define S_BEAN_COMPLETE    5
+#define V_BEAN_COMPLETE(x) ((x) << S_BEAN_COMPLETE)
+#define F_BEAN_COMPLETE    V_BEAN_COMPLETE(1U)
+
+#define S_REM_FAULT_STATUS    4
+#define V_REM_FAULT_STATUS(x) ((x) << S_REM_FAULT_STATUS)
+#define F_REM_FAULT_STATUS    V_REM_FAULT_STATUS(1U)
+
+#define S_BEAN_ABILITY    3
+#define V_BEAN_ABILITY(x) ((x) << S_BEAN_ABILITY)
+#define F_BEAN_ABILITY    V_BEAN_ABILITY(1U)
+
+#define S_LP_BEAN_ABILITY    0
+#define V_LP_BEAN_ABILITY(x) ((x) << S_LP_BEAN_ABILITY)
+#define F_LP_BEAN_ABILITY    V_LP_BEAN_ABILITY(1U)
+
+#define A_MAC_PORT_MTIP_RS_FEC_STATUS 0x2204
+
+#define S_RS_FEC_PCS_ALIGN_STATUS    15
+#define V_RS_FEC_PCS_ALIGN_STATUS(x) ((x) << S_RS_FEC_PCS_ALIGN_STATUS)
+#define F_RS_FEC_PCS_ALIGN_STATUS    V_RS_FEC_PCS_ALIGN_STATUS(1U)
+
+#define S_FEC_ALIGN_STATUS    14
+#define V_FEC_ALIGN_STATUS(x) ((x) << S_FEC_ALIGN_STATUS)
+#define F_FEC_ALIGN_STATUS    V_FEC_ALIGN_STATUS(1U)
+
+#define S_RS_FEC_HIGH_SER    2
+#define V_RS_FEC_HIGH_SER(x) ((x) << S_RS_FEC_HIGH_SER)
+#define F_RS_FEC_HIGH_SER    V_RS_FEC_HIGH_SER(1U)
+
+#define S_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY    1
+#define V_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY(x) ((x) << S_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY)
+#define F_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY    V_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY(1U)
+
+#define S_RS_FEC_BYPASS_CORRECTION_ABILITY    0
+#define V_RS_FEC_BYPASS_CORRECTION_ABILITY(x) ((x) << S_RS_FEC_BYPASS_CORRECTION_ABILITY)
+#define F_RS_FEC_BYPASS_CORRECTION_ABILITY    V_RS_FEC_BYPASS_CORRECTION_ABILITY(1U)
+
+#define A_MAC_PORT_BEAN_ABILITY_0 0x2208
+
+#define S_NXP    15
+#define V_NXP(x) ((x) << S_NXP)
+#define F_NXP    V_NXP(1U)
+
+#define S_REM_FAULT    13
+#define V_REM_FAULT(x) ((x) << S_REM_FAULT)
+#define F_REM_FAULT    V_REM_FAULT(1U)
+
+#define S_PAUSE_ABILITY    10
+#define M_PAUSE_ABILITY    0x7U
+#define V_PAUSE_ABILITY(x) ((x) << S_PAUSE_ABILITY)
+#define G_PAUSE_ABILITY(x) (((x) >> S_PAUSE_ABILITY) & M_PAUSE_ABILITY)
+
+#define S_ECHO_NONCE    5
+#define M_ECHO_NONCE    0x1fU
+#define V_ECHO_NONCE(x) ((x) << S_ECHO_NONCE)
+#define G_ECHO_NONCE(x) (((x) >> S_ECHO_NONCE) & M_ECHO_NONCE)
+
+#define S_SELECTOR    0
+#define M_SELECTOR    0x1fU
+#define V_SELECTOR(x) ((x) << S_SELECTOR)
+#define G_SELECTOR(x) (((x) >> S_SELECTOR) & M_SELECTOR)
+
+#define A_MAC_PORT_MTIP_RS_FEC_CCW_LO 0x2208
+
+#define S_RS_RS_FEC_CCW_LO    0
+#define M_RS_RS_FEC_CCW_LO    0xffffU
+#define V_RS_RS_FEC_CCW_LO(x) ((x) << S_RS_RS_FEC_CCW_LO)
+#define G_RS_RS_FEC_CCW_LO(x) (((x) >> S_RS_RS_FEC_CCW_LO) & M_RS_RS_FEC_CCW_LO)
+
+#define A_MAC_PORT_BEAN_ABILITY_1 0x220c
+
+#define S_TECH_ABILITY_1    5
+#define M_TECH_ABILITY_1    0x7ffU
+#define V_TECH_ABILITY_1(x) ((x) << S_TECH_ABILITY_1)
+#define G_TECH_ABILITY_1(x) (((x) >> S_TECH_ABILITY_1) & M_TECH_ABILITY_1)
+
+#define S_TX_NONCE    0
+#define M_TX_NONCE    0x1fU
+#define V_TX_NONCE(x) ((x) << S_TX_NONCE)
+#define G_TX_NONCE(x) (((x) >> S_TX_NONCE) & M_TX_NONCE)
+
+#define A_MAC_PORT_MTIP_RS_FEC_CCW_HI 0x220c
+
+#define S_RS_RS_FEC_CCW_HI    0
+#define M_RS_RS_FEC_CCW_HI    0xffffU
+#define V_RS_RS_FEC_CCW_HI(x) ((x) << S_RS_RS_FEC_CCW_HI)
+#define G_RS_RS_FEC_CCW_HI(x) (((x) >> S_RS_RS_FEC_CCW_HI) & M_RS_RS_FEC_CCW_HI)
+
+#define A_MAC_PORT_BEAN_ABILITY_2 0x2210
+
+#define S_T5_FEC_ABILITY    14
+#define M_T5_FEC_ABILITY    0x3U
+#define V_T5_FEC_ABILITY(x) ((x) << S_T5_FEC_ABILITY)
+#define G_T5_FEC_ABILITY(x) (((x) >> S_T5_FEC_ABILITY) & M_T5_FEC_ABILITY)
+
+#define S_TECH_ABILITY_2    0
+#define M_TECH_ABILITY_2    0x3fffU
+#define V_TECH_ABILITY_2(x) ((x) << S_TECH_ABILITY_2)
+#define G_TECH_ABILITY_2(x) (((x) >> S_TECH_ABILITY_2) & M_TECH_ABILITY_2)
+
+#define A_MAC_PORT_MTIP_RS_FEC_NCCW_LO 0x2210
+
+#define S_RS_RS_FEC_NCCW_LO    0
+#define M_RS_RS_FEC_NCCW_LO    0xffffU
+#define V_RS_RS_FEC_NCCW_LO(x) ((x) << S_RS_RS_FEC_NCCW_LO)
+#define G_RS_RS_FEC_NCCW_LO(x) (((x) >> S_RS_RS_FEC_NCCW_LO) & M_RS_RS_FEC_NCCW_LO)
+
+#define A_MAC_PORT_BEAN_REM_ABILITY_0 0x2214
+#define A_MAC_PORT_MTIP_RS_FEC_NCCW_HI 0x2214
+
+#define S_RS_RS_FEC_NCCW_HI    0
+#define M_RS_RS_FEC_NCCW_HI    0xffffU
+#define V_RS_RS_FEC_NCCW_HI(x) ((x) << S_RS_RS_FEC_NCCW_HI)
+#define G_RS_RS_FEC_NCCW_HI(x) (((x) >> S_RS_RS_FEC_NCCW_HI) & M_RS_RS_FEC_NCCW_HI)
+
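The RS-FEC corrected (CCW) and uncorrected (NCCW) codeword counters are each split into 16-bit LO/HI halves. A sketch that stitches one pair back together (whether reading LO latches HI is a hardware detail this sketch assumes; rs_fec_ccw() is a hypothetical helper):

static uint32_t
rs_fec_ccw(struct adapter *sc)
{
	uint32_t lo, hi;

	lo = G_RS_RS_FEC_CCW_LO(t4_read_reg(sc, A_MAC_PORT_MTIP_RS_FEC_CCW_LO));
	hi = G_RS_RS_FEC_CCW_HI(t4_read_reg(sc, A_MAC_PORT_MTIP_RS_FEC_CCW_HI));
	return ((hi << 16) | lo);
}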
+#define A_MAC_PORT_BEAN_REM_ABILITY_1 0x2218
+#define A_MAC_PORT_MTIP_RS_FEC_LANEMAP 0x2218
+
+#define S_PMA_MAPPING    0
+#define M_PMA_MAPPING    0xffU
+#define V_PMA_MAPPING(x) ((x) << S_PMA_MAPPING)
+#define G_PMA_MAPPING(x) (((x) >> S_PMA_MAPPING) & M_PMA_MAPPING)
+
+#define A_MAC_PORT_BEAN_REM_ABILITY_2 0x221c
+#define A_MAC_PORT_BEAN_MS_COUNT 0x2220
+
+#define S_MS_COUNT    0
+#define M_MS_COUNT    0xffffU
+#define V_MS_COUNT(x) ((x) << S_MS_COUNT)
+#define G_MS_COUNT(x) (((x) >> S_MS_COUNT) & M_MS_COUNT)
+
+#define A_MAC_PORT_BEAN_XNP_0 0x2224
+
+#define S_XNP    15
+#define V_XNP(x) ((x) << S_XNP)
+#define F_XNP    V_XNP(1U)
+
+#define S_ACKNOWLEDGE    14
+#define V_ACKNOWLEDGE(x) ((x) << S_ACKNOWLEDGE)
+#define F_ACKNOWLEDGE    V_ACKNOWLEDGE(1U)
+
+#define S_MP    13
+#define V_MP(x) ((x) << S_MP)
+#define F_MP    V_MP(1U)
+
+#define S_ACK2    12
+#define V_ACK2(x) ((x) << S_ACK2)
+#define F_ACK2    V_ACK2(1U)
+
+#define S_MU    0
+#define M_MU    0x7ffU
+#define V_MU(x) ((x) << S_MU)
+#define G_MU(x) (((x) >> S_MU) & M_MU)
+
+#define A_MAC_PORT_BEAN_XNP_1 0x2228
+
+#define S_UNFORMATTED    0
+#define M_UNFORMATTED    0xffffU
+#define V_UNFORMATTED(x) ((x) << S_UNFORMATTED)
+#define G_UNFORMATTED(x) (((x) >> S_UNFORMATTED) & M_UNFORMATTED)
+
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR0_LO 0x2228
+
+#define S_RS_FEC_SYMBLERR0_LO    0
+#define V_RS_FEC_SYMBLERR0_LO(x) ((x) << S_RS_FEC_SYMBLERR0_LO)
+#define F_RS_FEC_SYMBLERR0_LO    V_RS_FEC_SYMBLERR0_LO(1U)
+
+#define A_MAC_PORT_BEAN_XNP_2 0x222c
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR0_HI 0x222c
+
+#define S_RS_FEC_SYMBLERR0_HI    0
+#define V_RS_FEC_SYMBLERR0_HI(x) ((x) << S_RS_FEC_SYMBLERR0_HI)
+#define F_RS_FEC_SYMBLERR0_HI    V_RS_FEC_SYMBLERR0_HI(1U)
+
+#define A_MAC_PORT_LP_BEAN_XNP_0 0x2230
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR1_LO 0x2230
+
+#define S_RS_FEC_SYMBLERR1_LO    0
+#define V_RS_FEC_SYMBLERR1_LO(x) ((x) << S_RS_FEC_SYMBLERR1_LO)
+#define F_RS_FEC_SYMBLERR1_LO    V_RS_FEC_SYMBLERR1_LO(1U)
+
+#define A_MAC_PORT_LP_BEAN_XNP_1 0x2234
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR1_HI 0x2234
+
+#define S_RS_FEC_SYMBLERR1_HI    0
+#define V_RS_FEC_SYMBLERR1_HI(x) ((x) << S_RS_FEC_SYMBLERR1_HI)
+#define F_RS_FEC_SYMBLERR1_HI    V_RS_FEC_SYMBLERR1_HI(1U)
+
+#define A_MAC_PORT_LP_BEAN_XNP_2 0x2238
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR2_LO 0x2238
+
+#define S_RS_FEC_SYMBLERR2_LO    0
+#define V_RS_FEC_SYMBLERR2_LO(x) ((x) << S_RS_FEC_SYMBLERR2_LO)
+#define F_RS_FEC_SYMBLERR2_LO    V_RS_FEC_SYMBLERR2_LO(1U)
+
+#define A_MAC_PORT_BEAN_ETH_STATUS 0x223c
+
+#define S_100GCR10    8
+#define V_100GCR10(x) ((x) << S_100GCR10)
+#define F_100GCR10    V_100GCR10(1U)
+
+#define S_40GCR4    6
+#define V_40GCR4(x) ((x) << S_40GCR4)
+#define F_40GCR4    V_40GCR4(1U)
+
+#define S_40GKR4    5
+#define V_40GKR4(x) ((x) << S_40GKR4)
+#define F_40GKR4    V_40GKR4(1U)
+
+#define S_FEC    4
+#define V_FEC(x) ((x) << S_FEC)
+#define F_FEC    V_FEC(1U)
+
+#define S_10GKR    3
+#define V_10GKR(x) ((x) << S_10GKR)
+#define F_10GKR    V_10GKR(1U)
+
+#define S_10GKX4    2
+#define V_10GKX4(x) ((x) << S_10GKX4)
+#define F_10GKX4    V_10GKX4(1U)
+
+#define S_1GKX    1
+#define V_1GKX(x) ((x) << S_1GKX)
+#define F_1GKX    V_1GKX(1U)
+
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR2_HI 0x223c
+
+#define S_RS_FEC_SYMBLERR2_HI    0
+#define V_RS_FEC_SYMBLERR2_HI(x) ((x) << S_RS_FEC_SYMBLERR2_HI)
+#define F_RS_FEC_SYMBLERR2_HI    V_RS_FEC_SYMBLERR2_HI(1U)
+
+#define A_MAC_PORT_BEAN_CTL_LANE1 0x2240
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR3_LO 0x2240
+
+#define S_RS_FEC_SYMBLERR3_LO    0
+#define V_RS_FEC_SYMBLERR3_LO(x) ((x) << S_RS_FEC_SYMBLERR3_LO)
+#define F_RS_FEC_SYMBLERR3_LO    V_RS_FEC_SYMBLERR3_LO(1U)
+
+#define A_MAC_PORT_BEAN_STATUS_LANE1 0x2244
+#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR3_HI 0x2244
+
+#define S_RS_FEC_SYMBLERR3_HI    0
+#define V_RS_FEC_SYMBLERR3_HI(x) ((x) << S_RS_FEC_SYMBLERR3_HI)
+#define F_RS_FEC_SYMBLERR3_HI    V_RS_FEC_SYMBLERR3_HI(1U)
+
+#define A_MAC_PORT_BEAN_ABILITY_0_LANE1 0x2248
+#define A_MAC_PORT_BEAN_ABILITY_1_LANE1 0x224c
+#define A_MAC_PORT_BEAN_ABILITY_2_LANE1 0x2250
+#define A_MAC_PORT_BEAN_REM_ABILITY_0_LANE1 0x2254
+#define A_MAC_PORT_BEAN_REM_ABILITY_1_LANE1 0x2258
+#define A_MAC_PORT_BEAN_REM_ABILITY_2_LANE1 0x225c
+#define A_MAC_PORT_BEAN_MS_COUNT_LANE1 0x2260
+#define A_MAC_PORT_BEAN_XNP_0_LANE1 0x2264
+#define A_MAC_PORT_BEAN_XNP_1_LANE1 0x2268
+#define A_MAC_PORT_BEAN_XNP_2_LANE1 0x226c
+#define A_MAC_PORT_LP_BEAN_XNP_0_LANE1 0x2270
+#define A_MAC_PORT_LP_BEAN_XNP_1_LANE1 0x2274
+#define A_MAC_PORT_LP_BEAN_XNP_2_LANE1 0x2278
+#define A_MAC_PORT_BEAN_ETH_STATUS_LANE1 0x227c
+#define A_MAC_PORT_BEAN_CTL_LANE2 0x2280
+#define A_MAC_PORT_BEAN_STATUS_LANE2 0x2284
+#define A_MAC_PORT_BEAN_ABILITY_0_LANE2 0x2288
+#define A_MAC_PORT_BEAN_ABILITY_1_LANE2 0x228c
+#define A_MAC_PORT_BEAN_ABILITY_2_LANE2 0x2290
+#define A_MAC_PORT_BEAN_REM_ABILITY_0_LANE2 0x2294
+#define A_MAC_PORT_BEAN_REM_ABILITY_1_LANE2 0x2298
+#define A_MAC_PORT_BEAN_REM_ABILITY_2_LANE2 0x229c
+#define A_MAC_PORT_BEAN_MS_COUNT_LANE2 0x22a0
+#define A_MAC_PORT_BEAN_XNP_0_LANE2 0x22a4
+#define A_MAC_PORT_BEAN_XNP_1_LANE2 0x22a8
+#define A_MAC_PORT_BEAN_XNP_2_LANE2 0x22ac
+#define A_MAC_PORT_LP_BEAN_XNP_0_LANE2 0x22b0
+#define A_MAC_PORT_LP_BEAN_XNP_1_LANE2 0x22b4
+#define A_MAC_PORT_LP_BEAN_XNP_2_LANE2 0x22b8
+#define A_MAC_PORT_BEAN_ETH_STATUS_LANE2 0x22bc
+#define A_MAC_PORT_BEAN_CTL_LANE3 0x22c0
+#define A_MAC_PORT_BEAN_STATUS_LANE3 0x22c4
+#define A_MAC_PORT_BEAN_ABILITY_0_LANE3 0x22c8
+#define A_MAC_PORT_BEAN_ABILITY_1_LANE3 0x22cc
+#define A_MAC_PORT_BEAN_ABILITY_2_LANE3 0x22d0
+#define A_MAC_PORT_BEAN_REM_ABILITY_0_LANE3 0x22d4
+#define A_MAC_PORT_BEAN_REM_ABILITY_1_LANE3 0x22d8
+#define A_MAC_PORT_BEAN_REM_ABILITY_2_LANE3 0x22dc
+#define A_MAC_PORT_BEAN_MS_COUNT_LANE3 0x22e0
+#define A_MAC_PORT_BEAN_XNP_0_LANE3 0x22e4
+#define A_MAC_PORT_BEAN_XNP_1_LANE3 0x22e8
+#define A_MAC_PORT_BEAN_XNP_2_LANE3 0x22ec
+#define A_MAC_PORT_LP_BEAN_XNP_0_LANE3 0x22f0
+#define A_MAC_PORT_LP_BEAN_XNP_1_LANE3 0x22f4
+#define A_MAC_PORT_LP_BEAN_XNP_2_LANE3 0x22f8
+#define A_MAC_PORT_BEAN_ETH_STATUS_LANE3 0x22fc
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_CONTROL 0x2400
+
+#define S_RS_FEC_ENABLED_STATUS    15
+#define V_RS_FEC_ENABLED_STATUS(x) ((x) << S_RS_FEC_ENABLED_STATUS)
+#define F_RS_FEC_ENABLED_STATUS    V_RS_FEC_ENABLED_STATUS(1U)
+
+#define S_RS_FEC_ENABLE    2
+#define V_RS_FEC_ENABLE(x) ((x) << S_RS_FEC_ENABLE)
+#define F_RS_FEC_ENABLE    V_RS_FEC_ENABLE(1U)
+
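The vendor control register pairs a writable enable bit with a read-only status bit, so a caller can request RS-FEC and then confirm the block actually engaged. A minimal sketch that does a read-modify-write so other bits are preserved; rs_fec_set() is a hypothetical helper:

static int
rs_fec_set(struct adapter *sc, bool enable)
{
	uint32_t v;

	v = t4_read_reg(sc, A_MAC_PORT_MTIP_RS_FEC_VENDOR_CONTROL);
	if (enable)
		v |= F_RS_FEC_ENABLE;
	else
		v &= ~F_RS_FEC_ENABLE;
	t4_write_reg(sc, A_MAC_PORT_MTIP_RS_FEC_VENDOR_CONTROL, v);

	/* The read-only status bit reports whether the request took effect. */
	v = t4_read_reg(sc, A_MAC_PORT_MTIP_RS_FEC_VENDOR_CONTROL);
	return ((((v & F_RS_FEC_ENABLED_STATUS) != 0) == enable) ? 0 : EIO);
}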
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_INFO_1 0x2404
+
+#define S_DESKEW_EMPTY    12
+#define M_DESKEW_EMPTY    0xfU
+#define V_DESKEW_EMPTY(x) ((x) << S_DESKEW_EMPTY)
+#define G_DESKEW_EMPTY(x) (((x) >> S_DESKEW_EMPTY) & M_DESKEW_EMPTY)
+
+#define S_FEC_ALIGN_STATUS_LH    10
+#define V_FEC_ALIGN_STATUS_LH(x) ((x) << S_FEC_ALIGN_STATUS_LH)
+#define F_FEC_ALIGN_STATUS_LH    V_FEC_ALIGN_STATUS_LH(1U)
+
+#define S_TX_DP_OVERFLOW    9
+#define V_TX_DP_OVERFLOW(x) ((x) << S_TX_DP_OVERFLOW)
+#define F_TX_DP_OVERFLOW    V_TX_DP_OVERFLOW(1U)
+
+#define S_RX_DP_OVERFLOW    8
+#define V_RX_DP_OVERFLOW(x) ((x) << S_RX_DP_OVERFLOW)
+#define F_RX_DP_OVERFLOW    V_RX_DP_OVERFLOW(1U)
+
+#define S_TX_DATAPATH_RESTART    7
+#define V_TX_DATAPATH_RESTART(x) ((x) << S_TX_DATAPATH_RESTART)
+#define F_TX_DATAPATH_RESTART    V_TX_DATAPATH_RESTART(1U)
+
+#define S_RX_DATAPATH_RESTART    6
+#define V_RX_DATAPATH_RESTART(x) ((x) << S_RX_DATAPATH_RESTART)
+#define F_RX_DATAPATH_RESTART    V_RX_DATAPATH_RESTART(1U)
+
+#define S_MARKER_CHECK_RESTART    5
+#define V_MARKER_CHECK_RESTART(x) ((x) << S_MARKER_CHECK_RESTART)
+#define F_MARKER_CHECK_RESTART    V_MARKER_CHECK_RESTART(1U)
+
+#define S_FEC_ALIGN_STATUS_LL    4
+#define V_FEC_ALIGN_STATUS_LL(x) ((x) << S_FEC_ALIGN_STATUS_LL)
+#define F_FEC_ALIGN_STATUS_LL    V_FEC_ALIGN_STATUS_LL(1U)
+
+#define S_AMPS_LOCK    0
+#define M_AMPS_LOCK    0xfU
+#define V_AMPS_LOCK(x) ((x) << S_AMPS_LOCK)
+#define G_AMPS_LOCK(x) (((x) >> S_AMPS_LOCK) & M_AMPS_LOCK)
+
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_INFO_2 0x2408
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_REVISION 0x240c
+
+#define S_RS_FEC_VENDOR_REVISION    0
+#define M_RS_FEC_VENDOR_REVISION    0xffffU
+#define V_RS_FEC_VENDOR_REVISION(x) ((x) << S_RS_FEC_VENDOR_REVISION)
+#define G_RS_FEC_VENDOR_REVISION(x) (((x) >> S_RS_FEC_VENDOR_REVISION) & M_RS_FEC_VENDOR_REVISION)
+
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_KEY 0x2410
+
+#define S_RS_FEC_VENDOR_TX_TEST_KEY    0
+#define M_RS_FEC_VENDOR_TX_TEST_KEY    0xffffU
+#define V_RS_FEC_VENDOR_TX_TEST_KEY(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_KEY)
+#define G_RS_FEC_VENDOR_TX_TEST_KEY(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_KEY) & M_RS_FEC_VENDOR_TX_TEST_KEY)
+
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_SYMBOLS 0x2414
+
+#define S_RS_FEC_VENDOR_TX_TEST_SYMBOLS    0
+#define M_RS_FEC_VENDOR_TX_TEST_SYMBOLS    0xffffU
+#define V_RS_FEC_VENDOR_TX_TEST_SYMBOLS(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_SYMBOLS)
+#define G_RS_FEC_VENDOR_TX_TEST_SYMBOLS(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_SYMBOLS) & M_RS_FEC_VENDOR_TX_TEST_SYMBOLS)
+
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_PATTERN 0x2418
+
+#define S_RS_FEC_VENDOR_TX_TEST_PATTERN    0
+#define M_RS_FEC_VENDOR_TX_TEST_PATTERN    0xffffU
+#define V_RS_FEC_VENDOR_TX_TEST_PATTERN(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_PATTERN)
+#define G_RS_FEC_VENDOR_TX_TEST_PATTERN(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_PATTERN) & M_RS_FEC_VENDOR_TX_TEST_PATTERN)
+
+#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_TRIGGER 0x241c
+
+#define S_RS_FEC_VENDOR_TX_TEST_TRIGGER    0
+#define M_RS_FEC_VENDOR_TX_TEST_TRIGGER    0xffffU
+#define V_RS_FEC_VENDOR_TX_TEST_TRIGGER(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_TRIGGER)
+#define G_RS_FEC_VENDOR_TX_TEST_TRIGGER(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_TRIGGER) & M_RS_FEC_VENDOR_TX_TEST_TRIGGER)
+
+#define A_MAC_PORT_FEC_KR_CONTROL 0x2600
+
+#define S_ENABLE_TR    1
+#define V_ENABLE_TR(x) ((x) << S_ENABLE_TR)
+#define F_ENABLE_TR    V_ENABLE_TR(1U)
+
+#define S_RESTART_TR    0
+#define V_RESTART_TR(x) ((x) << S_RESTART_TR)
+#define F_RESTART_TR    V_RESTART_TR(1U)
+
+#define A_MAC_PORT_FEC_KR_STATUS 0x2604
+
+#define S_FECKRSIGDET    15
+#define V_FECKRSIGDET(x) ((x) << S_FECKRSIGDET)
+#define F_FECKRSIGDET    V_FECKRSIGDET(1U)
+
+#define S_TRAIN_FAIL    3
+#define V_TRAIN_FAIL(x) ((x) << S_TRAIN_FAIL)
+#define F_TRAIN_FAIL    V_TRAIN_FAIL(1U)
+
+#define S_STARTUP_STATUS    2
+#define V_STARTUP_STATUS(x) ((x) << S_STARTUP_STATUS)
+#define F_STARTUP_STATUS    V_STARTUP_STATUS(1U)
+
+#define S_RX_STATUS    0
+#define V_RX_STATUS(x) ((x) << S_RX_STATUS)
+#define F_RX_STATUS    V_RX_STATUS(1U)
+
+#define A_MAC_PORT_FEC_KR_LP_COEFF 0x2608
+
+#define S_PRESET    13
+#define V_PRESET(x) ((x) << S_PRESET)
+#define F_PRESET    V_PRESET(1U)
+
+#define S_INITIALIZE    12
+#define V_INITIALIZE(x) ((x) << S_INITIALIZE)
+#define F_INITIALIZE    V_INITIALIZE(1U)
+
+#define S_CP1_UPD    4
+#define M_CP1_UPD    0x3U
+#define V_CP1_UPD(x) ((x) << S_CP1_UPD)
+#define G_CP1_UPD(x) (((x) >> S_CP1_UPD) & M_CP1_UPD)
+
+#define S_C0_UPD    2
+#define M_C0_UPD    0x3U
+#define V_C0_UPD(x) ((x) << S_C0_UPD)
+#define G_C0_UPD(x) (((x) >> S_C0_UPD) & M_C0_UPD)
+
+#define S_CN1_UPD    0
+#define M_CN1_UPD    0x3U
+#define V_CN1_UPD(x) ((x) << S_CN1_UPD)
+#define G_CN1_UPD(x) (((x) >> S_CN1_UPD) & M_CN1_UPD)
+
+#define A_MAC_PORT_FEC_KR_LP_STAT 0x260c
+
+#define S_RX_READY    15
+#define V_RX_READY(x) ((x) << S_RX_READY)
+#define F_RX_READY    V_RX_READY(1U)
+
+#define S_CP1_STAT    4
+#define M_CP1_STAT    0x3U
+#define V_CP1_STAT(x) ((x) << S_CP1_STAT)
+#define G_CP1_STAT(x) (((x) >> S_CP1_STAT) & M_CP1_STAT)
+
+#define S_C0_STAT    2
+#define M_C0_STAT    0x3U
+#define V_C0_STAT(x) ((x) << S_C0_STAT)
+#define G_C0_STAT(x) (((x) >> S_C0_STAT) & M_C0_STAT)
+
+#define S_CN1_STAT    0
+#define M_CN1_STAT    0x3U
+#define V_CN1_STAT(x) ((x) << S_CN1_STAT)
+#define G_CN1_STAT(x) (((x) >> S_CN1_STAT) & M_CN1_STAT)
+
+#define A_MAC_PORT_FEC_KR_LD_COEFF 0x2610
+#define A_MAC_PORT_FEC_KR_LD_STAT 0x2614
+#define A_MAC_PORT_FEC_ABILITY 0x2618
+
+#define S_FEC_IND_ABILITY    1
+#define V_FEC_IND_ABILITY(x) ((x) << S_FEC_IND_ABILITY)
+#define F_FEC_IND_ABILITY    V_FEC_IND_ABILITY(1U)
+
+#define S_ABILITY    0
+#define V_ABILITY(x) ((x) << S_ABILITY)
+#define F_ABILITY    V_ABILITY(1U)
+
+#define A_MAC_PORT_MTIP_FEC_ABILITY 0x2618
+
+#define S_BASE_R_FEC_ERROR_INDICATION_ABILITY    1
+#define V_BASE_R_FEC_ERROR_INDICATION_ABILITY(x) ((x) << S_BASE_R_FEC_ERROR_INDICATION_ABILITY)
+#define F_BASE_R_FEC_ERROR_INDICATION_ABILITY    V_BASE_R_FEC_ERROR_INDICATION_ABILITY(1U)
+
+#define S_BASE_R_FEC_ABILITY    0
+#define V_BASE_R_FEC_ABILITY(x) ((x) << S_BASE_R_FEC_ABILITY)
+#define F_BASE_R_FEC_ABILITY    V_BASE_R_FEC_ABILITY(1U)
+
+#define A_MAC_PORT_FEC_CONTROL 0x261c
+
+#define S_FEC_EN_ERR_IND    1
+#define V_FEC_EN_ERR_IND(x) ((x) << S_FEC_EN_ERR_IND)
+#define F_FEC_EN_ERR_IND    V_FEC_EN_ERR_IND(1U)
+
+#define S_FEC_EN    0
+#define V_FEC_EN(x) ((x) << S_FEC_EN)
+#define F_FEC_EN    V_FEC_EN(1U)
+
+#define A_MAC_PORT_FEC_STATUS 0x2620
+
+#define S_FEC_LOCKED_100    1
+#define V_FEC_LOCKED_100(x) ((x) << S_FEC_LOCKED_100)
+#define F_FEC_LOCKED_100    V_FEC_LOCKED_100(1U)
+
+#define S_FEC_LOCKED    0
+#define V_FEC_LOCKED(x) ((x) << S_FEC_LOCKED)
+#define F_FEC_LOCKED    V_FEC_LOCKED(1U)
+
+#define S_FEC_LOCKED0    1
+#define M_FEC_LOCKED0    0xfU
+#define V_FEC_LOCKED0(x) ((x) << S_FEC_LOCKED0)
+#define G_FEC_LOCKED0(x) (((x) >> S_FEC_LOCKED0) & M_FEC_LOCKED0)
+
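Enabling BASE-R FEC follows the same pattern: set F_FEC_EN (optionally F_FEC_EN_ERR_IND) in the control register, then poll the status register for F_FEC_LOCKED. A sketch with the inter-poll delay left out; fec_enable_wait() is a hypothetical helper:

static int
fec_enable_wait(struct adapter *sc, int attempts)
{
	t4_write_reg(sc, A_MAC_PORT_FEC_CONTROL, F_FEC_EN | F_FEC_EN_ERR_IND);
	while (attempts-- > 0) {
		if (t4_read_reg(sc, A_MAC_PORT_FEC_STATUS) & F_FEC_LOCKED)
			return (0);
		/* a short delay between polls would go here */
	}
	return (ETIMEDOUT);
}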
+#define A_MAC_PORT_FEC_CERR_CNT_0 0x2624
+
+#define S_FEC_CERR_CNT_0    0
+#define M_FEC_CERR_CNT_0    0xffffU
+#define V_FEC_CERR_CNT_0(x) ((x) << S_FEC_CERR_CNT_0)
+#define G_FEC_CERR_CNT_0(x) (((x) >> S_FEC_CERR_CNT_0) & M_FEC_CERR_CNT_0)
+
+#define A_MAC_PORT_MTIP_FEC0_CERR_CNT_0 0x2624
+#define A_MAC_PORT_FEC_CERR_CNT_1 0x2628
+
+#define S_FEC_CERR_CNT_1    0
+#define M_FEC_CERR_CNT_1    0xffffU
+#define V_FEC_CERR_CNT_1(x) ((x) << S_FEC_CERR_CNT_1)
+#define G_FEC_CERR_CNT_1(x) (((x) >> S_FEC_CERR_CNT_1) & M_FEC_CERR_CNT_1)
+
+#define A_MAC_PORT_MTIP_FEC0_CERR_CNT_1 0x2628
+#define A_MAC_PORT_FEC_NCERR_CNT_0 0x262c
+
+#define S_FEC_NCERR_CNT_0    0
+#define M_FEC_NCERR_CNT_0    0xffffU
+#define V_FEC_NCERR_CNT_0(x) ((x) << S_FEC_NCERR_CNT_0)
+#define G_FEC_NCERR_CNT_0(x) (((x) >> S_FEC_NCERR_CNT_0) & M_FEC_NCERR_CNT_0)
+
+#define A_MAC_PORT_MTIP_FEC0_NCERR_CNT_0 0x262c
+
+#define S_FEC0_NCERR_CNT_0    0
+#define M_FEC0_NCERR_CNT_0    0xffffU
+#define V_FEC0_NCERR_CNT_0(x) ((x) << S_FEC0_NCERR_CNT_0)
+#define G_FEC0_NCERR_CNT_0(x) (((x) >> S_FEC0_NCERR_CNT_0) & M_FEC0_NCERR_CNT_0)
+
+#define A_MAC_PORT_FEC_NCERR_CNT_1 0x2630
+
+#define S_FEC_NCERR_CNT_1    0
+#define M_FEC_NCERR_CNT_1    0xffffU
+#define V_FEC_NCERR_CNT_1(x) ((x) << S_FEC_NCERR_CNT_1)
+#define G_FEC_NCERR_CNT_1(x) (((x) >> S_FEC_NCERR_CNT_1) & M_FEC_NCERR_CNT_1)
+
+#define A_MAC_PORT_MTIP_FEC0_NCERR_CNT_1 0x2630
+
+#define S_FEC0_NCERR_CNT_1    0
+#define M_FEC0_NCERR_CNT_1    0xffffU
+#define V_FEC0_NCERR_CNT_1(x) ((x) << S_FEC0_NCERR_CNT_1)
+#define G_FEC0_NCERR_CNT_1(x) (((x) >> S_FEC0_NCERR_CNT_1) & M_FEC0_NCERR_CNT_1)
+
+#define A_MAC_PORT_MTIP_FEC_STATUS1 0x2664
+#define A_MAC_PORT_MTIP_FEC1_CERR_CNT_0 0x2668
+#define A_MAC_PORT_MTIP_FEC1_CERR_CNT_1 0x266c
+#define A_MAC_PORT_MTIP_FEC1_NCERR_CNT_0 0x2670
+#define A_MAC_PORT_MTIP_FEC1_NCERR_CNT_1 0x2674
+#define A_MAC_PORT_MTIP_FEC_STATUS2 0x26a8
+#define A_MAC_PORT_MTIP_FEC2_CERR_CNT_0 0x26ac
+#define A_MAC_PORT_MTIP_FEC2_CERR_CNT_1 0x26b0
+#define A_MAC_PORT_MTIP_FEC2_NCERR_CNT_0 0x26b4
+#define A_MAC_PORT_MTIP_FEC2_NCERR_CNT_1 0x26b8
+#define A_MAC_PORT_MTIP_FEC_STATUS3 0x26ec
+#define A_MAC_PORT_MTIP_FEC3_CERR_CNT_0 0x26f0
+#define A_MAC_PORT_MTIP_FEC3_CERR_CNT_1 0x26f4
+#define A_MAC_PORT_MTIP_FEC3_NCERR_CNT_0 0x26f8
+#define A_MAC_PORT_MTIP_FEC3_NCERR_CNT_1 0x26fc
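
/*
 * Editor's note, not part of the commit: a sketch reading the FEC
 * corrected/uncorrectable error counters defined above.  Both are
 * 16-bit fields at bit 0; "cerr" and "ncerr" are assumed to hold the
 * contents of A_MAC_PORT_FEC_CERR_CNT_0 and A_MAC_PORT_FEC_NCERR_CNT_0.
 */
static inline void
fec_error_counts(uint32_t cerr, uint32_t ncerr, u_int *corrected,
    u_int *uncorrectable)
{

	*corrected = G_FEC_CERR_CNT_0(cerr);
	*uncorrectable = G_FEC_NCERR_CNT_0(ncerr);
}
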
+#define A_MAC_PORT_AE_RX_COEF_REQ 0x2a00
+
+#define S_T5_RXREQ_C2    4
+#define M_T5_RXREQ_C2    0x3U
+#define V_T5_RXREQ_C2(x) ((x) << S_T5_RXREQ_C2)
+#define G_T5_RXREQ_C2(x) (((x) >> S_T5_RXREQ_C2) & M_T5_RXREQ_C2)
+
+#define S_T5_RXREQ_C1    2
+#define M_T5_RXREQ_C1    0x3U
+#define V_T5_RXREQ_C1(x) ((x) << S_T5_RXREQ_C1)
+#define G_T5_RXREQ_C1(x) (((x) >> S_T5_RXREQ_C1) & M_T5_RXREQ_C1)
+
+#define S_T5_RXREQ_C0    0
+#define M_T5_RXREQ_C0    0x3U
+#define V_T5_RXREQ_C0(x) ((x) << S_T5_RXREQ_C0)
+#define G_T5_RXREQ_C0(x) (((x) >> S_T5_RXREQ_C0) & M_T5_RXREQ_C0)
+
+#define S_T5_RXREQ_C3    6
+#define M_T5_RXREQ_C3    0x3U
+#define V_T5_RXREQ_C3(x) ((x) << S_T5_RXREQ_C3)
+#define G_T5_RXREQ_C3(x) (((x) >> S_T5_RXREQ_C3) & M_T5_RXREQ_C3)
+
+#define A_MAC_PORT_AE_RX_COEF_STAT 0x2a04
+
+#define S_T5_AE0_RXSTAT_RDY    15
+#define V_T5_AE0_RXSTAT_RDY(x) ((x) << S_T5_AE0_RXSTAT_RDY)
+#define F_T5_AE0_RXSTAT_RDY    V_T5_AE0_RXSTAT_RDY(1U)
+
+#define S_T5_AE0_RXSTAT_C2    4
+#define M_T5_AE0_RXSTAT_C2    0x3U
+#define V_T5_AE0_RXSTAT_C2(x) ((x) << S_T5_AE0_RXSTAT_C2)
+#define G_T5_AE0_RXSTAT_C2(x) (((x) >> S_T5_AE0_RXSTAT_C2) & M_T5_AE0_RXSTAT_C2)
+
+#define S_T5_AE0_RXSTAT_C1    2
+#define M_T5_AE0_RXSTAT_C1    0x3U
+#define V_T5_AE0_RXSTAT_C1(x) ((x) << S_T5_AE0_RXSTAT_C1)
+#define G_T5_AE0_RXSTAT_C1(x) (((x) >> S_T5_AE0_RXSTAT_C1) & M_T5_AE0_RXSTAT_C1)
+
+#define S_T5_AE0_RXSTAT_C0    0
+#define M_T5_AE0_RXSTAT_C0    0x3U
+#define V_T5_AE0_RXSTAT_C0(x) ((x) << S_T5_AE0_RXSTAT_C0)
+#define G_T5_AE0_RXSTAT_C0(x) (((x) >> S_T5_AE0_RXSTAT_C0) & M_T5_AE0_RXSTAT_C0)
+
+#define S_T5_AE0_RXSTAT_LSNA    14
+#define V_T5_AE0_RXSTAT_LSNA(x) ((x) << S_T5_AE0_RXSTAT_LSNA)
+#define F_T5_AE0_RXSTAT_LSNA    V_T5_AE0_RXSTAT_LSNA(1U)
+
+#define S_T5_AE0_RXSTAT_FEC    13
+#define V_T5_AE0_RXSTAT_FEC(x) ((x) << S_T5_AE0_RXSTAT_FEC)
+#define F_T5_AE0_RXSTAT_FEC    V_T5_AE0_RXSTAT_FEC(1U)
+
+#define S_T5_AE0_RXSTAT_TF    12
+#define V_T5_AE0_RXSTAT_TF(x) ((x) << S_T5_AE0_RXSTAT_TF)
+#define F_T5_AE0_RXSTAT_TF    V_T5_AE0_RXSTAT_TF(1U)
+
+#define S_T5_AE0_RXSTAT_C3    6
+#define M_T5_AE0_RXSTAT_C3    0x3U
+#define V_T5_AE0_RXSTAT_C3(x) ((x) << S_T5_AE0_RXSTAT_C3)
+#define G_T5_AE0_RXSTAT_C3(x) (((x) >> S_T5_AE0_RXSTAT_C3) & M_T5_AE0_RXSTAT_C3)
+
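/*
 * Editor's note, not part of the commit: these look like IEEE 802.3
 * clause 72 link-training coefficient status fields.  A sketch
 * decoding lane 0's status word ("stat", assumed read from
 * A_MAC_PORT_AE_RX_COEF_STAT); reading the two-bit per-coefficient
 * values as not_updated/updated/minimum/maximum is the clause 72
 * convention and is an assumption here, not something the commit
 * states.
 */
static inline int
ae0_rx_coef_status(uint32_t stat, u_int *c0, u_int *c1, u_int *c2,
    u_int *c3)
{

	*c0 = G_T5_AE0_RXSTAT_C0(stat);
	*c1 = G_T5_AE0_RXSTAT_C1(stat);
	*c2 = G_T5_AE0_RXSTAT_C2(stat);
	*c3 = G_T5_AE0_RXSTAT_C3(stat);
	return ((stat & F_T5_AE0_RXSTAT_RDY) != 0);	/* receiver ready */
}
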
+#define A_MAC_PORT_AE_TX_COEF_REQ 0x2a08
+
+#define S_T5_TXREQ_C2    4
+#define M_T5_TXREQ_C2    0x3U
+#define V_T5_TXREQ_C2(x) ((x) << S_T5_TXREQ_C2)
+#define G_T5_TXREQ_C2(x) (((x) >> S_T5_TXREQ_C2) & M_T5_TXREQ_C2)
+
+#define S_T5_TXREQ_C1    2
+#define M_T5_TXREQ_C1    0x3U
+#define V_T5_TXREQ_C1(x) ((x) << S_T5_TXREQ_C1)
+#define G_T5_TXREQ_C1(x) (((x) >> S_T5_TXREQ_C1) & M_T5_TXREQ_C1)
+
+#define S_T5_TXREQ_C0    0
+#define M_T5_TXREQ_C0    0x3U
+#define V_T5_TXREQ_C0(x) ((x) << S_T5_TXREQ_C0)
+#define G_T5_TXREQ_C0(x) (((x) >> S_T5_TXREQ_C0) & M_T5_TXREQ_C0)
+
+#define S_TXREQ_FEC    11
+#define V_TXREQ_FEC(x) ((x) << S_TXREQ_FEC)
+#define F_TXREQ_FEC    V_TXREQ_FEC(1U)
+
+#define S_T5_TXREQ_C3    6
+#define M_T5_TXREQ_C3    0x3U
+#define V_T5_TXREQ_C3(x) ((x) << S_T5_TXREQ_C3)
+#define G_T5_TXREQ_C3(x) (((x) >> S_T5_TXREQ_C3) & M_T5_TXREQ_C3)
+
+#define A_MAC_PORT_AE_TX_COEF_STAT 0x2a0c
+
+#define S_T5_TXSTAT_C2    4
+#define M_T5_TXSTAT_C2    0x3U
+#define V_T5_TXSTAT_C2(x) ((x) << S_T5_TXSTAT_C2)
+#define G_T5_TXSTAT_C2(x) (((x) >> S_T5_TXSTAT_C2) & M_T5_TXSTAT_C2)
+
+#define S_T5_TXSTAT_C1    2
+#define M_T5_TXSTAT_C1    0x3U
+#define V_T5_TXSTAT_C1(x) ((x) << S_T5_TXSTAT_C1)
+#define G_T5_TXSTAT_C1(x) (((x) >> S_T5_TXSTAT_C1) & M_T5_TXSTAT_C1)
+
+#define S_T5_TXSTAT_C0    0
+#define M_T5_TXSTAT_C0    0x3U
+#define V_T5_TXSTAT_C0(x) ((x) << S_T5_TXSTAT_C0)
+#define G_T5_TXSTAT_C0(x) (((x) >> S_T5_TXSTAT_C0) & M_T5_TXSTAT_C0)
+
+#define S_T5_TXSTAT_C3    6
+#define M_T5_TXSTAT_C3    0x3U
+#define V_T5_TXSTAT_C3(x) ((x) << S_T5_TXSTAT_C3)
+#define G_T5_TXSTAT_C3(x) (((x) >> S_T5_TXSTAT_C3) & M_T5_TXSTAT_C3)
+
+#define A_MAC_PORT_AE_REG_MODE 0x2a10
+
+#define S_AET_RSVD    7
+#define V_AET_RSVD(x) ((x) << S_AET_RSVD)
+#define F_AET_RSVD    V_AET_RSVD(1U)
+
+#define S_AET_ENABLE    6
+#define V_AET_ENABLE(x) ((x) << S_AET_ENABLE)
+#define F_AET_ENABLE    V_AET_ENABLE(1U)
+
+#define S_SET_WAIT_TIMER    13
+#define M_SET_WAIT_TIMER    0x3U
+#define V_SET_WAIT_TIMER(x) ((x) << S_SET_WAIT_TIMER)
+#define G_SET_WAIT_TIMER(x) (((x) >> S_SET_WAIT_TIMER) & M_SET_WAIT_TIMER)
+
+#define S_C2_C3_STATE_SEL    12
+#define V_C2_C3_STATE_SEL(x) ((x) << S_C2_C3_STATE_SEL)
+#define F_C2_C3_STATE_SEL    V_C2_C3_STATE_SEL(1U)
+
+#define S_FFE4_EN    11
+#define V_FFE4_EN(x) ((x) << S_FFE4_EN)
+#define F_FFE4_EN    V_FFE4_EN(1U)
+
+#define S_FEC_REQUEST    10
+#define V_FEC_REQUEST(x) ((x) << S_FEC_REQUEST)
+#define F_FEC_REQUEST    V_FEC_REQUEST(1U)
+
+#define S_FEC_SUPPORTED    9
+#define V_FEC_SUPPORTED(x) ((x) << S_FEC_SUPPORTED)
+#define F_FEC_SUPPORTED    V_FEC_SUPPORTED(1U)
+
+#define S_TX_FIXED    8
+#define V_TX_FIXED(x) ((x) << S_TX_FIXED)
+#define F_TX_FIXED    V_TX_FIXED(1U)
+
+#define A_MAC_PORT_AE_PRBS_CTL 0x2a14
+#define A_MAC_PORT_AE_FSM_CTL 0x2a18
+
+#define S_CIN_ENABLE    15
+#define V_CIN_ENABLE(x) ((x) << S_CIN_ENABLE)
+#define F_CIN_ENABLE    V_CIN_ENABLE(1U)
+
+#define A_MAC_PORT_AE_FSM_STATE 0x2a1c
+#define A_MAC_PORT_AE_RX_COEF_REQ_1 0x2a20
+#define A_MAC_PORT_AE_RX_COEF_STAT_1 0x2a24
+
+#define S_T5_AE1_RXSTAT_RDY    15
+#define V_T5_AE1_RXSTAT_RDY(x) ((x) << S_T5_AE1_RXSTAT_RDY)
+#define F_T5_AE1_RXSTAT_RDY    V_T5_AE1_RXSTAT_RDY(1U)
+
+#define S_T5_AE1_RXSTAT_C2    4
+#define M_T5_AE1_RXSTAT_C2    0x3U
+#define V_T5_AE1_RXSTAT_C2(x) ((x) << S_T5_AE1_RXSTAT_C2)
+#define G_T5_AE1_RXSTAT_C2(x) (((x) >> S_T5_AE1_RXSTAT_C2) & M_T5_AE1_RXSTAT_C2)
+
+#define S_T5_AE1_RXSTAT_C1    2
+#define M_T5_AE1_RXSTAT_C1    0x3U
+#define V_T5_AE1_RXSTAT_C1(x) ((x) << S_T5_AE1_RXSTAT_C1)
+#define G_T5_AE1_RXSTAT_C1(x) (((x) >> S_T5_AE1_RXSTAT_C1) & M_T5_AE1_RXSTAT_C1)
+
+#define S_T5_AE1_RXSTAT_C0    0
+#define M_T5_AE1_RXSTAT_C0    0x3U
+#define V_T5_AE1_RXSTAT_C0(x) ((x) << S_T5_AE1_RXSTAT_C0)
+#define G_T5_AE1_RXSTAT_C0(x) (((x) >> S_T5_AE1_RXSTAT_C0) & M_T5_AE1_RXSTAT_C0)
+
+#define S_T5_AE1_RXSTAT_LSNA    14
+#define V_T5_AE1_RXSTAT_LSNA(x) ((x) << S_T5_AE1_RXSTAT_LSNA)
+#define F_T5_AE1_RXSTAT_LSNA    V_T5_AE1_RXSTAT_LSNA(1U)
+
+#define S_T5_AE1_RXSTAT_FEC    13
+#define V_T5_AE1_RXSTAT_FEC(x) ((x) << S_T5_AE1_RXSTAT_FEC)
+#define F_T5_AE1_RXSTAT_FEC    V_T5_AE1_RXSTAT_FEC(1U)
+
+#define S_T5_AE1_RXSTAT_TF    12
+#define V_T5_AE1_RXSTAT_TF(x) ((x) << S_T5_AE1_RXSTAT_TF)
+#define F_T5_AE1_RXSTAT_TF    V_T5_AE1_RXSTAT_TF(1U)
+
+#define S_T5_AE1_RXSTAT_C3    6
+#define M_T5_AE1_RXSTAT_C3    0x3U
+#define V_T5_AE1_RXSTAT_C3(x) ((x) << S_T5_AE1_RXSTAT_C3)
+#define G_T5_AE1_RXSTAT_C3(x) (((x) >> S_T5_AE1_RXSTAT_C3) & M_T5_AE1_RXSTAT_C3)
+
+#define A_MAC_PORT_AE_TX_COEF_REQ_1 0x2a28
+#define A_MAC_PORT_AE_TX_COEF_STAT_1 0x2a2c
+#define A_MAC_PORT_AE_REG_MODE_1 0x2a30
+#define A_MAC_PORT_AE_PRBS_CTL_1 0x2a34
+#define A_MAC_PORT_AE_FSM_CTL_1 0x2a38
+#define A_MAC_PORT_AE_FSM_STATE_1 0x2a3c
+#define A_MAC_PORT_AE_RX_COEF_REQ_2 0x2a40
+#define A_MAC_PORT_AE_RX_COEF_STAT_2 0x2a44
+
+#define S_T5_AE2_RXSTAT_RDY    15
+#define V_T5_AE2_RXSTAT_RDY(x) ((x) << S_T5_AE2_RXSTAT_RDY)
+#define F_T5_AE2_RXSTAT_RDY    V_T5_AE2_RXSTAT_RDY(1U)
+
+#define S_T5_AE2_RXSTAT_C2    4
+#define M_T5_AE2_RXSTAT_C2    0x3U
+#define V_T5_AE2_RXSTAT_C2(x) ((x) << S_T5_AE2_RXSTAT_C2)
+#define G_T5_AE2_RXSTAT_C2(x) (((x) >> S_T5_AE2_RXSTAT_C2) & M_T5_AE2_RXSTAT_C2)
+
+#define S_T5_AE2_RXSTAT_C1    2
+#define M_T5_AE2_RXSTAT_C1    0x3U
+#define V_T5_AE2_RXSTAT_C1(x) ((x) << S_T5_AE2_RXSTAT_C1)
+#define G_T5_AE2_RXSTAT_C1(x) (((x) >> S_T5_AE2_RXSTAT_C1) & M_T5_AE2_RXSTAT_C1)
+
+#define S_T5_AE2_RXSTAT_C0    0
+#define M_T5_AE2_RXSTAT_C0    0x3U
+#define V_T5_AE2_RXSTAT_C0(x) ((x) << S_T5_AE2_RXSTAT_C0)
+#define G_T5_AE2_RXSTAT_C0(x) (((x) >> S_T5_AE2_RXSTAT_C0) & M_T5_AE2_RXSTAT_C0)
+
+#define S_T5_AE2_RXSTAT_LSNA    14
+#define V_T5_AE2_RXSTAT_LSNA(x) ((x) << S_T5_AE2_RXSTAT_LSNA)
+#define F_T5_AE2_RXSTAT_LSNA    V_T5_AE2_RXSTAT_LSNA(1U)
+
+#define S_T5_AE2_RXSTAT_FEC    13
+#define V_T5_AE2_RXSTAT_FEC(x) ((x) << S_T5_AE2_RXSTAT_FEC)
+#define F_T5_AE2_RXSTAT_FEC    V_T5_AE2_RXSTAT_FEC(1U)
+
+#define S_T5_AE2_RXSTAT_TF    12
+#define V_T5_AE2_RXSTAT_TF(x) ((x) << S_T5_AE2_RXSTAT_TF)
+#define F_T5_AE2_RXSTAT_TF    V_T5_AE2_RXSTAT_TF(1U)
+
+#define S_T5_AE2_RXSTAT_C3    6
+#define M_T5_AE2_RXSTAT_C3    0x3U
+#define V_T5_AE2_RXSTAT_C3(x) ((x) << S_T5_AE2_RXSTAT_C3)
+#define G_T5_AE2_RXSTAT_C3(x) (((x) >> S_T5_AE2_RXSTAT_C3) & M_T5_AE2_RXSTAT_C3)
+
+#define A_MAC_PORT_AE_TX_COEF_REQ_2 0x2a48
+#define A_MAC_PORT_AE_TX_COEF_STAT_2 0x2a4c
+#define A_MAC_PORT_AE_REG_MODE_2 0x2a50
+#define A_MAC_PORT_AE_PRBS_CTL_2 0x2a54
+#define A_MAC_PORT_AE_FSM_CTL_2 0x2a58
+#define A_MAC_PORT_AE_FSM_STATE_2 0x2a5c
+#define A_MAC_PORT_AE_RX_COEF_REQ_3 0x2a60
+#define A_MAC_PORT_AE_RX_COEF_STAT_3 0x2a64
+
+#define S_T5_AE3_RXSTAT_RDY    15
+#define V_T5_AE3_RXSTAT_RDY(x) ((x) << S_T5_AE3_RXSTAT_RDY)
+#define F_T5_AE3_RXSTAT_RDY    V_T5_AE3_RXSTAT_RDY(1U)
+
+#define S_T5_AE3_RXSTAT_C2    4
+#define M_T5_AE3_RXSTAT_C2    0x3U
+#define V_T5_AE3_RXSTAT_C2(x) ((x) << S_T5_AE3_RXSTAT_C2)
+#define G_T5_AE3_RXSTAT_C2(x) (((x) >> S_T5_AE3_RXSTAT_C2) & M_T5_AE3_RXSTAT_C2)
+
+#define S_T5_AE3_RXSTAT_C1    2
+#define M_T5_AE3_RXSTAT_C1    0x3U
+#define V_T5_AE3_RXSTAT_C1(x) ((x) << S_T5_AE3_RXSTAT_C1)
+#define G_T5_AE3_RXSTAT_C1(x) (((x) >> S_T5_AE3_RXSTAT_C1) & M_T5_AE3_RXSTAT_C1)
+
+#define S_T5_AE3_RXSTAT_C0    0
+#define M_T5_AE3_RXSTAT_C0    0x3U
+#define V_T5_AE3_RXSTAT_C0(x) ((x) << S_T5_AE3_RXSTAT_C0)
+#define G_T5_AE3_RXSTAT_C0(x) (((x) >> S_T5_AE3_RXSTAT_C0) & M_T5_AE3_RXSTAT_C0)
+
+#define S_T5_AE3_RXSTAT_LSNA    14
+#define V_T5_AE3_RXSTAT_LSNA(x) ((x) << S_T5_AE3_RXSTAT_LSNA)
+#define F_T5_AE3_RXSTAT_LSNA    V_T5_AE3_RXSTAT_LSNA(1U)
+
+#define S_T5_AE3_RXSTAT_FEC    13
+#define V_T5_AE3_RXSTAT_FEC(x) ((x) << S_T5_AE3_RXSTAT_FEC)
+#define F_T5_AE3_RXSTAT_FEC    V_T5_AE3_RXSTAT_FEC(1U)
+
+#define S_T5_AE3_RXSTAT_TF    12
+#define V_T5_AE3_RXSTAT_TF(x) ((x) << S_T5_AE3_RXSTAT_TF)
+#define F_T5_AE3_RXSTAT_TF    V_T5_AE3_RXSTAT_TF(1U)
+
+#define S_T5_AE3_RXSTAT_C3    6
+#define M_T5_AE3_RXSTAT_C3    0x3U
+#define V_T5_AE3_RXSTAT_C3(x) ((x) << S_T5_AE3_RXSTAT_C3)
+#define G_T5_AE3_RXSTAT_C3(x) (((x) >> S_T5_AE3_RXSTAT_C3) & M_T5_AE3_RXSTAT_C3)
+
+#define A_MAC_PORT_AE_TX_COEF_REQ_3 0x2a68
+#define A_MAC_PORT_AE_TX_COEF_STAT_3 0x2a6c
+#define A_MAC_PORT_AE_REG_MODE_3 0x2a70
+#define A_MAC_PORT_AE_PRBS_CTL_3 0x2a74
+#define A_MAC_PORT_AE_FSM_CTL_3 0x2a78
+#define A_MAC_PORT_AE_FSM_STATE_3 0x2a7c
+#define A_MAC_PORT_AE_TX_DIS 0x2a80
+#define A_MAC_PORT_AE_KR_CTRL 0x2a84
+#define A_MAC_PORT_AE_RX_SIGDET 0x2a88
+#define A_MAC_PORT_AE_KR_STATUS 0x2a8c
+#define A_MAC_PORT_AE_TX_DIS_1 0x2a90
+#define A_MAC_PORT_AE_KR_CTRL_1 0x2a94
+#define A_MAC_PORT_AE_RX_SIGDET_1 0x2a98
+#define A_MAC_PORT_AE_KR_STATUS_1 0x2a9c
+#define A_MAC_PORT_AE_TX_DIS_2 0x2aa0
+#define A_MAC_PORT_AE_KR_CTRL_2 0x2aa4
+#define A_MAC_PORT_AE_RX_SIGDET_2 0x2aa8
+#define A_MAC_PORT_AE_KR_STATUS_2 0x2aac
+#define A_MAC_PORT_AE_TX_DIS_3 0x2ab0
+#define A_MAC_PORT_AE_KR_CTRL_3 0x2ab4
+#define A_MAC_PORT_AE_RX_SIGDET_3 0x2ab8
+#define A_MAC_PORT_AE_KR_STATUS_3 0x2abc
+#define A_MAC_PORT_AET_STAGE_CONFIGURATION_0 0x2b00
+
+#define S_EN_HOLD_FAIL    14
+#define V_EN_HOLD_FAIL(x) ((x) << S_EN_HOLD_FAIL)
+#define F_EN_HOLD_FAIL    V_EN_HOLD_FAIL(1U)
+
+#define S_INIT_METH    12
+#define M_INIT_METH    0x3U
+#define V_INIT_METH(x) ((x) << S_INIT_METH)
+#define G_INIT_METH(x) (((x) >> S_INIT_METH) & M_INIT_METH)
+
+#define S_CE_DECS    8
+#define M_CE_DECS    0xfU
+#define V_CE_DECS(x) ((x) << S_CE_DECS)
+#define G_CE_DECS(x) (((x) >> S_CE_DECS) & M_CE_DECS)
+
+#define S_EN_ZFE    7
+#define V_EN_ZFE(x) ((x) << S_EN_ZFE)
+#define F_EN_ZFE    V_EN_ZFE(1U)
+
+#define S_EN_GAIN_TOG    6
+#define V_EN_GAIN_TOG(x) ((x) << S_EN_GAIN_TOG)
+#define F_EN_GAIN_TOG    V_EN_GAIN_TOG(1U)
+
+#define S_EN_AI_C1    5
+#define V_EN_AI_C1(x) ((x) << S_EN_AI_C1)
+#define F_EN_AI_C1    V_EN_AI_C1(1U)
+
+#define S_EN_MAX_ST    4
+#define V_EN_MAX_ST(x) ((x) << S_EN_MAX_ST)
+#define F_EN_MAX_ST    V_EN_MAX_ST(1U)
+
+#define S_EN_H1T_EQ    3
+#define V_EN_H1T_EQ(x) ((x) << S_EN_H1T_EQ)
+#define F_EN_H1T_EQ    V_EN_H1T_EQ(1U)
+
+#define S_H1TEQ_GOAL    0
+#define M_H1TEQ_GOAL    0x7U
+#define V_H1TEQ_GOAL(x) ((x) << S_H1TEQ_GOAL)
+#define G_H1TEQ_GOAL(x) (((x) >> S_H1TEQ_GOAL) & M_H1TEQ_GOAL)
+
+#define S_T6_INIT_METH    12
+#define M_T6_INIT_METH    0xfU
+#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
+#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
+
+#define S_INIT_CNT    8
+#define M_INIT_CNT    0xfU
+#define V_INIT_CNT(x) ((x) << S_INIT_CNT)
+#define G_INIT_CNT(x) (((x) >> S_INIT_CNT) & M_INIT_CNT)
+
+#define S_EN_AI_N0    5
+#define V_EN_AI_N0(x) ((x) << S_EN_AI_N0)
+#define F_EN_AI_N0    V_EN_AI_N0(1U)
+
+#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_0 0x2b04
+
+#define S_GAIN_TH    6
+#define M_GAIN_TH    0x1fU
+#define V_GAIN_TH(x) ((x) << S_GAIN_TH)
+#define G_GAIN_TH(x) (((x) >> S_GAIN_TH) & M_GAIN_TH)
+
+#define S_EN_SD_TH    5
+#define V_EN_SD_TH(x) ((x) << S_EN_SD_TH)
+#define F_EN_SD_TH    V_EN_SD_TH(1U)
+
+#define S_EN_AMIN_TH    4
+#define V_EN_AMIN_TH(x) ((x) << S_EN_AMIN_TH)
+#define F_EN_AMIN_TH    V_EN_AMIN_TH(1U)
+
+#define S_AMIN_TH    0
+#define M_AMIN_TH    0xfU
+#define V_AMIN_TH(x) ((x) << S_AMIN_TH)
+#define G_AMIN_TH(x) (((x) >> S_AMIN_TH) & M_AMIN_TH)
+
+#define S_FEC_CNV    15
+#define V_FEC_CNV(x) ((x) << S_FEC_CNV)
+#define F_FEC_CNV    V_FEC_CNV(1U)
+
+#define S_EN_RETRY    14
+#define V_EN_RETRY(x) ((x) << S_EN_RETRY)
+#define F_EN_RETRY    V_EN_RETRY(1U)
+
+#define S_DPC_METH    12
+#define M_DPC_METH    0x3U
+#define V_DPC_METH(x) ((x) << S_DPC_METH)
+#define G_DPC_METH(x) (((x) >> S_DPC_METH) & M_DPC_METH)
+
+#define S_EN_P2    11
+#define V_EN_P2(x) ((x) << S_EN_P2)
+#define F_EN_P2    V_EN_P2(1U)
+
+#define A_MAC_PORT_AET_ZFE_LIMITS_0 0x2b08
+
+#define S_ACC_LIM    8
+#define M_ACC_LIM    0xfU
+#define V_ACC_LIM(x) ((x) << S_ACC_LIM)
+#define G_ACC_LIM(x) (((x) >> S_ACC_LIM) & M_ACC_LIM)
+
+#define S_CNV_LIM    4
+#define M_CNV_LIM    0xfU
+#define V_CNV_LIM(x) ((x) << S_CNV_LIM)
+#define G_CNV_LIM(x) (((x) >> S_CNV_LIM) & M_CNV_LIM)
+
+#define S_TOG_LIM    0
+#define M_TOG_LIM    0xfU
+#define V_TOG_LIM(x) ((x) << S_TOG_LIM)
+#define G_TOG_LIM(x) (((x) >> S_TOG_LIM) & M_TOG_LIM)
+
+#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_0 0x2b0c
+
+#define S_BOOT_LUT7    12
+#define M_BOOT_LUT7    0xfU
+#define V_BOOT_LUT7(x) ((x) << S_BOOT_LUT7)
+#define G_BOOT_LUT7(x) (((x) >> S_BOOT_LUT7) & M_BOOT_LUT7)
+
+#define S_BOOT_LUT6    8
+#define M_BOOT_LUT6    0xfU
+#define V_BOOT_LUT6(x) ((x) << S_BOOT_LUT6)
+#define G_BOOT_LUT6(x) (((x) >> S_BOOT_LUT6) & M_BOOT_LUT6)
+
+#define S_BOOT_LUT45    4
+#define M_BOOT_LUT45    0xfU
+#define V_BOOT_LUT45(x) ((x) << S_BOOT_LUT45)
+#define G_BOOT_LUT45(x) (((x) >> S_BOOT_LUT45) & M_BOOT_LUT45)
+
+#define S_BOOT_LUT0123    2
+#define M_BOOT_LUT0123    0x3U
+#define V_BOOT_LUT0123(x) ((x) << S_BOOT_LUT0123)
+#define G_BOOT_LUT0123(x) (((x) >> S_BOOT_LUT0123) & M_BOOT_LUT0123)
+
+#define S_BOOT_DEC_C0    1
+#define V_BOOT_DEC_C0(x) ((x) << S_BOOT_DEC_C0)
+#define F_BOOT_DEC_C0    V_BOOT_DEC_C0(1U)
+
+#define S_BOOT_LUT5    8
+#define M_BOOT_LUT5    0xfU
+#define V_BOOT_LUT5(x) ((x) << S_BOOT_LUT5)
+#define G_BOOT_LUT5(x) (((x) >> S_BOOT_LUT5) & M_BOOT_LUT5)
+
+#define A_MAC_PORT_AET_STATUS_0 0x2b10
+
+#define S_AET_STAT    9
+#define M_AET_STAT    0xfU
+#define V_AET_STAT(x) ((x) << S_AET_STAT)
+#define G_AET_STAT(x) (((x) >> S_AET_STAT) & M_AET_STAT)
+
+#define S_NEU_STATE    5
+#define M_NEU_STATE    0xfU
+#define V_NEU_STATE(x) ((x) << S_NEU_STATE)
+#define G_NEU_STATE(x) (((x) >> S_NEU_STATE) & M_NEU_STATE)
+
+#define S_CTRL_STATE    0
+#define M_CTRL_STATE    0x1fU
+#define V_CTRL_STATE(x) ((x) << S_CTRL_STATE)
+#define G_CTRL_STATE(x) (((x) >> S_CTRL_STATE) & M_CTRL_STATE)
+
+#define S_CTRL_STAT    8
+#define M_CTRL_STAT    0x1fU
+#define V_CTRL_STAT(x) ((x) << S_CTRL_STAT)
+#define G_CTRL_STAT(x) (((x) >> S_CTRL_STAT) & M_CTRL_STAT)
+
+#define S_T6_NEU_STATE    4
+#define M_T6_NEU_STATE    0xfU
+#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
+#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
+
+#define S_T6_CTRL_STATE    0
+#define M_T6_CTRL_STATE    0xfU
+#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
+#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
+
+#define A_MAC_PORT_AET_STATUS_20 0x2b14
+
+#define S_FRAME_LOCK_CNT    0
+#define M_FRAME_LOCK_CNT    0x7U
+#define V_FRAME_LOCK_CNT(x) ((x) << S_FRAME_LOCK_CNT)
+#define G_FRAME_LOCK_CNT(x) (((x) >> S_FRAME_LOCK_CNT) & M_FRAME_LOCK_CNT)
+
+#define A_MAC_PORT_AET_LIMITS0 0x2b18
+
+#define S_DPC_TIME_LIM    0
+#define M_DPC_TIME_LIM    0x3U
+#define V_DPC_TIME_LIM(x) ((x) << S_DPC_TIME_LIM)
+#define G_DPC_TIME_LIM(x) (((x) >> S_DPC_TIME_LIM) & M_DPC_TIME_LIM)
+
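/*
 * Editor's note, not part of the commit: a sketch splitting the
 * T5-layout A_MAC_PORT_AET_STATUS_0 word into its three state fields.
 * The T6_*-prefixed variants above redefine the field positions for
 * T6, so a real caller would have to pick the set matching the chip.
 */
static inline void
aet_status_decode(uint32_t stat, u_int *aet, u_int *neu, u_int *ctrl)
{

	*aet = G_AET_STAT(stat);	/* bits 9..12 */
	*neu = G_NEU_STATE(stat);	/* bits 5..8 */
	*ctrl = G_CTRL_STATE(stat);	/* bits 0..4 */
}
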
+#define A_MAC_PORT_AET_STAGE_CONFIGURATION_1 0x2b20
+
+#define S_T6_INIT_METH    12
+#define M_T6_INIT_METH    0xfU
+#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
+#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
+
+#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_1 0x2b24
+#define A_MAC_PORT_AET_ZFE_LIMITS_1 0x2b28
+#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_1 0x2b2c
+#define A_MAC_PORT_AET_STATUS_1 0x2b30
+
+#define S_T6_NEU_STATE    4
+#define M_T6_NEU_STATE    0xfU
+#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
+#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
+
+#define S_T6_CTRL_STATE    0
+#define M_T6_CTRL_STATE    0xfU
+#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
+#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
+
+#define A_MAC_PORT_AET_STATUS_21 0x2b34
+#define A_MAC_PORT_AET_LIMITS1 0x2b38
+#define A_MAC_PORT_AET_STAGE_CONFIGURATION_2 0x2b40
+
+#define S_T6_INIT_METH    12
+#define M_T6_INIT_METH    0xfU
+#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
+#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
+
+#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_2 0x2b44
+#define A_MAC_PORT_AET_ZFE_LIMITS_2 0x2b48
+#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_2 0x2b4c
+#define A_MAC_PORT_AET_STATUS_2 0x2b50
+
+#define S_T6_NEU_STATE    4
+#define M_T6_NEU_STATE    0xfU
+#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
+#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
+
+#define S_T6_CTRL_STATE    0
+#define M_T6_CTRL_STATE    0xfU
+#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
+#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
+
+#define A_MAC_PORT_AET_STATUS_22 0x2b54
+#define A_MAC_PORT_AET_LIMITS2 0x2b58
+#define A_MAC_PORT_AET_STAGE_CONFIGURATION_3 0x2b60
+
+#define S_T6_INIT_METH    12
+#define M_T6_INIT_METH    0xfU
+#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
+#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
+
+#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_3 0x2b64
+#define A_MAC_PORT_AET_ZFE_LIMITS_3 0x2b68
+#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_3 0x2b6c
+#define A_MAC_PORT_AET_STATUS_3 0x2b70
+
+#define S_T6_NEU_STATE    4
+#define M_T6_NEU_STATE    0xfU
+#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
+#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
+
+#define S_T6_CTRL_STATE    0
+#define M_T6_CTRL_STATE    0xfU
+#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
+#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
+
+#define A_MAC_PORT_AET_STATUS_23 0x2b74
+#define A_MAC_PORT_AET_LIMITS3 0x2b78
+#define A_T6_MAC_PORT_BEAN_CTL 0x2c00
+#define A_T6_MAC_PORT_BEAN_STATUS 0x2c04
+#define A_T6_MAC_PORT_BEAN_ABILITY_0 0x2c08
+
+#define S_BEAN_REM_FAULT    13
+#define V_BEAN_REM_FAULT(x) ((x) << S_BEAN_REM_FAULT)
+#define F_BEAN_REM_FAULT    V_BEAN_REM_FAULT(1U)
+
+#define A_T6_MAC_PORT_BEAN_ABILITY_1 0x2c0c
+#define A_T6_MAC_PORT_BEAN_ABILITY_2 0x2c10
+#define A_T6_MAC_PORT_BEAN_REM_ABILITY_0 0x2c14
+
+#define S_BEAN_ABL_REM_FAULT    13
+#define V_BEAN_ABL_REM_FAULT(x) ((x) << S_BEAN_ABL_REM_FAULT)
+#define F_BEAN_ABL_REM_FAULT    V_BEAN_ABL_REM_FAULT(1U)
+
+#define A_T6_MAC_PORT_BEAN_REM_ABILITY_1 0x2c18
+#define A_T6_MAC_PORT_BEAN_REM_ABILITY_2 0x2c1c
+#define A_T6_MAC_PORT_BEAN_MS_COUNT 0x2c20
+#define A_T6_MAC_PORT_BEAN_XNP_0 0x2c24
+#define A_T6_MAC_PORT_BEAN_XNP_1 0x2c28
+#define A_T6_MAC_PORT_BEAN_XNP_2 0x2c2c
+#define A_T6_MAC_PORT_LP_BEAN_XNP_0 0x2c30
+#define A_T6_MAC_PORT_LP_BEAN_XNP_1 0x2c34
+#define A_T6_MAC_PORT_LP_BEAN_XNP_2 0x2c38
+#define A_T6_MAC_PORT_BEAN_ETH_STATUS 0x2c3c
+
+#define S_100GCR4    11
+#define V_100GCR4(x) ((x) << S_100GCR4)
+#define F_100GCR4    V_100GCR4(1U)
+
+#define S_100GKR4    10
+#define V_100GKR4(x) ((x) << S_100GKR4)
+#define F_100GKR4    V_100GKR4(1U)
+
+#define S_100GKP4    9
+#define V_100GKP4(x) ((x) << S_100GKP4)
+#define F_100GKP4    V_100GKP4(1U)
+
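/*
 * Editor's note, not part of the commit: a sketch testing the
 * 100G/4-lane ability bits in A_T6_MAC_PORT_BEAN_ETH_STATUS.  That
 * BEAN is the backplane Ethernet autonegotiation block is an
 * inference from the register names, not something the commit states.
 */
static inline int
bean_eth_status_100g(uint32_t status)
{

	return ((status & (F_100GCR4 | F_100GKR4 | F_100GKP4)) != 0);
}
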
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_CONFIGURATION_MODE 0x3000
+
+#define S_T5_TX_LINKEN    15
+#define V_T5_TX_LINKEN(x) ((x) << S_T5_TX_LINKEN)
+#define F_T5_TX_LINKEN    V_T5_TX_LINKEN(1U)
+
+#define S_T5_TX_LINKRST    14
+#define V_T5_TX_LINKRST(x) ((x) << S_T5_TX_LINKRST)
+#define F_T5_TX_LINKRST    V_T5_TX_LINKRST(1U)
+
+#define S_T5_TX_CFGWRT    13
+#define V_T5_TX_CFGWRT(x) ((x) << S_T5_TX_CFGWRT)
+#define F_T5_TX_CFGWRT    V_T5_TX_CFGWRT(1U)
+
+#define S_T5_TX_CFGPTR    11
+#define M_T5_TX_CFGPTR    0x3U
+#define V_T5_TX_CFGPTR(x) ((x) << S_T5_TX_CFGPTR)
+#define G_T5_TX_CFGPTR(x) (((x) >> S_T5_TX_CFGPTR) & M_T5_TX_CFGPTR)
+
+#define S_T5_TX_CFGEXT    10
+#define V_T5_TX_CFGEXT(x) ((x) << S_T5_TX_CFGEXT)
+#define F_T5_TX_CFGEXT    V_T5_TX_CFGEXT(1U)
+
+#define S_T5_TX_CFGACT    9
+#define V_T5_TX_CFGACT(x) ((x) << S_T5_TX_CFGACT)
+#define F_T5_TX_CFGACT    V_T5_TX_CFGACT(1U)
+
+#define S_T5_TX_RSYNCC    8
+#define V_T5_TX_RSYNCC(x) ((x) << S_T5_TX_RSYNCC)
+#define F_T5_TX_RSYNCC    V_T5_TX_RSYNCC(1U)
+
+#define S_T5_TX_PLLSEL    6
+#define M_T5_TX_PLLSEL    0x3U
+#define V_T5_TX_PLLSEL(x) ((x) << S_T5_TX_PLLSEL)
+#define G_T5_TX_PLLSEL(x) (((x) >> S_T5_TX_PLLSEL) & M_T5_TX_PLLSEL)
+
+#define S_T5_TX_EXTC16    5
+#define V_T5_TX_EXTC16(x) ((x) << S_T5_TX_EXTC16)
+#define F_T5_TX_EXTC16    V_T5_TX_EXTC16(1U)
+
+#define S_T5_TX_DCKSEL    4
+#define V_T5_TX_DCKSEL(x) ((x) << S_T5_TX_DCKSEL)
+#define F_T5_TX_DCKSEL    V_T5_TX_DCKSEL(1U)
+
+#define S_T5_TX_RXLOOP    3
+#define V_T5_TX_RXLOOP(x) ((x) << S_T5_TX_RXLOOP)
+#define F_T5_TX_RXLOOP    V_T5_TX_RXLOOP(1U)
+
+#define S_T5_TX_BWSEL    2
+#define V_T5_TX_BWSEL(x) ((x) << S_T5_TX_BWSEL)
+#define F_T5_TX_BWSEL    V_T5_TX_BWSEL(1U)
+
+#define S_T5_TX_RTSEL    0
+#define M_T5_TX_RTSEL    0x3U
+#define V_T5_TX_RTSEL(x) ((x) << S_T5_TX_RTSEL)
+#define G_T5_TX_RTSEL(x) (((x) >> S_T5_TX_RTSEL) & M_T5_TX_RTSEL)
+
+#define S_T6_T5_TX_RXLOOP    5
+#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
+#define F_T6_T5_TX_RXLOOP    V_T6_T5_TX_RXLOOP(1U)
+
+#define S_T5_TX_ENFFE4    4
+#define V_T5_TX_ENFFE4(x) ((x) << S_T5_TX_ENFFE4)
+#define F_T5_TX_ENFFE4    V_T5_TX_ENFFE4(1U)
+
+#define S_T6_T5_TX_BWSEL    2
+#define M_T6_T5_TX_BWSEL    0x3U
+#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
+#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TEST_CONTROL 0x3004
+
+#define S_SPSEL    11
+#define M_SPSEL    0x7U
+#define V_SPSEL(x) ((x) << S_SPSEL)
+#define G_SPSEL(x) (((x) >> S_SPSEL) & M_SPSEL)
+
+#define S_AFDWEN    7
+#define V_AFDWEN(x) ((x) << S_AFDWEN)
+#define F_AFDWEN    V_AFDWEN(1U)
+
+#define S_TPGMD    3
+#define V_TPGMD(x) ((x) << S_TPGMD)
+#define F_TPGMD    V_TPGMD(1U)
+
+#define S_TC_FRCERR    10
+#define V_TC_FRCERR(x) ((x) << S_TC_FRCERR)
+#define F_TC_FRCERR    V_TC_FRCERR(1U)
+
+#define S_T6_ERROR    9
+#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
+#define F_T6_ERROR    V_T6_ERROR(1U)
+
+#define S_SYNC    8
+#define V_SYNC(x) ((x) << S_SYNC)
+#define F_SYNC    V_SYNC(1U)
+
+#define S_P7CHK    5
+#define V_P7CHK(x) ((x) << S_P7CHK)
+#define F_P7CHK    V_P7CHK(1U)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_COEFFICIENT_CONTROL 0x3008
+
+#define S_ZCALOVRD    8
+#define V_ZCALOVRD(x) ((x) << S_ZCALOVRD)
+#define F_ZCALOVRD    V_ZCALOVRD(1U)
+
+#define S_AMMODE    7
+#define V_AMMODE(x) ((x) << S_AMMODE)
+#define F_AMMODE    V_AMMODE(1U)
+
+#define S_AEPOL    6
+#define V_AEPOL(x) ((x) << S_AEPOL)
+#define F_AEPOL    V_AEPOL(1U)
+
+#define S_AESRC    5
+#define V_AESRC(x) ((x) << S_AESRC)
+#define F_AESRC    V_AESRC(1U)
+
+#define S_SASMODE    7
+#define V_SASMODE(x) ((x) << S_SASMODE)
+#define F_SASMODE    V_SASMODE(1U)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DRIVER_MODE_CONTROL 0x300c
+
+#define S_T5DRVHIZ    5
+#define V_T5DRVHIZ(x) ((x) << S_T5DRVHIZ)
+#define F_T5DRVHIZ    V_T5DRVHIZ(1U)
+
+#define S_T5SASIMP    4
+#define V_T5SASIMP(x) ((x) << S_T5SASIMP)
+#define F_T5SASIMP    V_T5SASIMP(1U)
+
+#define S_T5SLEW    2
+#define M_T5SLEW    0x3U
+#define V_T5SLEW(x) ((x) << S_T5SLEW)
+#define G_T5SLEW(x) (((x) >> S_T5SLEW) & M_T5SLEW)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3010
+
+#define S_T5C2BUFDCEN    5
+#define V_T5C2BUFDCEN(x) ((x) << S_T5C2BUFDCEN)
+#define F_T5C2BUFDCEN    V_T5C2BUFDCEN(1U)
+
+#define S_T5DCCEN    4
+#define V_T5DCCEN(x) ((x) << S_T5DCCEN)
+#define F_T5DCCEN    V_T5DCCEN(1U)
+
+#define S_T5REGBYP    3
+#define V_T5REGBYP(x) ((x) << S_T5REGBYP)
+#define F_T5REGBYP    V_T5REGBYP(1U)
+
+#define S_T5REGAEN    2
+#define V_T5REGAEN(x) ((x) << S_T5REGAEN)
+#define F_T5REGAEN    V_T5REGAEN(1U)
+
+#define S_T5REGAMP    0
+#define M_T5REGAMP    0x3U
+#define V_T5REGAMP(x) ((x) << S_T5REGAMP)
+#define G_T5REGAMP(x) (((x) >> S_T5REGAMP) & M_T5REGAMP)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3014
+
+#define S_RSTEP    15
+#define V_RSTEP(x) ((x) << S_RSTEP)
+#define F_RSTEP    V_RSTEP(1U)
+
+#define S_RLOCK    14
+#define V_RLOCK(x) ((x) << S_RLOCK)
+#define F_RLOCK    V_RLOCK(1U)
+
+#define S_RPOS    8
+#define M_RPOS    0x3fU
+#define V_RPOS(x) ((x) << S_RPOS)
+#define G_RPOS(x) (((x) >> S_RPOS) & M_RPOS)
+
+#define S_DCLKSAM    7
+#define V_DCLKSAM(x) ((x) << S_DCLKSAM)
+#define F_DCLKSAM    V_DCLKSAM(1U)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3018
+
+#define S_CALSSTN    3
+#define M_CALSSTN    0x7U
+#define V_CALSSTN(x) ((x) << S_CALSSTN)
+#define G_CALSSTN(x) (((x) >> S_CALSSTN) & M_CALSSTN)
+
+#define S_CALSSTP    0
+#define M_CALSSTP    0x7U
+#define V_CALSSTP(x) ((x) << S_CALSSTP)
+#define G_CALSSTP(x) (((x) >> S_CALSSTP) & M_CALSSTP)
+
+#define S_T6_CALSSTN    8
+#define M_T6_CALSSTN    0x3fU
+#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
+#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
+
+#define S_T6_CALSSTP    0
+#define M_T6_CALSSTP    0x3fU
+#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
+#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x301c
+
+#define S_DRTOL    0
+#define M_DRTOL    0x1fU
+#define V_DRTOL(x) ((x) << S_DRTOL)
+#define G_DRTOL(x) (((x) >> S_DRTOL) & M_DRTOL)
+
+#define S_T6_DRTOL    2
+#define M_T6_DRTOL    0x7U
+#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
+#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_0_COEFFICIENT 0x3020
+
+#define S_T5NXTT0    0
+#define M_T5NXTT0    0x1fU
+#define V_T5NXTT0(x) ((x) << S_T5NXTT0)
+#define G_T5NXTT0(x) (((x) >> S_T5NXTT0) & M_T5NXTT0)
+
+#define S_T6_NXTT0    0
+#define M_T6_NXTT0    0x3fU
+#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
+#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_1_COEFFICIENT 0x3024
+
+#define S_T5NXTT1    0
+#define M_T5NXTT1    0x3fU
+#define V_T5NXTT1(x) ((x) << S_T5NXTT1)
+#define G_T5NXTT1(x) (((x) >> S_T5NXTT1) & M_T5NXTT1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_2_COEFFICIENT 0x3028
+
+#define S_T5NXTT2    0
+#define M_T5NXTT2    0x3fU
+#define V_T5NXTT2(x) ((x) << S_T5NXTT2)
+#define G_T5NXTT2(x) (((x) >> S_T5NXTT2) & M_T5NXTT2)
+
+#define S_T6_NXTT2    0
+#define M_T6_NXTT2    0x3fU
+#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
+#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_3_COEFFICIENT 0x302c
+
+#define S_NXTT3    0
+#define M_NXTT3    0x3fU
+#define V_NXTT3(x) ((x) << S_NXTT3)
+#define G_NXTT3(x) (((x) >> S_NXTT3) & M_NXTT3)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AMPLITUDE 0x3030
+
+#define S_T5TXPWR    0
+#define M_T5TXPWR    0x3fU
+#define V_T5TXPWR(x) ((x) << S_T5TXPWR)
+#define G_T5TXPWR(x) (((x) >> S_T5TXPWR) & M_T5TXPWR)
+
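/*
 * Editor's note, not part of the commit: the canonical way to update a
 * multi-bit field with these macros is to clear it through V_(M_) and
 * then OR in the new value.  Sketched here for the 6-bit transmit
 * amplitude field; "val" is assumed to hold the current contents of
 * A_MAC_PORT_TX_LINKA_TRANSMIT_AMPLITUDE.
 */
static inline uint32_t
tx_set_amplitude(uint32_t val, u_int pwr)
{

	val &= ~V_T5TXPWR(M_T5TXPWR);		/* clear bits 0..5 */
	return (val | V_T5TXPWR(pwr & M_T5TXPWR));
}
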
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_POLARITY 0x3034
+
+#define S_NXTPOL    0
+#define M_NXTPOL    0x7U
+#define V_NXTPOL(x) ((x) << S_NXTPOL)
+#define G_NXTPOL(x) (((x) >> S_NXTPOL) & M_NXTPOL)
+
+#define S_T6_NXTPOL    0
+#define M_T6_NXTPOL    0xfU
+#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
+#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3038
+
+#define S_CPREST    13
+#define V_CPREST(x) ((x) << S_CPREST)
+#define F_CPREST    V_CPREST(1U)
+
+#define S_CINIT    12
+#define V_CINIT(x) ((x) << S_CINIT)
+#define F_CINIT    V_CINIT(1U)
+
+#define S_SASCMD    10
+#define M_SASCMD    0x3U
+#define V_SASCMD(x) ((x) << S_SASCMD)
+#define G_SASCMD(x) (((x) >> S_SASCMD) & M_SASCMD)
+
+#define S_T6_C0UPDT    6
+#define M_T6_C0UPDT    0x3U
+#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
+#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
+
+#define S_C3UPDT    4
+#define M_C3UPDT    0x3U
+#define V_C3UPDT(x) ((x) << S_C3UPDT)
+#define G_C3UPDT(x) (((x) >> S_C3UPDT) & M_C3UPDT)
+
+#define S_T6_C2UPDT    2
+#define M_T6_C2UPDT    0x3U
+#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
+#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
+
+#define S_T6_C1UPDT    0
+#define M_T6_C1UPDT    0x3U
+#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
+#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x303c
+
+#define S_T6_C0STAT    6
+#define M_T6_C0STAT    0x3U
+#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
+#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
+
+#define S_C3STAT    4
+#define M_C3STAT    0x3U
+#define V_C3STAT(x) ((x) << S_C3STAT)
+#define G_C3STAT(x) (((x) >> S_C3STAT) & M_C3STAT)
+
+#define S_T6_C2STAT    2
+#define M_T6_C2STAT    0x3U
+#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
+#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
+
+#define S_T6_C1STAT    0
+#define M_T6_C1STAT    0x3U
+#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
+#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3040
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3040
+
+#define S_AETAP0    0
+#define M_AETAP0    0x7fU
+#define V_AETAP0(x) ((x) << S_AETAP0)
+#define G_AETAP0(x) (((x) >> S_AETAP0) & M_AETAP0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3044
+
+#define S_T5NIDAC1    0
+#define M_T5NIDAC1    0x3fU
+#define V_T5NIDAC1(x) ((x) << S_T5NIDAC1)
+#define G_T5NIDAC1(x) (((x) >> S_T5NIDAC1) & M_T5NIDAC1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3044
+
+#define S_AETAP1    0
+#define M_AETAP1    0x7fU
+#define V_AETAP1(x) ((x) << S_AETAP1)
+#define G_AETAP1(x) (((x) >> S_AETAP1) & M_AETAP1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3048
+
+#define S_T5NIDAC2    0
+#define M_T5NIDAC2    0x3fU
+#define V_T5NIDAC2(x) ((x) << S_T5NIDAC2)
+#define G_T5NIDAC2(x) (((x) >> S_T5NIDAC2) & M_T5NIDAC2)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3048
+
+#define S_AETAP2    0
+#define M_AETAP2    0x7fU
+#define V_AETAP2(x) ((x) << S_AETAP2)
+#define G_AETAP2(x) (((x) >> S_AETAP2) & M_AETAP2)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x304c
+
+#define S_AETAP3    0
+#define M_AETAP3    0x7fU
+#define V_AETAP3(x) ((x) << S_AETAP3)
+#define G_AETAP3(x) (((x) >> S_AETAP3) & M_AETAP3)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_APPLIED_TUNE_REGISTER 0x3050
+
+#define S_ATUNEN    8
+#define M_ATUNEN    0xffU
+#define V_ATUNEN(x) ((x) << S_ATUNEN)
+#define G_ATUNEN(x) (((x) >> S_ATUNEN) & M_ATUNEN)
+
+#define S_ATUNEP    0
+#define M_ATUNEP    0xffU
+#define V_ATUNEP(x) ((x) << S_ATUNEP)
+#define G_ATUNEP(x) (((x) >> S_ATUNEP) & M_ATUNEP)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3058
+
+#define S_DCCCOMPINV    8
+#define V_DCCCOMPINV(x) ((x) << S_DCCCOMPINV)
+#define F_DCCCOMPINV    V_DCCCOMPINV(1U)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3060
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_4X_SEGMENT_APPLIED 0x3060
+
+#define S_AS4X7    14
+#define M_AS4X7    0x3U
+#define V_AS4X7(x) ((x) << S_AS4X7)
+#define G_AS4X7(x) (((x) >> S_AS4X7) & M_AS4X7)
+
+#define S_AS4X6    12
+#define M_AS4X6    0x3U
+#define V_AS4X6(x) ((x) << S_AS4X6)
+#define G_AS4X6(x) (((x) >> S_AS4X6) & M_AS4X6)
+
+#define S_AS4X5    10
+#define M_AS4X5    0x3U
+#define V_AS4X5(x) ((x) << S_AS4X5)
+#define G_AS4X5(x) (((x) >> S_AS4X5) & M_AS4X5)
+
+#define S_AS4X4    8
+#define M_AS4X4    0x3U
+#define V_AS4X4(x) ((x) << S_AS4X4)
+#define G_AS4X4(x) (((x) >> S_AS4X4) & M_AS4X4)
+
+#define S_AS4X3    6
+#define M_AS4X3    0x3U
+#define V_AS4X3(x) ((x) << S_AS4X3)
+#define G_AS4X3(x) (((x) >> S_AS4X3) & M_AS4X3)
+
+#define S_AS4X2    4
+#define M_AS4X2    0x3U
+#define V_AS4X2(x) ((x) << S_AS4X2)
+#define G_AS4X2(x) (((x) >> S_AS4X2) & M_AS4X2)
+
+#define S_AS4X1    2
+#define M_AS4X1    0x3U
+#define V_AS4X1(x) ((x) << S_AS4X1)
+#define G_AS4X1(x) (((x) >> S_AS4X1) & M_AS4X1)
+
+#define S_AS4X0    0
+#define M_AS4X0    0x3U
+#define V_AS4X0(x) ((x) << S_AS4X0)
+#define G_AS4X0(x) (((x) >> S_AS4X0) & M_AS4X0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3064
+
+#define S_T5AIDAC1    0
+#define M_T5AIDAC1    0x3fU
+#define V_T5AIDAC1(x) ((x) << S_T5AIDAC1)
+#define G_T5AIDAC1(x) (((x) >> S_T5AIDAC1) & M_T5AIDAC1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_2X_SEGMENT_APPLIED 0x3064
+
+#define S_AS2X3    6
+#define M_AS2X3    0x3U
+#define V_AS2X3(x) ((x) << S_AS2X3)
+#define G_AS2X3(x) (((x) >> S_AS2X3) & M_AS2X3)
+
+#define S_AS2X2    4
+#define M_AS2X2    0x3U
+#define V_AS2X2(x) ((x) << S_AS2X2)
+#define G_AS2X2(x) (((x) >> S_AS2X2) & M_AS2X2)
+
+#define S_AS2X1    2
+#define M_AS2X1    0x3U
+#define V_AS2X1(x) ((x) << S_AS2X1)
+#define G_AS2X1(x) (((x) >> S_AS2X1) & M_AS2X1)
+
+#define S_AS2X0    0
+#define M_AS2X0    0x3U
+#define V_AS2X0(x) ((x) << S_AS2X0)
+#define G_AS2X0(x) (((x) >> S_AS2X0) & M_AS2X0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3068
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_1X_SEGMENT_APPLIED 0x3068
+
+#define S_AS1X7    14
+#define M_AS1X7    0x3U
+#define V_AS1X7(x) ((x) << S_AS1X7)
+#define G_AS1X7(x) (((x) >> S_AS1X7) & M_AS1X7)
+
+#define S_AS1X6    12
+#define M_AS1X6    0x3U
+#define V_AS1X6(x) ((x) << S_AS1X6)
+#define G_AS1X6(x) (((x) >> S_AS1X6) & M_AS1X6)
+
+#define S_AS1X5    10
+#define M_AS1X5    0x3U
+#define V_AS1X5(x) ((x) << S_AS1X5)
+#define G_AS1X5(x) (((x) >> S_AS1X5) & M_AS1X5)
+
+#define S_AS1X4    8
+#define M_AS1X4    0x3U
+#define V_AS1X4(x) ((x) << S_AS1X4)
+#define G_AS1X4(x) (((x) >> S_AS1X4) & M_AS1X4)
+
+#define S_AS1X3    6
+#define M_AS1X3    0x3U
+#define V_AS1X3(x) ((x) << S_AS1X3)
+#define G_AS1X3(x) (((x) >> S_AS1X3) & M_AS1X3)
+
+#define S_AS1X2    4
+#define M_AS1X2    0x3U
+#define V_AS1X2(x) ((x) << S_AS1X2)
+#define G_AS1X2(x) (((x) >> S_AS1X2) & M_AS1X2)
+
+#define S_AS1X1    2
+#define M_AS1X1    0x3U
+#define V_AS1X1(x) ((x) << S_AS1X1)
+#define G_AS1X1(x) (((x) >> S_AS1X1) & M_AS1X1)
+
+#define S_AS1X0    0
+#define M_AS1X0    0x3U
+#define V_AS1X0(x) ((x) << S_AS1X0)
+#define G_AS1X0(x) (((x) >> S_AS1X0) & M_AS1X0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x306c
+
+#define S_AT4X    0
+#define M_AT4X    0xffU
+#define V_AT4X(x) ((x) << S_AT4X)
+#define G_AT4X(x) (((x) >> S_AT4X) & M_AT4X)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3070
+
+#define S_MAINSC    6
+#define M_MAINSC    0x3fU
+#define V_MAINSC(x) ((x) << S_MAINSC)
+#define G_MAINSC(x) (((x) >> S_MAINSC) & M_MAINSC)
+
+#define S_POSTSC    0
+#define M_POSTSC    0x3fU
+#define V_POSTSC(x) ((x) << S_POSTSC)
+#define G_POSTSC(x) (((x) >> S_POSTSC) & M_POSTSC)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3070
+
+#define S_AT2X    8
+#define M_AT2X    0xfU
+#define V_AT2X(x) ((x) << S_AT2X)
+#define G_AT2X(x) (((x) >> S_AT2X) & M_AT2X)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3074
+
+#define S_PRESC    0
+#define M_PRESC    0x1fU
+#define V_PRESC(x) ((x) << S_PRESC)
+#define G_PRESC(x) (((x) >> S_PRESC) & M_PRESC)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3074
+
+#define S_ATSIGN    0
+#define M_ATSIGN    0xfU
+#define V_ATSIGN(x) ((x) << S_ATSIGN)
+#define G_ATSIGN(x) (((x) >> S_ATSIGN) & M_ATSIGN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3078
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x307c
+
+#define S_T5XADDR    1
+#define M_T5XADDR    0x1fU
+#define V_T5XADDR(x) ((x) << S_T5XADDR)
+#define G_T5XADDR(x) (((x) >> S_T5XADDR) & M_T5XADDR)
+
+#define S_T5XWR    0
+#define V_T5XWR(x) ((x) << S_T5XWR)
+#define F_T5XWR    V_T5XWR(1U)
+
+#define S_T6_XADDR    1
+#define M_T6_XADDR    0x1fU
+#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
+#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3080
+
+#define S_XDAT10    0
+#define M_XDAT10    0xffffU
+#define V_XDAT10(x) ((x) << S_XDAT10)
+#define G_XDAT10(x) (((x) >> S_XDAT10) & M_XDAT10)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3084
+
+#define S_XDAT32    0
+#define M_XDAT32    0xffffU
+#define V_XDAT32(x) ((x) << S_XDAT32)
+#define G_XDAT32(x) (((x) >> S_XDAT32) & M_XDAT32)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3088
+
+#define S_XDAT4    0
+#define M_XDAT4    0xffU
+#define V_XDAT4(x) ((x) << S_XDAT4)
+#define G_XDAT4(x) (((x) >> S_XDAT4) & M_XDAT4)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3088
+
+#define S_XDAT54    0
+#define M_XDAT54    0xffffU
+#define V_XDAT54(x) ((x) << S_XDAT54)
+#define G_XDAT54(x) (((x) >> S_XDAT54) & M_XDAT54)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_CONTROL 0x308c
+
+#define S_DCCTIMEDOUT    15
+#define V_DCCTIMEDOUT(x) ((x) << S_DCCTIMEDOUT)
+#define F_DCCTIMEDOUT    V_DCCTIMEDOUT(1U)
+
+#define S_DCCTIMEEN    14
+#define V_DCCTIMEEN(x) ((x) << S_DCCTIMEEN)
+#define F_DCCTIMEEN    V_DCCTIMEEN(1U)
+
+#define S_DCCLOCK    13
+#define V_DCCLOCK(x) ((x) << S_DCCLOCK)
+#define F_DCCLOCK    V_DCCLOCK(1U)
+
+#define S_DCCOFFSET    8
+#define M_DCCOFFSET    0x1fU
+#define V_DCCOFFSET(x) ((x) << S_DCCOFFSET)
+#define G_DCCOFFSET(x) (((x) >> S_DCCOFFSET) & M_DCCOFFSET)
+
+#define S_DCCSTEP    6
+#define M_DCCSTEP    0x3U
+#define V_DCCSTEP(x) ((x) << S_DCCSTEP)
+#define G_DCCSTEP(x) (((x) >> S_DCCSTEP) & M_DCCSTEP)
+
+#define S_DCCASTEP    1
+#define M_DCCASTEP    0x1fU
+#define V_DCCASTEP(x) ((x) << S_DCCASTEP)
+#define G_DCCASTEP(x) (((x) >> S_DCCASTEP) & M_DCCASTEP)
+
+#define S_DCCAEN    0
+#define V_DCCAEN(x) ((x) << S_DCCAEN)
+#define F_DCCAEN    V_DCCAEN(1U)
+
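/*
 * Editor's note, not part of the commit: a sketch of a convergence
 * check on the transmit DCC control word ("ctl", assumed read from
 * A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_CONTROL).  Reading DCC as
 * duty-cycle correction is an inference from the surrounding names.
 */
static inline int
dcc_converged(uint32_t ctl)
{

	return ((ctl & F_DCCLOCK) != 0 && (ctl & F_DCCTIMEDOUT) == 0);
}
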
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x308c
+
+#define S_XDAT76    0
+#define M_XDAT76    0xffffU
+#define V_XDAT76(x) ((x) << S_XDAT76)
+#define G_XDAT76(x) (((x) >> S_XDAT76) & M_XDAT76)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_OVERRIDE 0x3090
+
+#define S_DCCOUT    12
+#define V_DCCOUT(x) ((x) << S_DCCOUT)
+#define F_DCCOUT    V_DCCOUT(1U)
+
+#define S_DCCCLK    11
+#define V_DCCCLK(x) ((x) << S_DCCCLK)
+#define F_DCCCLK    V_DCCCLK(1U)
+
+#define S_DCCHOLD    10
+#define V_DCCHOLD(x) ((x) << S_DCCHOLD)
+#define F_DCCHOLD    V_DCCHOLD(1U)
+
+#define S_DCCSIGN    8
+#define M_DCCSIGN    0x3U
+#define V_DCCSIGN(x) ((x) << S_DCCSIGN)
+#define G_DCCSIGN(x) (((x) >> S_DCCSIGN) & M_DCCSIGN)
+
+#define S_DCCAMP    1
+#define M_DCCAMP    0x7fU
+#define V_DCCAMP(x) ((x) << S_DCCAMP)
+#define G_DCCAMP(x) (((x) >> S_DCCAMP) & M_DCCAMP)
+
+#define S_DCCOEN    0
+#define V_DCCOEN(x) ((x) << S_DCCOEN)
+#define F_DCCOEN    V_DCCOEN(1U)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_APPLIED 0x3094
+
+#define S_DCCASIGN    7
+#define M_DCCASIGN    0x3U
+#define V_DCCASIGN(x) ((x) << S_DCCASIGN)
+#define G_DCCASIGN(x) (((x) >> S_DCCASIGN) & M_DCCASIGN)
+
+#define S_DCCAAMP    0
+#define M_DCCAAMP    0x7fU
+#define V_DCCAAMP(x) ((x) << S_DCCAAMP)
+#define G_DCCAAMP(x) (((x) >> S_DCCAAMP) & M_DCCAAMP)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_TIME_OUT 0x3098
+
+#define S_DCCTIMEOUTVAL    0
+#define M_DCCTIMEOUTVAL    0xffffU
+#define V_DCCTIMEOUTVAL(x) ((x) << S_DCCTIMEOUTVAL)
+#define G_DCCTIMEOUTVAL(x) (((x) >> S_DCCTIMEOUTVAL) & M_DCCTIMEOUTVAL)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AZ_CONTROL 0x309c
+
+#define S_LPIDCLK    4
+#define V_LPIDCLK(x) ((x) << S_LPIDCLK)
+#define F_LPIDCLK    V_LPIDCLK(1U)
+
+#define S_LPITERM    2
+#define M_LPITERM    0x3U
+#define V_LPITERM(x) ((x) << S_LPITERM)
+#define G_LPITERM(x) (((x) >> S_LPITERM) & M_LPITERM)
+
+#define S_LPIPRCD    0
+#define M_LPIPRCD    0x3U
+#define V_LPIPRCD(x) ((x) << S_LPIPRCD)
+#define G_LPIPRCD(x) (((x) >> S_LPIPRCD) & M_LPIPRCD)
+
+#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_CONTROL 0x30a0
+
+#define S_T6_DCCTIMEEN    13
+#define M_T6_DCCTIMEEN    0x3U
+#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
+#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
+
+#define S_T6_DCCLOCK    11
+#define M_T6_DCCLOCK    0x3U
+#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
+#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
+
+#define S_T6_DCCOFFSET    8
+#define M_T6_DCCOFFSET    0x7U
+#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
+#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
+
+#define S_TX_LINKA_DCCSTEP_CTL    6
+#define M_TX_LINKA_DCCSTEP_CTL    0x3U
+#define V_TX_LINKA_DCCSTEP_CTL(x) ((x) << S_TX_LINKA_DCCSTEP_CTL)
+#define G_TX_LINKA_DCCSTEP_CTL(x) (((x) >> S_TX_LINKA_DCCSTEP_CTL) & M_TX_LINKA_DCCSTEP_CTL)
+
+#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_OVERRIDE 0x30a4
+#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_APPLIED 0x30a8
+#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_TIME_OUT 0x30ac
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SIGN_OVERRIDE 0x30c0
+
+#define S_OSIGN    0
+#define M_OSIGN    0xfU
+#define V_OSIGN(x) ((x) << S_OSIGN)
+#define G_OSIGN(x) (((x) >> S_OSIGN) & M_OSIGN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_4X_OVERRIDE 0x30c8
+
+#define S_OS4X7    14
+#define M_OS4X7    0x3U
+#define V_OS4X7(x) ((x) << S_OS4X7)
+#define G_OS4X7(x) (((x) >> S_OS4X7) & M_OS4X7)
+
+#define S_OS4X6    12
+#define M_OS4X6    0x3U
+#define V_OS4X6(x) ((x) << S_OS4X6)
+#define G_OS4X6(x) (((x) >> S_OS4X6) & M_OS4X6)
+
+#define S_OS4X5    10
+#define M_OS4X5    0x3U
+#define V_OS4X5(x) ((x) << S_OS4X5)
+#define G_OS4X5(x) (((x) >> S_OS4X5) & M_OS4X5)
+
+#define S_OS4X4    8
+#define M_OS4X4    0x3U
+#define V_OS4X4(x) ((x) << S_OS4X4)
+#define G_OS4X4(x) (((x) >> S_OS4X4) & M_OS4X4)
+
+#define S_OS4X3    6
+#define M_OS4X3    0x3U
+#define V_OS4X3(x) ((x) << S_OS4X3)
+#define G_OS4X3(x) (((x) >> S_OS4X3) & M_OS4X3)
+
+#define S_OS4X2    4
+#define M_OS4X2    0x3U
+#define V_OS4X2(x) ((x) << S_OS4X2)
+#define G_OS4X2(x) (((x) >> S_OS4X2) & M_OS4X2)
+
+#define S_OS4X1    2
+#define M_OS4X1    0x3U
+#define V_OS4X1(x) ((x) << S_OS4X1)
+#define G_OS4X1(x) (((x) >> S_OS4X1) & M_OS4X1)
+
+#define S_OS4X0    0
+#define M_OS4X0    0x3U
+#define V_OS4X0(x) ((x) << S_OS4X0)
+#define G_OS4X0(x) (((x) >> S_OS4X0) & M_OS4X0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_2X_OVERRIDE 0x30cc
+
+#define S_OS2X3    6
+#define M_OS2X3    0x3U
+#define V_OS2X3(x) ((x) << S_OS2X3)
+#define G_OS2X3(x) (((x) >> S_OS2X3) & M_OS2X3)
+
+#define S_OS2X2    4
+#define M_OS2X2    0x3U
+#define V_OS2X2(x) ((x) << S_OS2X2)
+#define G_OS2X2(x) (((x) >> S_OS2X2) & M_OS2X2)
+
+#define S_OS2X1    2
+#define M_OS2X1    0x3U
+#define V_OS2X1(x) ((x) << S_OS2X1)
+#define G_OS2X1(x) (((x) >> S_OS2X1) & M_OS2X1)
+
+#define S_OS2X0    0
+#define M_OS2X0    0x3U
+#define V_OS2X0(x) ((x) << S_OS2X0)
+#define G_OS2X0(x) (((x) >> S_OS2X0) & M_OS2X0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_1X_OVERRIDE 0x30d0
+
+#define S_OS1X7    14
+#define M_OS1X7    0x3U
+#define V_OS1X7(x) ((x) << S_OS1X7)
+#define G_OS1X7(x) (((x) >> S_OS1X7) & M_OS1X7)
+
+#define S_OS1X6    12
+#define M_OS1X6    0x3U
+#define V_OS1X6(x) ((x) << S_OS1X6)
+#define G_OS1X6(x) (((x) >> S_OS1X6) & M_OS1X6)
+
+#define S_OS1X5    10
+#define M_OS1X5    0x3U
+#define V_OS1X5(x) ((x) << S_OS1X5)
+#define G_OS1X5(x) (((x) >> S_OS1X5) & M_OS1X5)
+
+#define S_OS1X4    8
+#define M_OS1X4    0x3U
+#define V_OS1X4(x) ((x) << S_OS1X4)
+#define G_OS1X4(x) (((x) >> S_OS1X4) & M_OS1X4)
+
+#define S_OS1X3    6
+#define M_OS1X3    0x3U
+#define V_OS1X3(x) ((x) << S_OS1X3)
+#define G_OS1X3(x) (((x) >> S_OS1X3) & M_OS1X3)
+
+#define S_OS1X2    4
+#define M_OS1X2    0x3U
+#define V_OS1X2(x) ((x) << S_OS1X2)
+#define G_OS1X2(x) (((x) >> S_OS1X2) & M_OS1X2)
+
+#define S_OS1X1    2
+#define M_OS1X1    0x3U
+#define V_OS1X1(x) ((x) << S_OS1X1)
+#define G_OS1X1(x) (((x) >> S_OS1X1) & M_OS1X1)
+
+#define S_OS1X0    0
+#define M_OS1X0    0x3U
+#define V_OS1X0(x) ((x) << S_OS1X0)
+#define G_OS1X0(x) (((x) >> S_OS1X0) & M_OS1X0)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x30d8
+
+#define S_OT4X    0
+#define M_OT4X    0xffU
+#define V_OT4X(x) ((x) << S_OT4X)
+#define G_OT4X(x) (((x) >> S_OT4X) & M_OT4X)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x30dc
+
+#define S_OT2X    0
+#define M_OT2X    0xfU
+#define V_OT2X(x) ((x) << S_OT2X)
+#define G_OT2X(x) (((x) >> S_OT2X) & M_OT2X)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x30e0
+
+#define S_OT1X    0
+#define M_OT1X    0xffU
+#define V_OT1X(x) ((x) << S_OT1X)
+#define G_OT1X(x) (((x) >> S_OT1X) & M_OT1X)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_5 0x30ec
+
+#define S_ERRORP    15
+#define V_ERRORP(x) ((x) << S_ERRORP)
+#define F_ERRORP    V_ERRORP(1U)
+
+#define S_ERRORN    14
+#define V_ERRORN(x) ((x) << S_ERRORN)
+#define F_ERRORN    V_ERRORN(1U)
+
+#define S_TESTENA    13
+#define V_TESTENA(x) ((x) << S_TESTENA)
+#define F_TESTENA    V_TESTENA(1U)
+
+#define S_TUNEBIT    10
+#define M_TUNEBIT    0x7U
+#define V_TUNEBIT(x) ((x) << S_TUNEBIT)
+#define G_TUNEBIT(x) (((x) >> S_TUNEBIT) & M_TUNEBIT)
+
+#define S_DATAPOS    8
+#define M_DATAPOS    0x3U
+#define V_DATAPOS(x) ((x) << S_DATAPOS)
+#define G_DATAPOS(x) (((x) >> S_DATAPOS) & M_DATAPOS)
+
+#define S_SEGSEL    3
+#define M_SEGSEL    0x1fU
+#define V_SEGSEL(x) ((x) << S_SEGSEL)
+#define G_SEGSEL(x) (((x) >> S_SEGSEL) & M_SEGSEL)
+
+#define S_TAPSEL    1
+#define M_TAPSEL    0x3U
+#define V_TAPSEL(x) ((x) << S_TAPSEL)
+#define G_TAPSEL(x) (((x) >> S_TAPSEL) & M_TAPSEL)
+
+#define S_DATASIGN    0
+#define V_DATASIGN(x) ((x) << S_DATASIGN)
+#define F_DATASIGN    V_DATASIGN(1U)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_4 0x30f0
+
+#define S_SDOVRDEN    8
+#define V_SDOVRDEN(x) ((x) << S_SDOVRDEN)
+#define F_SDOVRDEN    V_SDOVRDEN(1U)
+
+#define S_SDOVRD    0
+#define M_SDOVRD    0xffU
+#define V_SDOVRD(x) ((x) << S_SDOVRD)
+#define G_SDOVRD(x) (((x) >> S_SDOVRD) & M_SDOVRD)
+
+#define S_T6_SDOVRD    0
+#define M_T6_SDOVRD    0xffffU
+#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
+#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_3 0x30f4
+
+#define S_SLEWCODE    1
+#define M_SLEWCODE    0x3U
+#define V_SLEWCODE(x) ((x) << S_SLEWCODE)
+#define G_SLEWCODE(x) (((x) >> S_SLEWCODE) & M_SLEWCODE)
+
+#define S_ASEGEN    0
+#define V_ASEGEN(x) ((x) << S_ASEGEN)
+#define F_ASEGEN    V_ASEGEN(1U)
+
+#define S_WCNT    0
+#define M_WCNT    0x3ffU
+#define V_WCNT(x) ((x) << S_WCNT)
+#define G_WCNT(x) (((x) >> S_WCNT) & M_WCNT)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_2 0x30f8
+
+#define S_AECMDVAL    14
+#define V_AECMDVAL(x) ((x) << S_AECMDVAL)
+#define F_AECMDVAL    V_AECMDVAL(1U)
+
+#define S_AECMD1312    12
+#define M_AECMD1312    0x3U
+#define V_AECMD1312(x) ((x) << S_AECMD1312)
+#define G_AECMD1312(x) (((x) >> S_AECMD1312) & M_AECMD1312)
+
+#define S_AECMD70    0
+#define M_AECMD70    0xffU
+#define V_AECMD70(x) ((x) << S_AECMD70)
+#define G_AECMD70(x) (((x) >> S_AECMD70) & M_AECMD70)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_1 0x30fc
+
+#define S_C48DIVCTL    12
+#define M_C48DIVCTL    0x7U
+#define V_C48DIVCTL(x) ((x) << S_C48DIVCTL)
+#define G_C48DIVCTL(x) (((x) >> S_C48DIVCTL) & M_C48DIVCTL)
+
+#define S_RATEDIVCTL    9
+#define M_RATEDIVCTL    0x7U
+#define V_RATEDIVCTL(x) ((x) << S_RATEDIVCTL)
+#define G_RATEDIVCTL(x) (((x) >> S_RATEDIVCTL) & M_RATEDIVCTL)
+
+#define S_ANLGFLSH    8
+#define V_ANLGFLSH(x) ((x) << S_ANLGFLSH)
+#define F_ANLGFLSH    V_ANLGFLSH(1U)
+
+#define S_DCCTSTOUT    7
+#define V_DCCTSTOUT(x) ((x) << S_DCCTSTOUT)
+#define F_DCCTSTOUT    V_DCCTSTOUT(1U)
+
+#define S_BSOUT    6
+#define V_BSOUT(x) ((x) << S_BSOUT)
+#define F_BSOUT    V_BSOUT(1U)
+
+#define S_BSIN    5
+#define V_BSIN(x) ((x) << S_BSIN)
+#define F_BSIN    V_BSIN(1U)
+
+#define S_JTAGAMPL    3
+#define M_JTAGAMPL    0x3U
+#define V_JTAGAMPL(x) ((x) << S_JTAGAMPL)
+#define G_JTAGAMPL(x) (((x) >> S_JTAGAMPL) & M_JTAGAMPL)
+
+#define S_JTAGTS    2
+#define V_JTAGTS(x) ((x) << S_JTAGTS)
+#define F_JTAGTS    V_JTAGTS(1U)
+
+#define S_TS    1
+#define V_TS(x) ((x) << S_TS)
+#define F_TS    V_TS(1U)
+
+#define S_OBS    0
+#define V_OBS(x) ((x) << S_OBS)
+#define F_OBS    V_OBS(1U)
+
+#define S_T6_SDOVRDEN    15
+#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
+#define F_T6_SDOVRDEN    V_T6_SDOVRDEN(1U)
+
+#define S_BSOUTN    7
+#define V_BSOUTN(x) ((x) << S_BSOUTN)
+#define F_BSOUTN    V_BSOUTN(1U)
+
+#define S_BSOUTP    6
+#define V_BSOUTP(x) ((x) << S_BSOUTP)
+#define F_BSOUTP    V_BSOUTP(1U)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_CONFIGURATION_MODE 0x3100
+
+#define S_T6_T5_TX_RXLOOP    5
+#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
+#define F_T6_T5_TX_RXLOOP    V_T6_T5_TX_RXLOOP(1U)
+
+#define S_T6_T5_TX_BWSEL    2
+#define M_T6_T5_TX_BWSEL    0x3U
+#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
+#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TEST_CONTROL 0x3104
+
+#define S_T6_ERROR    9
+#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
+#define F_T6_ERROR    V_T6_ERROR(1U)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_COEFFICIENT_CONTROL 0x3108
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_MODE_CONTROL 0x310c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3110
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3114
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3118
+
+#define S_T6_CALSSTN    8
+#define M_T6_CALSSTN    0x3fU
+#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
+#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
+
+#define S_T6_CALSSTP    0
+#define M_T6_CALSSTP    0x3fU
+#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
+#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x311c
+
+#define S_T6_DRTOL    2
+#define M_T6_DRTOL    0x7U
+#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
+#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT 0x3120
+
+#define S_T6_NXTT0    0
+#define M_T6_NXTT0    0x3fU
+#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
+#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT 0x3124
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT 0x3128
+
+#define S_T6_NXTT2    0
+#define M_T6_NXTT2    0x3fU
+#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
+#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_3_COEFFICIENT 0x312c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AMPLITUDE 0x3130
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_POLARITY 0x3134
+
+#define S_T6_NXTPOL    0
+#define M_T6_NXTPOL    0xfU
+#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
+#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3138
+
+#define S_T6_C0UPDT    6
+#define M_T6_C0UPDT    0x3U
+#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
+#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
+
+#define S_T6_C2UPDT    2
+#define M_T6_C2UPDT    0x3U
+#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
+#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
+
+#define S_T6_C1UPDT    0
+#define M_T6_C1UPDT    0x3U
+#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
+#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x313c
+
+#define S_T6_C0STAT    6
+#define M_T6_C0STAT    0x3U
+#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
+#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
+
+#define S_T6_C2STAT    2
+#define M_T6_C2STAT    0x3U
+#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
+#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
+
+#define S_T6_C1STAT    0
+#define M_T6_C1STAT    0x3U
+#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
+#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3140
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3140
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3144
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3144
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3148
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3148
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x314c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_APPLIED_TUNE_REGISTER 0x3150
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3158
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3160
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_4X_SEGMENT_APPLIED 0x3160
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3164
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_2X_SEGMENT_APPLIED 0x3164
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3168
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_1X_SEGMENT_APPLIED 0x3168
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x316c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3170
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3170
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3174
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3174
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3178
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x317c
+
+#define S_T6_XADDR    1
+#define M_T6_XADDR    0x1fU
+#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
+#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3180
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3184
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3188
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3188
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x318c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x318c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_OVERRIDE 0x3190
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_APPLIED 0x3194
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_TIME_OUT 0x3198
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AZ_CONTROL 0x319c
+#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x31a0
+
+#define S_T6_DCCTIMEEN    13
+#define M_T6_DCCTIMEEN    0x3U
+#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
+#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
+
+#define S_T6_DCCLOCK    11
+#define M_T6_DCCLOCK    0x3U
+#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
+#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
+
+#define S_T6_DCCOFFSET    8
+#define M_T6_DCCOFFSET    0x7U
+#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
+#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
+
+#define S_TX_LINKB_DCCSTEP_CTL    6
+#define M_TX_LINKB_DCCSTEP_CTL    0x3U
+#define V_TX_LINKB_DCCSTEP_CTL(x) ((x) << S_TX_LINKB_DCCSTEP_CTL)
+#define G_TX_LINKB_DCCSTEP_CTL(x) (((x) >> S_TX_LINKB_DCCSTEP_CTL) & M_TX_LINKB_DCCSTEP_CTL)
+
+#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_OVERRIDE 0x31a4
+#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_APPLIED 0x31a8
+#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_TIME_OUT 0x31ac
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_OVERRIDE 0x31c0
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_4X_OVERRIDE 0x31c8
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_2X_OVERRIDE 0x31cc
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_1X_OVERRIDE 0x31d0
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x31d8
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x31dc
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x31e0
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_5 0x31ec
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_4 0x31f0
+
+#define S_T6_SDOVRD    0
+#define M_T6_SDOVRD    0xffffU
+#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
+#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_3 0x31f4
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_2 0x31f8
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_1 0x31fc
+
+#define S_T6_SDOVRDEN    15
+#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
+#define F_T6_SDOVRDEN    V_T6_SDOVRDEN(1U)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_CONFIGURATION_MODE 0x3200
+
+#define S_T5_RX_LINKEN    15
+#define V_T5_RX_LINKEN(x) ((x) << S_T5_RX_LINKEN)
+#define F_T5_RX_LINKEN    V_T5_RX_LINKEN(1U)
+
+#define S_T5_RX_LINKRST    14
+#define V_T5_RX_LINKRST(x) ((x) << S_T5_RX_LINKRST)
+#define F_T5_RX_LINKRST    V_T5_RX_LINKRST(1U)
+
+#define S_T5_RX_CFGWRT    13
+#define V_T5_RX_CFGWRT(x) ((x) << S_T5_RX_CFGWRT)
+#define F_T5_RX_CFGWRT    V_T5_RX_CFGWRT(1U)
+
+#define S_T5_RX_CFGPTR    11
+#define M_T5_RX_CFGPTR    0x3U
+#define V_T5_RX_CFGPTR(x) ((x) << S_T5_RX_CFGPTR)
+#define G_T5_RX_CFGPTR(x) (((x) >> S_T5_RX_CFGPTR) & M_T5_RX_CFGPTR)
+
+#define S_T5_RX_CFGEXT    10
+#define V_T5_RX_CFGEXT(x) ((x) << S_T5_RX_CFGEXT)
+#define F_T5_RX_CFGEXT    V_T5_RX_CFGEXT(1U)
+
+#define S_T5_RX_CFGACT    9
+#define V_T5_RX_CFGACT(x) ((x) << S_T5_RX_CFGACT)
+#define F_T5_RX_CFGACT    V_T5_RX_CFGACT(1U)
+
+#define S_T5_RX_AUXCLK    8
+#define V_T5_RX_AUXCLK(x) ((x) << S_T5_RX_AUXCLK)
+#define F_T5_RX_AUXCLK    V_T5_RX_AUXCLK(1U)
+
+#define S_T5_RX_PLLSEL    6
+#define M_T5_RX_PLLSEL    0x3U
+#define V_T5_RX_PLLSEL(x) ((x) << S_T5_RX_PLLSEL)
+#define G_T5_RX_PLLSEL(x) (((x) >> S_T5_RX_PLLSEL) & M_T5_RX_PLLSEL)
+
+#define S_T5_RX_DMSEL    4
+#define M_T5_RX_DMSEL    0x3U
+#define V_T5_RX_DMSEL(x) ((x) << S_T5_RX_DMSEL)
+#define G_T5_RX_DMSEL(x) (((x) >> S_T5_RX_DMSEL) & M_T5_RX_DMSEL)
+
+#define S_T5_RX_BWSEL    2
+#define M_T5_RX_BWSEL    0x3U
+#define V_T5_RX_BWSEL(x) ((x) << S_T5_RX_BWSEL)
+#define G_T5_RX_BWSEL(x) (((x) >> S_T5_RX_BWSEL) & M_T5_RX_BWSEL)
+
+#define S_T5_RX_RTSEL    0
+#define M_T5_RX_RTSEL    0x3U
+#define V_T5_RX_RTSEL(x) ((x) << S_T5_RX_RTSEL)
+#define G_T5_RX_RTSEL(x) (((x) >> S_T5_RX_RTSEL) & M_T5_RX_RTSEL)
+
+#define S_T5_RX_MODE8023AZ    8
+#define V_T5_RX_MODE8023AZ(x) ((x) << S_T5_RX_MODE8023AZ)
+#define F_T5_RX_MODE8023AZ    V_T5_RX_MODE8023AZ(1U)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_TEST_CONTROL 0x3204
+
+#define S_FERRST    10
+#define V_FERRST(x) ((x) << S_FERRST)
+#define F_FERRST    V_FERRST(1U)
+
+#define S_ERRST    9
+#define V_ERRST(x) ((x) << S_ERRST)
+#define F_ERRST    V_ERRST(1U)
+
+#define S_SYNCST    8
+#define V_SYNCST(x) ((x) << S_SYNCST)
+#define F_SYNCST    V_SYNCST(1U)
+
+#define S_WRPSM    7
+#define V_WRPSM(x) ((x) << S_WRPSM)
+#define F_WRPSM    V_WRPSM(1U)
+
+#define S_WPLPEN    6
+#define V_WPLPEN(x) ((x) << S_WPLPEN)
+#define F_WPLPEN    V_WPLPEN(1U)
+
+#define S_WRPMD    5
+#define V_WRPMD(x) ((x) << S_WRPMD)
+#define F_WRPMD    V_WRPMD(1U)
+
+#define S_PATSEL    0
+#define M_PATSEL    0x7U
+#define V_PATSEL(x) ((x) << S_PATSEL)
+#define G_PATSEL(x) (((x) >> S_PATSEL) & M_PATSEL)
+
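+/*
+ * Illustrative use of these accessors (a sketch, not part of the
+ * generated header; assumes the driver's t4_read_reg()/t4_write_reg()
+ * 32-bit register accessors):
+ *
+ *	uint32_t v;
+ *
+ *	v = t4_read_reg(sc, A_MAC_PORT_RX_LINKA_RECEIVER_TEST_CONTROL);
+ *	v = (v & ~V_PATSEL(M_PATSEL)) | V_PATSEL(pat);
+ *	t4_write_reg(sc, A_MAC_PORT_RX_LINKA_RECEIVER_TEST_CONTROL, v);
+ */
+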
+#define S_APLYDCD    15
+#define V_APLYDCD(x) ((x) << S_APLYDCD)
+#define F_APLYDCD    V_APLYDCD(1U)
+
+#define S_PPOL    13
+#define M_PPOL    0x3U
+#define V_PPOL(x) ((x) << S_PPOL)
+#define G_PPOL(x) (((x) >> S_PPOL) & M_PPOL)
+
+#define S_PCLKSEL    11
+#define M_PCLKSEL    0x3U
+#define V_PCLKSEL(x) ((x) << S_PCLKSEL)
+#define G_PCLKSEL(x) (((x) >> S_PCLKSEL) & M_PCLKSEL)
+
+#define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_CONTROL 0x3208
+
+#define S_RSTUCK    3
+#define V_RSTUCK(x) ((x) << S_RSTUCK)
+#define F_RSTUCK    V_RSTUCK(1U)
+
+#define S_FRZFW    2
+#define V_FRZFW(x) ((x) << S_FRZFW)
+#define F_FRZFW    V_FRZFW(1U)
+
+#define S_RSTFW    1
+#define V_RSTFW(x) ((x) << S_RSTFW)
+#define F_RSTFW    V_RSTFW(1U)
+
+#define S_SSCEN    0
+#define V_SSCEN(x) ((x) << S_SSCEN)
+#define F_SSCEN    V_SSCEN(1U)
+
+#define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_OFFSET_CONTROL 0x320c
+
+#define S_H1ANOFST    12
+#define M_H1ANOFST    0xfU
+#define V_H1ANOFST(x) ((x) << S_H1ANOFST)
+#define G_H1ANOFST(x) (((x) >> S_H1ANOFST) & M_H1ANOFST)
+
+#define S_T6_TMSCAL    8
+#define M_T6_TMSCAL    0x3U
+#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
+#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
+
+#define S_T6_APADJ    7
+#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
+#define F_T6_APADJ    V_T6_APADJ(1U)
+
+#define S_T6_RSEL    6
+#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
+#define F_T6_RSEL    V_T6_RSEL(1U)
+
+#define S_T6_PHOFFS    0
+#define M_T6_PHOFFS    0x3fU
+#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
+#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
+
+#define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_POSITION_1 0x3210
+
+#define S_ROT00    0
+#define M_ROT00    0x3fU
+#define V_ROT00(x) ((x) << S_ROT00)
+#define G_ROT00(x) (((x) >> S_ROT00) & M_ROT00)
+
+#define S_ROTA    8
+#define M_ROTA    0x3fU
+#define V_ROTA(x) ((x) << S_ROTA)
+#define G_ROTA(x) (((x) >> S_ROTA) & M_ROTA)
+
+#define S_ROTD    0
+#define M_ROTD    0x3fU
+#define V_ROTD(x) ((x) << S_ROTD)
+#define G_ROTD(x) (((x) >> S_ROTD) & M_ROTD)
+
+#define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_POSITION_2 0x3214
+
+#define S_FREQFW    8
+#define M_FREQFW    0xffU
+#define V_FREQFW(x) ((x) << S_FREQFW)
+#define G_FREQFW(x) (((x) >> S_FREQFW) & M_FREQFW)
+
+#define S_FWSNAP    7
+#define V_FWSNAP(x) ((x) << S_FWSNAP)
+#define F_FWSNAP    V_FWSNAP(1U)
+
+#define S_ROTE    0
+#define M_ROTE    0x3fU
+#define V_ROTE(x) ((x) << S_ROTE)
+#define G_ROTE(x) (((x) >> S_ROTE) & M_ROTE)
+
+#define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3218
+
+#define S_RAOFFF    8
+#define M_RAOFFF    0xfU
+#define V_RAOFFF(x) ((x) << S_RAOFFF)
+#define G_RAOFFF(x) (((x) >> S_RAOFFF) & M_RAOFFF)
+
+#define S_RAOFF    0
+#define M_RAOFF    0x1fU
+#define V_RAOFF(x) ((x) << S_RAOFF)
+#define G_RAOFF(x) (((x) >> S_RAOFF) & M_RAOFF)
+
+#define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x321c
+
+#define S_RBOOFF    10
+#define M_RBOOFF    0x1fU
+#define V_RBOOFF(x) ((x) << S_RBOOFF)
+#define G_RBOOFF(x) (((x) >> S_RBOOFF) & M_RBOOFF)
+
+#define S_RBEOFF    5
+#define M_RBEOFF    0x1fU
+#define V_RBEOFF(x) ((x) << S_RBEOFF)
+#define G_RBEOFF(x) (((x) >> S_RBEOFF) & M_RBEOFF)
+
+#define A_MAC_PORT_RX_LINKA_DFE_CONTROL 0x3220
+
+#define S_T6_SPIFMT    8
+#define M_T6_SPIFMT    0xfU
+#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
+#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
+
+#define A_MAC_PORT_RX_LINKA_DFE_SAMPLE_SNAPSHOT_1 0x3224
+
+#define S_T5BYTE1    8
+#define M_T5BYTE1    0xffU
+#define V_T5BYTE1(x) ((x) << S_T5BYTE1)
+#define G_T5BYTE1(x) (((x) >> S_T5BYTE1) & M_T5BYTE1)
+
+#define S_T5BYTE0    0
+#define M_T5BYTE0    0xffU
+#define V_T5BYTE0(x) ((x) << S_T5BYTE0)
+#define G_T5BYTE0(x) (((x) >> S_T5BYTE0) & M_T5BYTE0)
+
+#define A_MAC_PORT_RX_LINKA_DFE_SAMPLE_SNAPSHOT_2 0x3228
+
+#define S_T5_RX_SMODE    8
+#define M_T5_RX_SMODE    0x7U
+#define V_T5_RX_SMODE(x) ((x) << S_T5_RX_SMODE)
+#define G_T5_RX_SMODE(x) (((x) >> S_T5_RX_SMODE) & M_T5_RX_SMODE)
+
+#define S_T5_RX_ADCORR    7
+#define V_T5_RX_ADCORR(x) ((x) << S_T5_RX_ADCORR)
+#define F_T5_RX_ADCORR    V_T5_RX_ADCORR(1U)
+
+#define S_T5_RX_TRAINEN    6
+#define V_T5_RX_TRAINEN(x) ((x) << S_T5_RX_TRAINEN)
+#define F_T5_RX_TRAINEN    V_T5_RX_TRAINEN(1U)
+
+#define S_T5_RX_ASAMPQ    3
+#define M_T5_RX_ASAMPQ    0x7U
+#define V_T5_RX_ASAMPQ(x) ((x) << S_T5_RX_ASAMPQ)
+#define G_T5_RX_ASAMPQ(x) (((x) >> S_T5_RX_ASAMPQ) & M_T5_RX_ASAMPQ)
+
+#define S_T5_RX_ASAMP    0
+#define M_T5_RX_ASAMP    0x7U
+#define V_T5_RX_ASAMP(x) ((x) << S_T5_RX_ASAMP)
+#define G_T5_RX_ASAMP(x) (((x) >> S_T5_RX_ASAMP) & M_T5_RX_ASAMP)
+
+#define S_REQWOV    15
+#define V_REQWOV(x) ((x) << S_REQWOV)
+#define F_REQWOV    V_REQWOV(1U)
+
+#define S_RASEL    11
+#define M_RASEL    0x7U
+#define V_RASEL(x) ((x) << S_RASEL)
+#define G_RASEL(x) (((x) >> S_RASEL) & M_RASEL)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_VGA_CONTROL_1 0x322c
+
+#define S_T6_WRAPSEL    15
+#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
+#define F_T6_WRAPSEL    V_T6_WRAPSEL(1U)
+
+#define S_ACTL    14
+#define V_ACTL(x) ((x) << S_ACTL)
+#define F_ACTL    V_ACTL(1U)
+
+#define S_T6_PEAK    9
+#define M_T6_PEAK    0x1fU
+#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
+#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_VGA_CONTROL_2 0x3230
+
+#define S_T5SHORTV    10
+#define V_T5SHORTV(x) ((x) << S_T5SHORTV)
+#define F_T5SHORTV    V_T5SHORTV(1U)
+
+#define S_T5VGAIN    0
+#define M_T5VGAIN    0x1fU
+#define V_T5VGAIN(x) ((x) << S_T5VGAIN)
+#define G_T5VGAIN(x) (((x) >> S_T5VGAIN) & M_T5VGAIN)
+
+#define S_FVOFFSKP    15
+#define V_FVOFFSKP(x) ((x) << S_FVOFFSKP)
+#define F_FVOFFSKP    V_FVOFFSKP(1U)
+
+#define S_FGAINCHK    14
+#define V_FGAINCHK(x) ((x) << S_FGAINCHK)
+#define F_FGAINCHK    V_FGAINCHK(1U)
+
+#define S_FH1ACAL    13
+#define V_FH1ACAL(x) ((x) << S_FH1ACAL)
+#define F_FH1ACAL    V_FH1ACAL(1U)
+
+#define S_FH1AFLTR    11
+#define M_FH1AFLTR    0x3U
+#define V_FH1AFLTR(x) ((x) << S_FH1AFLTR)
+#define G_FH1AFLTR(x) (((x) >> S_FH1AFLTR) & M_FH1AFLTR)
+
+#define S_WGAIN    8
+#define M_WGAIN    0x3U
+#define V_WGAIN(x) ((x) << S_WGAIN)
+#define G_WGAIN(x) (((x) >> S_WGAIN) & M_WGAIN)
+
+#define S_GAIN_STAT    7
+#define V_GAIN_STAT(x) ((x) << S_GAIN_STAT)
+#define F_GAIN_STAT    V_GAIN_STAT(1U)
+
+#define S_T6_T5VGAIN    0
+#define M_T6_T5VGAIN    0x7fU
+#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
+#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_VGA_CONTROL_3 0x3234
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DQCC_CONTROL_1 0x3238
+
+#define S_IQSEP    10
+#define M_IQSEP    0x1fU
+#define V_IQSEP(x) ((x) << S_IQSEP)
+#define G_IQSEP(x) (((x) >> S_IQSEP) & M_IQSEP)
+
+#define S_DUTYQ    5
+#define M_DUTYQ    0x1fU
+#define V_DUTYQ(x) ((x) << S_DUTYQ)
+#define G_DUTYQ(x) (((x) >> S_DUTYQ) & M_DUTYQ)
+
+#define S_DUTYI    0
+#define M_DUTYI    0x1fU
+#define V_DUTYI(x) ((x) << S_DUTYI)
+#define G_DUTYI(x) (((x) >> S_DUTYI) & M_DUTYI)
+
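+/*
+ * Note that 0x3238 appears twice: the DQCC control layout above and
+ * the power-management layout below share one address.  Throughout
+ * this file a register gets multiple names and field sets when its
+ * contents apparently differ between chip generations (hence the
+ * T5_/T6_ macro prefixes).
+ */
+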
+#define A_MAC_PORT_RX_LINKA_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3238
+
+#define S_PMCFG    6
+#define M_PMCFG    0x3U
+#define V_PMCFG(x) ((x) << S_PMCFG)
+#define G_PMCFG(x) (((x) >> S_PMCFG) & M_PMCFG)
+
+#define S_PMOFFTIME    0
+#define M_PMOFFTIME    0x3fU
+#define V_PMOFFTIME(x) ((x) << S_PMOFFTIME)
+#define G_PMOFFTIME(x) (((x) >> S_PMOFFTIME) & M_PMOFFTIME)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_IQAMP_CONTROL_1 0x323c
+
+#define S_SELI    9
+#define V_SELI(x) ((x) << S_SELI)
+#define F_SELI    V_SELI(1U)
+
+#define S_SERVREF    5
+#define M_SERVREF    0x7U
+#define V_SERVREF(x) ((x) << S_SERVREF)
+#define G_SERVREF(x) (((x) >> S_SERVREF) & M_SERVREF)
+
+#define S_IQAMP    0
+#define M_IQAMP    0x1fU
+#define V_IQAMP(x) ((x) << S_IQAMP)
+#define G_IQAMP(x) (((x) >> S_IQAMP) & M_IQAMP)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DQCC_CONTROL_3 0x3240
+
+#define S_DTHR    8
+#define M_DTHR    0x3fU
+#define V_DTHR(x) ((x) << S_DTHR)
+#define G_DTHR(x) (((x) >> S_DTHR) & M_DTHR)
+
+#define S_SNUL    0
+#define M_SNUL    0x1fU
+#define V_SNUL(x) ((x) << S_SNUL)
+#define G_SNUL(x) (((x) >> S_SNUL) & M_SNUL)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_IQAMP_CONTROL_2 0x3240
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3244
+
+#define S_SAVEADAC    8
+#define V_SAVEADAC(x) ((x) << S_SAVEADAC)
+#define F_SAVEADAC    V_SAVEADAC(1U)
+
+#define S_LOAD2    7
+#define V_LOAD2(x) ((x) << S_LOAD2)
+#define F_LOAD2    V_LOAD2(1U)
+
+#define S_LOAD1    6
+#define V_LOAD1(x) ((x) << S_LOAD1)
+#define F_LOAD1    V_LOAD1(1U)
+
+#define S_WRTACC2    5
+#define V_WRTACC2(x) ((x) << S_WRTACC2)
+#define F_WRTACC2    V_WRTACC2(1U)
+
+#define S_WRTACC1    4
+#define V_WRTACC1(x) ((x) << S_WRTACC1)
+#define F_WRTACC1    V_WRTACC1(1U)
+
+#define S_SELAPAN    3
+#define V_SELAPAN(x) ((x) << S_SELAPAN)
+#define F_SELAPAN    V_SELAPAN(1U)
+
+#define S_DASEL    0
+#define M_DASEL    0x7U
+#define V_DASEL(x) ((x) << S_DASEL)
+#define G_DASEL(x) (((x) >> S_DASEL) & M_DASEL)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DACAP_AND_DACAN 0x3248
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DACA_MIN_AND_DACAZ 0x324c
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DACA_MIN 0x324c
+#define A_MAC_PORT_RX_LINKA_RECEIVER_ADAC_CONTROL 0x3250
+
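+/*
+ * Fields suffixed _READWRITE and _READONLY (e.g. ADSN below) carry two
+ * bit positions for what is presumably the same logical field in the
+ * writable override and read-only status views of the register; use
+ * the variant that matches the access intended.
+ */
+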
+#define S_ADSN_READWRITE    8
+#define V_ADSN_READWRITE(x) ((x) << S_ADSN_READWRITE)
+#define F_ADSN_READWRITE    V_ADSN_READWRITE(1U)
+
+#define S_ADSN_READONLY    7
+#define V_ADSN_READONLY(x) ((x) << S_ADSN_READONLY)
+#define F_ADSN_READONLY    V_ADSN_READONLY(1U)
+
+#define S_ADAC2    8
+#define M_ADAC2    0xffU
+#define V_ADAC2(x) ((x) << S_ADAC2)
+#define G_ADAC2(x) (((x) >> S_ADAC2) & M_ADAC2)
+
+#define S_ADAC1    0
+#define M_ADAC1    0xffU
+#define V_ADAC1(x) ((x) << S_ADAC1)
+#define G_ADAC1(x) (((x) >> S_ADAC1) & M_ADAC1)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_AC_COUPLING_CONTROL 0x3254
+
+#define S_FACCPLDYN    13
+#define V_FACCPLDYN(x) ((x) << S_FACCPLDYN)
+#define F_FACCPLDYN    V_FACCPLDYN(1U)
+
+#define S_ACCPLGAIN    10
+#define M_ACCPLGAIN    0x7U
+#define V_ACCPLGAIN(x) ((x) << S_ACCPLGAIN)
+#define G_ACCPLGAIN(x) (((x) >> S_ACCPLGAIN) & M_ACCPLGAIN)
+
+#define S_ACCPLREF    8
+#define M_ACCPLREF    0x3U
+#define V_ACCPLREF(x) ((x) << S_ACCPLREF)
+#define G_ACCPLREF(x) (((x) >> S_ACCPLREF) & M_ACCPLREF)
+
+#define S_ACCPLSTEP    6
+#define M_ACCPLSTEP    0x3U
+#define V_ACCPLSTEP(x) ((x) << S_ACCPLSTEP)
+#define G_ACCPLSTEP(x) (((x) >> S_ACCPLSTEP) & M_ACCPLSTEP)
+
+#define S_ACCPLASTEP    1
+#define M_ACCPLASTEP    0x1fU
+#define V_ACCPLASTEP(x) ((x) << S_ACCPLASTEP)
+#define G_ACCPLASTEP(x) (((x) >> S_ACCPLASTEP) & M_ACCPLASTEP)
+
+#define S_FACCPL    0
+#define V_FACCPL(x) ((x) << S_FACCPL)
+#define F_FACCPL    V_FACCPL(1U)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_AC_COUPLING_VALUE 0x3258
+
+#define S_ACCPLMEANS    15
+#define V_ACCPLMEANS(x) ((x) << S_ACCPLMEANS)
+#define F_ACCPLMEANS    V_ACCPLMEANS(1U)
+
+#define S_CDROVREN    8
+#define V_CDROVREN(x) ((x) << S_CDROVREN)
+#define F_CDROVREN    V_CDROVREN(1U)
+
+#define S_ACCPLBIAS    0
+#define M_ACCPLBIAS    0xffU
+#define V_ACCPLBIAS(x) ((x) << S_ACCPLBIAS)
+#define G_ACCPLBIAS(x) (((x) >> S_ACCPLBIAS) & M_ACCPLBIAS)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x325c
+
+#define S_H1O2    8
+#define M_H1O2    0x3fU
+#define V_H1O2(x) ((x) << S_H1O2)
+#define G_H1O2(x) (((x) >> S_H1O2) & M_H1O2)
+
+#define S_H1E2    0
+#define M_H1E2    0x3fU
+#define V_H1E2(x) ((x) << S_H1E2)
+#define G_H1E2(x) (((x) >> S_H1E2) & M_H1E2)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1H2H3_LOCAL_OFFSET 0x325c
+
+#define S_H123CH    0
+#define M_H123CH    0x3fU
+#define V_H123CH(x) ((x) << S_H123CH)
+#define G_H123CH(x) (((x) >> S_H123CH) & M_H123CH)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3260
+
+#define S_H1O3    8
+#define M_H1O3    0x3fU
+#define V_H1O3(x) ((x) << S_H1O3)
+#define G_H1O3(x) (((x) >> S_H1O3) & M_H1O3)
+
+#define S_H1E3    0
+#define M_H1E3    0x3fU
+#define V_H1E3(x) ((x) << S_H1E3)
+#define G_H1E3(x) (((x) >> S_H1E3) & M_H1E3)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3260
+
+#define S_H1OX    8
+#define M_H1OX    0x3fU
+#define V_H1OX(x) ((x) << S_H1OX)
+#define G_H1OX(x) (((x) >> S_H1OX) & M_H1OX)
+
+#define S_H1EX    0
+#define M_H1EX    0x3fU
+#define V_H1EX(x) ((x) << S_H1EX)
+#define G_H1EX(x) (((x) >> S_H1EX) & M_H1EX)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3264
+
+#define S_H1O4    8
+#define M_H1O4    0x3fU
+#define V_H1O4(x) ((x) << S_H1O4)
+#define G_H1O4(x) (((x) >> S_H1O4) & M_H1O4)
+
+#define S_H1E4    0
+#define M_H1E4    0x3fU
+#define V_H1E4(x) ((x) << S_H1E4)
+#define G_H1E4(x) (((x) >> S_H1E4) & M_H1E4)
+
+#define A_MAC_PORT_RX_LINKA_PEAKED_INTEGRATOR 0x3264
+
+#define S_PILOCK    10
+#define V_PILOCK(x) ((x) << S_PILOCK)
+#define F_PILOCK    V_PILOCK(1U)
+
+#define S_UNPKPKA    2
+#define M_UNPKPKA    0x3fU
+#define V_UNPKPKA(x) ((x) << S_UNPKPKA)
+#define G_UNPKPKA(x) (((x) >> S_UNPKPKA) & M_UNPKPKA)
+
+#define S_UNPKVGA    0
+#define M_UNPKVGA    0x3U
+#define V_UNPKVGA(x) ((x) << S_UNPKVGA)
+#define G_UNPKVGA(x) (((x) >> S_UNPKVGA) & M_UNPKVGA)
+
+#define A_MAC_PORT_RX_LINKA_CDR_ANALOG_SWITCH 0x3268
+
+#define S_OVRAC    15
+#define V_OVRAC(x) ((x) << S_OVRAC)
+#define F_OVRAC    V_OVRAC(1U)
+
+#define S_OVRPK    14
+#define V_OVRPK(x) ((x) << S_OVRPK)
+#define F_OVRPK    V_OVRPK(1U)
+
+#define S_OVRTAILS    12
+#define M_OVRTAILS    0x3U
+#define V_OVRTAILS(x) ((x) << S_OVRTAILS)
+#define G_OVRTAILS(x) (((x) >> S_OVRTAILS) & M_OVRTAILS)
+
+#define S_OVRTAILV    9
+#define M_OVRTAILV    0x7U
+#define V_OVRTAILV(x) ((x) << S_OVRTAILV)
+#define G_OVRTAILV(x) (((x) >> S_OVRTAILV) & M_OVRTAILV)
+
+#define S_OVRCAP    8
+#define V_OVRCAP(x) ((x) << S_OVRCAP)
+#define F_OVRCAP    V_OVRCAP(1U)
+
+#define S_OVRDCDPRE    7
+#define V_OVRDCDPRE(x) ((x) << S_OVRDCDPRE)
+#define F_OVRDCDPRE    V_OVRDCDPRE(1U)
+
+#define S_OVRDCDPST    6
+#define V_OVRDCDPST(x) ((x) << S_OVRDCDPST)
+#define F_OVRDCDPST    V_OVRDCDPST(1U)
+
+#define S_DCVSCTMODE    2
+#define V_DCVSCTMODE(x) ((x) << S_DCVSCTMODE)
+#define F_DCVSCTMODE    V_DCVSCTMODE(1U)
+
+#define S_CDRANLGSW    0
+#define M_CDRANLGSW    0x3U
+#define V_CDRANLGSW(x) ((x) << S_CDRANLGSW)
+#define G_CDRANLGSW(x) (((x) >> S_CDRANLGSW) & M_CDRANLGSW)
+
+#define A_MAC_PORT_RX_LINKA_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x326c
+
+#define S_PFLAG    5
+#define M_PFLAG    0x3U
+#define V_PFLAG(x) ((x) << S_PFLAG)
+#define G_PFLAG(x) (((x) >> S_PFLAG) & M_PFLAG)
+
+#define A_MAC_PORT_RX_LINKA_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3270
+
+#define S_DPCMD    14
+#define V_DPCMD(x) ((x) << S_DPCMD)
+#define F_DPCMD    V_DPCMD(1U)
+
+#define S_DACCLIP    15
+#define V_DACCLIP(x) ((x) << S_DACCLIP)
+#define F_DACCLIP    V_DACCLIP(1U)
+
+#define S_DPCFRZ    14
+#define V_DPCFRZ(x) ((x) << S_DPCFRZ)
+#define F_DPCFRZ    V_DPCFRZ(1U)
+
+#define S_DPCLKNQ    11
+#define V_DPCLKNQ(x) ((x) << S_DPCLKNQ)
+#define F_DPCLKNQ    V_DPCLKNQ(1U)
+
+#define S_DPCWDFE    10
+#define V_DPCWDFE(x) ((x) << S_DPCWDFE)
+#define F_DPCWDFE    V_DPCWDFE(1U)
+
+#define S_DPCWPK    9
+#define V_DPCWPK(x) ((x) << S_DPCWPK)
+#define F_DPCWPK    V_DPCWPK(1U)
+
+#define A_MAC_PORT_RX_LINKA_DYNAMIC_DATA_CENTERING_DDC 0x3274
+
+#define S_VIEWSCAN    4
+#define V_VIEWSCAN(x) ((x) << S_VIEWSCAN)
+#define F_VIEWSCAN    V_VIEWSCAN(1U)
+
+#define S_T6_ODEC    0
+#define M_T6_ODEC    0xfU
+#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
+#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_INTERNAL_STATUS 0x3278
+
+#define S_T5BER6VAL    15
+#define V_T5BER6VAL(x) ((x) << S_T5BER6VAL)
+#define F_T5BER6VAL    V_T5BER6VAL(1U)
+
+#define S_T5BER6    14
+#define V_T5BER6(x) ((x) << S_T5BER6)
+#define F_T5BER6    V_T5BER6(1U)
+
+#define S_T5BER3VAL    13
+#define V_T5BER3VAL(x) ((x) << S_T5BER3VAL)
+#define F_T5BER3VAL    V_T5BER3VAL(1U)
+
+#define S_T5TOOFAST    12
+#define V_T5TOOFAST(x) ((x) << S_T5TOOFAST)
+#define F_T5TOOFAST    V_T5TOOFAST(1U)
+
+#define S_T5DPCCMP    9
+#define V_T5DPCCMP(x) ((x) << S_T5DPCCMP)
+#define F_T5DPCCMP    V_T5DPCCMP(1U)
+
+#define S_T5DACCMP    8
+#define V_T5DACCMP(x) ((x) << S_T5DACCMP)
+#define F_T5DACCMP    V_T5DACCMP(1U)
+
+#define S_T5DDCCMP    7
+#define V_T5DDCCMP(x) ((x) << S_T5DDCCMP)
+#define F_T5DDCCMP    V_T5DDCCMP(1U)
+
+#define S_T5AERRFLG    6
+#define V_T5AERRFLG(x) ((x) << S_T5AERRFLG)
+#define F_T5AERRFLG    V_T5AERRFLG(1U)
+
+#define S_T5WERRFLG    5
+#define V_T5WERRFLG(x) ((x) << S_T5WERRFLG)
+#define F_T5WERRFLG    V_T5WERRFLG(1U)
+
+#define S_T5TRCMP    4
+#define V_T5TRCMP(x) ((x) << S_T5TRCMP)
+#define F_T5TRCMP    V_T5TRCMP(1U)
+
+#define S_T5VLCKF    3
+#define V_T5VLCKF(x) ((x) << S_T5VLCKF)
+#define F_T5VLCKF    V_T5VLCKF(1U)
+
+#define S_T5ROCCMP    2
+#define V_T5ROCCMP(x) ((x) << S_T5ROCCMP)
+#define F_T5ROCCMP    V_T5ROCCMP(1U)
+
+#define S_T5DQCCCMP    1
+#define V_T5DQCCCMP(x) ((x) << S_T5DQCCCMP)
+#define F_T5DQCCCMP    V_T5DQCCCMP(1U)
+
+#define S_T5OCCMP    0
+#define V_T5OCCMP(x) ((x) << S_T5OCCMP)
+#define F_T5OCCMP    V_T5OCCMP(1U)
+
+#define S_RX_LINKA_ACCCMP_RIS    11
+#define V_RX_LINKA_ACCCMP_RIS(x) ((x) << S_RX_LINKA_ACCCMP_RIS)
+#define F_RX_LINKA_ACCCMP_RIS    V_RX_LINKA_ACCCMP_RIS(1U)
+
+#define S_DCCCMP    10
+#define V_DCCCMP(x) ((x) << S_DCCCMP)
+#define F_DCCCMP    V_DCCCMP(1U)
+
+#define S_T5IQCMP    1
+#define V_T5IQCMP(x) ((x) << S_T5IQCMP)
+#define F_T5IQCMP    V_T5IQCMP(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_FUNCTION_CONTROL_1 0x327c
+
+#define S_FLOFF    1
+#define V_FLOFF(x) ((x) << S_FLOFF)
+#define F_FLOFF    V_FLOFF(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_FUNCTION_CONTROL_2 0x3280
+
+#define S_H25SPC    15
+#define V_H25SPC(x) ((x) << S_H25SPC)
+#define F_H25SPC    V_H25SPC(1U)
+
+#define S_FTOOFAST    8
+#define V_FTOOFAST(x) ((x) << S_FTOOFAST)
+#define F_FTOOFAST    V_FTOOFAST(1U)
+
+#define S_FINTTRIM    7
+#define V_FINTTRIM(x) ((x) << S_FINTTRIM)
+#define F_FINTTRIM    V_FINTTRIM(1U)
+
+#define S_FDINV    6
+#define V_FDINV(x) ((x) << S_FDINV)
+#define F_FDINV    V_FDINV(1U)
+
+#define S_FHGS    5
+#define V_FHGS(x) ((x) << S_FHGS)
+#define F_FHGS    V_FHGS(1U)
+
+#define S_FH6H12    4
+#define V_FH6H12(x) ((x) << S_FH6H12)
+#define F_FH6H12    V_FH6H12(1U)
+
+#define S_FH1CAL    3
+#define V_FH1CAL(x) ((x) << S_FH1CAL)
+#define F_FH1CAL    V_FH1CAL(1U)
+
+#define S_FINTCAL    2
+#define V_FINTCAL(x) ((x) << S_FINTCAL)
+#define F_FINTCAL    V_FINTCAL(1U)
+
+#define S_FDCA    1
+#define V_FDCA(x) ((x) << S_FDCA)
+#define F_FDCA    V_FDCA(1U)
+
+#define S_FDQCC    0
+#define V_FDQCC(x) ((x) << S_FDQCC)
+#define F_FDQCC    V_FDQCC(1U)
+
+#define S_FDCCAL    14
+#define V_FDCCAL(x) ((x) << S_FDCCAL)
+#define F_FDCCAL    V_FDCCAL(1U)
+
+#define S_FROTCAL    13
+#define V_FROTCAL(x) ((x) << S_FROTCAL)
+#define F_FROTCAL    V_FROTCAL(1U)
+
+#define S_FIQAMP    12
+#define V_FIQAMP(x) ((x) << S_FIQAMP)
+#define F_FIQAMP    V_FIQAMP(1U)
+
+#define S_FRPTCALF    11
+#define V_FRPTCALF(x) ((x) << S_FRPTCALF)
+#define F_FRPTCALF    V_FRPTCALF(1U)
+
+#define S_FINTCALGS    10
+#define V_FINTCALGS(x) ((x) << S_FINTCALGS)
+#define F_FINTCALGS    V_FINTCALGS(1U)
+
+#define S_FDCC    9
+#define V_FDCC(x) ((x) << S_FDCC)
+#define F_FDCC    V_FDCC(1U)
+
+#define S_FDCD    7
+#define V_FDCD(x) ((x) << S_FDCD)
+#define F_FDCD    V_FDCD(1U)
+
+#define S_FINTRCALDYN    1
+#define V_FINTRCALDYN(x) ((x) << S_FINTRCALDYN)
+#define F_FINTRCALDYN    V_FINTRCALDYN(1U)
+
+#define S_FQCC    0
+#define V_FQCC(x) ((x) << S_FQCC)
+#define F_FQCC    V_FQCC(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_EVN1_EVN2 0x3284
+
+#define S_LOFE2S_READWRITE    16
+#define V_LOFE2S_READWRITE(x) ((x) << S_LOFE2S_READWRITE)
+#define F_LOFE2S_READWRITE    V_LOFE2S_READWRITE(1U)
+
+#define S_LOFE2S_READONLY    14
+#define M_LOFE2S_READONLY    0x3U
+#define V_LOFE2S_READONLY(x) ((x) << S_LOFE2S_READONLY)
+#define G_LOFE2S_READONLY(x) (((x) >> S_LOFE2S_READONLY) & M_LOFE2S_READONLY)
+
+#define S_LOFE2    8
+#define M_LOFE2    0x3fU
+#define V_LOFE2(x) ((x) << S_LOFE2)
+#define G_LOFE2(x) (((x) >> S_LOFE2) & M_LOFE2)
+
+#define S_LOFE1S_READWRITE    7
+#define V_LOFE1S_READWRITE(x) ((x) << S_LOFE1S_READWRITE)
+#define F_LOFE1S_READWRITE    V_LOFE1S_READWRITE(1U)
+
+#define S_LOFE1S_READONLY    6
+#define V_LOFE1S_READONLY(x) ((x) << S_LOFE1S_READONLY)
+#define F_LOFE1S_READONLY    V_LOFE1S_READONLY(1U)
+
+#define S_LOFE1    0
+#define M_LOFE1    0x3fU
+#define V_LOFE1(x) ((x) << S_LOFE1)
+#define G_LOFE1(x) (((x) >> S_LOFE1) & M_LOFE1)
+
+#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_CHANNEL 0x3284
+
+#define S_QCCIND    13
+#define V_QCCIND(x) ((x) << S_QCCIND)
+#define F_QCCIND    V_QCCIND(1U)
+
+#define S_DCDIND    10
+#define M_DCDIND    0x7U
+#define V_DCDIND(x) ((x) << S_DCDIND)
+#define G_DCDIND(x) (((x) >> S_DCDIND) & M_DCDIND)
+
+#define S_DCCIND    8
+#define M_DCCIND    0x3U
+#define V_DCCIND(x) ((x) << S_DCCIND)
+#define G_DCCIND(x) (((x) >> S_DCCIND) & M_DCCIND)
+
+#define S_CFSEL    5
+#define V_CFSEL(x) ((x) << S_CFSEL)
+#define F_CFSEL    V_CFSEL(1U)
+
+#define S_LOFCH    0
+#define M_LOFCH    0x1fU
+#define V_LOFCH(x) ((x) << S_LOFCH)
+#define G_LOFCH(x) (((x) >> S_LOFCH) & M_LOFCH)
+
+#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_ODD1_ODD2 0x3288
+
+#define S_LOFO2S_READWRITE    15
+#define V_LOFO2S_READWRITE(x) ((x) << S_LOFO2S_READWRITE)
+#define F_LOFO2S_READWRITE    V_LOFO2S_READWRITE(1U)
+
+#define S_LOFO2S_READONLY    14
+#define V_LOFO2S_READONLY(x) ((x) << S_LOFO2S_READONLY)
+#define F_LOFO2S_READONLY    V_LOFO2S_READONLY(1U)
+
+#define S_LOFO2    8
+#define M_LOFO2    0x3fU
+#define V_LOFO2(x) ((x) << S_LOFO2)
+#define G_LOFO2(x) (((x) >> S_LOFO2) & M_LOFO2)
+
+#define S_LOFO1S_READWRITE    7
+#define V_LOFO1S_READWRITE(x) ((x) << S_LOFO1S_READWRITE)
+#define F_LOFO1S_READWRITE    V_LOFO1S_READWRITE(1U)
+
+#define S_LOFO1S_READONLY    6
+#define V_LOFO1S_READONLY(x) ((x) << S_LOFO1S_READONLY)
+#define F_LOFO1S_READONLY    V_LOFO1S_READONLY(1U)
+
+#define S_LOFO1    0
+#define M_LOFO1    0x3fU
+#define V_LOFO1(x) ((x) << S_LOFO1)
+#define G_LOFO1(x) (((x) >> S_LOFO1) & M_LOFO1)
+
+#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_VALUE 0x3288
+
+#define S_LOFU    8
+#define M_LOFU    0x7fU
+#define V_LOFU(x) ((x) << S_LOFU)
+#define G_LOFU(x) (((x) >> S_LOFU) & M_LOFU)
+
+#define S_LOFL    0
+#define M_LOFL    0x7fU
+#define V_LOFL(x) ((x) << S_LOFL)
+#define G_LOFL(x) (((x) >> S_LOFL) & M_LOFL)
+
+#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_EVN3_EVN4 0x328c
+
+#define S_LOFE4S_READWRITE    15
+#define V_LOFE4S_READWRITE(x) ((x) << S_LOFE4S_READWRITE)
+#define F_LOFE4S_READWRITE    V_LOFE4S_READWRITE(1U)
+
+#define S_LOFE4S_READONLY    14
+#define V_LOFE4S_READONLY(x) ((x) << S_LOFE4S_READONLY)
+#define F_LOFE4S_READONLY    V_LOFE4S_READONLY(1U)
+
+#define S_LOFE    8
+#define M_LOFE    0x3fU
+#define V_LOFE(x) ((x) << S_LOFE)
+#define G_LOFE(x) (((x) >> S_LOFE) & M_LOFE)
+
+#define S_LOFE3S_READWRITE    7
+#define V_LOFE3S_READWRITE(x) ((x) << S_LOFE3S_READWRITE)
+#define F_LOFE3S_READWRITE    V_LOFE3S_READWRITE(1U)
+
+#define S_LOFE3S_READONLY    6
+#define V_LOFE3S_READONLY(x) ((x) << S_LOFE3S_READONLY)
+#define F_LOFE3S_READONLY    V_LOFE3S_READONLY(1U)
+
+#define S_LOFE3    0
+#define M_LOFE3    0x3fU
+#define V_LOFE3(x) ((x) << S_LOFE3)
+#define G_LOFE3(x) (((x) >> S_LOFE3) & M_LOFE3)
+
+#define A_MAC_PORT_RX_LINKA_H_COEFFICIENBT_BIST 0x328c
+
+#define S_HBISTMAN    12
+#define V_HBISTMAN(x) ((x) << S_HBISTMAN)
+#define F_HBISTMAN    V_HBISTMAN(1U)
+
+#define S_HBISTRES    11
+#define V_HBISTRES(x) ((x) << S_HBISTRES)
+#define F_HBISTRES    V_HBISTRES(1U)
+
+#define S_HBISTSP    8
+#define M_HBISTSP    0x7U
+#define V_HBISTSP(x) ((x) << S_HBISTSP)
+#define G_HBISTSP(x) (((x) >> S_HBISTSP) & M_HBISTSP)
+
+#define S_HBISTEN    7
+#define V_HBISTEN(x) ((x) << S_HBISTEN)
+#define F_HBISTEN    V_HBISTEN(1U)
+
+#define S_HBISTRST    6
+#define V_HBISTRST(x) ((x) << S_HBISTRST)
+#define F_HBISTRST    V_HBISTRST(1U)
+
+#define S_HCOMP    5
+#define V_HCOMP(x) ((x) << S_HCOMP)
+#define F_HCOMP    V_HCOMP(1U)
+
+#define S_HPASS    4
+#define V_HPASS(x) ((x) << S_HPASS)
+#define F_HPASS    V_HPASS(1U)
+
+#define S_HSEL    0
+#define M_HSEL    0xfU
+#define V_HSEL(x) ((x) << S_HSEL)
+#define G_HSEL(x) (((x) >> S_HSEL) & M_HSEL)
+
+#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_ODD3_ODD4 0x3290
+
+#define S_LOFO4S_READWRITE    15
+#define V_LOFO4S_READWRITE(x) ((x) << S_LOFO4S_READWRITE)
+#define F_LOFO4S_READWRITE    V_LOFO4S_READWRITE(1U)
+
+#define S_LOFO4S_READONLY    14
+#define V_LOFO4S_READONLY(x) ((x) << S_LOFO4S_READONLY)
+#define F_LOFO4S_READONLY    V_LOFO4S_READONLY(1U)
+
+#define S_LOFO4    8
+#define M_LOFO4    0x3fU
+#define V_LOFO4(x) ((x) << S_LOFO4)
+#define G_LOFO4(x) (((x) >> S_LOFO4) & M_LOFO4)
+
+#define S_LOFO3S_READWRITE    7
+#define V_LOFO3S_READWRITE(x) ((x) << S_LOFO3S_READWRITE)
+#define F_LOFO3S_READWRITE    V_LOFO3S_READWRITE(1U)
+
+#define S_LOFO3S_READONLY    6
+#define V_LOFO3S_READONLY(x) ((x) << S_LOFO3S_READONLY)
+#define F_LOFO3S_READONLY    V_LOFO3S_READONLY(1U)
+
+#define S_LOFO3    0
+#define M_LOFO3    0x3fU
+#define V_LOFO3(x) ((x) << S_LOFO3)
+#define G_LOFO3(x) (((x) >> S_LOFO3) & M_LOFO3)
+
+#define A_MAC_PORT_RX_LINKA_AC_CAPACITOR_BIST 0x3290
+
+#define S_RX_LINKA_ACCCMP_BIST    13
+#define V_RX_LINKA_ACCCMP_BIST(x) ((x) << S_RX_LINKA_ACCCMP_BIST)
+#define F_RX_LINKA_ACCCMP_BIST    V_RX_LINKA_ACCCMP_BIST(1U)
+
+#define S_ACCEN    12
+#define V_ACCEN(x) ((x) << S_ACCEN)
+#define F_ACCEN    V_ACCEN(1U)
+
+#define S_ACCRST    11
+#define V_ACCRST(x) ((x) << S_ACCRST)
+#define F_ACCRST    V_ACCRST(1U)
+
+#define S_ACCIND    8
+#define M_ACCIND    0x7U
+#define V_ACCIND(x) ((x) << S_ACCIND)
+#define G_ACCIND(x) (((x) >> S_ACCIND) & M_ACCIND)
+
+#define S_ACCRD    0
+#define M_ACCRD    0xffU
+#define V_ACCRD(x) ((x) << S_ACCRD)
+#define G_ACCRD(x) (((x) >> S_ACCRD) & M_ACCRD)
+
+#define A_MAC_PORT_RX_LINKA_DFE_E0_AND_E1_OFFSET 0x3294
+
+#define S_T5E1SN_READWRITE    15
+#define V_T5E1SN_READWRITE(x) ((x) << S_T5E1SN_READWRITE)
+#define F_T5E1SN_READWRITE    V_T5E1SN_READWRITE(1U)
+
+#define S_T5E1SN_READONLY    14
+#define V_T5E1SN_READONLY(x) ((x) << S_T5E1SN_READONLY)
+#define F_T5E1SN_READONLY    V_T5E1SN_READONLY(1U)
+
+#define S_T5E1AMP    8
+#define M_T5E1AMP    0x3fU
+#define V_T5E1AMP(x) ((x) << S_T5E1AMP)
+#define G_T5E1AMP(x) (((x) >> S_T5E1AMP) & M_T5E1AMP)
+
+#define S_T5E0SN_READWRITE    7
+#define V_T5E0SN_READWRITE(x) ((x) << S_T5E0SN_READWRITE)
+#define F_T5E0SN_READWRITE    V_T5E0SN_READWRITE(1U)
+
+#define S_T5E0SN_READONLY    6
+#define V_T5E0SN_READONLY(x) ((x) << S_T5E0SN_READONLY)
+#define F_T5E0SN_READONLY    V_T5E0SN_READONLY(1U)
+
+#define S_T5E0AMP    0
+#define M_T5E0AMP    0x3fU
+#define V_T5E0AMP(x) ((x) << S_T5E0AMP)
+#define G_T5E0AMP(x) (((x) >> S_T5E0AMP) & M_T5E0AMP)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_LOFF_CONTROL 0x3298
+
+#define S_T5LFREG    12
+#define V_T5LFREG(x) ((x) << S_T5LFREG)
+#define F_T5LFREG    V_T5LFREG(1U)
+
+#define S_T5LFRC    11
+#define V_T5LFRC(x) ((x) << S_T5LFRC)
+#define F_T5LFRC    V_T5LFRC(1U)
+
+#define S_T5LFSEL    8
+#define M_T5LFSEL    0x7U
+#define V_T5LFSEL(x) ((x) << S_T5LFSEL)
+#define G_T5LFSEL(x) (((x) >> S_T5LFSEL) & M_T5LFSEL)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_LOFF_CONTROL_REGISTER 0x3298
+
+#define S_LFREG    15
+#define V_LFREG(x) ((x) << S_LFREG)
+#define F_LFREG    V_LFREG(1U)
+
+#define S_LFRC    14
+#define V_LFRC(x) ((x) << S_LFRC)
+#define F_LFRC    V_LFRC(1U)
+
+#define S_LGIDLE    13
+#define V_LGIDLE(x) ((x) << S_LGIDLE)
+#define F_LGIDLE    V_LGIDLE(1U)
+
+#define S_LFTGT    8
+#define M_LFTGT    0x1fU
+#define V_LFTGT(x) ((x) << S_LFTGT)
+#define G_LFTGT(x) (((x) >> S_LFTGT) & M_LFTGT)
+
+#define S_LGTGT    7
+#define V_LGTGT(x) ((x) << S_LGTGT)
+#define F_LGTGT    V_LGTGT(1U)
+
+#define S_LRDY    6
+#define V_LRDY(x) ((x) << S_LRDY)
+#define F_LRDY    V_LRDY(1U)
+
+#define S_LIDLE    5
+#define V_LIDLE(x) ((x) << S_LIDLE)
+#define F_LIDLE    V_LIDLE(1U)
+
+#define S_LCURR    0
+#define M_LCURR    0x1fU
+#define V_LCURR(x) ((x) << S_LCURR)
+#define G_LCURR(x) (((x) >> S_LCURR) & M_LCURR)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_SIGDET_CONTROL 0x329c
+
+#define S_OFFSN_READWRITE    14
+#define V_OFFSN_READWRITE(x) ((x) << S_OFFSN_READWRITE)
+#define F_OFFSN_READWRITE    V_OFFSN_READWRITE(1U)
+
+#define S_OFFSN_READONLY    13
+#define V_OFFSN_READONLY(x) ((x) << S_OFFSN_READONLY)
+#define F_OFFSN_READONLY    V_OFFSN_READONLY(1U)
+
+#define S_OFFAMP    8
+#define M_OFFAMP    0x1fU
+#define V_OFFAMP(x) ((x) << S_OFFAMP)
+#define G_OFFAMP(x) (((x) >> S_OFFAMP) & M_OFFAMP)
+
+#define S_SDACDC    7
+#define V_SDACDC(x) ((x) << S_SDACDC)
+#define F_SDACDC    V_SDACDC(1U)
+
+#define S_OFFSN    13
+#define M_OFFSN    0x3U
+#define V_OFFSN(x) ((x) << S_OFFSN)
+#define G_OFFSN(x) (((x) >> S_OFFSN) & M_OFFSN)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_ANALOG_CONTROL_SWITCH 0x32a0
+
+#define S_T5_RX_SETHDIS    7
+#define V_T5_RX_SETHDIS(x) ((x) << S_T5_RX_SETHDIS)
+#define F_T5_RX_SETHDIS    V_T5_RX_SETHDIS(1U)
+
+#define S_T5_RX_PDTERM    6
+#define V_T5_RX_PDTERM(x) ((x) << S_T5_RX_PDTERM)
+#define F_T5_RX_PDTERM    V_T5_RX_PDTERM(1U)
+
+#define S_T5_RX_BYPASS    5
+#define V_T5_RX_BYPASS(x) ((x) << S_T5_RX_BYPASS)
+#define F_T5_RX_BYPASS    V_T5_RX_BYPASS(1U)
+
+#define S_T5_RX_LPFEN    4
+#define V_T5_RX_LPFEN(x) ((x) << S_T5_RX_LPFEN)
+#define F_T5_RX_LPFEN    V_T5_RX_LPFEN(1U)
+
+#define S_T5_RX_VGABOD    3
+#define V_T5_RX_VGABOD(x) ((x) << S_T5_RX_VGABOD)
+#define F_T5_RX_VGABOD    V_T5_RX_VGABOD(1U)
+
+#define S_T5_RX_VTBYP    2
+#define V_T5_RX_VTBYP(x) ((x) << S_T5_RX_VTBYP)
+#define F_T5_RX_VTBYP    V_T5_RX_VTBYP(1U)
+
+#define S_T5_RX_VTERM    0
+#define M_T5_RX_VTERM    0x3U
+#define V_T5_RX_VTERM(x) ((x) << S_T5_RX_VTERM)
+#define G_T5_RX_VTERM(x) (((x) >> S_T5_RX_VTERM) & M_T5_RX_VTERM)
+
+#define S_RX_OVRSUMPD    15
+#define V_RX_OVRSUMPD(x) ((x) << S_RX_OVRSUMPD)
+#define F_RX_OVRSUMPD    V_RX_OVRSUMPD(1U)
+
+#define S_RX_OVRKBPD    14
+#define V_RX_OVRKBPD(x) ((x) << S_RX_OVRKBPD)
+#define F_RX_OVRKBPD    V_RX_OVRKBPD(1U)
+
+#define S_RX_OVRDIVPD    13
+#define V_RX_OVRDIVPD(x) ((x) << S_RX_OVRDIVPD)
+#define F_RX_OVRDIVPD    V_RX_OVRDIVPD(1U)
+
+#define S_RX_OFFVGADIS    12
+#define V_RX_OFFVGADIS(x) ((x) << S_RX_OFFVGADIS)
+#define F_RX_OFFVGADIS    V_RX_OFFVGADIS(1U)
+
+#define S_RX_OFFACDIS    11
+#define V_RX_OFFACDIS(x) ((x) << S_RX_OFFACDIS)
+#define F_RX_OFFACDIS    V_RX_OFFACDIS(1U)
+
+#define S_RX_VTERM    10
+#define V_RX_VTERM(x) ((x) << S_RX_VTERM)
+#define F_RX_VTERM    V_RX_VTERM(1U)
+
+#define S_RX_DISSPY2D    8
+#define V_RX_DISSPY2D(x) ((x) << S_RX_DISSPY2D)
+#define F_RX_DISSPY2D    V_RX_DISSPY2D(1U)
+
+#define S_RX_OBSOVEN    7
+#define V_RX_OBSOVEN(x) ((x) << S_RX_OBSOVEN)
+#define F_RX_OBSOVEN    V_RX_OBSOVEN(1U)
+
+#define S_RX_LINKANLGSW    0
+#define M_RX_LINKANLGSW    0x7fU
+#define V_RX_LINKANLGSW(x) ((x) << S_RX_LINKANLGSW)
+#define G_RX_LINKANLGSW(x) (((x) >> S_RX_LINKANLGSW) & M_RX_LINKANLGSW)
+
+#define A_MAC_PORT_RX_LINKA_INTEGRATOR_DAC_OFFSET 0x32a4
+
+#define S_ISTRIMS    14
+#define M_ISTRIMS    0x3U
+#define V_ISTRIMS(x) ((x) << S_ISTRIMS)
+#define G_ISTRIMS(x) (((x) >> S_ISTRIMS) & M_ISTRIMS)
+
+#define S_ISTRIM    8
+#define M_ISTRIM    0x3fU
+#define V_ISTRIM(x) ((x) << S_ISTRIM)
+#define G_ISTRIM(x) (((x) >> S_ISTRIM) & M_ISTRIM)
+
+#define S_HALF1    7
+#define V_HALF1(x) ((x) << S_HALF1)
+#define F_HALF1    V_HALF1(1U)
+
+#define S_HALF2    6
+#define V_HALF2(x) ((x) << S_HALF2)
+#define F_HALF2    V_HALF2(1U)
+
+#define S_INTDAC    0
+#define M_INTDAC    0x3fU
+#define V_INTDAC(x) ((x) << S_INTDAC)
+#define G_INTDAC(x) (((x) >> S_INTDAC) & M_INTDAC)
+
+#define S_INTDACEGS    13
+#define M_INTDACEGS    0x7U
+#define V_INTDACEGS(x) ((x) << S_INTDACEGS)
+#define G_INTDACEGS(x) (((x) >> S_INTDACEGS) & M_INTDACEGS)
+
+#define S_INTDACE    8
+#define M_INTDACE    0x1fU
+#define V_INTDACE(x) ((x) << S_INTDACE)
+#define G_INTDACE(x) (((x) >> S_INTDACE) & M_INTDACE)
+
+#define S_INTDACGS    6
+#define M_INTDACGS    0x3U
+#define V_INTDACGS(x) ((x) << S_INTDACGS)
+#define G_INTDACGS(x) (((x) >> S_INTDACGS) & M_INTDACGS)
+
+#define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_CONTROL 0x32a8
+
+#define S_MINWDTH    5
+#define M_MINWDTH    0x1fU
+#define V_MINWDTH(x) ((x) << S_MINWDTH)
+#define G_MINWDTH(x) (((x) >> S_MINWDTH) & M_MINWDTH)
+
+#define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS 0x32ac
+
+#define S_T5SMQM    13
+#define M_T5SMQM    0x7U
+#define V_T5SMQM(x) ((x) << S_T5SMQM)
+#define G_T5SMQM(x) (((x) >> S_T5SMQM) & M_T5SMQM)
+
+#define S_T5SMQ    5
+#define M_T5SMQ    0xffU
+#define V_T5SMQ(x) ((x) << S_T5SMQ)
+#define G_T5SMQ(x) (((x) >> S_T5SMQ) & M_T5SMQ)
+
+#define S_T5EMMD    3
+#define M_T5EMMD    0x3U
+#define V_T5EMMD(x) ((x) << S_T5EMMD)
+#define G_T5EMMD(x) (((x) >> S_T5EMMD) & M_T5EMMD)
+
+#define S_T5EMBRDY    2
+#define V_T5EMBRDY(x) ((x) << S_T5EMBRDY)
+#define F_T5EMBRDY    V_T5EMBRDY(1U)
+
+#define S_T5EMBUMP    1
+#define V_T5EMBUMP(x) ((x) << S_T5EMBUMP)
+#define F_T5EMBUMP    V_T5EMBUMP(1U)
+
+#define S_T5EMEN    0
+#define V_T5EMEN(x) ((x) << S_T5EMEN)
+#define F_T5EMEN    V_T5EMEN(1U)
+
+#define S_SMQM    13
+#define M_SMQM    0x7U
+#define V_SMQM(x) ((x) << S_SMQM)
+#define G_SMQM(x) (((x) >> S_SMQM) & M_SMQM)
+
+#define S_SMQ    5
+#define M_SMQ    0xffU
+#define V_SMQ(x) ((x) << S_SMQ)
+#define G_SMQ(x) (((x) >> S_SMQ) & M_SMQ)
+
+#define S_T6_EMMD    3
+#define M_T6_EMMD    0x3U
+#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
+#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
+
+#define S_T6_EMBRDY    2
+#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
+#define F_T6_EMBRDY    V_T6_EMBRDY(1U)
+
+#define S_T6_EMBUMP    1
+#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
+#define F_T6_EMBUMP    V_T6_EMBUMP(1U)
+
+#define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS_ERROR_COUNT 0x32b0
+
+#define S_EMF8    15
+#define V_EMF8(x) ((x) << S_EMF8)
+#define F_EMF8    V_EMF8(1U)
+
+#define S_EMCNT    4
+#define M_EMCNT    0xffU
+#define V_EMCNT(x) ((x) << S_EMCNT)
+#define G_EMCNT(x) (((x) >> S_EMCNT) & M_EMCNT)
+
+#define S_EMOFLO    2
+#define V_EMOFLO(x) ((x) << S_EMOFLO)
+#define F_EMOFLO    V_EMOFLO(1U)
+
+#define S_EMCRST    1
+#define V_EMCRST(x) ((x) << S_EMCRST)
+#define F_EMCRST    V_EMCRST(1U)
+
+#define S_EMCEN    0
+#define V_EMCEN(x) ((x) << S_EMCEN)
+#define F_EMCEN    V_EMCEN(1U)
+
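+/*
+ * Reading the eye-metric error counter above (illustrative sketch,
+ * assuming the driver's t4_read_reg() accessor):
+ *
+ *	uint32_t v;
+ *
+ *	v = t4_read_reg(sc,
+ *	    A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS_ERROR_COUNT);
+ *	errors = G_EMCNT(v);
+ *	overflowed = (v & F_EMOFLO) != 0;
+ */
+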
+#define S_EMSF    13
+#define V_EMSF(x) ((x) << S_EMSF)
+#define F_EMSF    V_EMSF(1U)
+
+#define S_EMDATA59    12
+#define V_EMDATA59(x) ((x) << S_EMDATA59)
+#define F_EMDATA59    V_EMDATA59(1U)
+
+#define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x32b4
+
+#define S_SM2RDY    15
+#define V_SM2RDY(x) ((x) << S_SM2RDY)
+#define F_SM2RDY    V_SM2RDY(1U)
+
+#define S_SM2RST    14
+#define V_SM2RST(x) ((x) << S_SM2RST)
+#define F_SM2RST    V_SM2RST(1U)
+
+#define S_APDF    0
+#define M_APDF    0xfffU
+#define V_APDF(x) ((x) << S_APDF)
+#define G_APDF(x) (((x) >> S_APDF) & M_APDF)
+
+#define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x32b8
+
+#define S_SM0LEN    0
+#define M_SM0LEN    0x7fffU
+#define V_SM0LEN(x) ((x) << S_SM0LEN)
+#define G_SM0LEN(x) (((x) >> S_SM0LEN) & M_SM0LEN)
+
+#define A_MAC_PORT_RX_LINKA_DFE_FUNCTION_CONTROL_3 0x32bc
+
+#define S_FTIMEOUT    15
+#define V_FTIMEOUT(x) ((x) << S_FTIMEOUT)
+#define F_FTIMEOUT    V_FTIMEOUT(1U)
+
+#define S_FROTCAL4    14
+#define V_FROTCAL4(x) ((x) << S_FROTCAL4)
+#define F_FROTCAL4    V_FROTCAL4(1U)
+
+#define S_FDCD2    13
+#define V_FDCD2(x) ((x) << S_FDCD2)
+#define F_FDCD2    V_FDCD2(1U)
+
+#define S_FPRBSPOLTOG    12
+#define V_FPRBSPOLTOG(x) ((x) << S_FPRBSPOLTOG)
+#define F_FPRBSPOLTOG    V_FPRBSPOLTOG(1U)
+
+#define S_FPRBSOFF2    11
+#define V_FPRBSOFF2(x) ((x) << S_FPRBSOFF2)
+#define F_FPRBSOFF2    V_FPRBSOFF2(1U)
+
+#define S_FDDCAL2    10
+#define V_FDDCAL2(x) ((x) << S_FDDCAL2)
+#define F_FDDCAL2    V_FDDCAL2(1U)
+
+#define S_FDDCFLTR    9
+#define V_FDDCFLTR(x) ((x) << S_FDDCFLTR)
+#define F_FDDCFLTR    V_FDDCFLTR(1U)
+
+#define S_FDAC6    8
+#define V_FDAC6(x) ((x) << S_FDAC6)
+#define F_FDAC6    V_FDAC6(1U)
+
+#define S_FDDC5    7
+#define V_FDDC5(x) ((x) << S_FDDC5)
+#define F_FDDC5    V_FDDC5(1U)
+
+#define S_FDDC3456    6
+#define V_FDDC3456(x) ((x) << S_FDDC3456)
+#define F_FDDC3456    V_FDDC3456(1U)
+
+#define S_FSPY2DATA    5
+#define V_FSPY2DATA(x) ((x) << S_FSPY2DATA)
+#define F_FSPY2DATA    V_FSPY2DATA(1U)
+
+#define S_FPHSLOCK    4
+#define V_FPHSLOCK(x) ((x) << S_FPHSLOCK)
+#define F_FPHSLOCK    V_FPHSLOCK(1U)
+
+#define S_FCLKALGN    3
+#define V_FCLKALGN(x) ((x) << S_FCLKALGN)
+#define F_FCLKALGN    V_FCLKALGN(1U)
+
+#define S_FCLKALDYN    2
+#define V_FCLKALDYN(x) ((x) << S_FCLKALDYN)
+#define F_FCLKALDYN    V_FCLKALDYN(1U)
+
+#define S_FDFE    1
+#define V_FDFE(x) ((x) << S_FDFE)
+#define F_FDFE    V_FDFE(1U)
+
+#define S_FPRBSOFF    0
+#define V_FPRBSOFF(x) ((x) << S_FPRBSOFF)
+#define F_FPRBSOFF    V_FPRBSOFF(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_TAP_ENABLE 0x32c0
+
+#define S_H_EN    1
+#define M_H_EN    0xfffU
+#define V_H_EN(x) ((x) << S_H_EN)
+#define G_H_EN(x) (((x) >> S_H_EN) & M_H_EN)
+
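+/*
+ * H_EN is a 12-bit enable mask (bits 12:1), presumably one bit per DFE
+ * tap; the per-tap coefficient registers H1 through H12 follow below.
+ */
+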
+#define A_MAC_PORT_RX_LINKA_DFE_TAP_CONTROL 0x32c0
+
+#define S_RX_LINKA_INDEX_DFE_TC    0
+#define M_RX_LINKA_INDEX_DFE_TC    0xfU
+#define V_RX_LINKA_INDEX_DFE_TC(x) ((x) << S_RX_LINKA_INDEX_DFE_TC)
+#define G_RX_LINKA_INDEX_DFE_TC(x) (((x) >> S_RX_LINKA_INDEX_DFE_TC) & M_RX_LINKA_INDEX_DFE_TC)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1 0x32c4
+#define A_MAC_PORT_RX_LINKA_DFE_TAP 0x32c4
+
+#define S_RX_LINKA_INDEX_DFE_TAP    0
+#define M_RX_LINKA_INDEX_DFE_TAP    0xfU
+#define V_RX_LINKA_INDEX_DFE_TAP(x) ((x) << S_RX_LINKA_INDEX_DFE_TAP)
+#define G_RX_LINKA_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKA_INDEX_DFE_TAP) & M_RX_LINKA_INDEX_DFE_TAP)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H2 0x32c8
+
+#define S_H2OSN_READWRITE    14
+#define V_H2OSN_READWRITE(x) ((x) << S_H2OSN_READWRITE)
+#define F_H2OSN_READWRITE    V_H2OSN_READWRITE(1U)
+
+#define S_H2OSN_READONLY    13
+#define V_H2OSN_READONLY(x) ((x) << S_H2OSN_READONLY)
+#define F_H2OSN_READONLY    V_H2OSN_READONLY(1U)
+
+#define S_H2ESN_READWRITE    6
+#define V_H2ESN_READWRITE(x) ((x) << S_H2ESN_READWRITE)
+#define F_H2ESN_READWRITE    V_H2ESN_READWRITE(1U)
+
+#define S_H2ESN_READONLY    5
+#define V_H2ESN_READONLY(x) ((x) << S_H2ESN_READONLY)
+#define F_H2ESN_READONLY    V_H2ESN_READONLY(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H3 0x32cc
+
+#define S_H3OSN_READWRITE    13
+#define V_H3OSN_READWRITE(x) ((x) << S_H3OSN_READWRITE)
+#define F_H3OSN_READWRITE    V_H3OSN_READWRITE(1U)
+
+#define S_H3OSN_READONLY    12
+#define V_H3OSN_READONLY(x) ((x) << S_H3OSN_READONLY)
+#define F_H3OSN_READONLY    V_H3OSN_READONLY(1U)
+
+#define S_H3ESN_READWRITE    5
+#define V_H3ESN_READWRITE(x) ((x) << S_H3ESN_READWRITE)
+#define F_H3ESN_READWRITE    V_H3ESN_READWRITE(1U)
+
+#define S_H3ESN_READONLY    4
+#define V_H3ESN_READONLY(x) ((x) << S_H3ESN_READONLY)
+#define F_H3ESN_READONLY    V_H3ESN_READONLY(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H4 0x32d0
+
+#define S_H4OGS    14
+#define M_H4OGS    0x3U
+#define V_H4OGS(x) ((x) << S_H4OGS)
+#define G_H4OGS(x) (((x) >> S_H4OGS) & M_H4OGS)
+
+#define S_H4OSN_READWRITE    13
+#define V_H4OSN_READWRITE(x) ((x) << S_H4OSN_READWRITE)
+#define F_H4OSN_READWRITE    V_H4OSN_READWRITE(1U)
+
+#define S_H4OSN_READONLY    12
+#define V_H4OSN_READONLY(x) ((x) << S_H4OSN_READONLY)
+#define F_H4OSN_READONLY    V_H4OSN_READONLY(1U)
+
+#define S_H4EGS    6
+#define M_H4EGS    0x3U
+#define V_H4EGS(x) ((x) << S_H4EGS)
+#define G_H4EGS(x) (((x) >> S_H4EGS) & M_H4EGS)
+
+#define S_H4ESN_READWRITE    5
+#define V_H4ESN_READWRITE(x) ((x) << S_H4ESN_READWRITE)
+#define F_H4ESN_READWRITE    V_H4ESN_READWRITE(1U)
+
+#define S_H4ESN_READONLY    4
+#define V_H4ESN_READONLY(x) ((x) << S_H4ESN_READONLY)
+#define F_H4ESN_READONLY    V_H4ESN_READONLY(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H5 0x32d4
+
+#define S_H5OGS    14
+#define M_H5OGS    0x3U
+#define V_H5OGS(x) ((x) << S_H5OGS)
+#define G_H5OGS(x) (((x) >> S_H5OGS) & M_H5OGS)
+
+#define S_H5OSN_READWRITE    13
+#define V_H5OSN_READWRITE(x) ((x) << S_H5OSN_READWRITE)
+#define F_H5OSN_READWRITE    V_H5OSN_READWRITE(1U)
+
+#define S_H5OSN_READONLY    12
+#define V_H5OSN_READONLY(x) ((x) << S_H5OSN_READONLY)
+#define F_H5OSN_READONLY    V_H5OSN_READONLY(1U)
+
+#define S_H5EGS    6
+#define M_H5EGS    0x3U
+#define V_H5EGS(x) ((x) << S_H5EGS)
+#define G_H5EGS(x) (((x) >> S_H5EGS) & M_H5EGS)
+
+#define S_H5ESN_READWRITE    5
+#define V_H5ESN_READWRITE(x) ((x) << S_H5ESN_READWRITE)
+#define F_H5ESN_READWRITE    V_H5ESN_READWRITE(1U)
+
+#define S_H5ESN_READONLY    4
+#define V_H5ESN_READONLY(x) ((x) << S_H5ESN_READONLY)
+#define F_H5ESN_READONLY    V_H5ESN_READONLY(1U)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H6_AND_H7 0x32d8
+
+#define S_H7GS    14
+#define M_H7GS    0x3U
+#define V_H7GS(x) ((x) << S_H7GS)
+#define G_H7GS(x) (((x) >> S_H7GS) & M_H7GS)
+
+#define S_H7SN_READWRITE    13
+#define V_H7SN_READWRITE(x) ((x) << S_H7SN_READWRITE)
+#define F_H7SN_READWRITE    V_H7SN_READWRITE(1U)
+
+#define S_H7SN_READONLY    12
+#define V_H7SN_READONLY(x) ((x) << S_H7SN_READONLY)
+#define F_H7SN_READONLY    V_H7SN_READONLY(1U)
+
+#define S_H7MAG    8
+#define M_H7MAG    0xfU
+#define V_H7MAG(x) ((x) << S_H7MAG)
+#define G_H7MAG(x) (((x) >> S_H7MAG) & M_H7MAG)
+
+#define S_H6GS    6
+#define M_H6GS    0x3U
+#define V_H6GS(x) ((x) << S_H6GS)
+#define G_H6GS(x) (((x) >> S_H6GS) & M_H6GS)
+
+#define S_H6SN_READWRITE    5
+#define V_H6SN_READWRITE(x) ((x) << S_H6SN_READWRITE)
+#define F_H6SN_READWRITE    V_H6SN_READWRITE(1U)
+
+#define S_H6SN_READONLY    4
+#define V_H6SN_READONLY(x) ((x) << S_H6SN_READONLY)
+#define F_H6SN_READONLY    V_H6SN_READONLY(1U)
+
+#define S_H6MAG    0
+#define M_H6MAG    0xfU
+#define V_H6MAG(x) ((x) << S_H6MAG)
+#define G_H6MAG(x) (((x) >> S_H6MAG) & M_H6MAG)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H8_AND_H9 0x32dc
+
+#define S_H9GS    14
+#define M_H9GS    0x3U
+#define V_H9GS(x) ((x) << S_H9GS)
+#define G_H9GS(x) (((x) >> S_H9GS) & M_H9GS)
+
+#define S_H9SN_READWRITE    13
+#define V_H9SN_READWRITE(x) ((x) << S_H9SN_READWRITE)
+#define F_H9SN_READWRITE    V_H9SN_READWRITE(1U)
+
+#define S_H9SN_READONLY    12
+#define V_H9SN_READONLY(x) ((x) << S_H9SN_READONLY)
+#define F_H9SN_READONLY    V_H9SN_READONLY(1U)
+
+#define S_H9MAG    8
+#define M_H9MAG    0xfU
+#define V_H9MAG(x) ((x) << S_H9MAG)
+#define G_H9MAG(x) (((x) >> S_H9MAG) & M_H9MAG)
+
+#define S_H8GS    6
+#define M_H8GS    0x3U
+#define V_H8GS(x) ((x) << S_H8GS)
+#define G_H8GS(x) (((x) >> S_H8GS) & M_H8GS)
+
+#define S_H8SN_READWRITE    5
+#define V_H8SN_READWRITE(x) ((x) << S_H8SN_READWRITE)
+#define F_H8SN_READWRITE    V_H8SN_READWRITE(1U)
+
+#define S_H8SN_READONLY    4
+#define V_H8SN_READONLY(x) ((x) << S_H8SN_READONLY)
+#define F_H8SN_READONLY    V_H8SN_READONLY(1U)
+
+#define S_H8MAG    0
+#define M_H8MAG    0xfU
+#define V_H8MAG(x) ((x) << S_H8MAG)
+#define G_H8MAG(x) (((x) >> S_H8MAG) & M_H8MAG)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H10_AND_H11 0x32e0
+
+#define S_H11GS    14
+#define M_H11GS    0x3U
+#define V_H11GS(x) ((x) << S_H11GS)
+#define G_H11GS(x) (((x) >> S_H11GS) & M_H11GS)
+
+#define S_H11SN_READWRITE    13
+#define V_H11SN_READWRITE(x) ((x) << S_H11SN_READWRITE)
+#define F_H11SN_READWRITE    V_H11SN_READWRITE(1U)
+
+#define S_H11SN_READONLY    12
+#define V_H11SN_READONLY(x) ((x) << S_H11SN_READONLY)
+#define F_H11SN_READONLY    V_H11SN_READONLY(1U)
+
+#define S_H11MAG    8
+#define M_H11MAG    0xfU
+#define V_H11MAG(x) ((x) << S_H11MAG)
+#define G_H11MAG(x) (((x) >> S_H11MAG) & M_H11MAG)
+
+#define S_H10GS    6
+#define M_H10GS    0x3U
+#define V_H10GS(x) ((x) << S_H10GS)
+#define G_H10GS(x) (((x) >> S_H10GS) & M_H10GS)
+
+#define S_H10SN_READWRITE    5
+#define V_H10SN_READWRITE(x) ((x) << S_H10SN_READWRITE)
+#define F_H10SN_READWRITE    V_H10SN_READWRITE(1U)
+
+#define S_H10SN_READONLY    4
+#define V_H10SN_READONLY(x) ((x) << S_H10SN_READONLY)
+#define F_H10SN_READONLY    V_H10SN_READONLY(1U)
+
+#define S_H10MAG    0
+#define M_H10MAG    0xfU
+#define V_H10MAG(x) ((x) << S_H10MAG)
+#define G_H10MAG(x) (((x) >> S_H10MAG) & M_H10MAG)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H12 0x32e4
+
+#define S_H12GS    6
+#define M_H12GS    0x3U
+#define V_H12GS(x) ((x) << S_H12GS)
+#define G_H12GS(x) (((x) >> S_H12GS) & M_H12GS)
+
+#define S_H12SN_READWRITE    5
+#define V_H12SN_READWRITE(x) ((x) << S_H12SN_READWRITE)
+#define F_H12SN_READWRITE    V_H12SN_READWRITE(1U)
+
+#define S_H12SN_READONLY    4
+#define V_H12SN_READONLY(x) ((x) << S_H12SN_READONLY)
+#define F_H12SN_READONLY    V_H12SN_READONLY(1U)
+
+#define S_H12MAG    0
+#define M_H12MAG    0xfU
+#define V_H12MAG(x) ((x) << S_H12MAG)
+#define G_H12MAG(x) (((x) >> S_H12MAG) & M_H12MAG)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_INTERNAL_STATUS_2 0x32e4
+
+#define S_STNDBYSTAT    15
+#define V_STNDBYSTAT(x) ((x) << S_STNDBYSTAT)
+#define F_STNDBYSTAT    V_STNDBYSTAT(1U)
+
+#define S_CALSDONE    14
+#define V_CALSDONE(x) ((x) << S_CALSDONE)
+#define F_CALSDONE    V_CALSDONE(1U)
+
+#define S_ACISRCCMP    5
+#define V_ACISRCCMP(x) ((x) << S_ACISRCCMP)
+#define F_ACISRCCMP    V_ACISRCCMP(1U)
+
+#define S_PRBSOFFCMP    4
+#define V_PRBSOFFCMP(x) ((x) << S_PRBSOFFCMP)
+#define F_PRBSOFFCMP    V_PRBSOFFCMP(1U)
+
+#define S_CLKALGNCMP    3
+#define V_CLKALGNCMP(x) ((x) << S_CLKALGNCMP)
+#define F_CLKALGNCMP    V_CLKALGNCMP(1U)
+
+#define S_ROTFCMP    2
+#define V_ROTFCMP(x) ((x) << S_ROTFCMP)
+#define F_ROTFCMP    V_ROTFCMP(1U)
+
+#define S_DCDCMP    1
+#define V_DCDCMP(x) ((x) << S_DCDCMP)
+#define F_DCDCMP    V_DCDCMP(1U)
+
+#define S_QCCCMP    0
+#define V_QCCCMP(x) ((x) << S_QCCCMP)
+#define F_QCCCMP    V_QCCCMP(1U)
+
+#define A_MAC_PORT_RX_LINKA_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x32e8
+
+#define S_FCSADJ    6
+#define V_FCSADJ(x) ((x) << S_FCSADJ)
+#define F_FCSADJ    V_FCSADJ(1U)
+
+#define S_CSIND    3
+#define M_CSIND    0x3U
+#define V_CSIND(x) ((x) << S_CSIND)
+#define G_CSIND(x) (((x) >> S_CSIND) & M_CSIND)
+
+#define S_CSVAL    0
+#define M_CSVAL    0x7U
+#define V_CSVAL(x) ((x) << S_CSVAL)
+#define G_CSVAL(x) (((x) >> S_CSVAL) & M_CSVAL)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DCD_CONTROL 0x32ec
+
+#define S_DCDTMDOUT    15
+#define V_DCDTMDOUT(x) ((x) << S_DCDTMDOUT)
+#define F_DCDTMDOUT    V_DCDTMDOUT(1U)
+
+#define S_DCDTOEN    14
+#define V_DCDTOEN(x) ((x) << S_DCDTOEN)
+#define F_DCDTOEN    V_DCDTOEN(1U)
+
+#define S_DCDLOCK    13
+#define V_DCDLOCK(x) ((x) << S_DCDLOCK)
+#define F_DCDLOCK    V_DCDLOCK(1U)
+
+#define S_DCDSTEP    11
+#define M_DCDSTEP    0x3U
+#define V_DCDSTEP(x) ((x) << S_DCDSTEP)
+#define G_DCDSTEP(x) (((x) >> S_DCDSTEP) & M_DCDSTEP)
+
+#define S_DCDALTWPDIS    10
+#define V_DCDALTWPDIS(x) ((x) << S_DCDALTWPDIS)
+#define F_DCDALTWPDIS    V_DCDALTWPDIS(1U)
+
+#define S_DCDOVRDEN    9
+#define V_DCDOVRDEN(x) ((x) << S_DCDOVRDEN)
+#define F_DCDOVRDEN    V_DCDOVRDEN(1U)
+
+#define S_DCCAOVRDEN    8
+#define V_DCCAOVRDEN(x) ((x) << S_DCCAOVRDEN)
+#define F_DCCAOVRDEN    V_DCCAOVRDEN(1U)
+
+#define S_DCDSIGN    6
+#define M_DCDSIGN    0x3U
+#define V_DCDSIGN(x) ((x) << S_DCDSIGN)
+#define G_DCDSIGN(x) (((x) >> S_DCDSIGN) & M_DCDSIGN)
+
+#define S_DCDAMP    0
+#define M_DCDAMP    0x3fU
+#define V_DCDAMP(x) ((x) << S_DCDAMP)
+#define G_DCDAMP(x) (((x) >> S_DCDAMP) & M_DCDAMP)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_DCC_CONTROL 0x32f0
+
+#define S_PRBSMODE    14
+#define M_PRBSMODE    0x3U
+#define V_PRBSMODE(x) ((x) << S_PRBSMODE)
+#define G_PRBSMODE(x) (((x) >> S_PRBSMODE) & M_PRBSMODE)
+
+#define S_RX_LINKA_DCCSTEP_RXCTL    10
+#define M_RX_LINKA_DCCSTEP_RXCTL    0x3U
+#define V_RX_LINKA_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKA_DCCSTEP_RXCTL)
+#define G_RX_LINKA_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKA_DCCSTEP_RXCTL) & M_RX_LINKA_DCCSTEP_RXCTL)
+
+#define S_DCCOVRDEN    9
+#define V_DCCOVRDEN(x) ((x) << S_DCCOVRDEN)
+#define F_DCCOVRDEN    V_DCCOVRDEN(1U)
+
+#define S_RX_LINKA_DCCLOCK_RXCTL    8
+#define V_RX_LINKA_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKA_DCCLOCK_RXCTL)
+#define F_RX_LINKA_DCCLOCK_RXCTL    V_RX_LINKA_DCCLOCK_RXCTL(1U)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_QCC_CONTROL 0x32f4
+
+#define S_DCCQCCMODE    15
+#define V_DCCQCCMODE(x) ((x) << S_DCCQCCMODE)
+#define F_DCCQCCMODE    V_DCCQCCMODE(1U)
+
+#define S_DCCQCCDYN    14
+#define V_DCCQCCDYN(x) ((x) << S_DCCQCCDYN)
+#define F_DCCQCCDYN    V_DCCQCCDYN(1U)
+
+#define S_DCCQCCHOLD    13
+#define V_DCCQCCHOLD(x) ((x) << S_DCCQCCHOLD)
+#define F_DCCQCCHOLD    V_DCCQCCHOLD(1U)
+
+#define S_QCCSTEP    10
+#define M_QCCSTEP    0x3U
+#define V_QCCSTEP(x) ((x) << S_QCCSTEP)
+#define G_QCCSTEP(x) (((x) >> S_QCCSTEP) & M_QCCSTEP)
+
+#define S_QCCOVRDEN    9
+#define V_QCCOVRDEN(x) ((x) << S_QCCOVRDEN)
+#define F_QCCOVRDEN    V_QCCOVRDEN(1U)
+
+#define S_QCCLOCK    8
+#define V_QCCLOCK(x) ((x) << S_QCCLOCK)
+#define F_QCCLOCK    V_QCCLOCK(1U)
+
+#define S_QCCSIGN    6
+#define M_QCCSIGN    0x3U
+#define V_QCCSIGN(x) ((x) << S_QCCSIGN)
+#define G_QCCSIGN(x) (((x) >> S_QCCSIGN) & M_QCCSIGN)
+
+#define S_QCDAMP    0
+#define M_QCDAMP    0x3fU
+#define V_QCDAMP(x) ((x) << S_QCDAMP)
+#define G_QCDAMP(x) (((x) >> S_QCDAMP) & M_QCDAMP)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_MACRO_TEST_CONTROL_2 0x32f8
+
+#define S_DFEDACLSSD    6
+#define V_DFEDACLSSD(x) ((x) << S_DFEDACLSSD)
+#define F_DFEDACLSSD    V_DFEDACLSSD(1U)
+
+#define S_SDLSSD    5
+#define V_SDLSSD(x) ((x) << S_SDLSSD)
+#define F_SDLSSD    V_SDLSSD(1U)
+
+#define S_DFEOBSBIAS    4
+#define V_DFEOBSBIAS(x) ((x) << S_DFEOBSBIAS)
+#define F_DFEOBSBIAS    V_DFEOBSBIAS(1U)
+
+#define S_GBOFSTLSSD    3
+#define V_GBOFSTLSSD(x) ((x) << S_GBOFSTLSSD)
+#define F_GBOFSTLSSD    V_GBOFSTLSSD(1U)
+
+#define S_RXDOBS    2
+#define V_RXDOBS(x) ((x) << S_RXDOBS)
+#define F_RXDOBS    V_RXDOBS(1U)
+
+#define S_ACJZPT    1
+#define V_ACJZPT(x) ((x) << S_ACJZPT)
+#define F_ACJZPT    V_ACJZPT(1U)
+
+#define S_ACJZNT    0
+#define V_ACJZNT(x) ((x) << S_ACJZNT)
+#define F_ACJZNT    V_ACJZNT(1U)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x32f8
+
+#define S_TSTCMP    15
+#define V_TSTCMP(x) ((x) << S_TSTCMP)
+#define F_TSTCMP    V_TSTCMP(1U)
+
+#define A_MAC_PORT_RX_LINKA_RECEIVER_MACRO_TEST_CONTROL_1 0x32fc
+
+#define S_PHSLOCK    10
+#define V_PHSLOCK(x) ((x) << S_PHSLOCK)
+#define F_PHSLOCK    V_PHSLOCK(1U)
+
+#define S_TESTMODE    9
+#define V_TESTMODE(x) ((x) << S_TESTMODE)
+#define F_TESTMODE    V_TESTMODE(1U)
+
+#define S_CALMODE    8
+#define V_CALMODE(x) ((x) << S_CALMODE)
+#define F_CALMODE    V_CALMODE(1U)
+
+#define S_AMPSEL    7
+#define V_AMPSEL(x) ((x) << S_AMPSEL)
+#define F_AMPSEL    V_AMPSEL(1U)
+
+#define S_WHICHNRZ    6
+#define V_WHICHNRZ(x) ((x) << S_WHICHNRZ)
+#define F_WHICHNRZ    V_WHICHNRZ(1U)
+
+#define S_BANKA    5
+#define V_BANKA(x) ((x) << S_BANKA)
+#define F_BANKA    V_BANKA(1U)
+
+#define S_BANKB    4
+#define V_BANKB(x) ((x) << S_BANKB)
+#define F_BANKB    V_BANKB(1U)
+
+#define S_ACJPDP    3
+#define V_ACJPDP(x) ((x) << S_ACJPDP)
+#define F_ACJPDP    V_ACJPDP(1U)
+
+#define S_ACJPDN    2
+#define V_ACJPDN(x) ((x) << S_ACJPDN)
+#define F_ACJPDN    V_ACJPDN(1U)
+
+#define S_LSSDT    1
+#define V_LSSDT(x) ((x) << S_LSSDT)
+#define F_LSSDT    V_LSSDT(1U)
+
+#define S_MTHOLD    0
+#define V_MTHOLD(x) ((x) << S_MTHOLD)
+#define F_MTHOLD    V_MTHOLD(1U)
+
+#define S_CALMODEEDGE    14
+#define V_CALMODEEDGE(x) ((x) << S_CALMODEEDGE)
+#define F_CALMODEEDGE    V_CALMODEEDGE(1U)
+
+#define S_TESTCAP    13
+#define V_TESTCAP(x) ((x) << S_TESTCAP)
+#define F_TESTCAP    V_TESTCAP(1U)
+
+#define S_SNAPEN    12
+#define V_SNAPEN(x) ((x) << S_SNAPEN)
+#define F_SNAPEN    V_SNAPEN(1U)
+
+#define S_ASYNCDIR    11
+#define V_ASYNCDIR(x) ((x) << S_ASYNCDIR)
+#define F_ASYNCDIR    V_ASYNCDIR(1U)
+
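+/*
+ * The RX_LINKB block below mirrors RX_LINKA at a 0x100 byte offset
+ * (0x3300 versus 0x3200).  Only the field macros whose layout changed
+ * (the T6_-prefixed duplicates) are redefined; all other fields reuse
+ * the LINKA definitions above.
+ */
+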
+#define A_MAC_PORT_RX_LINKB_RECEIVER_CONFIGURATION_MODE 0x3300
+#define A_MAC_PORT_RX_LINKB_RECEIVER_TEST_CONTROL 0x3304
+#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_CONTROL 0x3308
+#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_OFFSET_CONTROL 0x330c
+
+#define S_T6_TMSCAL    8
+#define M_T6_TMSCAL    0x3U
+#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
+#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
+
+#define S_T6_APADJ    7
+#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
+#define F_T6_APADJ    V_T6_APADJ(1U)
+
+#define S_T6_RSEL    6
+#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
+#define F_T6_RSEL    V_T6_RSEL(1U)
+
+#define S_T6_PHOFFS    0
+#define M_T6_PHOFFS    0x3fU
+#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
+#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
+
+#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_1 0x3310
+#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_2 0x3314
+#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3318
+#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x331c
+#define A_MAC_PORT_RX_LINKB_DFE_CONTROL 0x3320
+
+#define S_T6_SPIFMT    8
+#define M_T6_SPIFMT    0xfU
+#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
+#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
+
+#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_1 0x3324
+#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_2 0x3328
+#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_1 0x332c
+
+#define S_T6_WRAPSEL    15
+#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
+#define F_T6_WRAPSEL    V_T6_WRAPSEL(1U)
+
+#define S_T6_PEAK    9
+#define M_T6_PEAK    0x1fU
+#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
+#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
+
+#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_2 0x3330
+
+#define S_T6_T5VGAIN    0
+#define M_T6_T5VGAIN    0x7fU
+#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
+#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
+
+#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_3 0x3334
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_1 0x3338
+#define A_MAC_PORT_RX_LINKB_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3338
+#define A_MAC_PORT_RX_LINKB_RECEIVER_IQAMP_CONTROL_1 0x333c
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_3 0x3340
+#define A_MAC_PORT_RX_LINKB_RECEIVER_IQAMP_CONTROL_2 0x3340
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3344
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DACAP_AND_DACAN 0x3348
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DACA_MIN_AND_DACAZ 0x334c
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DACA_MIN 0x334c
+#define A_MAC_PORT_RX_LINKB_RECEIVER_ADAC_CONTROL 0x3350
+#define A_MAC_PORT_RX_LINKB_RECEIVER_AC_COUPLING_CONTROL 0x3354
+#define A_MAC_PORT_RX_LINKB_RECEIVER_AC_COUPLING_VALUE 0x3358
+#define A_MAC_PORT_RX_LINKB_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x335c
+#define A_MAC_PORT_RX_LINKB_DFE_H1H2H3_LOCAL_OFFSET 0x335c
+#define A_MAC_PORT_RX_LINKB_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3360
+#define A_MAC_PORT_RX_LINKB_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3360
+#define A_MAC_PORT_RX_LINKB_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3364
+#define A_MAC_PORT_RX_LINKB_PEAKED_INTEGRATOR 0x3364
+#define A_MAC_PORT_RX_LINKB_CDR_ANALOG_SWITCH 0x3368
+#define A_MAC_PORT_RX_LINKB_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x336c
+#define A_MAC_PORT_RX_LINKB_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3370
+#define A_MAC_PORT_RX_LINKB_DYNAMIC_DATA_CENTERING_DDC 0x3374
+
+#define S_T6_ODEC    0
+#define M_T6_ODEC    0xfU
+#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
+#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
+
+#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS 0x3378
+
+#define S_RX_LINKB_ACCCMP_RIS    11
+#define V_RX_LINKB_ACCCMP_RIS(x) ((x) << S_RX_LINKB_ACCCMP_RIS)
+#define F_RX_LINKB_ACCCMP_RIS    V_RX_LINKB_ACCCMP_RIS(1U)
+
+#define A_MAC_PORT_RX_LINKB_DFE_FUNCTION_CONTROL_1 0x337c
+#define A_MAC_PORT_RX_LINKB_DFE_FUNCTION_CONTROL_2 0x3380
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_EVN1_EVN2 0x3384
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_CHANNEL 0x3384
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_ODD1_ODD2 0x3388
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_VALUE 0x3388
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_EVN3_EVN4 0x338c
+#define A_MAC_PORT_RX_LINKB_H_COEFFICIENBT_BIST 0x338c
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_ODD3_ODD4 0x3390
+#define A_MAC_PORT_RX_LINKB_AC_CAPACITOR_BIST 0x3390
+
+#define S_RX_LINKB_ACCCMP_BIST    13
+#define V_RX_LINKB_ACCCMP_BIST(x) ((x) << S_RX_LINKB_ACCCMP_BIST)
+#define F_RX_LINKB_ACCCMP_BIST    V_RX_LINKB_ACCCMP_BIST(1U)
+
+#define A_MAC_PORT_RX_LINKB_DFE_E0_AND_E1_OFFSET 0x3394
+#define A_MAC_PORT_RX_LINKB_RECEIVER_LOFF_CONTROL 0x3398
+#define A_MAC_PORT_RX_LINKB_RECEIVER_LOFF_CONTROL_REGISTER 0x3398
+#define A_MAC_PORT_RX_LINKB_RECEIVER_SIGDET_CONTROL 0x339c
+#define A_MAC_PORT_RX_LINKB_RECEIVER_ANALOG_CONTROL_SWITCH 0x33a0
+#define A_MAC_PORT_RX_LINKB_INTEGRATOR_DAC_OFFSET 0x33a4
+#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_CONTROL 0x33a8
+#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS 0x33ac
+
+#define S_T6_EMMD    3
+#define M_T6_EMMD    0x3U
+#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
+#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
+
+#define S_T6_EMBRDY    2
+#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
+#define F_T6_EMBRDY    V_T6_EMBRDY(1U)
+
+#define S_T6_EMBUMP    1
+#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
+#define F_T6_EMBUMP    V_T6_EMBUMP(1U)
+
+#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_ERROR_COUNT 0x33b0
+#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x33b4
+#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x33b8
+#define A_MAC_PORT_RX_LINKB_DFE_FUNCTION_CONTROL_3 0x33bc
+#define A_MAC_PORT_RX_LINKB_DFE_TAP_ENABLE 0x33c0
+#define A_MAC_PORT_RX_LINKB_DFE_TAP_CONTROL 0x33c0
+
+#define S_RX_LINKB_INDEX_DFE_TC    0
+#define M_RX_LINKB_INDEX_DFE_TC    0xfU
+#define V_RX_LINKB_INDEX_DFE_TC(x) ((x) << S_RX_LINKB_INDEX_DFE_TC)
+#define G_RX_LINKB_INDEX_DFE_TC(x) (((x) >> S_RX_LINKB_INDEX_DFE_TC) & M_RX_LINKB_INDEX_DFE_TC)
+
+#define A_MAC_PORT_RX_LINKB_DFE_H1 0x33c4
+#define A_MAC_PORT_RX_LINKB_DFE_TAP 0x33c4
+
+#define S_RX_LINKB_INDEX_DFE_TAP    0
+#define M_RX_LINKB_INDEX_DFE_TAP    0xfU
+#define V_RX_LINKB_INDEX_DFE_TAP(x) ((x) << S_RX_LINKB_INDEX_DFE_TAP)
+#define G_RX_LINKB_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKB_INDEX_DFE_TAP) & M_RX_LINKB_INDEX_DFE_TAP)
+
+#define A_MAC_PORT_RX_LINKB_DFE_H2 0x33c8
+#define A_MAC_PORT_RX_LINKB_DFE_H3 0x33cc
+#define A_MAC_PORT_RX_LINKB_DFE_H4 0x33d0
+#define A_MAC_PORT_RX_LINKB_DFE_H5 0x33d4
+#define A_MAC_PORT_RX_LINKB_DFE_H6_AND_H7 0x33d8
+#define A_MAC_PORT_RX_LINKB_DFE_H8_AND_H9 0x33dc
+#define A_MAC_PORT_RX_LINKB_DFE_H10_AND_H11 0x33e0
+#define A_MAC_PORT_RX_LINKB_DFE_H12 0x33e4
+#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS_2 0x33e4
+#define A_MAC_PORT_RX_LINKB_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x33e8
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DCD_CONTROL 0x33ec
+#define A_MAC_PORT_RX_LINKB_RECEIVER_DCC_CONTROL 0x33f0
+
+#define S_RX_LINKB_DCCSTEP_RXCTL    10
+#define M_RX_LINKB_DCCSTEP_RXCTL    0x3U
+#define V_RX_LINKB_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKB_DCCSTEP_RXCTL)
+#define G_RX_LINKB_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKB_DCCSTEP_RXCTL) & M_RX_LINKB_DCCSTEP_RXCTL)
+
+#define S_RX_LINKB_DCCLOCK_RXCTL    8
+#define V_RX_LINKB_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKB_DCCLOCK_RXCTL)
+#define F_RX_LINKB_DCCLOCK_RXCTL    V_RX_LINKB_DCCLOCK_RXCTL(1U)
+
+#define A_MAC_PORT_RX_LINKB_RECEIVER_QCC_CONTROL 0x33f4
+#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_2 0x33f8
+#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x33f8
+#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_1 0x33fc
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_CONFIGURATION_MODE 0x3400
+
+#define S_T6_T5_TX_RXLOOP    5
+#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
+#define F_T6_T5_TX_RXLOOP    V_T6_T5_TX_RXLOOP(1U)
+
+#define S_T6_T5_TX_BWSEL    2
+#define M_T6_T5_TX_BWSEL    0x3U
+#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
+#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TEST_CONTROL 0x3404
+
+#define S_T6_ERROR    9
+#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
+#define F_T6_ERROR    V_T6_ERROR(1U)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_COEFFICIENT_CONTROL 0x3408
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_MODE_CONTROL 0x340c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3410
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3414
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3418
+
+#define S_T6_CALSSTN    8
+#define M_T6_CALSSTN    0x3fU
+#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
+#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
+
+#define S_T6_CALSSTP    0
+#define M_T6_CALSSTP    0x3fU
+#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
+#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x341c
+
+#define S_T6_DRTOL    2
+#define M_T6_DRTOL    0x7U
+#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
+#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT 0x3420
+
+#define S_T6_NXTT0    0
+#define M_T6_NXTT0    0x3fU
+#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
+#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT 0x3424
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT 0x3428
+
+#define S_T6_NXTT2    0
+#define M_T6_NXTT2    0x3fU
+#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
+#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_3_COEFFICIENT 0x342c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AMPLITUDE 0x3430
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_POLARITY 0x3434
+
+#define S_T6_NXTPOL    0
+#define M_T6_NXTPOL    0xfU
+#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
+#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3438
+
+#define S_T6_C0UPDT    6
+#define M_T6_C0UPDT    0x3U
+#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
+#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
+
+#define S_T6_C2UPDT    2
+#define M_T6_C2UPDT    0x3U
+#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
+#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
+
+#define S_T6_C1UPDT    0
+#define M_T6_C1UPDT    0x3U
+#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
+#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x343c
+
+#define S_T6_C0STAT    6
+#define M_T6_C0STAT    0x3U
+#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
+#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
+
+#define S_T6_C2STAT    2
+#define M_T6_C2STAT    0x3U
+#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
+#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
+
+#define S_T6_C1STAT    0
+#define M_T6_C1STAT    0x3U
+#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
+#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3440
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3440
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3444
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3444
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3448
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3448
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x344c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_APPLIED_TUNE_REGISTER 0x3450
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3458
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3460
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_4X_SEGMENT_APPLIED 0x3460
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3464
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_2X_SEGMENT_APPLIED 0x3464
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3468
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_1X_SEGMENT_APPLIED 0x3468
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x346c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3470
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3470
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3474
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3474
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3478
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x347c
+
+#define S_T6_XADDR    1
+#define M_T6_XADDR    0x1fU
+#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
+#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3480
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3484
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3488
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3488
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x348c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x348c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_OVERRIDE 0x3490
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_APPLIED 0x3494
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_TIME_OUT 0x3498
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AZ_CONTROL 0x349c
+#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x34a0
+
+#define S_T6_DCCTIMEEN    13
+#define M_T6_DCCTIMEEN    0x3U
+#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
+#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
+
+#define S_T6_DCCLOCK    11
+#define M_T6_DCCLOCK    0x3U
+#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
+#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
+
+#define S_T6_DCCOFFSET    8
+#define M_T6_DCCOFFSET    0x7U
+#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
+#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
+
+#define S_TX_LINKC_DCCSTEP_CTL    6
+#define M_TX_LINKC_DCCSTEP_CTL    0x3U
+#define V_TX_LINKC_DCCSTEP_CTL(x) ((x) << S_TX_LINKC_DCCSTEP_CTL)
+#define G_TX_LINKC_DCCSTEP_CTL(x) (((x) >> S_TX_LINKC_DCCSTEP_CTL) & M_TX_LINKC_DCCSTEP_CTL)
+
+#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_OVERRIDE 0x34a4
+#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_APPLIED 0x34a8
+#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_TIME_OUT 0x34ac
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_OVERRIDE 0x34c0
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_4X_OVERRIDE 0x34c8
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_2X_OVERRIDE 0x34cc
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_1X_OVERRIDE 0x34d0
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x34d8
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x34dc
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x34e0
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_5 0x34ec
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_4 0x34f0
+
+#define S_T6_SDOVRD    0
+#define M_T6_SDOVRD    0xffffU
+#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
+#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_3 0x34f4
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_2 0x34f8
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_1 0x34fc
+
+#define S_T6_SDOVRDEN    15
+#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
+#define F_T6_SDOVRDEN    V_T6_SDOVRDEN(1U)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_CONFIGURATION_MODE 0x3500
+
+#define S_T6_T5_TX_RXLOOP    5
+#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
+#define F_T6_T5_TX_RXLOOP    V_T6_T5_TX_RXLOOP(1U)
+
+#define S_T6_T5_TX_BWSEL    2
+#define M_T6_T5_TX_BWSEL    0x3U
+#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
+#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TEST_CONTROL 0x3504
+
+#define S_T6_ERROR    9
+#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
+#define F_T6_ERROR    V_T6_ERROR(1U)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_COEFFICIENT_CONTROL 0x3508
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_MODE_CONTROL 0x350c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3510
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3514
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3518
+
+#define S_T6_CALSSTN    8
+#define M_T6_CALSSTN    0x3fU
+#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
+#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
+
+#define S_T6_CALSSTP    0
+#define M_T6_CALSSTP    0x3fU
+#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
+#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x351c
+
+#define S_T6_DRTOL    2
+#define M_T6_DRTOL    0x7U
+#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
+#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT 0x3520
+
+#define S_T6_NXTT0    0
+#define M_T6_NXTT0    0x3fU
+#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
+#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT 0x3524
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT 0x3528
+
+#define S_T6_NXTT2    0
+#define M_T6_NXTT2    0x3fU
+#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
+#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_3_COEFFICIENT 0x352c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AMPLITUDE 0x3530
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_POLARITY 0x3534
+
+#define S_T6_NXTPOL    0
+#define M_T6_NXTPOL    0xfU
+#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
+#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3538
+
+#define S_T6_C0UPDT    6
+#define M_T6_C0UPDT    0x3U
+#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
+#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
+
+#define S_T6_C2UPDT    2
+#define M_T6_C2UPDT    0x3U
+#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
+#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
+
+#define S_T6_C1UPDT    0
+#define M_T6_C1UPDT    0x3U
+#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
+#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x353c
+
+#define S_T6_C0STAT    6
+#define M_T6_C0STAT    0x3U
+#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
+#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
+
+#define S_T6_C2STAT    2
+#define M_T6_C2STAT    0x3U
+#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
+#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
+
+#define S_T6_C1STAT    0
+#define M_T6_C1STAT    0x3U
+#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
+#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3540
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3540
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3544
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3544
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3548
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3548
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x354c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_APPLIED_TUNE_REGISTER 0x3550
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3558
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3560
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_4X_SEGMENT_APPLIED 0x3560
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3564
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_2X_SEGMENT_APPLIED 0x3564
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3568
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_1X_SEGMENT_APPLIED 0x3568
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x356c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3570
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3570
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3574
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3574
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3578
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x357c
+
+#define S_T6_XADDR    1
+#define M_T6_XADDR    0x1fU
+#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
+#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3580
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3584
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3588
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3588
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x358c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x358c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_OVERRIDE 0x3590
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_APPLIED 0x3594
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_TIME_OUT 0x3598
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AZ_CONTROL 0x359c
+#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x35a0
+
+#define S_T6_DCCTIMEEN    13
+#define M_T6_DCCTIMEEN    0x3U
+#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
+#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
+
+#define S_T6_DCCLOCK    11
+#define M_T6_DCCLOCK    0x3U
+#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
+#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
+
+#define S_T6_DCCOFFSET    8
+#define M_T6_DCCOFFSET    0x7U
+#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
+#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
+
+#define S_TX_LINKD_DCCSTEP_CTL    6
+#define M_TX_LINKD_DCCSTEP_CTL    0x3U
+#define V_TX_LINKD_DCCSTEP_CTL(x) ((x) << S_TX_LINKD_DCCSTEP_CTL)
+#define G_TX_LINKD_DCCSTEP_CTL(x) (((x) >> S_TX_LINKD_DCCSTEP_CTL) & M_TX_LINKD_DCCSTEP_CTL)
+
+#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_OVERRIDE 0x35a4
+#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_APPLIED 0x35a8
+#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_TIME_OUT 0x35ac
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_OVERRIDE 0x35c0
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_4X_OVERRIDE 0x35c8
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_2X_OVERRIDE 0x35cc
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_1X_OVERRIDE 0x35d0
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x35d8
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x35dc
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x35e0
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_5 0x35ec
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_4 0x35f0
+
+#define S_T6_SDOVRD    0
+#define M_T6_SDOVRD    0xffffU
+#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
+#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_3 0x35f4
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_2 0x35f8
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_1 0x35fc
+
+#define S_T6_SDOVRDEN    15
+#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
+#define F_T6_SDOVRDEN    V_T6_SDOVRDEN(1U)
+
+#define A_MAC_PORT_RX_LINKC_RECEIVER_CONFIGURATION_MODE 0x3600
+#define A_MAC_PORT_RX_LINKC_RECEIVER_TEST_CONTROL 0x3604
+#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_CONTROL 0x3608
+#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_OFFSET_CONTROL 0x360c
+
+#define S_T6_TMSCAL    8
+#define M_T6_TMSCAL    0x3U
+#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
+#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
+
+#define S_T6_APADJ    7
+#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
+#define F_T6_APADJ    V_T6_APADJ(1U)
+
+#define S_T6_RSEL    6
+#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
+#define F_T6_RSEL    V_T6_RSEL(1U)
+
+#define S_T6_PHOFFS    0
+#define M_T6_PHOFFS    0x3fU
+#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
+#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
+
+#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_1 0x3610
+#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_2 0x3614
+#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3618
+#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x361c
+#define A_MAC_PORT_RX_LINKC_DFE_CONTROL 0x3620
+
+#define S_T6_SPIFMT    8
+#define M_T6_SPIFMT    0xfU
+#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
+#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
+
+#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_1 0x3624
+#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_2 0x3628
+#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_1 0x362c
+
+#define S_T6_WRAPSEL    15
+#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
+#define F_T6_WRAPSEL    V_T6_WRAPSEL(1U)
+
+#define S_T6_PEAK    9
+#define M_T6_PEAK    0x1fU
+#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
+#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
+
+#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_2 0x3630
+
+#define S_T6_T5VGAIN    0
+#define M_T6_T5VGAIN    0x7fU
+#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
+#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
+
+#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_3 0x3634
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_1 0x3638
+#define A_MAC_PORT_RX_LINKC_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3638
+#define A_MAC_PORT_RX_LINKC_RECEIVER_IQAMP_CONTROL_1 0x363c
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_3 0x3640
+#define A_MAC_PORT_RX_LINKC_RECEIVER_IQAMP_CONTROL_2 0x3640
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3644
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DACAP_AND_DACAN 0x3648
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DACA_MIN_AND_DACAZ 0x364c
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DACA_MIN 0x364c
+#define A_MAC_PORT_RX_LINKC_RECEIVER_ADAC_CONTROL 0x3650
+#define A_MAC_PORT_RX_LINKC_RECEIVER_AC_COUPLING_CONTROL 0x3654
+#define A_MAC_PORT_RX_LINKC_RECEIVER_AC_COUPLING_VALUE 0x3658
+#define A_MAC_PORT_RX_LINKC_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x365c
+#define A_MAC_PORT_RX_LINKC_DFE_H1H2H3_LOCAL_OFFSET 0x365c
+#define A_MAC_PORT_RX_LINKC_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3660
+#define A_MAC_PORT_RX_LINKC_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3660
+#define A_MAC_PORT_RX_LINKC_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3664
+#define A_MAC_PORT_RX_LINKC_PEAKED_INTEGRATOR 0x3664
+#define A_MAC_PORT_RX_LINKC_CDR_ANALOG_SWITCH 0x3668
+#define A_MAC_PORT_RX_LINKC_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x366c
+#define A_MAC_PORT_RX_LINKC_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3670
+#define A_MAC_PORT_RX_LINKC_DYNAMIC_DATA_CENTERING_DDC 0x3674
+
+#define S_T6_ODEC    0
+#define M_T6_ODEC    0xfU
+#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
+#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
+
+#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS 0x3678
+
+#define S_RX_LINKC_ACCCMP_RIS    11
+#define V_RX_LINKC_ACCCMP_RIS(x) ((x) << S_RX_LINKC_ACCCMP_RIS)
+#define F_RX_LINKC_ACCCMP_RIS    V_RX_LINKC_ACCCMP_RIS(1U)
+
+#define A_MAC_PORT_RX_LINKC_DFE_FUNCTION_CONTROL_1 0x367c
+#define A_MAC_PORT_RX_LINKC_DFE_FUNCTION_CONTROL_2 0x3680
+#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_EVN1_EVN2 0x3684
+#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_CHANNEL 0x3684
+#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_ODD1_ODD2 0x3688
+#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_VALUE 0x3688
+#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_EVN3_EVN4 0x368c
+#define A_MAC_PORT_RX_LINKC_H_COEFFICIENBT_BIST 0x368c
+#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_ODD3_ODD4 0x3690
+#define A_MAC_PORT_RX_LINKC_AC_CAPACITOR_BIST 0x3690
+
+#define S_RX_LINKC_ACCCMP_BIST    13
+#define V_RX_LINKC_ACCCMP_BIST(x) ((x) << S_RX_LINKC_ACCCMP_BIST)
+#define F_RX_LINKC_ACCCMP_BIST    V_RX_LINKC_ACCCMP_BIST(1U)
+
+#define A_MAC_PORT_RX_LINKC_DFE_E0_AND_E1_OFFSET 0x3694
+#define A_MAC_PORT_RX_LINKC_RECEIVER_LOFF_CONTROL 0x3698
+#define A_MAC_PORT_RX_LINKC_RECEIVER_LOFF_CONTROL_REGISTER 0x3698
+#define A_MAC_PORT_RX_LINKC_RECEIVER_SIGDET_CONTROL 0x369c
+#define A_MAC_PORT_RX_LINKC_RECEIVER_ANALOG_CONTROL_SWITCH 0x36a0
+#define A_MAC_PORT_RX_LINKC_INTEGRATOR_DAC_OFFSET 0x36a4
+#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_CONTROL 0x36a8
+#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS 0x36ac
+
+#define S_T6_EMMD    3
+#define M_T6_EMMD    0x3U
+#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
+#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
+
+#define S_T6_EMBRDY    2
+#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
+#define F_T6_EMBRDY    V_T6_EMBRDY(1U)
+
+#define S_T6_EMBUMP    1
+#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
+#define F_T6_EMBUMP    V_T6_EMBUMP(1U)
+
+#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_ERROR_COUNT 0x36b0
+#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x36b4
+#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x36b8
+#define A_MAC_PORT_RX_LINKC_DFE_FUNCTION_CONTROL_3 0x36bc
+#define A_MAC_PORT_RX_LINKC_DFE_TAP_ENABLE 0x36c0
+#define A_MAC_PORT_RX_LINKC_DFE_TAP_CONTROL 0x36c0
+
+#define S_RX_LINKC_INDEX_DFE_TC    0
+#define M_RX_LINKC_INDEX_DFE_TC    0xfU
+#define V_RX_LINKC_INDEX_DFE_TC(x) ((x) << S_RX_LINKC_INDEX_DFE_TC)
+#define G_RX_LINKC_INDEX_DFE_TC(x) (((x) >> S_RX_LINKC_INDEX_DFE_TC) & M_RX_LINKC_INDEX_DFE_TC)
+
+#define A_MAC_PORT_RX_LINKC_DFE_H1 0x36c4
+#define A_MAC_PORT_RX_LINKC_DFE_TAP 0x36c4
+
+#define S_RX_LINKC_INDEX_DFE_TAP    0
+#define M_RX_LINKC_INDEX_DFE_TAP    0xfU
+#define V_RX_LINKC_INDEX_DFE_TAP(x) ((x) << S_RX_LINKC_INDEX_DFE_TAP)
+#define G_RX_LINKC_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKC_INDEX_DFE_TAP) & M_RX_LINKC_INDEX_DFE_TAP)
+
+#define A_MAC_PORT_RX_LINKC_DFE_H2 0x36c8
+#define A_MAC_PORT_RX_LINKC_DFE_H3 0x36cc
+#define A_MAC_PORT_RX_LINKC_DFE_H4 0x36d0
+#define A_MAC_PORT_RX_LINKC_DFE_H5 0x36d4
+#define A_MAC_PORT_RX_LINKC_DFE_H6_AND_H7 0x36d8
+#define A_MAC_PORT_RX_LINKC_DFE_H8_AND_H9 0x36dc
+#define A_MAC_PORT_RX_LINKC_DFE_H10_AND_H11 0x36e0
+#define A_MAC_PORT_RX_LINKC_DFE_H12 0x36e4
+#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS_2 0x36e4
+#define A_MAC_PORT_RX_LINKC_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x36e8
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DCD_CONTROL 0x36ec
+#define A_MAC_PORT_RX_LINKC_RECEIVER_DCC_CONTROL 0x36f0
+
+#define S_RX_LINKC_DCCSTEP_RXCTL    10
+#define M_RX_LINKC_DCCSTEP_RXCTL    0x3U
+#define V_RX_LINKC_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKC_DCCSTEP_RXCTL)
+#define G_RX_LINKC_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKC_DCCSTEP_RXCTL) & M_RX_LINKC_DCCSTEP_RXCTL)
+
+#define S_RX_LINKC_DCCLOCK_RXCTL    8
+#define V_RX_LINKC_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKC_DCCLOCK_RXCTL)
+#define F_RX_LINKC_DCCLOCK_RXCTL    V_RX_LINKC_DCCLOCK_RXCTL(1U)
+
+#define A_MAC_PORT_RX_LINKC_RECEIVER_QCC_CONTROL 0x36f4
+#define A_MAC_PORT_RX_LINKC_RECEIVER_MACRO_TEST_CONTROL_2 0x36f8
+#define A_MAC_PORT_RX_LINKC_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x36f8
+#define A_MAC_PORT_RX_LINKC_RECEIVER_MACRO_TEST_CONTROL_1 0x36fc
+#define A_MAC_PORT_RX_LINKD_RECEIVER_CONFIGURATION_MODE 0x3700
+#define A_MAC_PORT_RX_LINKD_RECEIVER_TEST_CONTROL 0x3704
+#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_CONTROL 0x3708
+#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_OFFSET_CONTROL 0x370c
+
+#define S_T6_TMSCAL    8
+#define M_T6_TMSCAL    0x3U
+#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
+#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
+
+#define S_T6_APADJ    7
+#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
+#define F_T6_APADJ    V_T6_APADJ(1U)
+
+#define S_T6_RSEL    6
+#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
+#define F_T6_RSEL    V_T6_RSEL(1U)
+
+#define S_T6_PHOFFS    0
+#define M_T6_PHOFFS    0x3fU
+#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
+#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
+
+#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_1 0x3710
+#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_2 0x3714
+#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3718
+#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x371c
+#define A_MAC_PORT_RX_LINKD_DFE_CONTROL 0x3720
+
+#define S_T6_SPIFMT    8
+#define M_T6_SPIFMT    0xfU
+#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
+#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
+
+#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_1 0x3724
+#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_2 0x3728
+#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_1 0x372c
+
+#define S_T6_WRAPSEL    15
+#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
+#define F_T6_WRAPSEL    V_T6_WRAPSEL(1U)
+
+#define S_T6_PEAK    9
+#define M_T6_PEAK    0x1fU
+#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
+#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
+
+#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_2 0x3730
+
+#define S_T6_T5VGAIN    0
+#define M_T6_T5VGAIN    0x7fU
+#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
+#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
+
+#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_3 0x3734
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_1 0x3738
+#define A_MAC_PORT_RX_LINKD_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3738
+#define A_MAC_PORT_RX_LINKD_RECEIVER_IQAMP_CONTROL_1 0x373c
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_3 0x3740
+#define A_MAC_PORT_RX_LINKD_RECEIVER_IQAMP_CONTROL_2 0x3740
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3744
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DACAP_AND_DACAN 0x3748
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DACA_MIN_AND_DACAZ 0x374c
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DACA_MIN 0x374c
+#define A_MAC_PORT_RX_LINKD_RECEIVER_ADAC_CONTROL 0x3750
+#define A_MAC_PORT_RX_LINKD_RECEIVER_AC_COUPLING_CONTROL 0x3754
+#define A_MAC_PORT_RX_LINKD_RECEIVER_AC_COUPLING_VALUE 0x3758
+#define A_MAC_PORT_RX_LINKD_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x375c
+#define A_MAC_PORT_RX_LINKD_DFE_H1H2H3_LOCAL_OFFSET 0x375c
+#define A_MAC_PORT_RX_LINKD_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3760
+#define A_MAC_PORT_RX_LINKD_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3760
+#define A_MAC_PORT_RX_LINKD_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3764
+#define A_MAC_PORT_RX_LINKD_PEAKED_INTEGRATOR 0x3764
+#define A_MAC_PORT_RX_LINKD_CDR_ANALOG_SWITCH 0x3768
+#define A_MAC_PORT_RX_LINKD_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x376c
+#define A_MAC_PORT_RX_LINKD_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3770
+#define A_MAC_PORT_RX_LINKD_DYNAMIC_DATA_CENTERING_DDC 0x3774
+
+#define S_T6_ODEC    0
+#define M_T6_ODEC    0xfU
+#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
+#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
+
+#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS 0x3778
+
+#define S_RX_LINKD_ACCCMP_RIS    11
+#define V_RX_LINKD_ACCCMP_RIS(x) ((x) << S_RX_LINKD_ACCCMP_RIS)
+#define F_RX_LINKD_ACCCMP_RIS    V_RX_LINKD_ACCCMP_RIS(1U)
+
+#define A_MAC_PORT_RX_LINKD_DFE_FUNCTION_CONTROL_1 0x377c
+#define A_MAC_PORT_RX_LINKD_DFE_FUNCTION_CONTROL_2 0x3780
+#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_EVN1_EVN2 0x3784
+#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_CHANNEL 0x3784
+#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_ODD1_ODD2 0x3788
+#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_VALUE 0x3788
+#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_EVN3_EVN4 0x378c
+#define A_MAC_PORT_RX_LINKD_H_COEFFICIENBT_BIST 0x378c
+#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_ODD3_ODD4 0x3790
+#define A_MAC_PORT_RX_LINKD_AC_CAPACITOR_BIST 0x3790
+
+#define S_RX_LINKD_ACCCMP_BIST    13
+#define V_RX_LINKD_ACCCMP_BIST(x) ((x) << S_RX_LINKD_ACCCMP_BIST)
+#define F_RX_LINKD_ACCCMP_BIST    V_RX_LINKD_ACCCMP_BIST(1U)
+
+#define A_MAC_PORT_RX_LINKD_DFE_E0_AND_E1_OFFSET 0x3794
+#define A_MAC_PORT_RX_LINKD_RECEIVER_LOFF_CONTROL 0x3798
+#define A_MAC_PORT_RX_LINKD_RECEIVER_LOFF_CONTROL_REGISTER 0x3798
+#define A_MAC_PORT_RX_LINKD_RECEIVER_SIGDET_CONTROL 0x379c
+#define A_MAC_PORT_RX_LINKD_RECEIVER_ANALOG_CONTROL_SWITCH 0x37a0
+#define A_MAC_PORT_RX_LINKD_INTEGRATOR_DAC_OFFSET 0x37a4
+#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_CONTROL 0x37a8
+#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS 0x37ac
+
+#define S_T6_EMMD    3
+#define M_T6_EMMD    0x3U
+#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
+#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
+
+#define S_T6_EMBRDY    2
+#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
+#define F_T6_EMBRDY    V_T6_EMBRDY(1U)
+
+#define S_T6_EMBUMP    1
+#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
+#define F_T6_EMBUMP    V_T6_EMBUMP(1U)
+
+#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_ERROR_COUNT 0x37b0
+#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x37b4
+#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x37b8
+#define A_MAC_PORT_RX_LINKD_DFE_FUNCTION_CONTROL_3 0x37bc
+#define A_MAC_PORT_RX_LINKD_DFE_TAP_ENABLE 0x37c0
+#define A_MAC_PORT_RX_LINKD_DFE_TAP_CONTROL 0x37c0
+
+#define S_RX_LINKD_INDEX_DFE_TC    0
+#define M_RX_LINKD_INDEX_DFE_TC    0xfU
+#define V_RX_LINKD_INDEX_DFE_TC(x) ((x) << S_RX_LINKD_INDEX_DFE_TC)
+#define G_RX_LINKD_INDEX_DFE_TC(x) (((x) >> S_RX_LINKD_INDEX_DFE_TC) & M_RX_LINKD_INDEX_DFE_TC)
+
+#define A_MAC_PORT_RX_LINKD_DFE_H1 0x37c4
+#define A_MAC_PORT_RX_LINKD_DFE_TAP 0x37c4
+
+#define S_RX_LINKD_INDEX_DFE_TAP    0
+#define M_RX_LINKD_INDEX_DFE_TAP    0xfU
+#define V_RX_LINKD_INDEX_DFE_TAP(x) ((x) << S_RX_LINKD_INDEX_DFE_TAP)
+#define G_RX_LINKD_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKD_INDEX_DFE_TAP) & M_RX_LINKD_INDEX_DFE_TAP)
+
+#define A_MAC_PORT_RX_LINKD_DFE_H2 0x37c8
+#define A_MAC_PORT_RX_LINKD_DFE_H3 0x37cc
+#define A_MAC_PORT_RX_LINKD_DFE_H4 0x37d0
+#define A_MAC_PORT_RX_LINKD_DFE_H5 0x37d4
+#define A_MAC_PORT_RX_LINKD_DFE_H6_AND_H7 0x37d8
+#define A_MAC_PORT_RX_LINKD_DFE_H8_AND_H9 0x37dc
+#define A_MAC_PORT_RX_LINKD_DFE_H10_AND_H11 0x37e0
+#define A_MAC_PORT_RX_LINKD_DFE_H12 0x37e4
+#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS_2 0x37e4
+#define A_MAC_PORT_RX_LINKD_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x37e8
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DCD_CONTROL 0x37ec
+#define A_MAC_PORT_RX_LINKD_RECEIVER_DCC_CONTROL 0x37f0
+
+#define S_RX_LINKD_DCCSTEP_RXCTL    10
+#define M_RX_LINKD_DCCSTEP_RXCTL    0x3U
+#define V_RX_LINKD_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKD_DCCSTEP_RXCTL)
+#define G_RX_LINKD_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKD_DCCSTEP_RXCTL) & M_RX_LINKD_DCCSTEP_RXCTL)
+
+#define S_RX_LINKD_DCCLOCK_RXCTL    8
+#define V_RX_LINKD_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKD_DCCLOCK_RXCTL)
+#define F_RX_LINKD_DCCLOCK_RXCTL    V_RX_LINKD_DCCLOCK_RXCTL(1U)
+
+#define A_MAC_PORT_RX_LINKD_RECEIVER_QCC_CONTROL 0x37f4
+#define A_MAC_PORT_RX_LINKD_RECEIVER_MACRO_TEST_CONTROL_2 0x37f8
+#define A_MAC_PORT_RX_LINKD_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x37f8
+#define A_MAC_PORT_RX_LINKD_RECEIVER_MACRO_TEST_CONTROL_1 0x37fc
+#define A_MAC_PORT_ANALOG_TEST_MUX 0x3814
+#define A_MAC_PORT_BANDGAP_CONTROL 0x382c
+
+#define S_T5BGCTL    0
+#define M_T5BGCTL    0xfU
+#define V_T5BGCTL(x) ((x) << S_T5BGCTL)
+#define G_T5BGCTL(x) (((x) >> S_T5BGCTL) & M_T5BGCTL)
+
+#define A_MAC_PORT_PLLREFSEL_CONTROL 0x3854
+
+#define S_REFSEL    0
+#define M_REFSEL    0x7U
+#define V_REFSEL(x) ((x) << S_REFSEL)
+#define G_REFSEL(x) (((x) >> S_REFSEL) & M_REFSEL)
+
+#define A_MAC_PORT_REFISINK_CONTROL 0x3858
+
+#define S_REFISINK    0
+#define M_REFISINK    0x3fU
+#define V_REFISINK(x) ((x) << S_REFISINK)
+#define G_REFISINK(x) (((x) >> S_REFISINK) & M_REFISINK)
+
+#define A_MAC_PORT_REFISRC_CONTROL 0x385c
+
+#define S_REFISRC    0
+#define M_REFISRC    0x3fU
+#define V_REFISRC(x) ((x) << S_REFISRC)
+#define G_REFISRC(x) (((x) >> S_REFISRC) & M_REFISRC)
+
+#define A_MAC_PORT_REFVREG_CONTROL 0x3860
+
+#define S_REFVREG    0
+#define M_REFVREG    0x3fU
+#define V_REFVREG(x) ((x) << S_REFVREG)
+#define G_REFVREG(x) (((x) >> S_REFVREG) & M_REFVREG)
+
+#define A_MAC_PORT_VBGENDOC_CONTROL 0x3864
+
+#define S_BGCLKSEL    2
+#define V_BGCLKSEL(x) ((x) << S_BGCLKSEL)
+#define F_BGCLKSEL    V_BGCLKSEL(1U)
+
+#define S_VBGENDOC    0
+#define M_VBGENDOC    0x3U
+#define V_VBGENDOC(x) ((x) << S_VBGENDOC)
+#define G_VBGENDOC(x) (((x) >> S_VBGENDOC) & M_VBGENDOC)
+
+#define A_MAC_PORT_VREFTUNE_CONTROL 0x3868
+
+#define S_VREFTUNE    0
+#define M_VREFTUNE    0xfU
+#define V_VREFTUNE(x) ((x) << S_VREFTUNE)
+#define G_VREFTUNE(x) (((x) >> S_VREFTUNE) & M_VREFTUNE)
+
+#define A_MAC_PORT_RESISTOR_CALIBRATION_CONTROL 0x3880
+
+#define S_RCCTL1    5
+#define V_RCCTL1(x) ((x) << S_RCCTL1)
+#define F_RCCTL1    V_RCCTL1(1U)
+
+#define S_RCCTL0    4
+#define V_RCCTL0(x) ((x) << S_RCCTL0)
+#define F_RCCTL0    V_RCCTL0(1U)
+
+#define S_RCAMP1    3
+#define V_RCAMP1(x) ((x) << S_RCAMP1)
+#define F_RCAMP1    V_RCAMP1(1U)
+
+#define S_RCAMP0    2
+#define V_RCAMP0(x) ((x) << S_RCAMP0)
+#define F_RCAMP0    V_RCAMP0(1U)
+
+#define S_RCAMPEN    1
+#define V_RCAMPEN(x) ((x) << S_RCAMPEN)
+#define F_RCAMPEN    V_RCAMPEN(1U)
+
+#define S_RCRST    0
+#define V_RCRST(x) ((x) << S_RCRST)
+#define F_RCRST    V_RCRST(1U)
+
+#define A_MAC_PORT_IMPEDENCE_CALIBRATION_CONTROL 0x3880
+
+#define S_FRCCAL_COMP    6
+#define V_FRCCAL_COMP(x) ((x) << S_FRCCAL_COMP)
+#define F_FRCCAL_COMP    V_FRCCAL_COMP(1U)
+
+#define S_IC_FRCERR    5
+#define V_IC_FRCERR(x) ((x) << S_IC_FRCERR)
+#define F_IC_FRCERR    V_IC_FRCERR(1U)
+
+#define S_CAL_BISTENAB    4
+#define V_CAL_BISTENAB(x) ((x) << S_CAL_BISTENAB)
+#define F_CAL_BISTENAB    V_CAL_BISTENAB(1U)
+
+#define S_RCAL_RESET    0
+#define V_RCAL_RESET(x) ((x) << S_RCAL_RESET)
+#define F_RCAL_RESET    V_RCAL_RESET(1U)
+
+#define A_MAC_PORT_RESISTOR_CALIBRATION_STATUS_1 0x3884
+
+#define S_RCERR    1
+#define V_RCERR(x) ((x) << S_RCERR)
+#define F_RCERR    V_RCERR(1U)
+
+#define S_RCCOMP    0
+#define V_RCCOMP(x) ((x) << S_RCCOMP)
+#define F_RCCOMP    V_RCCOMP(1U)
+
+#define A_MAC_PORT_IMPEDENCE_CALIBRATION_STATUS_1 0x3884
+
+#define S_RCALBENAB    3
+#define V_RCALBENAB(x) ((x) << S_RCALBENAB)
+#define F_RCALBENAB    V_RCALBENAB(1U)
+
+#define S_RCALBUSY    2
+#define V_RCALBUSY(x) ((x) << S_RCALBUSY)
+#define F_RCALBUSY    V_RCALBUSY(1U)
+
+#define S_RCALERR    1
+#define V_RCALERR(x) ((x) << S_RCALERR)
+#define F_RCALERR    V_RCALERR(1U)
+
+#define S_RCALCOMP    0
+#define V_RCALCOMP(x) ((x) << S_RCALCOMP)
+#define F_RCALCOMP    V_RCALCOMP(1U)
+
+#define A_MAC_PORT_RESISTOR_CALIBRATION_STATUS_2 0x3888
+
+#define S_RESREG2    0
+#define M_RESREG2    0xffU
+#define V_RESREG2(x) ((x) << S_RESREG2)
+#define G_RESREG2(x) (((x) >> S_RESREG2) & M_RESREG2)
+
+#define A_MAC_PORT_IMPEDENCE_CALIBRATION_STATUS_2 0x3888
+
+#define S_T6_RESREG2    0
+#define M_T6_RESREG2    0x3fU
+#define V_T6_RESREG2(x) ((x) << S_T6_RESREG2)
+#define G_T6_RESREG2(x) (((x) >> S_T6_RESREG2) & M_T6_RESREG2)
+
+#define A_MAC_PORT_RESISTOR_CALIBRATION_STATUS_3 0x388c
+
+#define S_RESREG3    0
+#define M_RESREG3    0xffU
+#define V_RESREG3(x) ((x) << S_RESREG3)
+#define G_RESREG3(x) (((x) >> S_RESREG3) & M_RESREG3)
+
+#define A_MAC_PORT_IMPEDENCE_CALIBRATION_STATUS_3 0x388c
+
+#define S_T6_RESREG3    0
+#define M_T6_RESREG3    0x3fU
+#define V_T6_RESREG3(x) ((x) << S_T6_RESREG3)
+#define G_T6_RESREG3(x) (((x) >> S_T6_RESREG3) & M_T6_RESREG3)
+
+#define A_MAC_PORT_INEQUALITY_CONTROL_AND_RESULT 0x38c0
+
+#define S_ISGT    7
+#define V_ISGT(x) ((x) << S_ISGT)
+#define F_ISGT    V_ISGT(1U)
+
+#define S_ISLT    6
+#define V_ISLT(x) ((x) << S_ISLT)
+#define F_ISLT    V_ISLT(1U)
+
+#define S_ISEQ    5
+#define V_ISEQ(x) ((x) << S_ISEQ)
+#define F_ISEQ    V_ISEQ(1U)
+
+#define S_ISVAL    3
+#define M_ISVAL    0x3U
+#define V_ISVAL(x) ((x) << S_ISVAL)
+#define G_ISVAL(x) (((x) >> S_ISVAL) & M_ISVAL)
+
+#define S_GTORLT    1
+#define M_GTORLT    0x3U
+#define V_GTORLT(x) ((x) << S_GTORLT)
+#define G_GTORLT(x) (((x) >> S_GTORLT) & M_GTORLT)
+
+#define S_INEQ    0
+#define V_INEQ(x) ((x) << S_INEQ)
+#define F_INEQ    V_INEQ(1U)
+
+#define A_MAC_PORT_INEQUALITY_LOW_LIMIT 0x38c4
+
+#define S_LLIM    0
+#define M_LLIM    0xffffU
+#define V_LLIM(x) ((x) << S_LLIM)
+#define G_LLIM(x) (((x) >> S_LLIM) & M_LLIM)
+
+#define A_MAC_PORT_INEQUALITY_LOW_LIMIT_MASK 0x38c8
+
+#define S_LMSK    0
+#define M_LMSK    0xffffU
+#define V_LMSK(x) ((x) << S_LMSK)
+#define G_LMSK(x) (((x) >> S_LMSK) & M_LMSK)
+
+#define A_MAC_PORT_INEQUALITY_HIGH_LIMIT 0x38cc
+
+#define S_HLIM    0
+#define M_HLIM    0xffffU
+#define V_HLIM(x) ((x) << S_HLIM)
+#define G_HLIM(x) (((x) >> S_HLIM) & M_HLIM)
+
+#define A_MAC_PORT_INEQUALITY_HIGH_LIMIT_MASK 0x38d0
+
+#define S_HMSK    0
+#define M_HMSK    0xffffU
+#define V_HMSK(x) ((x) << S_HMSK)
+#define G_HMSK(x) (((x) >> S_HMSK) & M_HMSK)
+
+#define A_MAC_PORT_MACRO_TEST_CONTROL_6 0x38e8
+
+#define S_LBIST    7
+#define V_LBIST(x) ((x) << S_LBIST)
+#define F_LBIST    V_LBIST(1U)
+
+#define S_LOGICTEST    6
+#define V_LOGICTEST(x) ((x) << S_LOGICTEST)
+#define F_LOGICTEST    V_LOGICTEST(1U)
+
+#define S_MAVDHI    5
+#define V_MAVDHI(x) ((x) << S_MAVDHI)
+#define F_MAVDHI    V_MAVDHI(1U)
+
+#define S_AUXEN    4
+#define V_AUXEN(x) ((x) << S_AUXEN)
+#define F_AUXEN    V_AUXEN(1U)
+
+#define S_JTAGMD    3
+#define V_JTAGMD(x) ((x) << S_JTAGMD)
+#define F_JTAGMD    V_JTAGMD(1U)
+
+#define S_RXACMODE    2
+#define V_RXACMODE(x) ((x) << S_RXACMODE)
+#define F_RXACMODE    V_RXACMODE(1U)
+
+#define S_HSSACJPC    1
+#define V_HSSACJPC(x) ((x) << S_HSSACJPC)
+#define F_HSSACJPC    V_HSSACJPC(1U)
+
+#define S_HSSACJAC    0
+#define V_HSSACJAC(x) ((x) << S_HSSACJAC)
+#define F_HSSACJAC    V_HSSACJAC(1U)
+
+#define A_MAC_PORT_MACRO_TEST_CONTROL_5 0x38ec
+
+#define S_REFVALIDD    6
+#define V_REFVALIDD(x) ((x) << S_REFVALIDD)
+#define F_REFVALIDD    V_REFVALIDD(1U)
+
+#define S_REFVALIDC    5
+#define V_REFVALIDC(x) ((x) << S_REFVALIDC)
+#define F_REFVALIDC    V_REFVALIDC(1U)
+
+#define S_REFVALIDB    4
+#define V_REFVALIDB(x) ((x) << S_REFVALIDB)
+#define F_REFVALIDB    V_REFVALIDB(1U)
+
+#define S_REFVALIDA    3
+#define V_REFVALIDA(x) ((x) << S_REFVALIDA)
+#define F_REFVALIDA    V_REFVALIDA(1U)
+
+#define S_REFSELRESET    2
+#define V_REFSELRESET(x) ((x) << S_REFSELRESET)
+#define F_REFSELRESET    V_REFSELRESET(1U)
+
+#define S_SOFTRESET    1
+#define V_SOFTRESET(x) ((x) << S_SOFTRESET)
+#define F_SOFTRESET    V_SOFTRESET(1U)
+
+#define S_MACROTEST    0
+#define V_MACROTEST(x) ((x) << S_MACROTEST)
+#define F_MACROTEST    V_MACROTEST(1U)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_CONFIGURATION_MODE 0x3900
+
+#define S_T6_T5_TX_RXLOOP    5
+#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
+#define F_T6_T5_TX_RXLOOP    V_T6_T5_TX_RXLOOP(1U)
+
+#define S_T6_T5_TX_BWSEL    2
+#define M_T6_T5_TX_BWSEL    0x3U
+#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
+#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TEST_CONTROL 0x3904
+
+#define S_T6_ERROR    9
+#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
+#define F_T6_ERROR    V_T6_ERROR(1U)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_COEFFICIENT_CONTROL 0x3908
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_MODE_CONTROL 0x390c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3910
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3914
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3918
+
+#define S_T6_CALSSTN    8
+#define M_T6_CALSSTN    0x3fU
+#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
+#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
+
+#define S_T6_CALSSTP    0
+#define M_T6_CALSSTP    0x3fU
+#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
+#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x391c
+
+#define S_T6_DRTOL    2
+#define M_T6_DRTOL    0x7U
+#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
+#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT 0x3920
+
+#define S_T6_NXTT0    0
+#define M_T6_NXTT0    0x3fU
+#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
+#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT 0x3924
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT 0x3928
+
+#define S_T6_NXTT2    0
+#define M_T6_NXTT2    0x3fU
+#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
+#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_3_COEFFICIENT 0x392c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AMPLITUDE 0x3930
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_POLARITY 0x3934
+
+#define S_T6_NXTPOL    0
+#define M_T6_NXTPOL    0xfU
+#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
+#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3938
+
+#define S_T6_C0UPDT    6
+#define M_T6_C0UPDT    0x3U
+#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
+#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
+
+#define S_T6_C2UPDT    2
+#define M_T6_C2UPDT    0x3U
+#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
+#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
+
+#define S_T6_C1UPDT    0
+#define M_T6_C1UPDT    0x3U
+#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
+#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x393c
+
+#define S_T6_C0STAT    6
+#define M_T6_C0STAT    0x3U
+#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
+#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
+
+#define S_T6_C2STAT    2
+#define M_T6_C2STAT    0x3U
+#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
+#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
+
+#define S_T6_C1STAT    0
+#define M_T6_C1STAT    0x3U
+#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
+#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3940
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3940
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3944
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3944
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3948
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3948
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x394c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_APPLIED_TUNE_REGISTER 0x3950
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3958
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3960
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_4X_SEGMENT_APPLIED 0x3960
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3964
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_2X_SEGMENT_APPLIED 0x3964
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3968
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_1X_SEGMENT_APPLIED 0x3968
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x396c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3970
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3970
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3974
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3974
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3978
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x397c
+
+#define S_T6_XADDR    1
+#define M_T6_XADDR    0x1fU
+#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
+#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3980
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3984
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3988
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3988
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x398c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x398c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_OVERRIDE 0x3990
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_APPLIED 0x3994
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_TIME_OUT 0x3998
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AZ_CONTROL 0x399c
+#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x39a0
+
+#define S_T6_DCCTIMEEN    13
+#define M_T6_DCCTIMEEN    0x3U
+#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
+#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
+
+#define S_T6_DCCLOCK    11
+#define M_T6_DCCLOCK    0x3U
+#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
+#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
+
+#define S_T6_DCCOFFSET    8
+#define M_T6_DCCOFFSET    0x7U
+#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
+#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
+
+#define S_TX_LINK_BCST_DCCSTEP_CTL    6
+#define M_TX_LINK_BCST_DCCSTEP_CTL    0x3U
+#define V_TX_LINK_BCST_DCCSTEP_CTL(x) ((x) << S_TX_LINK_BCST_DCCSTEP_CTL)
+#define G_TX_LINK_BCST_DCCSTEP_CTL(x) (((x) >> S_TX_LINK_BCST_DCCSTEP_CTL) & M_TX_LINK_BCST_DCCSTEP_CTL)
+
+#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_OVERRIDE 0x39a4
+#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_APPLIED 0x39a8
+#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_TIME_OUT 0x39ac
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_OVERRIDE 0x39c0
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_4X_OVERRIDE 0x39c8
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_2X_OVERRIDE 0x39cc
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_1X_OVERRIDE 0x39d0
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x39d8
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x39dc
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x39e0
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_5 0x39ec
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_4 0x39f0
+
+#define S_T6_SDOVRD    0
+#define M_T6_SDOVRD    0xffffU
+#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
+#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_3 0x39f4
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_2 0x39f8
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_1 0x39fc
+
+#define S_T6_SDOVRDEN    15
+#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
+#define F_T6_SDOVRDEN    V_T6_SDOVRDEN(1U)
+
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_CONFIGURATION_MODE 0x3a00
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_TEST_CONTROL 0x3a04
+#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_CONTROL 0x3a08
+#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_OFFSET_CONTROL 0x3a0c
+
+#define S_T6_TMSCAL    8
+#define M_T6_TMSCAL    0x3U
+#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
+#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
+
+#define S_T6_APADJ    7
+#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
+#define F_T6_APADJ    V_T6_APADJ(1U)
+
+#define S_T6_RSEL    6
+#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
+#define F_T6_RSEL    V_T6_RSEL(1U)
+
+#define S_T6_PHOFFS    0
+#define M_T6_PHOFFS    0x3fU
+#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
+#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
+
+#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_1 0x3a10
+#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_2 0x3a14
+#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3a18
+#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x3a1c
+#define A_MAC_PORT_RX_LINK_BCST_DFE_CONTROL 0x3a20
+
+#define S_T6_SPIFMT    8
+#define M_T6_SPIFMT    0xfU
+#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
+#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
+
+#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_1 0x3a24
+#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_2 0x3a28
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_1 0x3a2c
+
+#define S_T6_WRAPSEL    15
+#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
+#define F_T6_WRAPSEL    V_T6_WRAPSEL(1U)
+
+#define S_T6_PEAK    9
+#define M_T6_PEAK    0x1fU
+#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
+#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
+
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_2 0x3a30
+
+#define S_T6_T5VGAIN    0
+#define M_T6_T5VGAIN    0x7fU
+#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
+#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
+
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_3 0x3a34
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_1 0x3a38
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3a38
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_IQAMP_CONTROL_1 0x3a3c
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_3 0x3a40
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_IQAMP_CONTROL_2 0x3a40
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3a44
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACAP_AND_DACAN 0x3a48
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACA_MIN_AND_DACAZ 0x3a4c
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACA_MIN 0x3a4c
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_ADAC_CONTROL 0x3a50
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_AC_COUPLING_CONTROL 0x3a54
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_AC_COUPLING_VALUE 0x3a58
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x3a5c
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1H2H3_LOCAL_OFFSET 0x3a5c
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3a60
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3a60
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3a64
+#define A_MAC_PORT_RX_LINK_BCST_PEAKED_INTEGRATOR 0x3a64
+#define A_MAC_PORT_RX_LINK_BCST_CDR_ANALOG_SWITCH 0x3a68
+#define A_MAC_PORT_RX_LINK_BCST_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x3a6c
+#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3a70
+#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_DATA_CENTERING_DDC 0x3a74
+
+#define S_T6_ODEC    0
+#define M_T6_ODEC    0xfU
+#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
+#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
+
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS 0x3a78
+
+#define S_RX_LINK_BCST_ACCCMP_RIS    11
+#define V_RX_LINK_BCST_ACCCMP_RIS(x) ((x) << S_RX_LINK_BCST_ACCCMP_RIS)
+#define F_RX_LINK_BCST_ACCCMP_RIS    V_RX_LINK_BCST_ACCCMP_RIS(1U)
+
+#define A_MAC_PORT_RX_LINK_BCST_DFE_FUNCTION_CONTROL_1 0x3a7c
+#define A_MAC_PORT_RX_LINK_BCST_DFE_FUNCTION_CONTROL_2 0x3a80
+#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_EVN1_EVN2 0x3a84
+#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_CHANNEL 0x3a84
+#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_ODD1_ODD2 0x3a88
+#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_VALUE 0x3a88
+#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_EVN3_EVN4 0x3a8c
+#define A_MAC_PORT_RX_LINK_BCST_H_COEFFICIENBT_BIST 0x3a8c
+#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_ODD3_ODD4 0x3a90
+#define A_MAC_PORT_RX_LINK_BCST_AC_CAPACITOR_BIST 0x3a90
+
+#define S_RX_LINK_BCST_ACCCMP_BIST    13
+#define V_RX_LINK_BCST_ACCCMP_BIST(x) ((x) << S_RX_LINK_BCST_ACCCMP_BIST)
+#define F_RX_LINK_BCST_ACCCMP_BIST    V_RX_LINK_BCST_ACCCMP_BIST(1U)
+
+#define A_MAC_PORT_RX_LINK_BCST_DFE_E0_AND_E1_OFFSET 0x3a94
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_LOFF_CONTROL 0x3a98
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_LOFF_CONTROL_REGISTER 0x3a98
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_SIGDET_CONTROL 0x3a9c
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_ANALOG_CONTROL_SWITCH 0x3aa0
+#define A_MAC_PORT_RX_LINK_BCST_INTEGRATOR_DAC_OFFSET 0x3aa4
+#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_CONTROL 0x3aa8
+#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS 0x3aac
+
+#define S_T6_EMMD    3
+#define M_T6_EMMD    0x3U
+#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
+#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
+
+#define S_T6_EMBRDY    2
+#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
+#define F_T6_EMBRDY    V_T6_EMBRDY(1U)
+
+#define S_T6_EMBUMP    1
+#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
+#define F_T6_EMBUMP    V_T6_EMBUMP(1U)
+
+#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_ERROR_COUNT 0x3ab0
+#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x3ab4
+#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x3ab8
+#define A_MAC_PORT_RX_LINK_BCST_DFE_FUNCTION_CONTROL_3 0x3abc
+#define A_MAC_PORT_RX_LINK_BCST_DFE_TAP_ENABLE 0x3ac0
+#define A_MAC_PORT_RX_LINK_BCST_DFE_TAP_CONTROL 0x3ac0
+
+#define S_RX_LINK_BCST_INDEX_DFE_TC    0
+#define M_RX_LINK_BCST_INDEX_DFE_TC    0xfU
+#define V_RX_LINK_BCST_INDEX_DFE_TC(x) ((x) << S_RX_LINK_BCST_INDEX_DFE_TC)
+#define G_RX_LINK_BCST_INDEX_DFE_TC(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_TC) & M_RX_LINK_BCST_INDEX_DFE_TC)
+
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3ac4
+#define A_MAC_PORT_RX_LINK_BCST_DFE_TAP 0x3ac4
+
+#define S_RX_LINK_BCST_INDEX_DFE_TAP    0
+#define M_RX_LINK_BCST_INDEX_DFE_TAP    0xfU
+#define V_RX_LINK_BCST_INDEX_DFE_TAP(x) ((x) << S_RX_LINK_BCST_INDEX_DFE_TAP)
+#define G_RX_LINK_BCST_INDEX_DFE_TAP(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_TAP) & M_RX_LINK_BCST_INDEX_DFE_TAP)
+
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3ac8
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H3 0x3acc
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3ad0
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H5 0x3ad4
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H6_AND_H7 0x3ad8
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H8_AND_H9 0x3adc
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H10_AND_H11 0x3ae0
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H12 0x3ae4
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS_2 0x3ae4
+#define A_MAC_PORT_RX_LINK_BCST_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x3ae8
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DCD_CONTROL 0x3aec
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DCC_CONTROL 0x3af0
+
+#define S_RX_LINK_BCST_DCCSTEP_RXCTL    10
+#define M_RX_LINK_BCST_DCCSTEP_RXCTL    0x3U
+#define V_RX_LINK_BCST_DCCSTEP_RXCTL(x) ((x) << S_RX_LINK_BCST_DCCSTEP_RXCTL)
+#define G_RX_LINK_BCST_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINK_BCST_DCCSTEP_RXCTL) & M_RX_LINK_BCST_DCCSTEP_RXCTL)
+
+#define S_RX_LINK_BCST_DCCLOCK_RXCTL    8
+#define V_RX_LINK_BCST_DCCLOCK_RXCTL(x) ((x) << S_RX_LINK_BCST_DCCLOCK_RXCTL)
+#define F_RX_LINK_BCST_DCCLOCK_RXCTL    V_RX_LINK_BCST_DCCLOCK_RXCTL(1U)
+
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_QCC_CONTROL 0x3af4
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_MACRO_TEST_CONTROL_2 0x3af8
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x3af8
+#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_MACRO_TEST_CONTROL_1 0x3afc
+#define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_0 0x3b00
+#define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_1 0x3b04
+#define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_2 0x3b08
+#define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_3 0x3b0c
+#define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_4 0x3b10
+#define A_MAC_PORT_PLLA_POWER_CONTROL 0x3b24
+
+#define S_SPWRENA    1
+#define V_SPWRENA(x) ((x) << S_SPWRENA)
+#define F_SPWRENA    V_SPWRENA(1U)
+
+#define S_NPWRENA    0
+#define V_NPWRENA(x) ((x) << S_NPWRENA)
+#define F_NPWRENA    V_NPWRENA(1U)
+
+#define A_MAC_PORT_PLLA_CHARGE_PUMP_CONTROL 0x3b28
+
+#define S_T5CPISEL    0
+#define M_T5CPISEL    0x7U
+#define V_T5CPISEL(x) ((x) << S_T5CPISEL)
+#define G_T5CPISEL(x) (((x) >> S_T5CPISEL) & M_T5CPISEL)
+
+#define A_MAC_PORT_PLLA_PLL_MICELLANEOUS_CONTROL 0x3b38
+#define A_MAC_PORT_PLLA_PCLK_CONTROL 0x3b3c
+
+#define S_SPEDIV    3
+#define M_SPEDIV    0x1fU
+#define V_SPEDIV(x) ((x) << S_SPEDIV)
+#define G_SPEDIV(x) (((x) >> S_SPEDIV) & M_SPEDIV)
+
+#define S_PCKSEL    0
+#define M_PCKSEL    0x7U
+#define V_PCKSEL(x) ((x) << S_PCKSEL)
+#define G_PCKSEL(x) (((x) >> S_PCKSEL) & M_PCKSEL)
+
+#define A_MAC_PORT_PLLA_EYE_METRICS_INTERVAL_CONTROL 0x3b40
+
+#define S_EMIL    2
+#define V_EMIL(x) ((x) << S_EMIL)
+#define F_EMIL    V_EMIL(1U)
+
+#define S_EMID    1
+#define V_EMID(x) ((x) << S_EMID)
+#define F_EMID    V_EMID(1U)
+
+#define S_EMIS    0
+#define V_EMIS(x) ((x) << S_EMIS)
+#define F_EMIS    V_EMIS(1U)
+
+#define A_MAC_PORT_PLLA_EYE_METRICS_INTERVAL_LIMIT_1 0x3b44
+
+#define S_EMIL1    0
+#define M_EMIL1    0xffU
+#define V_EMIL1(x) ((x) << S_EMIL1)
+#define G_EMIL1(x) (((x) >> S_EMIL1) & M_EMIL1)
+
+#define A_MAC_PORT_PLLA_EYE_METRICS_INTERVAL_LIMIT_2 0x3b48
+
+#define S_EMIL2    0
+#define M_EMIL2    0xffU
+#define V_EMIL2(x) ((x) << S_EMIL2)
+#define G_EMIL2(x) (((x) >> S_EMIL2) & M_EMIL2)
+
+#define A_MAC_PORT_PLLA_EYE_METRICS_INTERVAL_LIMIT_3 0x3b4c
+
+#define S_EMIL3    0
+#define M_EMIL3    0xffU
+#define V_EMIL3(x) ((x) << S_EMIL3)
+#define G_EMIL3(x) (((x) >> S_EMIL3) & M_EMIL3)
+
+#define A_MAC_PORT_PLLA_EYE_METRICS_INTERVAL_LIMIT_4 0x3b50
+
+#define S_EMIL4    0
+#define M_EMIL4    0xffU
+#define V_EMIL4(x) ((x) << S_EMIL4)
+#define G_EMIL4(x) (((x) >> S_EMIL4) & M_EMIL4)
+
+#define A_MAC_PORT_PLLA_MACRO_TEST_CONTROL_4 0x3bf0
+
+#define S_VBST    1
+#define M_VBST    0x7U
+#define V_VBST(x) ((x) << S_VBST)
+#define G_VBST(x) (((x) >> S_VBST) & M_VBST)
+
+#define S_PLLDIVA    4
+#define V_PLLDIVA(x) ((x) << S_PLLDIVA)
+#define F_PLLDIVA    V_PLLDIVA(1U)
+
+#define S_REFDIV    0
+#define M_REFDIV    0xfU
+#define V_REFDIV(x) ((x) << S_REFDIV)
+#define G_REFDIV(x) (((x) >> S_REFDIV) & M_REFDIV)
+
+#define A_MAC_PORT_PLLA_MACRO_TEST_CONTROL_3 0x3bf4
+
+#define S_RESYNC    6
+#define V_RESYNC(x) ((x) << S_RESYNC)
+#define F_RESYNC    V_RESYNC(1U)
+
+#define S_RXCLKSEL    5
+#define V_RXCLKSEL(x) ((x) << S_RXCLKSEL)
+#define F_RXCLKSEL    V_RXCLKSEL(1U)
+
+#define S_FRCBAND    4
+#define V_FRCBAND(x) ((x) << S_FRCBAND)
+#define F_FRCBAND    V_FRCBAND(1U)
+
+#define S_PLLBYP    3
+#define V_PLLBYP(x) ((x) << S_PLLBYP)
+#define F_PLLBYP    V_PLLBYP(1U)
+
+#define S_PDWNP    2
+#define V_PDWNP(x) ((x) << S_PDWNP)
+#define F_PDWNP    V_PDWNP(1U)
+
+#define S_VCOSEL    1
+#define V_VCOSEL(x) ((x) << S_VCOSEL)
+#define F_VCOSEL    V_VCOSEL(1U)
+
+#define S_DIVSEL8    0
+#define V_DIVSEL8(x) ((x) << S_DIVSEL8)
+#define F_DIVSEL8    V_DIVSEL8(1U)
+
+#define A_MAC_PORT_PLLA_MACRO_TEST_CONTROL_2 0x3bf8
+
+#define S_DIVSEL    0
+#define M_DIVSEL    0xffU
+#define V_DIVSEL(x) ((x) << S_DIVSEL)
+#define G_DIVSEL(x) (((x) >> S_DIVSEL) & M_DIVSEL)
+
+#define A_MAC_PORT_PLLA_MACRO_TEST_CONTROL_1 0x3bfc
+
+#define S_CONFIG    0
+#define M_CONFIG    0xffU
+#define V_CONFIG(x) ((x) << S_CONFIG)
+#define G_CONFIG(x) (((x) >> S_CONFIG) & M_CONFIG)
+
+#define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_0 0x3c00
+#define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_1 0x3c04
+#define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_2 0x3c08
+#define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_3 0x3c0c
+#define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_4 0x3c10
+#define A_MAC_PORT_PLLB_POWER_CONTROL 0x3c24
+#define A_MAC_PORT_PLLB_CHARGE_PUMP_CONTROL 0x3c28
+#define A_MAC_PORT_PLLB_PLL_MICELLANEOUS_CONTROL 0x3c38
+#define A_MAC_PORT_PLLB_PCLK_CONTROL 0x3c3c
+#define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_CONTROL 0x3c40
+#define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_LIMIT_1 0x3c44
+#define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_LIMIT_2 0x3c48
+#define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_LIMIT_3 0x3c4c
+#define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_LIMIT_4 0x3c50
+#define A_MAC_PORT_PLLB_MACRO_TEST_CONTROL_4 0x3cf0
+#define A_MAC_PORT_PLLB_MACRO_TEST_CONTROL_3 0x3cf4
+#define A_MAC_PORT_PLLB_MACRO_TEST_CONTROL_2 0x3cf8
+#define A_MAC_PORT_PLLB_MACRO_TEST_CONTROL_1 0x3cfc
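
[Editor's note: the PLLB block at 0x3c00 repeats the PLLA register layout one-for-one, which is why no new field macros are emitted for it; the PLLA accessors (G_DIVSEL, G_T5CPISEL, G_SPEDIV, and so on) are reused at the PLLB addresses. A hedged sketch, assuming `v` holds the word read from the PLLB register by whatever access routine applies:]

    /* v was read from A_MAC_PORT_PLLB_MACRO_TEST_CONTROL_2. */
    unsigned int divsel = G_DIVSEL(v);   /* bits 7:0, same layout as PLLA */
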
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0
+
+#define S_STEP    0
+#define M_STEP    0x7U
+#define V_STEP(x) ((x) << S_STEP)
+#define G_STEP(x) (((x) >> S_STEP) & M_STEP)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
+
+#define S_C0INIT    0
+#define M_C0INIT    0x1fU
+#define V_C0INIT(x) ((x) << S_C0INIT)
+#define G_C0INIT(x) (((x) >> S_C0INIT) & M_C0INIT)
+
+#define S_C0PRESET    8
+#define M_C0PRESET    0x7fU
+#define V_C0PRESET(x) ((x) << S_C0PRESET)
+#define G_C0PRESET(x) (((x) >> S_C0PRESET) & M_C0PRESET)
+
+#define S_C0INIT1    0
+#define M_C0INIT1    0x7fU
+#define V_C0INIT1(x) ((x) << S_C0INIT1)
+#define G_C0INIT1(x) (((x) >> S_C0INIT1) & M_C0INIT1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
+
+#define S_C0MAX    8
+#define M_C0MAX    0x1fU
+#define V_C0MAX(x) ((x) << S_C0MAX)
+#define G_C0MAX(x) (((x) >> S_C0MAX) & M_C0MAX)
+
+#define S_C0MIN    0
+#define M_C0MIN    0x1fU
+#define V_C0MIN(x) ((x) << S_C0MIN)
+#define G_C0MIN(x) (((x) >> S_C0MIN) & M_C0MIN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
+
+#define S_T6_C0MAX    8
+#define M_T6_C0MAX    0x7fU
+#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
+#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
+
+#define S_T6_C0MIN    0
+#define M_T6_C0MIN    0x7fU
+#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
+#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
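
[Editor's note: the T6_-prefixed variants redefine the same limit fields with wider masks (0x7f here, versus 0x1f for the original C0MAX/C0MIN), reflecting a wider coefficient range on T6 parts, so the caller picks the accessor that matches the chip generation. An illustrative decode; the `is_t6` condition is assumed to come from the driver's chip-identification logic, not from this header:]

    /* Illustrative: decode C0MAX at the width appropriate to the chip. */
    unsigned int c0max = is_t6 ? G_T6_C0MAX(v) : G_C0MAX(v);
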
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
+
+#define S_C1INIT    0
+#define M_C1INIT    0x7fU
+#define V_C1INIT(x) ((x) << S_C1INIT)
+#define G_C1INIT(x) (((x) >> S_C1INIT) & M_C1INIT)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
+
+#define S_C1PRESET    8
+#define M_C1PRESET    0x7fU
+#define V_C1PRESET(x) ((x) << S_C1PRESET)
+#define G_C1PRESET(x) (((x) >> S_C1PRESET) & M_C1PRESET)
+
+#define S_C1INIT1    0
+#define M_C1INIT1    0x7fU
+#define V_C1INIT1(x) ((x) << S_C1INIT1)
+#define G_C1INIT1(x) (((x) >> S_C1INIT1) & M_C1INIT1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
+
+#define S_C1MAX    8
+#define M_C1MAX    0x7fU
+#define V_C1MAX(x) ((x) << S_C1MAX)
+#define G_C1MAX(x) (((x) >> S_C1MAX) & M_C1MAX)
+
+#define S_C1MIN    0
+#define M_C1MIN    0x7fU
+#define V_C1MIN(x) ((x) << S_C1MIN)
+#define G_C1MIN(x) (((x) >> S_C1MIN) & M_C1MIN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28
+
+#define S_C2INIT    0
+#define M_C2INIT    0x3fU
+#define V_C2INIT(x) ((x) << S_C2INIT)
+#define G_C2INIT(x) (((x) >> S_C2INIT) & M_C2INIT)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
+
+#define S_C2PRESET    8
+#define M_C2PRESET    0x7fU
+#define V_C2PRESET(x) ((x) << S_C2PRESET)
+#define G_C2PRESET(x) (((x) >> S_C2PRESET) & M_C2PRESET)
+
+#define S_C2INIT1    0
+#define M_C2INIT1    0x7fU
+#define V_C2INIT1(x) ((x) << S_C2INIT1)
+#define G_C2INIT1(x) (((x) >> S_C2INIT1) & M_C2INIT1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
+
+#define S_C2MAX    8
+#define M_C2MAX    0x3fU
+#define V_C2MAX(x) ((x) << S_C2MAX)
+#define G_C2MAX(x) (((x) >> S_C2MAX) & M_C2MAX)
+
+#define S_C2MIN    0
+#define M_C2MIN    0x3fU
+#define V_C2MIN(x) ((x) << S_C2MIN)
+#define G_C2MIN(x) (((x) >> S_C2MIN) & M_C2MIN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
+
+#define S_T6_C2MAX    8
+#define M_T6_C2MAX    0x7fU
+#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
+#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
+
+#define S_T6_C2MIN    0
+#define M_T6_C2MIN    0x7fU
+#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
+#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
+
+#define S_VMMAX    0
+#define M_VMMAX    0x7fU
+#define V_VMMAX(x) ((x) << S_VMMAX)
+#define G_VMMAX(x) (((x) >> S_VMMAX) & M_VMMAX)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
+
+#define S_V2MIN    0
+#define M_V2MIN    0x7fU
+#define V_V2MIN(x) ((x) << S_V2MIN)
+#define G_V2MIN(x) (((x) >> S_V2MIN) & M_V2MIN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C3_INIT_EXTENDED 0x48
+
+#define S_C3PRESET    8
+#define M_C3PRESET    0x7fU
+#define V_C3PRESET(x) ((x) << S_C3PRESET)
+#define G_C3PRESET(x) (((x) >> S_C3PRESET) & M_C3PRESET)
+
+#define S_C3INIT1    0
+#define M_C3INIT1    0x7fU
+#define V_C3INIT1(x) ((x) << S_C3INIT1)
+#define G_C3INIT1(x) (((x) >> S_C3INIT1) & M_C3INIT1)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50
+
+#define S_C3MAX    8
+#define M_C3MAX    0x7fU
+#define V_C3MAX(x) ((x) << S_C3MAX)
+#define G_C3MAX(x) (((x) >> S_C3MAX) & M_C3MAX)
+
+#define S_C3MIN    0
+#define M_C3MIN    0x7fU
+#define V_C3MIN(x) ((x) << S_C3MIN)
+#define G_C3MIN(x) (((x) >> S_C3MIN) & M_C3MIN)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c
+
+#define S_C0INIT2    0
+#define M_C0INIT2    0x7fU
+#define V_C0INIT2(x) ((x) << S_C0INIT2)
+#define G_C0INIT2(x) (((x) >> S_C0INIT2) & M_C0INIT2)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60
+
+#define S_C1INIT2    0
+#define M_C1INIT2    0x7fU
+#define V_C1INIT2(x) ((x) << S_C1INIT2)
+#define G_C1INIT2(x) (((x) >> S_C1INIT2) & M_C1INIT2)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68
+
+#define S_C2INIT2    0
+#define M_C2INIT2    0x7fU
+#define V_C2INIT2(x) ((x) << S_C2INIT2)
+#define G_C2INIT2(x) (((x) >> S_C2INIT2) & M_C2INIT2)
+
+#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70
+
+#define S_C3INIT2    0
+#define M_C3INIT2    0x7fU
+#define V_C3INIT2(x) ((x) << S_C3INIT2)
+#define G_C3INIT2(x) (((x) >> S_C3INIT2) & M_C3INIT2)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
+
+#define S_T6_C0MAX    8
+#define M_T6_C0MAX    0x7fU
+#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
+#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
+
+#define S_T6_C0MIN    0
+#define M_T6_C0MIN    0x7fU
+#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
+#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
+
+#define S_T6_C2MAX    8
+#define M_T6_C2MAX    0x7fU
+#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
+#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
+
+#define S_T6_C2MIN    0
+#define M_T6_C2MIN    0x7fU
+#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
+#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
+
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C3_INIT_EXTENDED 0x48
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68
+#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
+
+#define S_T6_C0MAX    8
+#define M_T6_C0MAX    0x7fU
+#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
+#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
+
+#define S_T6_C0MIN    0
+#define M_T6_C0MIN    0x7fU
+#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
+#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
+
+#define S_T6_C2MAX    8
+#define M_T6_C2MAX    0x7fU
+#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
+#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
+
+#define S_T6_C2MIN    0
+#define M_T6_C2MIN    0x7fU
+#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
+#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
+
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C3_INIT_EXTENDED 0x48
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68
+#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
+
+#define S_T6_C0MAX    8
+#define M_T6_C0MAX    0x7fU
+#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
+#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
+
+#define S_T6_C0MIN    0
+#define M_T6_C0MIN    0x7fU
+#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
+#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
+
+#define S_T6_C2MAX    8
+#define M_T6_C2MAX    0x7fU
+#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
+#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
+
+#define S_T6_C2MIN    0
+#define M_T6_C2MIN    0x7fU
+#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
+#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
+
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C3_INIT_EXTENDED 0x48
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68
+#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
+
+#define S_T6_C0MAX    8
+#define M_T6_C0MAX    0x7fU
+#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
+#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
+
+#define S_T6_C0MIN    0
+#define M_T6_C0MIN    0x7fU
+#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
+#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
+
+#define S_T6_C2MAX    8
+#define M_T6_C2MAX    0x7fU
+#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
+#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
+
+#define S_T6_C2MIN    0
+#define M_T6_C2MIN    0x7fU
+#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
+#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
+
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C3_INIT_EXTENDED 0x48
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70
+#define A_T6_MAC_PORT_RX_LINKA_DFE_TAP_ENABLE 0x2a00
+
+#define S_RX_LINKA_INDEX_DFE_EN    1
+#define M_RX_LINKA_INDEX_DFE_EN    0x7fffU
+#define V_RX_LINKA_INDEX_DFE_EN(x) ((x) << S_RX_LINKA_INDEX_DFE_EN)
+#define G_RX_LINKA_INDEX_DFE_EN(x) (((x) >> S_RX_LINKA_INDEX_DFE_EN) & M_RX_LINKA_INDEX_DFE_EN)
+
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H1 0x2a04
+
+#define S_T6_H1OSN    13
+#define M_T6_H1OSN    0x7U
+#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
+#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
+
+#define S_T6_H1OMAG    8
+#define M_T6_H1OMAG    0x1fU
+#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
+#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
+
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H2 0x2a08
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H3 0x2a0c
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H4 0x2a10
+
+#define S_H4SN    4
+#define M_H4SN    0x3U
+#define V_H4SN(x) ((x) << S_H4SN)
+#define G_H4SN(x) (((x) >> S_H4SN) & M_H4SN)
+
+#define S_H4MAG    0
+#define M_H4MAG    0xfU
+#define V_H4MAG(x) ((x) << S_H4MAG)
+#define G_H4MAG(x) (((x) >> S_H4MAG) & M_H4MAG)
+
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H5 0x2a14
+
+#define S_H5GS    6
+#define M_H5GS    0x3U
+#define V_H5GS(x) ((x) << S_H5GS)
+#define G_H5GS(x) (((x) >> S_H5GS) & M_H5GS)
+
+#define S_H5SN    4
+#define M_H5SN    0x3U
+#define V_H5SN(x) ((x) << S_H5SN)
+#define G_H5SN(x) (((x) >> S_H5SN) & M_H5SN)
+
+#define S_H5MAG    0
+#define M_H5MAG    0xfU
+#define V_H5MAG(x) ((x) << S_H5MAG)
+#define G_H5MAG(x) (((x) >> S_H5MAG) & M_H5MAG)
+
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H6_AND_H7 0x2a18
+
+#define S_H7SN    12
+#define M_H7SN    0x3U
+#define V_H7SN(x) ((x) << S_H7SN)
+#define G_H7SN(x) (((x) >> S_H7SN) & M_H7SN)
+
+#define S_H6SN    4
+#define M_H6SN    0x3U
+#define V_H6SN(x) ((x) << S_H6SN)
+#define G_H6SN(x) (((x) >> S_H6SN) & M_H6SN)
+
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H8_AND_H9 0x2a1c
+
+#define S_H9SN    12
+#define M_H9SN    0x3U
+#define V_H9SN(x) ((x) << S_H9SN)
+#define G_H9SN(x) (((x) >> S_H9SN) & M_H9SN)
+
+#define S_H8SN    4
+#define M_H8SN    0x3U
+#define V_H8SN(x) ((x) << S_H8SN)
+#define G_H8SN(x) (((x) >> S_H8SN) & M_H8SN)
+
+#define A_T6_MAC_PORT_RX_LINKA_DFE_H10_AND_H11 0x2a20
+
+#define S_H11SN    12
+#define M_H11SN    0x3U
+#define V_H11SN(x) ((x) << S_H11SN)
+#define G_H11SN(x) (((x) >> S_H11SN) & M_H11SN)
+
+#define S_H10SN    4
+#define M_H10SN    0x3U
+#define V_H10SN(x) ((x) << S_H10SN)
+#define G_H10SN(x) (((x) >> S_H10SN) & M_H10SN)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H12_13 0x2a24
+
+#define S_H13GS    13
+#define M_H13GS    0x7U
+#define V_H13GS(x) ((x) << S_H13GS)
+#define G_H13GS(x) (((x) >> S_H13GS) & M_H13GS)
+
+#define S_H13SN    10
+#define M_H13SN    0x7U
+#define V_H13SN(x) ((x) << S_H13SN)
+#define G_H13SN(x) (((x) >> S_H13SN) & M_H13SN)
+
+#define S_H13MAG    8
+#define M_H13MAG    0x3U
+#define V_H13MAG(x) ((x) << S_H13MAG)
+#define G_H13MAG(x) (((x) >> S_H13MAG) & M_H13MAG)
+
+#define S_H12SN    4
+#define M_H12SN    0x3U
+#define V_H12SN(x) ((x) << S_H12SN)
+#define G_H12SN(x) (((x) >> S_H12SN) & M_H12SN)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H14_15 0x2a28
+
+#define S_H15GS    13
+#define M_H15GS    0x7U
+#define V_H15GS(x) ((x) << S_H15GS)
+#define G_H15GS(x) (((x) >> S_H15GS) & M_H15GS)
+
+#define S_H15SN    10
+#define M_H15SN    0x7U
+#define V_H15SN(x) ((x) << S_H15SN)
+#define G_H15SN(x) (((x) >> S_H15SN) & M_H15SN)
+
+#define S_H15MAG    8
+#define M_H15MAG    0x3U
+#define V_H15MAG(x) ((x) << S_H15MAG)
+#define G_H15MAG(x) (((x) >> S_H15MAG) & M_H15MAG)
+
+#define S_H14GS    6
+#define M_H14GS    0x3U
+#define V_H14GS(x) ((x) << S_H14GS)
+#define G_H14GS(x) (((x) >> S_H14GS) & M_H14GS)
+
+#define S_H14SN    4
+#define M_H14SN    0x3U
+#define V_H14SN(x) ((x) << S_H14SN)
+#define G_H14SN(x) (((x) >> S_H14SN) & M_H14SN)
+
+#define S_H14MAG    0
+#define M_H14MAG    0xfU
+#define V_H14MAG(x) ((x) << S_H14MAG)
+#define G_H14MAG(x) (((x) >> S_H14MAG) & M_H14MAG)
+
+#define A_MAC_PORT_RX_LINKA_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2a2c
+
+#define S_H1ODELTA    8
+#define M_H1ODELTA    0x1fU
+#define V_H1ODELTA(x) ((x) << S_H1ODELTA)
+#define G_H1ODELTA(x) (((x) >> S_H1ODELTA) & M_H1ODELTA)
+
+#define S_H1EDELTA    0
+#define M_H1EDELTA    0x3fU
+#define V_H1EDELTA(x) ((x) << S_H1EDELTA)
+#define G_H1EDELTA(x) (((x) >> S_H1EDELTA) & M_H1EDELTA)
+
+#define A_T6_MAC_PORT_RX_LINKB_DFE_TAP_ENABLE 0x2b00
+
+#define S_RX_LINKB_INDEX_DFE_EN    1
+#define M_RX_LINKB_INDEX_DFE_EN    0x7fffU
+#define V_RX_LINKB_INDEX_DFE_EN(x) ((x) << S_RX_LINKB_INDEX_DFE_EN)
+#define G_RX_LINKB_INDEX_DFE_EN(x) (((x) >> S_RX_LINKB_INDEX_DFE_EN) & M_RX_LINKB_INDEX_DFE_EN)
+
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H1 0x2b04
+
+#define S_T6_H1OSN    13
+#define M_T6_H1OSN    0x7U
+#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
+#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
+
+#define S_T6_H1OMAG    8
+#define M_T6_H1OMAG    0x1fU
+#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
+#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
+
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H2 0x2b08
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H3 0x2b0c
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H4 0x2b10
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H5 0x2b14
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H6_AND_H7 0x2b18
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H8_AND_H9 0x2b1c
+#define A_T6_MAC_PORT_RX_LINKB_DFE_H10_AND_H11 0x2b20
+#define A_MAC_PORT_RX_LINKB_DFE_H12_13 0x2b24
+#define A_MAC_PORT_RX_LINKB_DFE_H14_15 0x2b28
+#define A_MAC_PORT_RX_LINKB_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2b2c
+#define A_T6_MAC_PORT_RX_LINKC_DFE_TAP_ENABLE 0x2e00
+
+#define S_RX_LINKC_INDEX_DFE_EN    1
+#define M_RX_LINKC_INDEX_DFE_EN    0x7fffU
+#define V_RX_LINKC_INDEX_DFE_EN(x) ((x) << S_RX_LINKC_INDEX_DFE_EN)
+#define G_RX_LINKC_INDEX_DFE_EN(x) (((x) >> S_RX_LINKC_INDEX_DFE_EN) & M_RX_LINKC_INDEX_DFE_EN)
+
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H1 0x2e04
+
+#define S_T6_H1OSN    13
+#define M_T6_H1OSN    0x7U
+#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
+#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
+
+#define S_T6_H1OMAG    8
+#define M_T6_H1OMAG    0x1fU
+#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
+#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
+
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H2 0x2e08
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H3 0x2e0c
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H4 0x2e10
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H5 0x2e14
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H6_AND_H7 0x2e18
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H8_AND_H9 0x2e1c
+#define A_T6_MAC_PORT_RX_LINKC_DFE_H10_AND_H11 0x2e20
+#define A_MAC_PORT_RX_LINKC_DFE_H12_13 0x2e24
+#define A_MAC_PORT_RX_LINKC_DFE_H14_15 0x2e28
+#define A_MAC_PORT_RX_LINKC_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2e2c
+#define A_T6_MAC_PORT_RX_LINKD_DFE_TAP_ENABLE 0x2f00
+
+#define S_RX_LINKD_INDEX_DFE_EN    1
+#define M_RX_LINKD_INDEX_DFE_EN    0x7fffU
+#define V_RX_LINKD_INDEX_DFE_EN(x) ((x) << S_RX_LINKD_INDEX_DFE_EN)
+#define G_RX_LINKD_INDEX_DFE_EN(x) (((x) >> S_RX_LINKD_INDEX_DFE_EN) & M_RX_LINKD_INDEX_DFE_EN)
+
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H1 0x2f04
+
+#define S_T6_H1OSN    13
+#define M_T6_H1OSN    0x7U
+#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
+#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
+
+#define S_T6_H1OMAG    8
+#define M_T6_H1OMAG    0x1fU
+#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
+#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
+
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H2 0x2f08
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H3 0x2f0c
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H4 0x2f10
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H5 0x2f14
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H6_AND_H7 0x2f18
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H8_AND_H9 0x2f1c
+#define A_T6_MAC_PORT_RX_LINKD_DFE_H10_AND_H11 0x2f20
+#define A_MAC_PORT_RX_LINKD_DFE_H12_13 0x2f24
+#define A_MAC_PORT_RX_LINKD_DFE_H14_15 0x2f28
+#define A_MAC_PORT_RX_LINKD_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2f2c
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_TAP_ENABLE 0x3200
+
+#define S_RX_LINK_BCST_INDEX_DFE_EN    1
+#define M_RX_LINK_BCST_INDEX_DFE_EN    0x7fffU
+#define V_RX_LINK_BCST_INDEX_DFE_EN(x) ((x) << S_RX_LINK_BCST_INDEX_DFE_EN)
+#define G_RX_LINK_BCST_INDEX_DFE_EN(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_EN) & M_RX_LINK_BCST_INDEX_DFE_EN)
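
[Editor's note: this 15-bit enable field starting at bit 1 presumably carries one enable per DFE tap (h1 through h15, matching the per-tap registers below). A sketch of building an enable word for the first five taps; the one-bit-per-tap reading is an assumption, not something this header states:]

    /* Assumed one-bit-per-tap layout: enable h1..h5 on the bcast link. */
    uint32_t en = V_RX_LINK_BCST_INDEX_DFE_EN(0x1f);
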
+
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3204
+
+#define S_T6_H1OSN    13
+#define M_T6_H1OSN    0x7U
+#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
+#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
+
+#define S_T6_H1OMAG    8
+#define M_T6_H1OMAG    0x1fU
+#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
+#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
+
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3208
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H3 0x320c
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3210
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H5 0x3214
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H6_AND_H7 0x3218
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H8_AND_H9 0x321c
+#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H10_AND_H11 0x3220
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H12_13 0x3224
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H14_15 0x3228
+#define A_MAC_PORT_RX_LINK_BCST_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x322c
+
+/* registers for module MC_0 */
+#define MC_0_BASE_ADDR 0x40000
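
[Editor's note: unlike the small per-lane offsets in the serdes section above, the MC register values that follow are absolute: each A_MC_* constant already folds in the 0x40000 module base, so the offset within the module falls out by subtraction. A trivial illustrative helper, not part of the driver:]

    /* Illustrative only: offset of an MC register within its module. */
    #define MC_REG_OFFSET(addr) ((addr) - MC_0_BASE_ADDR)
    /* MC_REG_OFFSET(A_MC_UPCTL_MCMD) == 0x40, per the listing below. */
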
+
+#define A_MC_UPCTL_SCFG 0x40000
+
+#define S_BBFLAGS_TIMING    8
+#define M_BBFLAGS_TIMING    0xfU
+#define V_BBFLAGS_TIMING(x) ((x) << S_BBFLAGS_TIMING)
+#define G_BBFLAGS_TIMING(x) (((x) >> S_BBFLAGS_TIMING) & M_BBFLAGS_TIMING)
+
+#define S_NFIFO_NIF1_DIS    6
+#define V_NFIFO_NIF1_DIS(x) ((x) << S_NFIFO_NIF1_DIS)
+#define F_NFIFO_NIF1_DIS    V_NFIFO_NIF1_DIS(1U)
+
+#define A_MC_UPCTL_SCTL 0x40004
+#define A_MC_UPCTL_STAT 0x40008
+
+#define S_LP_TRIG    4
+#define M_LP_TRIG    0x7U
+#define V_LP_TRIG(x) ((x) << S_LP_TRIG)
+#define G_LP_TRIG(x) (((x) >> S_LP_TRIG) & M_LP_TRIG)
+
+#define A_MC_UPCTL_INTRSTAT 0x4000c
+
+#define S_PARITY_INTR    1
+#define V_PARITY_INTR(x) ((x) << S_PARITY_INTR)
+#define F_PARITY_INTR    V_PARITY_INTR(1U)
+
+#define S_ECC_INTR    0
+#define V_ECC_INTR(x) ((x) << S_ECC_INTR)
+#define F_ECC_INTR    V_ECC_INTR(1U)
+
+#define A_MC_UPCTL_MCMD 0x40040
+
+#define S_CMD_OPCODE0    0
+#define M_CMD_OPCODE0    0xfU
+#define V_CMD_OPCODE0(x) ((x) << S_CMD_OPCODE0)
+#define G_CMD_OPCODE0(x) (((x) >> S_CMD_OPCODE0) & M_CMD_OPCODE0)
+
+#define A_MC_LMC_MCSTAT 0x40040
+
+#define S_INIT_COMPLETE    31
+#define V_INIT_COMPLETE(x) ((x) << S_INIT_COMPLETE)
+#define F_INIT_COMPLETE    V_INIT_COMPLETE(1U)
+
+#define S_SELF_REF_MODE    30
+#define V_SELF_REF_MODE(x) ((x) << S_SELF_REF_MODE)
+#define F_SELF_REF_MODE    V_SELF_REF_MODE(1U)
+
+#define S_IDLE    29
+#define V_IDLE(x) ((x) << S_IDLE)
+#define F_IDLE    V_IDLE(1U)
+
+#define S_T6_DFI_INIT_COMPLETE    28
+#define V_T6_DFI_INIT_COMPLETE(x) ((x) << S_T6_DFI_INIT_COMPLETE)
+#define F_T6_DFI_INIT_COMPLETE    V_T6_DFI_INIT_COMPLETE(1U)
+
+#define S_PREFILL_COMPLETE    27
+#define V_PREFILL_COMPLETE(x) ((x) << S_PREFILL_COMPLETE)
+#define F_PREFILL_COMPLETE    V_PREFILL_COMPLETE(1U)
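
[Editor's note: MCSTAT is a status register, so a caller would typically spin on INIT_COMPLETE after kicking off memory-controller initialization. A hedged polling sketch; t4_read_reg() is the driver's usual 32-bit read and `sc` its adapter handle, but treat the loop itself as an illustration rather than the driver's actual sequence:]

    /* Illustrative poll: wait for the memory controller to come up. */
    while (!(t4_read_reg(sc, A_MC_LMC_MCSTAT) & F_INIT_COMPLETE))
        DELAY(10);
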
+
+#define A_MC_UPCTL_POWCTL 0x40044
+#define A_MC_UPCTL_POWSTAT 0x40048
+#define A_MC_UPCTL_CMDTSTAT 0x4004c
+
+#define S_CMD_TSTAT    0
+#define V_CMD_TSTAT(x) ((x) << S_CMD_TSTAT)
+#define F_CMD_TSTAT    V_CMD_TSTAT(1U)
+
+#define A_MC_UPCTL_CMDTSTATEN 0x40050
+
+#define S_CMD_TSTAT_EN    0
+#define V_CMD_TSTAT_EN(x) ((x) << S_CMD_TSTAT_EN)
+#define F_CMD_TSTAT_EN    V_CMD_TSTAT_EN(1U)
+
+#define A_MC_UPCTL_MRRCFG0 0x40060
+
+#define S_MRR_BYTE_SEL    0
+#define M_MRR_BYTE_SEL    0xfU
+#define V_MRR_BYTE_SEL(x) ((x) << S_MRR_BYTE_SEL)
+#define G_MRR_BYTE_SEL(x) (((x) >> S_MRR_BYTE_SEL) & M_MRR_BYTE_SEL)
+
+#define A_MC_UPCTL_MRRSTAT0 0x40064
+
+#define S_MRRSTAT_BEAT3    24
+#define M_MRRSTAT_BEAT3    0xffU
+#define V_MRRSTAT_BEAT3(x) ((x) << S_MRRSTAT_BEAT3)
+#define G_MRRSTAT_BEAT3(x) (((x) >> S_MRRSTAT_BEAT3) & M_MRRSTAT_BEAT3)
+
+#define S_MRRSTAT_BEAT2    16
+#define M_MRRSTAT_BEAT2    0xffU
+#define V_MRRSTAT_BEAT2(x) ((x) << S_MRRSTAT_BEAT2)
+#define G_MRRSTAT_BEAT2(x) (((x) >> S_MRRSTAT_BEAT2) & M_MRRSTAT_BEAT2)
+
+#define S_MRRSTAT_BEAT1    8
+#define M_MRRSTAT_BEAT1    0xffU
+#define V_MRRSTAT_BEAT1(x) ((x) << S_MRRSTAT_BEAT1)
+#define G_MRRSTAT_BEAT1(x) (((x) >> S_MRRSTAT_BEAT1) & M_MRRSTAT_BEAT1)
+
+#define S_MRRSTAT_BEAT0    0
+#define M_MRRSTAT_BEAT0    0xffU
+#define V_MRRSTAT_BEAT0(x) ((x) << S_MRRSTAT_BEAT0)
+#define G_MRRSTAT_BEAT0(x) (((x) >> S_MRRSTAT_BEAT0) & M_MRRSTAT_BEAT0)
+
+#define A_MC_UPCTL_MRRSTAT1 0x40068
+
+#define S_MRRSTAT_BEAT7    24
+#define M_MRRSTAT_BEAT7    0xffU
+#define V_MRRSTAT_BEAT7(x) ((x) << S_MRRSTAT_BEAT7)
+#define G_MRRSTAT_BEAT7(x) (((x) >> S_MRRSTAT_BEAT7) & M_MRRSTAT_BEAT7)
+
+#define S_MRRSTAT_BEAT6    16
+#define M_MRRSTAT_BEAT6    0xffU
+#define V_MRRSTAT_BEAT6(x) ((x) << S_MRRSTAT_BEAT6)
+#define G_MRRSTAT_BEAT6(x) (((x) >> S_MRRSTAT_BEAT6) & M_MRRSTAT_BEAT6)
+
+#define S_MRRSTAT_BEAT5    8
+#define M_MRRSTAT_BEAT5    0xffU
+#define V_MRRSTAT_BEAT5(x) ((x) << S_MRRSTAT_BEAT5)
+#define G_MRRSTAT_BEAT5(x) (((x) >> S_MRRSTAT_BEAT5) & M_MRRSTAT_BEAT5)
+
+#define S_MRRSTAT_BEAT4    0
+#define M_MRRSTAT_BEAT4    0xffU
+#define V_MRRSTAT_BEAT4(x) ((x) << S_MRRSTAT_BEAT4)
+#define G_MRRSTAT_BEAT4(x) (((x) >> S_MRRSTAT_BEAT4) & M_MRRSTAT_BEAT4)
+
+#define A_MC_UPCTL_MCFG1 0x4007c
+
+#define S_HW_EXIT_IDLE_EN    31
+#define V_HW_EXIT_IDLE_EN(x) ((x) << S_HW_EXIT_IDLE_EN)
+#define F_HW_EXIT_IDLE_EN    V_HW_EXIT_IDLE_EN(1U)
+
+#define S_HW_IDLE    16
+#define M_HW_IDLE    0xffU
+#define V_HW_IDLE(x) ((x) << S_HW_IDLE)
+#define G_HW_IDLE(x) (((x) >> S_HW_IDLE) & M_HW_IDLE)
+
+#define S_SR_IDLE    0
+#define M_SR_IDLE    0xffU
+#define V_SR_IDLE(x) ((x) << S_SR_IDLE)
+#define G_SR_IDLE(x) (((x) >> S_SR_IDLE) & M_SR_IDLE)
+
+#define A_MC_UPCTL_MCFG 0x40080
+
+#define S_MDDR_LPDDR2_CLK_STOP_IDLE    24
+#define M_MDDR_LPDDR2_CLK_STOP_IDLE    0xffU
+#define V_MDDR_LPDDR2_CLK_STOP_IDLE(x) ((x) << S_MDDR_LPDDR2_CLK_STOP_IDLE)
+#define G_MDDR_LPDDR2_CLK_STOP_IDLE(x) (((x) >> S_MDDR_LPDDR2_CLK_STOP_IDLE) & M_MDDR_LPDDR2_CLK_STOP_IDLE)
+
+#define S_MDDR_LPDDR2_EN    22
+#define M_MDDR_LPDDR2_EN    0x3U
+#define V_MDDR_LPDDR2_EN(x) ((x) << S_MDDR_LPDDR2_EN)
+#define G_MDDR_LPDDR2_EN(x) (((x) >> S_MDDR_LPDDR2_EN) & M_MDDR_LPDDR2_EN)
+
+#define S_MDDR_LPDDR2_BL    20
+#define M_MDDR_LPDDR2_BL    0x3U
+#define V_MDDR_LPDDR2_BL(x) ((x) << S_MDDR_LPDDR2_BL)
+#define G_MDDR_LPDDR2_BL(x) (((x) >> S_MDDR_LPDDR2_BL) & M_MDDR_LPDDR2_BL)
+
+#define S_LPDDR2_S4    6
+#define V_LPDDR2_S4(x) ((x) << S_LPDDR2_S4)
+#define F_LPDDR2_S4    V_LPDDR2_S4(1U)
+
+#define S_STAGGER_CS    4
+#define V_STAGGER_CS(x) ((x) << S_STAGGER_CS)
+#define F_STAGGER_CS    V_STAGGER_CS(1U)
+
+#define S_CKE_OR_EN    1
+#define V_CKE_OR_EN(x) ((x) << S_CKE_OR_EN)
+#define F_CKE_OR_EN    V_CKE_OR_EN(1U)
+
+#define A_MC_LMC_MCOPT1 0x40080
+
+#define S_MC_PROTOCOL    31
+#define V_MC_PROTOCOL(x) ((x) << S_MC_PROTOCOL)
+#define F_MC_PROTOCOL    V_MC_PROTOCOL(1U)
+
+#define S_DM_ENABLE    30
+#define V_DM_ENABLE(x) ((x) << S_DM_ENABLE)
+#define F_DM_ENABLE    V_DM_ENABLE(1U)
+
+#define S_T6_ECC_EN    29
+#define V_T6_ECC_EN(x) ((x) << S_T6_ECC_EN)
+#define F_T6_ECC_EN    V_T6_ECC_EN(1U)
+
+#define S_ECC_COR    28
+#define V_ECC_COR(x) ((x) << S_ECC_COR)
+#define F_ECC_COR    V_ECC_COR(1U)
+
+#define S_RDIMM    27
+#define V_RDIMM(x) ((x) << S_RDIMM)
+#define F_RDIMM    V_RDIMM(1U)
+
+#define S_PMUM    25
+#define M_PMUM    0x3U
+#define V_PMUM(x) ((x) << S_PMUM)
+#define G_PMUM(x) (((x) >> S_PMUM) & M_PMUM)
+
+#define S_WIDTH0    24
+#define V_WIDTH0(x) ((x) << S_WIDTH0)
+#define F_WIDTH0    V_WIDTH0(1U)
+
+#define S_PORT_ID_CHK_EN    23
+#define V_PORT_ID_CHK_EN(x) ((x) << S_PORT_ID_CHK_EN)
+#define F_PORT_ID_CHK_EN    V_PORT_ID_CHK_EN(1U)
+
+#define S_UIOS    22
+#define V_UIOS(x) ((x) << S_UIOS)
+#define F_UIOS    V_UIOS(1U)
+
+#define S_QUADCS_RDIMM    21
+#define V_QUADCS_RDIMM(x) ((x) << S_QUADCS_RDIMM)
+#define F_QUADCS_RDIMM    V_QUADCS_RDIMM(1U)
+
+#define S_ZQCL_EN    20
+#define V_ZQCL_EN(x) ((x) << S_ZQCL_EN)
+#define F_ZQCL_EN    V_ZQCL_EN(1U)
+
+#define S_WIDTH1    19
+#define V_WIDTH1(x) ((x) << S_WIDTH1)
+#define F_WIDTH1    V_WIDTH1(1U)
+
+#define S_WD_DLY    18
+#define V_WD_DLY(x) ((x) << S_WD_DLY)
+#define F_WD_DLY    V_WD_DLY(1U)
+
+#define S_QDEPTH    16
+#define M_QDEPTH    0x3U
+#define V_QDEPTH(x) ((x) << S_QDEPTH)
+#define G_QDEPTH(x) (((x) >> S_QDEPTH) & M_QDEPTH)
+
+#define S_RWOO    15
+#define V_RWOO(x) ((x) << S_RWOO)
+#define F_RWOO    V_RWOO(1U)
+
+#define S_WOOO    14
+#define V_WOOO(x) ((x) << S_WOOO)
+#define F_WOOO    V_WOOO(1U)
+
+#define S_DCOO    13
+#define V_DCOO(x) ((x) << S_DCOO)
+#define F_DCOO    V_DCOO(1U)
+
+#define S_DEF_REF    12
+#define V_DEF_REF(x) ((x) << S_DEF_REF)
+#define F_DEF_REF    V_DEF_REF(1U)
+
+#define S_DEV_TYPE    11
+#define V_DEV_TYPE(x) ((x) << S_DEV_TYPE)
+#define F_DEV_TYPE    V_DEV_TYPE(1U)
+
+#define S_CA_PTY_DLY    10
+#define V_CA_PTY_DLY(x) ((x) << S_CA_PTY_DLY)
+#define F_CA_PTY_DLY    V_CA_PTY_DLY(1U)
+
+#define S_ECC_MUX    8
+#define M_ECC_MUX    0x3U
+#define V_ECC_MUX(x) ((x) << S_ECC_MUX)
+#define G_ECC_MUX(x) (((x) >> S_ECC_MUX) & M_ECC_MUX)
+
+#define S_CE_THRESHOLD    0
+#define M_CE_THRESHOLD    0xffU
+#define V_CE_THRESHOLD(x) ((x) << S_CE_THRESHOLD)
+#define G_CE_THRESHOLD(x) (((x) >> S_CE_THRESHOLD) & M_CE_THRESHOLD)
+
+#define A_MC_UPCTL_PPCFG 0x40084
+#define A_MC_LMC_MCOPT2 0x40084
+
+#define S_SELF_REF_EN    31
+#define V_SELF_REF_EN(x) ((x) << S_SELF_REF_EN)
+#define F_SELF_REF_EN    V_SELF_REF_EN(1U)
+
+#define S_XSR_PREVENT    30
+#define V_XSR_PREVENT(x) ((x) << S_XSR_PREVENT)
+#define F_XSR_PREVENT    V_XSR_PREVENT(1U)
+
+#define S_INIT_START    29
+#define V_INIT_START(x) ((x) << S_INIT_START)
+#define F_INIT_START    V_INIT_START(1U)
+
+#define S_MC_ENABLE    28
+#define V_MC_ENABLE(x) ((x) << S_MC_ENABLE)
+#define F_MC_ENABLE    V_MC_ENABLE(1U)
+
+#define S_CLK_DISABLE    24
+#define M_CLK_DISABLE    0xfU
+#define V_CLK_DISABLE(x) ((x) << S_CLK_DISABLE)
+#define G_CLK_DISABLE(x) (((x) >> S_CLK_DISABLE) & M_CLK_DISABLE)
+
+#define S_RESET_RANK    20
+#define M_RESET_RANK    0xfU
+#define V_RESET_RANK(x) ((x) << S_RESET_RANK)
+#define G_RESET_RANK(x) (((x) >> S_RESET_RANK) & M_RESET_RANK)
+
+#define S_MCIF_COMP_PTY_EN    19
+#define V_MCIF_COMP_PTY_EN(x) ((x) << S_MCIF_COMP_PTY_EN)
+#define F_MCIF_COMP_PTY_EN    V_MCIF_COMP_PTY_EN(1U)
+
+#define S_CKE_OE    17
+#define V_CKE_OE(x) ((x) << S_CKE_OE)
+#define F_CKE_OE    V_CKE_OE(1U)
+
+#define S_RESET_OE    16
+#define V_RESET_OE(x) ((x) << S_RESET_OE)
+#define F_RESET_OE    V_RESET_OE(1U)
+
+#define S_DFI_PHYUD_CNTL    14
+#define V_DFI_PHYUD_CNTL(x) ((x) << S_DFI_PHYUD_CNTL)
+#define F_DFI_PHYUD_CNTL    V_DFI_PHYUD_CNTL(1U)
+
+#define S_DFI_PHYUD_ACK    13
+#define V_DFI_PHYUD_ACK(x) ((x) << S_DFI_PHYUD_ACK)
+#define F_DFI_PHYUD_ACK    V_DFI_PHYUD_ACK(1U)
+
+#define S_T6_DFI_INIT_START    12
+#define V_T6_DFI_INIT_START(x) ((x) << S_T6_DFI_INIT_START)
+#define F_T6_DFI_INIT_START    V_T6_DFI_INIT_START(1U)
+
+#define S_PM_ENABLE    8
+#define M_PM_ENABLE    0xfU
+#define V_PM_ENABLE(x) ((x) << S_PM_ENABLE)
+#define G_PM_ENABLE(x) (((x) >> S_PM_ENABLE) & M_PM_ENABLE)
+
+#define S_RD_DEFREF_CNT    4
+#define M_RD_DEFREF_CNT    0xfU
+#define V_RD_DEFREF_CNT(x) ((x) << S_RD_DEFREF_CNT)
+#define G_RD_DEFREF_CNT(x) (((x) >> S_RD_DEFREF_CNT) & M_RD_DEFREF_CNT)
+
+#define A_MC_UPCTL_MSTAT 0x40088
+
+#define S_SELF_REFRESH    2
+#define V_SELF_REFRESH(x) ((x) << S_SELF_REFRESH)
+#define F_SELF_REFRESH    V_SELF_REFRESH(1U)
+
+#define S_CLOCK_STOP    1
+#define V_CLOCK_STOP(x) ((x) << S_CLOCK_STOP)
+#define F_CLOCK_STOP    V_CLOCK_STOP(1U)
+
+#define A_MC_UPCTL_LPDDR2ZQCFG 0x4008c
+
+#define S_ZQCL_OP    24
+#define M_ZQCL_OP    0xffU
+#define V_ZQCL_OP(x) ((x) << S_ZQCL_OP)
+#define G_ZQCL_OP(x) (((x) >> S_ZQCL_OP) & M_ZQCL_OP)
+
+#define S_ZQCL_MA    16
+#define M_ZQCL_MA    0xffU
+#define V_ZQCL_MA(x) ((x) << S_ZQCL_MA)
+#define G_ZQCL_MA(x) (((x) >> S_ZQCL_MA) & M_ZQCL_MA)
+
+#define S_ZQCS_OP    8
+#define M_ZQCS_OP    0xffU
+#define V_ZQCS_OP(x) ((x) << S_ZQCS_OP)
+#define G_ZQCS_OP(x) (((x) >> S_ZQCS_OP) & M_ZQCS_OP)
+
+#define S_ZQCS_MA    0
+#define M_ZQCS_MA    0xffU
+#define V_ZQCS_MA(x) ((x) << S_ZQCS_MA)
+#define G_ZQCS_MA(x) (((x) >> S_ZQCS_MA) & M_ZQCS_MA)
+
+#define A_MC_UPCTL_DTUPDES 0x40094
+
+#define S_DTU_ERR_B7    7
+#define V_DTU_ERR_B7(x) ((x) << S_DTU_ERR_B7)
+#define F_DTU_ERR_B7    V_DTU_ERR_B7(1U)
+
+#define A_MC_UPCTL_DTUNA 0x40098
+#define A_MC_UPCTL_DTUNE 0x4009c
+#define A_MC_UPCTL_DTUPRD0 0x400a0
+#define A_MC_UPCTL_DTUPRD1 0x400a4
+#define A_MC_UPCTL_DTUPRD2 0x400a8
+#define A_MC_UPCTL_DTUPRD3 0x400ac
+#define A_MC_UPCTL_DTUAWDT 0x400b0
+#define A_MC_UPCTL_TOGCNT1U 0x400c0
+#define A_MC_UPCTL_TINIT 0x400c4
+#define A_MC_UPCTL_TRSTH 0x400c8
+#define A_MC_UPCTL_TOGCNT100N 0x400cc
+#define A_MC_UPCTL_TREFI 0x400d0
+#define A_MC_UPCTL_TMRD 0x400d4
+#define A_MC_UPCTL_TRFC 0x400d8
+
+#define S_T_RFC0    0
+#define M_T_RFC0    0x1ffU
+#define V_T_RFC0(x) ((x) << S_T_RFC0)
+#define G_T_RFC0(x) (((x) >> S_T_RFC0) & M_T_RFC0)
+
+#define A_MC_UPCTL_TRP 0x400dc
+
+#define S_PREA_EXTRA    16
+#define M_PREA_EXTRA    0x3U
+#define V_PREA_EXTRA(x) ((x) << S_PREA_EXTRA)
+#define G_PREA_EXTRA(x) (((x) >> S_PREA_EXTRA) & M_PREA_EXTRA)
+
+#define A_MC_UPCTL_TRTW 0x400e0
+
+#define S_T_RTW0    0
+#define M_T_RTW0    0xfU
+#define V_T_RTW0(x) ((x) << S_T_RTW0)
+#define G_T_RTW0(x) (((x) >> S_T_RTW0) & M_T_RTW0)
+
+#define A_MC_UPCTL_TAL 0x400e4
+#define A_MC_UPCTL_TCL 0x400e8
+#define A_MC_UPCTL_TCWL 0x400ec
+#define A_MC_UPCTL_TRAS 0x400f0
+#define A_MC_UPCTL_TRC 0x400f4
+#define A_MC_UPCTL_TRCD 0x400f8
+#define A_MC_UPCTL_TRRD 0x400fc
+#define A_MC_UPCTL_TRTP 0x40100
+
+#define S_T_RTP0    0
+#define M_T_RTP0    0xfU
+#define V_T_RTP0(x) ((x) << S_T_RTP0)
+#define G_T_RTP0(x) (((x) >> S_T_RTP0) & M_T_RTP0)
+
+#define A_MC_LMC_CFGR0 0x40100
+
+#define S_ROW_WIDTH    12
+#define M_ROW_WIDTH    0x7U
+#define V_ROW_WIDTH(x) ((x) << S_ROW_WIDTH)
+#define G_ROW_WIDTH(x) (((x) >> S_ROW_WIDTH) & M_ROW_WIDTH)
+
+#define S_ADDR_MODE    8
+#define M_ADDR_MODE    0xfU
+#define V_ADDR_MODE(x) ((x) << S_ADDR_MODE)
+#define G_ADDR_MODE(x) (((x) >> S_ADDR_MODE) & M_ADDR_MODE)
+
+#define S_MIRROR    4
+#define V_MIRROR(x) ((x) << S_MIRROR)
+#define F_MIRROR    V_MIRROR(1U)
+
+#define S_RANK_ENABLE    0
+#define V_RANK_ENABLE(x) ((x) << S_RANK_ENABLE)
+#define F_RANK_ENABLE    V_RANK_ENABLE(1U)
+
+#define A_MC_UPCTL_TWR 0x40104
+
+#define S_U_T_WR    0
+#define M_U_T_WR    0x1fU
+#define V_U_T_WR(x) ((x) << S_U_T_WR)
+#define G_U_T_WR(x) (((x) >> S_U_T_WR) & M_U_T_WR)
+
+#define A_MC_UPCTL_TWTR 0x40108
+
+#define S_T_WTR0    0
+#define M_T_WTR0    0xfU
+#define V_T_WTR0(x) ((x) << S_T_WTR0)
+#define G_T_WTR0(x) (((x) >> S_T_WTR0) & M_T_WTR0)
+
+#define A_MC_UPCTL_TEXSR 0x4010c
+#define A_MC_UPCTL_TXP 0x40110
+#define A_MC_UPCTL_TXPDLL 0x40114
+#define A_MC_UPCTL_TZQCS 0x40118
+#define A_MC_UPCTL_TZQCSI 0x4011c
+#define A_MC_UPCTL_TDQS 0x40120
+#define A_MC_UPCTL_TCKSRE 0x40124
+
+#define S_T_CKSRE0    0
+#define M_T_CKSRE0    0x1fU
+#define V_T_CKSRE0(x) ((x) << S_T_CKSRE0)
+#define G_T_CKSRE0(x) (((x) >> S_T_CKSRE0) & M_T_CKSRE0)
+
+#define A_MC_UPCTL_TCKSRX 0x40128
+
+#define S_T_CKSRX0    0
+#define M_T_CKSRX0    0x1fU
+#define V_T_CKSRX0(x) ((x) << S_T_CKSRX0)
+#define G_T_CKSRX0(x) (((x) >> S_T_CKSRX0) & M_T_CKSRX0)
+
+#define A_MC_UPCTL_TCKE 0x4012c
+#define A_MC_UPCTL_TMOD 0x40130
+
+#define S_T_MOD0    0
+#define M_T_MOD0    0x1fU
+#define V_T_MOD0(x) ((x) << S_T_MOD0)
+#define G_T_MOD0(x) (((x) >> S_T_MOD0) & M_T_MOD0)
+
+#define A_MC_UPCTL_TRSTL 0x40134
+
+#define S_T_RSTL    0
+#define M_T_RSTL    0x7fU
+#define V_T_RSTL(x) ((x) << S_T_RSTL)
+#define G_T_RSTL(x) (((x) >> S_T_RSTL) & M_T_RSTL)
+
+#define A_MC_UPCTL_TZQCL 0x40138
+#define A_MC_UPCTL_TMRR 0x4013c
+
+#define S_T_MRR    0
+#define M_T_MRR    0xffU
+#define V_T_MRR(x) ((x) << S_T_MRR)
+#define G_T_MRR(x) (((x) >> S_T_MRR) & M_T_MRR)
+
+#define A_MC_UPCTL_TCKESR 0x40140
+
+#define S_T_CKESR    0
+#define M_T_CKESR    0xfU
+#define V_T_CKESR(x) ((x) << S_T_CKESR)
+#define G_T_CKESR(x) (((x) >> S_T_CKESR) & M_T_CKESR)
+
+#define A_MC_LMC_INITSEQ0 0x40140
+
+#define S_INIT_ENABLE    31
+#define V_INIT_ENABLE(x) ((x) << S_INIT_ENABLE)
+#define F_INIT_ENABLE    V_INIT_ENABLE(1U)
+
+#define S_WAIT    16
+#define CXGBE_M_WAIT    0xfffU
+#define CXGBE_V_WAIT(x) ((x) << S_WAIT)
+#define G_WAIT(x) (((x) >> S_WAIT) & CXGBE_M_WAIT)
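
[Editor's note: observe the CXGBE_ prefix on two of these four macros. M_WAIT and V_WAIT collide with identically named macros elsewhere in the kernel headers, so the generator output was presumably renamed by hand here while S_WAIT and G_WAIT kept the stock names. Usage is unchanged apart from the spelling:]

    uint32_t seq = CXGBE_V_WAIT(0x100);   /* wait count into bits 27:16 */
    uint32_t cnt = G_WAIT(seq);           /* reads back 0x100 */
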
+
+#define S_EN_MULTI_RANK_SEL    4
+#define V_EN_MULTI_RANK_SEL(x) ((x) << S_EN_MULTI_RANK_SEL)
+#define F_EN_MULTI_RANK_SEL    V_EN_MULTI_RANK_SEL(1U)
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_UPCTL_TDPD 0x40144
+
+#define S_T_DPD    0
+#define M_T_DPD    0x3ffU
+#define V_T_DPD(x) ((x) << S_T_DPD)
+#define G_T_DPD(x) (((x) >> S_T_DPD) & M_T_DPD)
+
+#define A_MC_LMC_CMD0 0x40144
+
+#define S_CMD    29
+#define M_CMD    0x7U
+#define V_CMD(x) ((x) << S_CMD)
+#define G_CMD(x) (((x) >> S_CMD) & M_CMD)
+
+#define S_CMD_ACTN    28
+#define V_CMD_ACTN(x) ((x) << S_CMD_ACTN)
+#define F_CMD_ACTN    V_CMD_ACTN(1U)
+
+#define S_BG1    23
+#define V_BG1(x) ((x) << S_BG1)
+#define F_BG1    V_BG1(1U)
+
+#define S_BANK    20
+#define M_BANK    0x7U
+#define V_BANK(x) ((x) << S_BANK)
+#define G_BANK(x) (((x) >> S_BANK) & M_BANK)
+
+#define A_MC_LMC_INITSEQ1 0x40148
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
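
[Editor's note: each of the sixteen INITSEQ registers carries this same RANK field, and the generator re-emits the identical S_/M_/V_/G_ block after every one below. C permits a macro to be redefined with an identical replacement list, so the repeats compile cleanly without #undef, and any one copy decodes every INITSEQ word:]

    uint32_t rank = G_T6_RANK(v);   /* works for INITSEQ0..INITSEQ15 alike */
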
+
+#define A_MC_LMC_CMD1 0x4014c
+#define A_MC_LMC_INITSEQ2 0x40150
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD2 0x40154
+#define A_MC_LMC_INITSEQ3 0x40158
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD3 0x4015c
+#define A_MC_LMC_INITSEQ4 0x40160
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD4 0x40164
+#define A_MC_LMC_INITSEQ5 0x40168
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD5 0x4016c
+#define A_MC_LMC_INITSEQ6 0x40170
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD6 0x40174
+#define A_MC_LMC_INITSEQ7 0x40178
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD7 0x4017c
+#define A_MC_UPCTL_ECCCFG 0x40180
+#define A_MC_LMC_INITSEQ8 0x40180
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_UPCTL_ECCTST 0x40184
+
+#define S_ECC_TEST_MASK0    0
+#define M_ECC_TEST_MASK0    0x7fU
+#define V_ECC_TEST_MASK0(x) ((x) << S_ECC_TEST_MASK0)
+#define G_ECC_TEST_MASK0(x) (((x) >> S_ECC_TEST_MASK0) & M_ECC_TEST_MASK0)
+
+#define A_MC_LMC_CMD8 0x40184
+#define A_MC_UPCTL_ECCCLR 0x40188
+#define A_MC_LMC_INITSEQ9 0x40188
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_UPCTL_ECCLOG 0x4018c
+#define A_MC_LMC_CMD9 0x4018c
+#define A_MC_LMC_INITSEQ10 0x40190
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD10 0x40194
+#define A_MC_LMC_INITSEQ11 0x40198
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD11 0x4019c
+#define A_MC_LMC_INITSEQ12 0x401a0
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD12 0x401a4
+#define A_MC_LMC_INITSEQ13 0x401a8
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD13 0x401ac
+#define A_MC_LMC_INITSEQ14 0x401b0
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD14 0x401b4
+#define A_MC_LMC_INITSEQ15 0x401b8
+
+#define S_T6_RANK    0
+#define M_T6_RANK    0xfU
+#define V_T6_RANK(x) ((x) << S_T6_RANK)
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
+
+#define A_MC_LMC_CMD15 0x401bc
+#define A_MC_UPCTL_DTUWACTL 0x40200
+
+#define S_DTU_WR_ROW0    13
+#define M_DTU_WR_ROW0    0xffffU
+#define V_DTU_WR_ROW0(x) ((x) << S_DTU_WR_ROW0)
+#define G_DTU_WR_ROW0(x) (((x) >> S_DTU_WR_ROW0) & M_DTU_WR_ROW0)
+
+#define A_MC_LMC_SDTR0 0x40200
+
+#define S_REFI    16
+#define M_REFI    0xffffU
+#define V_REFI(x) ((x) << S_REFI)
+#define G_REFI(x) (((x) >> S_REFI) & M_REFI)
+
+#define S_T_RFC_XPR    0
+#define M_T_RFC_XPR    0xfffU
+#define V_T_RFC_XPR(x) ((x) << S_T_RFC_XPR)
+#define G_T_RFC_XPR(x) (((x) >> S_T_RFC_XPR) & M_T_RFC_XPR)
+
+#define A_MC_UPCTL_DTURACTL 0x40204
+
+#define S_DTU_RD_ROW0    13
+#define M_DTU_RD_ROW0    0xffffU
+#define V_DTU_RD_ROW0(x) ((x) << S_DTU_RD_ROW0)
+#define G_DTU_RD_ROW0(x) (((x) >> S_DTU_RD_ROW0) & M_DTU_RD_ROW0)
+
+#define A_MC_LMC_SDTR1 0x40204
+
+#define S_T_LEADOFF    31
+#define V_T_LEADOFF(x) ((x) << S_T_LEADOFF)
+#define F_T_LEADOFF    V_T_LEADOFF(1U)
+
+#define S_ODT_DELAY    30
+#define V_ODT_DELAY(x) ((x) << S_ODT_DELAY)
+#define F_ODT_DELAY    V_ODT_DELAY(1U)
+
+#define S_ODT_WIDTH    29
+#define V_ODT_WIDTH(x) ((x) << S_ODT_WIDTH)
+#define F_ODT_WIDTH    V_ODT_WIDTH(1U)
+
+#define S_T_WTRO    24
+#define M_T_WTRO    0xfU
+#define V_T_WTRO(x) ((x) << S_T_WTRO)
+#define G_T_WTRO(x) (((x) >> S_T_WTRO) & M_T_WTRO)
+
+#define S_T_RTWO    16
+#define M_T_RTWO    0xfU
+#define V_T_RTWO(x) ((x) << S_T_RTWO)
+#define G_T_RTWO(x) (((x) >> S_T_RTWO) & M_T_RTWO)
+
+#define S_T_RTW_ADJ    12
+#define M_T_RTW_ADJ    0xfU
+#define V_T_RTW_ADJ(x) ((x) << S_T_RTW_ADJ)
+#define G_T_RTW_ADJ(x) (((x) >> S_T_RTW_ADJ) & M_T_RTW_ADJ)
+
+#define S_T_WTWO    8
+#define M_T_WTWO    0xfU
+#define V_T_WTWO(x) ((x) << S_T_WTWO)
+#define G_T_WTWO(x) (((x) >> S_T_WTWO) & M_T_WTWO)
+
+#define S_T_RTRO    0
+#define M_T_RTRO    0xfU
+#define V_T_RTRO(x) ((x) << S_T_RTRO)
+#define G_T_RTRO(x) (((x) >> S_T_RTRO) & M_T_RTRO)
+
+#define A_MC_UPCTL_DTUCFG 0x40208
+#define A_MC_LMC_SDTR2 0x40208
+
+#define S_T6_T_CWL    28
+#define M_T6_T_CWL    0xfU
+#define V_T6_T_CWL(x) ((x) << S_T6_T_CWL)
+#define G_T6_T_CWL(x) (((x) >> S_T6_T_CWL) & M_T6_T_CWL)
+
+#define S_T_RCD0    24
+#define M_T_RCD0    0xfU
+#define V_T_RCD0(x) ((x) << S_T_RCD0)
+#define G_T_RCD0(x) (((x) >> S_T_RCD0) & M_T_RCD0)
+
+#define S_T_PL    20
+#define M_T_PL    0xfU
+#define V_T_PL(x) ((x) << S_T_PL)
+#define G_T_PL(x) (((x) >> S_T_PL) & M_T_PL)
+
+#define S_T_RP0    16
+#define M_T_RP0    0xfU
+#define V_T_RP0(x) ((x) << S_T_RP0)
+#define G_T_RP0(x) (((x) >> S_T_RP0) & M_T_RP0)
+
+#define S_T_RP1    15
+#define V_T_RP1(x) ((x) << S_T_RP1)
+#define F_T_RP1    V_T_RP1(1U)
+
+#define S_T_RCD1    14
+#define V_T_RCD1(x) ((x) << S_T_RCD1)
+#define F_T_RCD1    V_T_RCD1(1U)
+
+#define S_T6_T_RC    8
+#define M_T6_T_RC    0x3fU
+#define V_T6_T_RC(x) ((x) << S_T6_T_RC)
+#define G_T6_T_RC(x) (((x) >> S_T6_T_RC) & M_T6_T_RC)
+
+#define A_MC_UPCTL_DTUECTL 0x4020c
+#define A_MC_LMC_SDTR3 0x4020c
+
+#define S_T_WTR_S    28
+#define M_T_WTR_S    0xfU
+#define V_T_WTR_S(x) ((x) << S_T_WTR_S)
+#define G_T_WTR_S(x) (((x) >> S_T_WTR_S) & M_T_WTR_S)
+
+#define S_T6_T_WTR    24
+#define M_T6_T_WTR    0xfU
+#define V_T6_T_WTR(x) ((x) << S_T6_T_WTR)
+#define G_T6_T_WTR(x) (((x) >> S_T6_T_WTR) & M_T6_T_WTR)
+
+#define S_FAW_ADJ    20
+#define M_FAW_ADJ    0x3U
+#define V_FAW_ADJ(x) ((x) << S_FAW_ADJ)
+#define G_FAW_ADJ(x) (((x) >> S_FAW_ADJ) & M_FAW_ADJ)
+
+#define S_T6_T_RTP    16
+#define M_T6_T_RTP    0xfU
+#define V_T6_T_RTP(x) ((x) << S_T6_T_RTP)
+#define G_T6_T_RTP(x) (((x) >> S_T6_T_RTP) & M_T6_T_RTP)
+
+#define S_T_RRD_L    12
+#define M_T_RRD_L    0xfU
+#define V_T_RRD_L(x) ((x) << S_T_RRD_L)
+#define G_T_RRD_L(x) (((x) >> S_T_RRD_L) & M_T_RRD_L)
+
+#define S_T6_T_RRD    8
+#define M_T6_T_RRD    0xfU
+#define V_T6_T_RRD(x) ((x) << S_T6_T_RRD)
+#define G_T6_T_RRD(x) (((x) >> S_T6_T_RRD) & M_T6_T_RRD)
+
+#define S_T_XSDLL    0
+#define M_T_XSDLL    0xffU
+#define V_T_XSDLL(x) ((x) << S_T_XSDLL)
+#define G_T_XSDLL(x) (((x) >> S_T_XSDLL) & M_T_XSDLL)
+
+#define A_MC_UPCTL_DTUWD0 0x40210
+#define A_MC_LMC_SDTR4 0x40210
+
+#define S_T_RDDATA_EN    24
+#define M_T_RDDATA_EN    0x7fU
+#define V_T_RDDATA_EN(x) ((x) << S_T_RDDATA_EN)
+#define G_T_RDDATA_EN(x) (((x) >> S_T_RDDATA_EN) & M_T_RDDATA_EN)
+
+#define S_T_SYS_RDLAT    16
+#define M_T_SYS_RDLAT    0x3fU
+#define V_T_SYS_RDLAT(x) ((x) << S_T_SYS_RDLAT)
+#define G_T_SYS_RDLAT(x) (((x) >> S_T_SYS_RDLAT) & M_T_SYS_RDLAT)
+
+#define S_T_CCD_L    12
+#define M_T_CCD_L    0xfU
+#define V_T_CCD_L(x) ((x) << S_T_CCD_L)
+#define G_T_CCD_L(x) (((x) >> S_T_CCD_L) & M_T_CCD_L)
+
+#define S_T_CCD    8
+#define M_T_CCD    0x7U
+#define V_T_CCD(x) ((x) << S_T_CCD)
+#define G_T_CCD(x) (((x) >> S_T_CCD) & M_T_CCD)
+
+#define S_T_CPDED    5
+#define M_T_CPDED    0x7U
+#define V_T_CPDED(x) ((x) << S_T_CPDED)
+#define G_T_CPDED(x) (((x) >> S_T_CPDED) & M_T_CPDED)
+
+#define S_T6_T_MOD    0
+#define M_T6_T_MOD    0x1fU
+#define V_T6_T_MOD(x) ((x) << S_T6_T_MOD)
+#define G_T6_T_MOD(x) (((x) >> S_T6_T_MOD) & M_T6_T_MOD)
+
+#define A_MC_UPCTL_DTUWD1 0x40214
+#define A_MC_LMC_SDTR5 0x40214
+
+#define S_T_PHY_WRDATA    24
+#define M_T_PHY_WRDATA    0x7U
+#define V_T_PHY_WRDATA(x) ((x) << S_T_PHY_WRDATA)
+#define G_T_PHY_WRDATA(x) (((x) >> S_T_PHY_WRDATA) & M_T_PHY_WRDATA)
+
+#define S_T_PHY_WRLAT    16
+#define M_T_PHY_WRLAT    0x1fU
+#define V_T_PHY_WRLAT(x) ((x) << S_T_PHY_WRLAT)
+#define G_T_PHY_WRLAT(x) (((x) >> S_T_PHY_WRLAT) & M_T_PHY_WRLAT)
+
+#define A_MC_UPCTL_DTUWD2 0x40218
+#define A_MC_UPCTL_DTUWD3 0x4021c
+#define A_MC_UPCTL_DTUWDM 0x40220
+#define A_MC_UPCTL_DTURD0 0x40224
+#define A_MC_UPCTL_DTURD1 0x40228
+#define A_MC_LMC_DBG0 0x40228
+
+#define S_T_SYS_RDLAT_DBG    16
+#define M_T_SYS_RDLAT_DBG    0x1fU
+#define V_T_SYS_RDLAT_DBG(x) ((x) << S_T_SYS_RDLAT_DBG)
+#define G_T_SYS_RDLAT_DBG(x) (((x) >> S_T_SYS_RDLAT_DBG) & M_T_SYS_RDLAT_DBG)
+
+#define A_MC_UPCTL_DTURD2 0x4022c
+#define A_MC_UPCTL_DTURD3 0x40230
+#define A_MC_UPCTL_DTULFSRWD 0x40234
+#define A_MC_UPCTL_DTULFSRRD 0x40238
+#define A_MC_UPCTL_DTUEAF 0x4023c
+
+#define S_EA_ROW0    13
+#define M_EA_ROW0    0xffffU
+#define V_EA_ROW0(x) ((x) << S_EA_ROW0)
+#define G_EA_ROW0(x) (((x) >> S_EA_ROW0) & M_EA_ROW0)
+
+#define A_MC_UPCTL_DFITCTRLDELAY 0x40240
+
+#define S_TCTRL_DELAY    0
+#define M_TCTRL_DELAY    0xfU
+#define V_TCTRL_DELAY(x) ((x) << S_TCTRL_DELAY)
+#define G_TCTRL_DELAY(x) (((x) >> S_TCTRL_DELAY) & M_TCTRL_DELAY)
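+/*
+ * This and the following A_MC_UPCTL_DFI* registers appear to expose
+ * DFI (DDR PHY Interface) handshake and timing parameters --
+ * tctrl_delay, tphy_wrlat, trddata_en, update and training requests
+ * and the like -- between the memory controller and the DDR PHY.
+ */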
+
+#define A_MC_LMC_SMR0 0x40240
+
+#define S_SMR0_RFU0    13
+#define M_SMR0_RFU0    0x7U
+#define V_SMR0_RFU0(x) ((x) << S_SMR0_RFU0)
+#define G_SMR0_RFU0(x) (((x) >> S_SMR0_RFU0) & M_SMR0_RFU0)
+
+#define S_PPD    12
+#define V_PPD(x) ((x) << S_PPD)
+#define F_PPD    V_PPD(1U)
+
+#define S_WR_RTP    9
+#define M_WR_RTP    0x7U
+#define V_WR_RTP(x) ((x) << S_WR_RTP)
+#define G_WR_RTP(x) (((x) >> S_WR_RTP) & M_WR_RTP)
+
+#define S_SMR0_DLL    8
+#define V_SMR0_DLL(x) ((x) << S_SMR0_DLL)
+#define F_SMR0_DLL    V_SMR0_DLL(1U)
+
+#define S_TM    7
+#define V_TM(x) ((x) << S_TM)
+#define F_TM    V_TM(1U)
+
+#define S_CL31    4
+#define M_CL31    0x7U
+#define V_CL31(x) ((x) << S_CL31)
+#define G_CL31(x) (((x) >> S_CL31) & M_CL31)
+
+#define S_RBT    3
+#define V_RBT(x) ((x) << S_RBT)
+#define F_RBT    V_RBT(1U)
+
+#define S_CL0    2
+#define V_CL0(x) ((x) << S_CL0)
+#define F_CL0    V_CL0(1U)
+
+#define S_BL    0
+#define M_BL    0x3U
+#define V_BL(x) ((x) << S_BL)
+#define G_BL(x) (((x) >> S_BL) & M_BL)
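+/*
+ * The A_MC_LMC_SMR0..SMR6 fields above and below (BL, CL, AL, CWL,
+ * RTT_NOM, PASR, ...) appear to shadow the JEDEC DDR mode registers
+ * MR0..MR6, so values written here presumably mirror what is sent to
+ * the DRAM during mode-register-set commands.
+ */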
+
+#define A_MC_UPCTL_DFIODTCFG 0x40244
+
+#define S_RANK3_ODT_WRITE_NSEL    26
+#define V_RANK3_ODT_WRITE_NSEL(x) ((x) << S_RANK3_ODT_WRITE_NSEL)
+#define F_RANK3_ODT_WRITE_NSEL    V_RANK3_ODT_WRITE_NSEL(1U)
+
+#define A_MC_LMC_SMR1 0x40244
+
+#define S_QOFF    12
+#define V_QOFF(x) ((x) << S_QOFF)
+#define F_QOFF    V_QOFF(1U)
+
+#define S_TDQS    11
+#define V_TDQS(x) ((x) << S_TDQS)
+#define F_TDQS    V_TDQS(1U)
+
+#define S_SMR1_RFU0    10
+#define V_SMR1_RFU0(x) ((x) << S_SMR1_RFU0)
+#define F_SMR1_RFU0    V_SMR1_RFU0(1U)
+
+#define S_RTT_NOM0    9
+#define V_RTT_NOM0(x) ((x) << S_RTT_NOM0)
+#define F_RTT_NOM0    V_RTT_NOM0(1U)
+
+#define S_SMR1_RFU1    8
+#define V_SMR1_RFU1(x) ((x) << S_SMR1_RFU1)
+#define F_SMR1_RFU1    V_SMR1_RFU1(1U)
+
+#define S_WR_LEVEL    7
+#define V_WR_LEVEL(x) ((x) << S_WR_LEVEL)
+#define F_WR_LEVEL    V_WR_LEVEL(1U)
+
+#define S_RTT_NOM1    6
+#define V_RTT_NOM1(x) ((x) << S_RTT_NOM1)
+#define F_RTT_NOM1    V_RTT_NOM1(1U)
+
+#define S_DIC0    5
+#define V_DIC0(x) ((x) << S_DIC0)
+#define F_DIC0    V_DIC0(1U)
+
+#define S_AL    3
+#define M_AL    0x3U
+#define V_AL(x) ((x) << S_AL)
+#define G_AL(x) (((x) >> S_AL) & M_AL)
+
+#define S_RTT_NOM2    2
+#define V_RTT_NOM2(x) ((x) << S_RTT_NOM2)
+#define F_RTT_NOM2    V_RTT_NOM2(1U)
+
+#define S_DIC1    1
+#define V_DIC1(x) ((x) << S_DIC1)
+#define F_DIC1    V_DIC1(1U)
+
+#define S_SMR1_DLL    0
+#define V_SMR1_DLL(x) ((x) << S_SMR1_DLL)
+#define F_SMR1_DLL    V_SMR1_DLL(1U)
+
+#define A_MC_UPCTL_DFIODTCFG1 0x40248
+
+#define S_ODT_LEN_BL8_R    24
+#define M_ODT_LEN_BL8_R    0x7U
+#define V_ODT_LEN_BL8_R(x) ((x) << S_ODT_LEN_BL8_R)
+#define G_ODT_LEN_BL8_R(x) (((x) >> S_ODT_LEN_BL8_R) & M_ODT_LEN_BL8_R)
+
+#define S_ODT_LEN_BL8_W    16
+#define M_ODT_LEN_BL8_W    0x7U
+#define V_ODT_LEN_BL8_W(x) ((x) << S_ODT_LEN_BL8_W)
+#define G_ODT_LEN_BL8_W(x) (((x) >> S_ODT_LEN_BL8_W) & M_ODT_LEN_BL8_W)
+
+#define S_ODT_LAT_R    8
+#define M_ODT_LAT_R    0x1fU
+#define V_ODT_LAT_R(x) ((x) << S_ODT_LAT_R)
+#define G_ODT_LAT_R(x) (((x) >> S_ODT_LAT_R) & M_ODT_LAT_R)
+
+#define S_ODT_LAT_W    0
+#define M_ODT_LAT_W    0x1fU
+#define V_ODT_LAT_W(x) ((x) << S_ODT_LAT_W)
+#define G_ODT_LAT_W(x) (((x) >> S_ODT_LAT_W) & M_ODT_LAT_W)
+
+#define A_MC_LMC_SMR2 0x40248
+
+#define S_WR_CRC    12
+#define V_WR_CRC(x) ((x) << S_WR_CRC)
+#define F_WR_CRC    V_WR_CRC(1U)
+
+#define S_RD_CRC    11
+#define V_RD_CRC(x) ((x) << S_RD_CRC)
+#define F_RD_CRC    V_RD_CRC(1U)
+
+#define S_RTT_WR    9
+#define M_RTT_WR    0x3U
+#define V_RTT_WR(x) ((x) << S_RTT_WR)
+#define G_RTT_WR(x) (((x) >> S_RTT_WR) & M_RTT_WR)
+
+#define S_SMR2_RFU0    8
+#define V_SMR2_RFU0(x) ((x) << S_SMR2_RFU0)
+#define F_SMR2_RFU0    V_SMR2_RFU0(1U)
+
+#define S_SRT_ASR1    7
+#define V_SRT_ASR1(x) ((x) << S_SRT_ASR1)
+#define F_SRT_ASR1    V_SRT_ASR1(1U)
+
+#define S_ASR0    6
+#define V_ASR0(x) ((x) << S_ASR0)
+#define F_ASR0    V_ASR0(1U)
+
+#define S_CWL    3
+#define M_CWL    0x7U
+#define V_CWL(x) ((x) << S_CWL)
+#define G_CWL(x) (((x) >> S_CWL) & M_CWL)
+
+#define S_PASR    0
+#define M_PASR    0x7U
+#define V_PASR(x) ((x) << S_PASR)
+#define G_PASR(x) (((x) >> S_PASR) & M_PASR)
+
+#define A_MC_UPCTL_DFIODTRANKMAP 0x4024c
+
+#define S_ODT_RANK_MAP3    12
+#define M_ODT_RANK_MAP3    0xfU
+#define V_ODT_RANK_MAP3(x) ((x) << S_ODT_RANK_MAP3)
+#define G_ODT_RANK_MAP3(x) (((x) >> S_ODT_RANK_MAP3) & M_ODT_RANK_MAP3)
+
+#define S_ODT_RANK_MAP2    8
+#define M_ODT_RANK_MAP2    0xfU
+#define V_ODT_RANK_MAP2(x) ((x) << S_ODT_RANK_MAP2)
+#define G_ODT_RANK_MAP2(x) (((x) >> S_ODT_RANK_MAP2) & M_ODT_RANK_MAP2)
+
+#define S_ODT_RANK_MAP1    4
+#define M_ODT_RANK_MAP1    0xfU
+#define V_ODT_RANK_MAP1(x) ((x) << S_ODT_RANK_MAP1)
+#define G_ODT_RANK_MAP1(x) (((x) >> S_ODT_RANK_MAP1) & M_ODT_RANK_MAP1)
+
+#define S_ODT_RANK_MAP0    0
+#define M_ODT_RANK_MAP0    0xfU
+#define V_ODT_RANK_MAP0(x) ((x) << S_ODT_RANK_MAP0)
+#define G_ODT_RANK_MAP0(x) (((x) >> S_ODT_RANK_MAP0) & M_ODT_RANK_MAP0)
+
+#define A_MC_LMC_SMR3 0x4024c
+
+#define S_MPR_RD_FMT    11
+#define M_MPR_RD_FMT    0x3U
+#define V_MPR_RD_FMT(x) ((x) << S_MPR_RD_FMT)
+#define G_MPR_RD_FMT(x) (((x) >> S_MPR_RD_FMT) & M_MPR_RD_FMT)
+
+#define S_SMR3_RFU0    9
+#define M_SMR3_RFU0    0x3U
+#define V_SMR3_RFU0(x) ((x) << S_SMR3_RFU0)
+#define G_SMR3_RFU0(x) (((x) >> S_SMR3_RFU0) & M_SMR3_RFU0)
+
+#define S_FGR_MODE    6
+#define M_FGR_MODE    0x7U
+#define V_FGR_MODE(x) ((x) << S_FGR_MODE)
+#define G_FGR_MODE(x) (((x) >> S_FGR_MODE) & M_FGR_MODE)
+
+#define S_MRS_RDO    5
+#define V_MRS_RDO(x) ((x) << S_MRS_RDO)
+#define F_MRS_RDO    V_MRS_RDO(1U)
+
+#define S_DRAM_ADR    4
+#define V_DRAM_ADR(x) ((x) << S_DRAM_ADR)
+#define F_DRAM_ADR    V_DRAM_ADR(1U)
+
+#define S_GD_MODE    3
+#define V_GD_MODE(x) ((x) << S_GD_MODE)
+#define F_GD_MODE    V_GD_MODE(1U)
+
+#define S_MPR    2
+#define V_MPR(x) ((x) << S_MPR)
+#define F_MPR    V_MPR(1U)
+
+#define S_MPR_SEL    0
+#define M_MPR_SEL    0x3U
+#define V_MPR_SEL(x) ((x) << S_MPR_SEL)
+#define G_MPR_SEL(x) (((x) >> S_MPR_SEL) & M_MPR_SEL)
+
+#define A_MC_UPCTL_DFITPHYWRDATA 0x40250
+
+#define S_TPHY_WRDATA    0
+#define M_TPHY_WRDATA    0x1fU
+#define V_TPHY_WRDATA(x) ((x) << S_TPHY_WRDATA)
+#define G_TPHY_WRDATA(x) (((x) >> S_TPHY_WRDATA) & M_TPHY_WRDATA)
+
+#define A_MC_LMC_SMR4 0x40250
+
+#define S_WR_PRE    12
+#define V_WR_PRE(x) ((x) << S_WR_PRE)
+#define F_WR_PRE    V_WR_PRE(1U)
+
+#define S_RD_PRE    11
+#define V_RD_PRE(x) ((x) << S_RD_PRE)
+#define F_RD_PRE    V_RD_PRE(1U)
+
+#define S_RPT_MODE    10
+#define V_RPT_MODE(x) ((x) << S_RPT_MODE)
+#define F_RPT_MODE    V_RPT_MODE(1U)
+
+#define S_FESR_MODE    9
+#define V_FESR_MODE(x) ((x) << S_FESR_MODE)
+#define F_FESR_MODE    V_FESR_MODE(1U)
+
+#define S_CS_LAT_MODE    6
+#define M_CS_LAT_MODE    0x7U
+#define V_CS_LAT_MODE(x) ((x) << S_CS_LAT_MODE)
+#define G_CS_LAT_MODE(x) (((x) >> S_CS_LAT_MODE) & M_CS_LAT_MODE)
+
+#define S_ALERT_STAT    5
+#define V_ALERT_STAT(x) ((x) << S_ALERT_STAT)
+#define F_ALERT_STAT    V_ALERT_STAT(1U)
+
+#define S_IVM_MODE    4
+#define V_IVM_MODE(x) ((x) << S_IVM_MODE)
+#define F_IVM_MODE    V_IVM_MODE(1U)
+
+#define S_TCR_MODE    3
+#define V_TCR_MODE(x) ((x) << S_TCR_MODE)
+#define F_TCR_MODE    V_TCR_MODE(1U)
+
+#define S_TCR_RANGE    2
+#define V_TCR_RANGE(x) ((x) << S_TCR_RANGE)
+#define F_TCR_RANGE    V_TCR_RANGE(1U)
+
+#define S_MPD_MODE    1
+#define V_MPD_MODE(x) ((x) << S_MPD_MODE)
+#define F_MPD_MODE    V_MPD_MODE(1U)
+
+#define S_SMR4_RFU    0
+#define V_SMR4_RFU(x) ((x) << S_SMR4_RFU)
+#define F_SMR4_RFU    V_SMR4_RFU(1U)
+
+#define A_MC_UPCTL_DFITPHYWRLAT 0x40254
+
+#define S_TPHY_WRLAT    0
+#define M_TPHY_WRLAT    0x1fU
+#define V_TPHY_WRLAT(x) ((x) << S_TPHY_WRLAT)
+#define G_TPHY_WRLAT(x) (((x) >> S_TPHY_WRLAT) & M_TPHY_WRLAT)
+
+#define A_MC_LMC_SMR5 0x40254
+
+#define S_RD_DBI    11
+#define V_RD_DBI(x) ((x) << S_RD_DBI)
+#define F_RD_DBI    V_RD_DBI(1U)
+
+#define S_WR_DBI    10
+#define V_WR_DBI(x) ((x) << S_WR_DBI)
+#define F_WR_DBI    V_WR_DBI(1U)
+
+#define S_DM_MODE    9
+#define V_DM_MODE(x) ((x) << S_DM_MODE)
+#define F_DM_MODE    V_DM_MODE(1U)
+
+#define S_RTT_PARK    6
+#define M_RTT_PARK    0x7U
+#define V_RTT_PARK(x) ((x) << S_RTT_PARK)
+#define G_RTT_PARK(x) (((x) >> S_RTT_PARK) & M_RTT_PARK)
+
+#define S_SMR5_RFU    5
+#define V_SMR5_RFU(x) ((x) << S_SMR5_RFU)
+#define F_SMR5_RFU    V_SMR5_RFU(1U)
+
+#define S_PAR_ERR_STAT    4
+#define V_PAR_ERR_STAT(x) ((x) << S_PAR_ERR_STAT)
+#define F_PAR_ERR_STAT    V_PAR_ERR_STAT(1U)
+
+#define S_CRC_CLEAR    3
+#define V_CRC_CLEAR(x) ((x) << S_CRC_CLEAR)
+#define F_CRC_CLEAR    V_CRC_CLEAR(1U)
+
+#define S_PAR_LAT_MODE    0
+#define M_PAR_LAT_MODE    0x7U
+#define V_PAR_LAT_MODE(x) ((x) << S_PAR_LAT_MODE)
+#define G_PAR_LAT_MODE(x) (((x) >> S_PAR_LAT_MODE) & M_PAR_LAT_MODE)
+
+#define A_MC_LMC_SMR6 0x40258
+
+#define S_TCCD_L    10
+#define M_TCCD_L    0x7U
+#define V_TCCD_L(x) ((x) << S_TCCD_L)
+#define G_TCCD_L(x) (((x) >> S_TCCD_L) & M_TCCD_L)
+
+#define S_SMR6_RFU    7
+#define M_SMR6_RFU    0x7U
+#define V_SMR6_RFU(x) ((x) << S_SMR6_RFU)
+#define G_SMR6_RFU(x) (((x) >> S_SMR6_RFU) & M_SMR6_RFU)
+
+#define S_VREF_DQ_RANGE    6
+#define V_VREF_DQ_RANGE(x) ((x) << S_VREF_DQ_RANGE)
+#define F_VREF_DQ_RANGE    V_VREF_DQ_RANGE(1U)
+
+#define S_VREF_DQ_VALUE    0
+#define M_VREF_DQ_VALUE    0x3fU
+#define V_VREF_DQ_VALUE(x) ((x) << S_VREF_DQ_VALUE)
+#define G_VREF_DQ_VALUE(x) (((x) >> S_VREF_DQ_VALUE) & M_VREF_DQ_VALUE)
+
+#define A_MC_UPCTL_DFITRDDATAEN 0x40260
+
+#define S_TRDDATA_EN    0
+#define M_TRDDATA_EN    0x1fU
+#define V_TRDDATA_EN(x) ((x) << S_TRDDATA_EN)
+#define G_TRDDATA_EN(x) (((x) >> S_TRDDATA_EN) & M_TRDDATA_EN)
+
+#define A_MC_UPCTL_DFITPHYRDLAT 0x40264
+
+#define S_TPHY_RDLAT    0
+#define M_TPHY_RDLAT    0x3fU
+#define V_TPHY_RDLAT(x) ((x) << S_TPHY_RDLAT)
+#define G_TPHY_RDLAT(x) (((x) >> S_TPHY_RDLAT) & M_TPHY_RDLAT)
+
+#define A_MC_UPCTL_DFITPHYUPDTYPE0 0x40270
+
+#define S_TPHYUPD_TYPE0    0
+#define M_TPHYUPD_TYPE0    0xfffU
+#define V_TPHYUPD_TYPE0(x) ((x) << S_TPHYUPD_TYPE0)
+#define G_TPHYUPD_TYPE0(x) (((x) >> S_TPHYUPD_TYPE0) & M_TPHYUPD_TYPE0)
+
+#define A_MC_UPCTL_DFITPHYUPDTYPE1 0x40274
+
+#define S_TPHYUPD_TYPE1    0
+#define M_TPHYUPD_TYPE1    0xfffU
+#define V_TPHYUPD_TYPE1(x) ((x) << S_TPHYUPD_TYPE1)
+#define G_TPHYUPD_TYPE1(x) (((x) >> S_TPHYUPD_TYPE1) & M_TPHYUPD_TYPE1)
+
+#define A_MC_UPCTL_DFITPHYUPDTYPE2 0x40278
+
+#define S_TPHYUPD_TYPE2    0
+#define M_TPHYUPD_TYPE2    0xfffU
+#define V_TPHYUPD_TYPE2(x) ((x) << S_TPHYUPD_TYPE2)
+#define G_TPHYUPD_TYPE2(x) (((x) >> S_TPHYUPD_TYPE2) & M_TPHYUPD_TYPE2)
+
+#define A_MC_UPCTL_DFITPHYUPDTYPE3 0x4027c
+
+#define S_TPHYUPD_TYPE3    0
+#define M_TPHYUPD_TYPE3    0xfffU
+#define V_TPHYUPD_TYPE3(x) ((x) << S_TPHYUPD_TYPE3)
+#define G_TPHYUPD_TYPE3(x) (((x) >> S_TPHYUPD_TYPE3) & M_TPHYUPD_TYPE3)
+
+#define A_MC_UPCTL_DFITCTRLUPDMIN 0x40280
+
+#define S_TCTRLUPD_MIN    0
+#define M_TCTRLUPD_MIN    0xffffU
+#define V_TCTRLUPD_MIN(x) ((x) << S_TCTRLUPD_MIN)
+#define G_TCTRLUPD_MIN(x) (((x) >> S_TCTRLUPD_MIN) & M_TCTRLUPD_MIN)
+
+#define A_MC_LMC_ODTR0 0x40280
+
+#define S_RK0W    25
+#define V_RK0W(x) ((x) << S_RK0W)
+#define F_RK0W    V_RK0W(1U)
+
+#define S_RK0R    24
+#define V_RK0R(x) ((x) << S_RK0R)
+#define F_RK0R    V_RK0R(1U)
+
+#define A_MC_UPCTL_DFITCTRLUPDMAX 0x40284
+
+#define S_TCTRLUPD_MAX    0
+#define M_TCTRLUPD_MAX    0xffffU
+#define V_TCTRLUPD_MAX(x) ((x) << S_TCTRLUPD_MAX)
+#define G_TCTRLUPD_MAX(x) (((x) >> S_TCTRLUPD_MAX) & M_TCTRLUPD_MAX)
+
+#define A_MC_UPCTL_DFITCTRLUPDDLY 0x40288
+
+#define S_TCTRLUPD_DLY    0
+#define M_TCTRLUPD_DLY    0xfU
+#define V_TCTRLUPD_DLY(x) ((x) << S_TCTRLUPD_DLY)
+#define G_TCTRLUPD_DLY(x) (((x) >> S_TCTRLUPD_DLY) & M_TCTRLUPD_DLY)
+
+#define A_MC_UPCTL_DFIUPDCFG 0x40290
+
+#define S_DFI_PHYUPD_EN    1
+#define V_DFI_PHYUPD_EN(x) ((x) << S_DFI_PHYUPD_EN)
+#define F_DFI_PHYUPD_EN    V_DFI_PHYUPD_EN(1U)
+
+#define S_DFI_CTRLUPD_EN    0
+#define V_DFI_CTRLUPD_EN(x) ((x) << S_DFI_CTRLUPD_EN)
+#define F_DFI_CTRLUPD_EN    V_DFI_CTRLUPD_EN(1U)
+
+#define A_MC_UPCTL_DFITREFMSKI 0x40294
+
+#define S_TREFMSKI    0
+#define M_TREFMSKI    0xffU
+#define V_TREFMSKI(x) ((x) << S_TREFMSKI)
+#define G_TREFMSKI(x) (((x) >> S_TREFMSKI) & M_TREFMSKI)
+
+#define A_MC_UPCTL_DFITCTRLUPDI 0x40298
+#define A_MC_UPCTL_DFITRCFG0 0x402ac
+
+#define S_DFI_WRLVL_RANK_SEL    16
+#define M_DFI_WRLVL_RANK_SEL    0xfU
+#define V_DFI_WRLVL_RANK_SEL(x) ((x) << S_DFI_WRLVL_RANK_SEL)
+#define G_DFI_WRLVL_RANK_SEL(x) (((x) >> S_DFI_WRLVL_RANK_SEL) & M_DFI_WRLVL_RANK_SEL)
+
+#define S_DFI_RDLVL_EDGE    4
+#define M_DFI_RDLVL_EDGE    0x1ffU
+#define V_DFI_RDLVL_EDGE(x) ((x) << S_DFI_RDLVL_EDGE)
+#define G_DFI_RDLVL_EDGE(x) (((x) >> S_DFI_RDLVL_EDGE) & M_DFI_RDLVL_EDGE)
+
+#define S_DFI_RDLVL_RANK_SEL    0
+#define M_DFI_RDLVL_RANK_SEL    0xfU
+#define V_DFI_RDLVL_RANK_SEL(x) ((x) << S_DFI_RDLVL_RANK_SEL)
+#define G_DFI_RDLVL_RANK_SEL(x) (((x) >> S_DFI_RDLVL_RANK_SEL) & M_DFI_RDLVL_RANK_SEL)
+
+#define A_MC_UPCTL_DFITRSTAT0 0x402b0
+
+#define S_DFI_WRLVL_MODE    16
+#define M_DFI_WRLVL_MODE    0x3U
+#define V_DFI_WRLVL_MODE(x) ((x) << S_DFI_WRLVL_MODE)
+#define G_DFI_WRLVL_MODE(x) (((x) >> S_DFI_WRLVL_MODE) & M_DFI_WRLVL_MODE)
+
+#define S_DFI_RDLVL_GATE_MODE    8
+#define M_DFI_RDLVL_GATE_MODE    0x3U
+#define V_DFI_RDLVL_GATE_MODE(x) ((x) << S_DFI_RDLVL_GATE_MODE)
+#define G_DFI_RDLVL_GATE_MODE(x) (((x) >> S_DFI_RDLVL_GATE_MODE) & M_DFI_RDLVL_GATE_MODE)
+
+#define S_DFI_RDLVL_MODE    0
+#define M_DFI_RDLVL_MODE    0x3U
+#define V_DFI_RDLVL_MODE(x) ((x) << S_DFI_RDLVL_MODE)
+#define G_DFI_RDLVL_MODE(x) (((x) >> S_DFI_RDLVL_MODE) & M_DFI_RDLVL_MODE)
+
+#define A_MC_UPCTL_DFITRWRLVLEN 0x402b4
+
+#define S_DFI_WRLVL_EN    0
+#define M_DFI_WRLVL_EN    0x1ffU
+#define V_DFI_WRLVL_EN(x) ((x) << S_DFI_WRLVL_EN)
+#define G_DFI_WRLVL_EN(x) (((x) >> S_DFI_WRLVL_EN) & M_DFI_WRLVL_EN)
+
+#define A_MC_UPCTL_DFITRRDLVLEN 0x402b8
+
+#define S_DFI_RDLVL_EN    0
+#define M_DFI_RDLVL_EN    0x1ffU
+#define V_DFI_RDLVL_EN(x) ((x) << S_DFI_RDLVL_EN)
+#define G_DFI_RDLVL_EN(x) (((x) >> S_DFI_RDLVL_EN) & M_DFI_RDLVL_EN)
+
+#define A_MC_UPCTL_DFITRRDLVLGATEEN 0x402bc
+
+#define S_DFI_RDLVL_GATE_EN    0
+#define M_DFI_RDLVL_GATE_EN    0x1ffU
+#define V_DFI_RDLVL_GATE_EN(x) ((x) << S_DFI_RDLVL_GATE_EN)
+#define G_DFI_RDLVL_GATE_EN(x) (((x) >> S_DFI_RDLVL_GATE_EN) & M_DFI_RDLVL_GATE_EN)
+
+#define A_MC_UPCTL_DFISTSTAT0 0x402c0
+
+#define S_DFI_DATA_BYTE_DISABLE    16
+#define M_DFI_DATA_BYTE_DISABLE    0x1ffU
+#define V_DFI_DATA_BYTE_DISABLE(x) ((x) << S_DFI_DATA_BYTE_DISABLE)
+#define G_DFI_DATA_BYTE_DISABLE(x) (((x) >> S_DFI_DATA_BYTE_DISABLE) & M_DFI_DATA_BYTE_DISABLE)
+
+#define S_DFI_FREQ_RATIO    4
+#define M_DFI_FREQ_RATIO    0x3U
+#define V_DFI_FREQ_RATIO(x) ((x) << S_DFI_FREQ_RATIO)
+#define G_DFI_FREQ_RATIO(x) (((x) >> S_DFI_FREQ_RATIO) & M_DFI_FREQ_RATIO)
+
+#define S_DFI_INIT_START0    1
+#define V_DFI_INIT_START0(x) ((x) << S_DFI_INIT_START0)
+#define F_DFI_INIT_START0    V_DFI_INIT_START0(1U)
+
+#define S_DFI_INIT_COMPLETE    0
+#define V_DFI_INIT_COMPLETE(x) ((x) << S_DFI_INIT_COMPLETE)
+#define F_DFI_INIT_COMPLETE    V_DFI_INIT_COMPLETE(1U)
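+/*
+ * A plausible init-status poll using the field above (sketch only,
+ * assuming t4_read_reg() and the kernel's DELAY(); the driver's real
+ * bring-up sequence is handled by firmware):
+ *
+ *	while (!(t4_read_reg(sc, A_MC_UPCTL_DFISTSTAT0) &
+ *	    F_DFI_INIT_COMPLETE))
+ *		DELAY(10);
+ */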
+
+#define A_MC_UPCTL_DFISTCFG0 0x402c4
+
+#define S_DFI_DATA_BYTE_DISABLE_EN    2
+#define V_DFI_DATA_BYTE_DISABLE_EN(x) ((x) << S_DFI_DATA_BYTE_DISABLE_EN)
+#define F_DFI_DATA_BYTE_DISABLE_EN    V_DFI_DATA_BYTE_DISABLE_EN(1U)
+
+#define S_DFI_FREQ_RATIO_EN    1
+#define V_DFI_FREQ_RATIO_EN(x) ((x) << S_DFI_FREQ_RATIO_EN)
+#define F_DFI_FREQ_RATIO_EN    V_DFI_FREQ_RATIO_EN(1U)
+
+#define S_DFI_INIT_START    0
+#define V_DFI_INIT_START(x) ((x) << S_DFI_INIT_START)
+#define F_DFI_INIT_START    V_DFI_INIT_START(1U)
+
+#define A_MC_UPCTL_DFISTCFG1 0x402c8
+
+#define S_DFI_DRAM_CLK_DISABLE_EN_DPD    1
+#define V_DFI_DRAM_CLK_DISABLE_EN_DPD(x) ((x) << S_DFI_DRAM_CLK_DISABLE_EN_DPD)
+#define F_DFI_DRAM_CLK_DISABLE_EN_DPD    V_DFI_DRAM_CLK_DISABLE_EN_DPD(1U)
+
+#define S_DFI_DRAM_CLK_DISABLE_EN    0
+#define V_DFI_DRAM_CLK_DISABLE_EN(x) ((x) << S_DFI_DRAM_CLK_DISABLE_EN)
+#define F_DFI_DRAM_CLK_DISABLE_EN    V_DFI_DRAM_CLK_DISABLE_EN(1U)
+
+#define A_MC_UPCTL_DFITDRAMCLKEN 0x402d0
+
+#define S_TDRAM_CLK_ENABLE    0
+#define M_TDRAM_CLK_ENABLE    0xfU
+#define V_TDRAM_CLK_ENABLE(x) ((x) << S_TDRAM_CLK_ENABLE)
+#define G_TDRAM_CLK_ENABLE(x) (((x) >> S_TDRAM_CLK_ENABLE) & M_TDRAM_CLK_ENABLE)
+
+#define A_MC_UPCTL_DFITDRAMCLKDIS 0x402d4
+
+#define S_TDRAM_CLK_DISABLE    0
+#define M_TDRAM_CLK_DISABLE    0xfU
+#define V_TDRAM_CLK_DISABLE(x) ((x) << S_TDRAM_CLK_DISABLE)
+#define G_TDRAM_CLK_DISABLE(x) (((x) >> S_TDRAM_CLK_DISABLE) & M_TDRAM_CLK_DISABLE)
+
+#define A_MC_UPCTL_DFISTCFG2 0x402d8
+
+#define S_PARITY_EN    1
+#define V_PARITY_EN(x) ((x) << S_PARITY_EN)
+#define F_PARITY_EN    V_PARITY_EN(1U)
+
+#define S_PARITY_INTR_EN    0
+#define V_PARITY_INTR_EN(x) ((x) << S_PARITY_INTR_EN)
+#define F_PARITY_INTR_EN    V_PARITY_INTR_EN(1U)
+
+#define A_MC_UPCTL_DFISTPARCLR 0x402dc
+
+#define S_PARITY_LOG_CLR    1
+#define V_PARITY_LOG_CLR(x) ((x) << S_PARITY_LOG_CLR)
+#define F_PARITY_LOG_CLR    V_PARITY_LOG_CLR(1U)
+
+#define S_PARITY_INTR_CLR    0
+#define V_PARITY_INTR_CLR(x) ((x) << S_PARITY_INTR_CLR)
+#define F_PARITY_INTR_CLR    V_PARITY_INTR_CLR(1U)
+
+#define A_MC_UPCTL_DFISTPARLOG 0x402e0
+#define A_MC_UPCTL_DFILPCFG0 0x402f0
+
+#define S_DFI_LP_WAKEUP_DPD    28
+#define M_DFI_LP_WAKEUP_DPD    0xfU
+#define V_DFI_LP_WAKEUP_DPD(x) ((x) << S_DFI_LP_WAKEUP_DPD)
+#define G_DFI_LP_WAKEUP_DPD(x) (((x) >> S_DFI_LP_WAKEUP_DPD) & M_DFI_LP_WAKEUP_DPD)
+
+#define S_DFI_LP_EN_DPD    24
+#define V_DFI_LP_EN_DPD(x) ((x) << S_DFI_LP_EN_DPD)
+#define F_DFI_LP_EN_DPD    V_DFI_LP_EN_DPD(1U)
+
+#define S_DFI_TLP_RESP    16
+#define M_DFI_TLP_RESP    0xfU
+#define V_DFI_TLP_RESP(x) ((x) << S_DFI_TLP_RESP)
+#define G_DFI_TLP_RESP(x) (((x) >> S_DFI_TLP_RESP) & M_DFI_TLP_RESP)
+
+#define S_DFI_LP_EN_SR    8
+#define V_DFI_LP_EN_SR(x) ((x) << S_DFI_LP_EN_SR)
+#define F_DFI_LP_EN_SR    V_DFI_LP_EN_SR(1U)
+
+#define S_DFI_LP_WAKEUP_PD    4
+#define M_DFI_LP_WAKEUP_PD    0xfU
+#define V_DFI_LP_WAKEUP_PD(x) ((x) << S_DFI_LP_WAKEUP_PD)
+#define G_DFI_LP_WAKEUP_PD(x) (((x) >> S_DFI_LP_WAKEUP_PD) & M_DFI_LP_WAKEUP_PD)
+
+#define S_DFI_LP_EN_PD    0
+#define V_DFI_LP_EN_PD(x) ((x) << S_DFI_LP_EN_PD)
+#define F_DFI_LP_EN_PD    V_DFI_LP_EN_PD(1U)
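+/*
+ * DFILPCFG0 appears to pair an enable bit with a wakeup-time field for
+ * each low-power state (PD power-down, SR self-refresh, DPD deep
+ * power-down), plus a common tlp_resp response time.
+ */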
+
+#define A_MC_UPCTL_DFITRWRLVLRESP0 0x40300
+#define A_MC_UPCTL_DFITRWRLVLRESP1 0x40304
+#define A_MC_LMC_CALSTAT 0x40304
+
+#define S_PHYUPD_ERR    28
+#define M_PHYUPD_ERR    0xfU
+#define V_PHYUPD_ERR(x) ((x) << S_PHYUPD_ERR)
+#define G_PHYUPD_ERR(x) (((x) >> S_PHYUPD_ERR) & M_PHYUPD_ERR)
+
+#define S_PHYUPD_BUSY    27
+#define V_PHYUPD_BUSY(x) ((x) << S_PHYUPD_BUSY)
+#define F_PHYUPD_BUSY    V_PHYUPD_BUSY(1U)
+
+#define A_MC_UPCTL_DFITRWRLVLRESP2 0x40308
+
+#define S_DFI_WRLVL_RESP2    0
+#define M_DFI_WRLVL_RESP2    0xffU
+#define V_DFI_WRLVL_RESP2(x) ((x) << S_DFI_WRLVL_RESP2)
+#define G_DFI_WRLVL_RESP2(x) (((x) >> S_DFI_WRLVL_RESP2) & M_DFI_WRLVL_RESP2)
+
+#define A_MC_UPCTL_DFITRRDLVLRESP0 0x4030c
+#define A_MC_UPCTL_DFITRRDLVLRESP1 0x40310
+#define A_MC_UPCTL_DFITRRDLVLRESP2 0x40314
+
+#define S_DFI_RDLVL_RESP2    0
+#define M_DFI_RDLVL_RESP2    0xffU
+#define V_DFI_RDLVL_RESP2(x) ((x) << S_DFI_RDLVL_RESP2)
+#define G_DFI_RDLVL_RESP2(x) (((x) >> S_DFI_RDLVL_RESP2) & M_DFI_RDLVL_RESP2)
+
+#define A_MC_UPCTL_DFITRWRLVLDELAY0 0x40318
+#define A_MC_UPCTL_DFITRWRLVLDELAY1 0x4031c
+#define A_MC_UPCTL_DFITRWRLVLDELAY2 0x40320
+
+#define S_DFI_WRLVL_DELAY2    0
+#define M_DFI_WRLVL_DELAY2    0xffU
+#define V_DFI_WRLVL_DELAY2(x) ((x) << S_DFI_WRLVL_DELAY2)
+#define G_DFI_WRLVL_DELAY2(x) (((x) >> S_DFI_WRLVL_DELAY2) & M_DFI_WRLVL_DELAY2)
+
+#define A_MC_UPCTL_DFITRRDLVLDELAY0 0x40324
+#define A_MC_UPCTL_DFITRRDLVLDELAY1 0x40328
+#define A_MC_UPCTL_DFITRRDLVLDELAY2 0x4032c
+
+#define S_DFI_RDLVL_DELAY2    0
+#define M_DFI_RDLVL_DELAY2    0xffU
+#define V_DFI_RDLVL_DELAY2(x) ((x) << S_DFI_RDLVL_DELAY2)
+#define G_DFI_RDLVL_DELAY2(x) (((x) >> S_DFI_RDLVL_DELAY2) & M_DFI_RDLVL_DELAY2)
+
+#define A_MC_UPCTL_DFITRRDLVLGATEDELAY0 0x40330
+#define A_MC_LMC_T_PHYUPD0 0x40330
+#define A_MC_UPCTL_DFITRRDLVLGATEDELAY1 0x40334
+#define A_MC_LMC_T_PHYUPD1 0x40334
+#define A_MC_UPCTL_DFITRRDLVLGATEDELAY2 0x40338
+
+#define S_DFI_RDLVL_GATE_DELAY2    0
+#define M_DFI_RDLVL_GATE_DELAY2    0xffU
+#define V_DFI_RDLVL_GATE_DELAY2(x) ((x) << S_DFI_RDLVL_GATE_DELAY2)
+#define G_DFI_RDLVL_GATE_DELAY2(x) (((x) >> S_DFI_RDLVL_GATE_DELAY2) & M_DFI_RDLVL_GATE_DELAY2)
+
+#define A_MC_LMC_T_PHYUPD2 0x40338
+#define A_MC_UPCTL_DFITRCMD 0x4033c
+
+#define S_DFITRCMD_START    31
+#define V_DFITRCMD_START(x) ((x) << S_DFITRCMD_START)
+#define F_DFITRCMD_START    V_DFITRCMD_START(1U)
+
+#define S_DFITRCMD_EN    4
+#define M_DFITRCMD_EN    0x1ffU
+#define V_DFITRCMD_EN(x) ((x) << S_DFITRCMD_EN)
+#define G_DFITRCMD_EN(x) (((x) >> S_DFITRCMD_EN) & M_DFITRCMD_EN)
+
+#define S_DFITRCMD_OPCODE    0
+#define M_DFITRCMD_OPCODE    0x3U
+#define V_DFITRCMD_OPCODE(x) ((x) << S_DFITRCMD_OPCODE)
+#define G_DFITRCMD_OPCODE(x) (((x) >> S_DFITRCMD_OPCODE) & M_DFITRCMD_OPCODE)
+
+#define A_MC_LMC_T_PHYUPD3 0x4033c
+#define A_MC_UPCTL_IPVR 0x403f8
+#define A_MC_UPCTL_IPTR 0x403fc
+#define A_MC_P_DDRPHY_RST_CTRL 0x41300
+
+#define S_PHY_DRAM_WL    17
+#define M_PHY_DRAM_WL    0x1fU
+#define V_PHY_DRAM_WL(x) ((x) << S_PHY_DRAM_WL)
+#define G_PHY_DRAM_WL(x) (((x) >> S_PHY_DRAM_WL) & M_PHY_DRAM_WL)
+
+#define S_PHY_CALIB_DONE    5
+#define V_PHY_CALIB_DONE(x) ((x) << S_PHY_CALIB_DONE)
+#define F_PHY_CALIB_DONE    V_PHY_CALIB_DONE(1U)
+
+#define S_CTL_CAL_REQ    4
+#define V_CTL_CAL_REQ(x) ((x) << S_CTL_CAL_REQ)
+#define F_CTL_CAL_REQ    V_CTL_CAL_REQ(1U)
+
+#define S_CTL_CKE    3
+#define V_CTL_CKE(x) ((x) << S_CTL_CKE)
+#define F_CTL_CKE    V_CTL_CKE(1U)
+
+#define S_CTL_RST_N    2
+#define V_CTL_RST_N(x) ((x) << S_CTL_RST_N)
+#define F_CTL_RST_N    V_CTL_RST_N(1U)
+
+#define S_PHY_CAL_REQ    21
+#define V_PHY_CAL_REQ(x) ((x) << S_PHY_CAL_REQ)
+#define F_PHY_CAL_REQ    V_PHY_CAL_REQ(1U)
+
+#define S_T6_PHY_DRAM_WL    17
+#define M_T6_PHY_DRAM_WL    0xfU
+#define V_T6_PHY_DRAM_WL(x) ((x) << S_T6_PHY_DRAM_WL)
+#define G_T6_PHY_DRAM_WL(x) (((x) >> S_T6_PHY_DRAM_WL) & M_T6_PHY_DRAM_WL)
+
+#define A_MC_P_PERFORMANCE_CTRL 0x41304
+
+#define S_BUF_USE_TH    12
+#define M_BUF_USE_TH    0x7U
+#define V_BUF_USE_TH(x) ((x) << S_BUF_USE_TH)
+#define G_BUF_USE_TH(x) (((x) >> S_BUF_USE_TH) & M_BUF_USE_TH)
+
+#define S_MC_IDLE_TH    8
+#define M_MC_IDLE_TH    0xfU
+#define V_MC_IDLE_TH(x) ((x) << S_MC_IDLE_TH)
+#define G_MC_IDLE_TH(x) (((x) >> S_MC_IDLE_TH) & M_MC_IDLE_TH)
+
+#define S_RMW_DEFER_EN    7
+#define V_RMW_DEFER_EN(x) ((x) << S_RMW_DEFER_EN)
+#define F_RMW_DEFER_EN    V_RMW_DEFER_EN(1U)
+
+#define S_DDR3_BRBC_MODE    6
+#define V_DDR3_BRBC_MODE(x) ((x) << S_DDR3_BRBC_MODE)
+#define F_DDR3_BRBC_MODE    V_DDR3_BRBC_MODE(1U)
+
+#define S_RMW_DWRITE_EN    5
+#define V_RMW_DWRITE_EN(x) ((x) << S_RMW_DWRITE_EN)
+#define F_RMW_DWRITE_EN    V_RMW_DWRITE_EN(1U)
+
+#define S_RMW_MERGE_EN    4
+#define V_RMW_MERGE_EN(x) ((x) << S_RMW_MERGE_EN)
+#define F_RMW_MERGE_EN    V_RMW_MERGE_EN(1U)
+
+#define S_SYNC_PAB_EN    3
+#define V_SYNC_PAB_EN(x) ((x) << S_SYNC_PAB_EN)
+#define F_SYNC_PAB_EN    V_SYNC_PAB_EN(1U)
+
+#define A_MC_P_ECC_CTRL 0x41308
+#define A_MC_P_PAR_ENABLE 0x4130c
+#define A_MC_P_PAR_CAUSE 0x41310
+#define A_MC_P_INT_ENABLE 0x41314
+#define A_MC_P_INT_CAUSE 0x41318
+#define A_MC_P_ECC_STATUS 0x4131c
+#define A_MC_P_PHY_CTRL 0x41320
+#define A_MC_P_STATIC_CFG_STATUS 0x41324
+
+#define S_STATIC_AWEN    23
+#define V_STATIC_AWEN(x) ((x) << S_STATIC_AWEN)
+#define F_STATIC_AWEN    V_STATIC_AWEN(1U)
+
+#define S_STATIC_SWLAT    18
+#define M_STATIC_SWLAT    0x1fU
+#define V_STATIC_SWLAT(x) ((x) << S_STATIC_SWLAT)
+#define G_STATIC_SWLAT(x) (((x) >> S_STATIC_SWLAT) & M_STATIC_SWLAT)
+
+#define S_STATIC_WLAT    17
+#define V_STATIC_WLAT(x) ((x) << S_STATIC_WLAT)
+#define F_STATIC_WLAT    V_STATIC_WLAT(1U)
+
+#define S_STATIC_ALIGN    16
+#define V_STATIC_ALIGN(x) ((x) << S_STATIC_ALIGN)
+#define F_STATIC_ALIGN    V_STATIC_ALIGN(1U)
+
+#define S_STATIC_SLAT    11
+#define M_STATIC_SLAT    0x1fU
+#define V_STATIC_SLAT(x) ((x) << S_STATIC_SLAT)
+#define G_STATIC_SLAT(x) (((x) >> S_STATIC_SLAT) & M_STATIC_SLAT)
+
+#define S_STATIC_LAT    10
+#define V_STATIC_LAT(x) ((x) << S_STATIC_LAT)
+#define F_STATIC_LAT    V_STATIC_LAT(1U)
+
+#define S_STATIC_PP64    26
+#define V_STATIC_PP64(x) ((x) << S_STATIC_PP64)
+#define F_STATIC_PP64    V_STATIC_PP64(1U)
+
+#define S_STATIC_PPEN    25
+#define V_STATIC_PPEN(x) ((x) << S_STATIC_PPEN)
+#define F_STATIC_PPEN    V_STATIC_PPEN(1U)
+
+#define S_STATIC_OOOEN    24
+#define V_STATIC_OOOEN(x) ((x) << S_STATIC_OOOEN)
+#define F_STATIC_OOOEN    V_STATIC_OOOEN(1U)
+
+#define A_MC_P_CORE_PCTL_STAT 0x41328
+#define A_MC_P_DEBUG_CNT 0x4132c
+#define A_MC_CE_ERR_DATA_RDATA 0x41330
+#define A_MC_CE_COR_DATA_RDATA 0x41350
+#define A_MC_UE_ERR_DATA_RDATA 0x41370
+#define A_MC_UE_COR_DATA_RDATA 0x41390
+#define A_MC_CE_ADDR 0x413b0
+#define A_MC_UE_ADDR 0x413b4
+#define A_MC_P_DEEP_SLEEP 0x413b8
+
+#define S_SLEEPSTATUS    1
+#define V_SLEEPSTATUS(x) ((x) << S_SLEEPSTATUS)
+#define F_SLEEPSTATUS    V_SLEEPSTATUS(1U)
+
+#define S_SLEEPREQ    0
+#define V_SLEEPREQ(x) ((x) << S_SLEEPREQ)
+#define F_SLEEPREQ    V_SLEEPREQ(1U)
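+/*
+ * The two fields above suggest a request/acknowledge pair.  A
+ * hypothetical deep-sleep entry sequence, assuming t4_set_reg_field()
+ * and t4_read_reg() (illustrative only):
+ *
+ *	t4_set_reg_field(sc, A_MC_P_DEEP_SLEEP, F_SLEEPREQ, F_SLEEPREQ);
+ *	while (!(t4_read_reg(sc, A_MC_P_DEEP_SLEEP) & F_SLEEPSTATUS))
+ *		DELAY(1);
+ */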
+
+#define A_MC_P_FPGA_BONUS 0x413bc
+#define A_MC_P_DEBUG_CFG 0x413c0
+#define A_MC_P_DEBUG_RPT 0x413c4
+#define A_MC_P_PHY_ADR_CK_EN 0x413c8
+
+#define S_ADR_CK_EN    0
+#define V_ADR_CK_EN(x) ((x) << S_ADR_CK_EN)
+#define F_ADR_CK_EN    V_ADR_CK_EN(1U)
+
+#define A_MC_CE_ERR_ECC_DATA0 0x413d0
+#define A_MC_CE_ERR_ECC_DATA1 0x413d4
+#define A_MC_UE_ERR_ECC_DATA0 0x413d8
+#define A_MC_UE_ERR_ECC_DATA1 0x413dc
+#define A_MC_P_RMW_PRIO 0x413f0
+
+#define S_WR_HI_TH    24
+#define M_WR_HI_TH    0xffU
+#define V_WR_HI_TH(x) ((x) << S_WR_HI_TH)
+#define G_WR_HI_TH(x) (((x) >> S_WR_HI_TH) & M_WR_HI_TH)
+
+#define S_WR_MID_TH    16
+#define M_WR_MID_TH    0xffU
+#define V_WR_MID_TH(x) ((x) << S_WR_MID_TH)
+#define G_WR_MID_TH(x) (((x) >> S_WR_MID_TH) & M_WR_MID_TH)
+
+#define S_RD_HI_TH    8
+#define M_RD_HI_TH    0xffU
+#define V_RD_HI_TH(x) ((x) << S_RD_HI_TH)
+#define G_RD_HI_TH(x) (((x) >> S_RD_HI_TH) & M_RD_HI_TH)
+
+#define S_RD_MID_TH    0
+#define M_RD_MID_TH    0xffU
+#define V_RD_MID_TH(x) ((x) << S_RD_MID_TH)
+#define G_RD_MID_TH(x) (((x) >> S_RD_MID_TH) & M_RD_MID_TH)
+
+#define A_MC_P_BIST_CMD 0x41400
+
+#define S_BURST_LEN    16
+#define M_BURST_LEN    0x3U
+#define V_BURST_LEN(x) ((x) << S_BURST_LEN)
+#define G_BURST_LEN(x) (((x) >> S_BURST_LEN) & M_BURST_LEN)
+
+#define A_MC_P_BIST_CMD_ADDR 0x41404
+#define A_MC_P_BIST_CMD_LEN 0x41408
+#define A_MC_P_BIST_DATA_PATTERN 0x4140c
+#define A_MC_P_BIST_USER_WDATA0 0x41414
+#define A_MC_P_BIST_USER_WMASK0 0x41414
+#define A_MC_P_BIST_USER_WDATA1 0x41418
+#define A_MC_P_BIST_USER_WMASK1 0x41418
+#define A_MC_P_BIST_USER_WDATA2 0x4141c
+
+#define S_USER_DATA_MASK    8
+#define M_USER_DATA_MASK    0x1ffU
+#define V_USER_DATA_MASK(x) ((x) << S_USER_DATA_MASK)
+#define G_USER_DATA_MASK(x) (((x) >> S_USER_DATA_MASK) & M_USER_DATA_MASK)
+
+#define A_MC_P_BIST_USER_WMASK2 0x4141c
+
+#define S_MASK_128_1    9
+#define V_MASK_128_1(x) ((x) << S_MASK_128_1)
+#define F_MASK_128_1    V_MASK_128_1(1U)
+
+#define S_MASK_128_0    8
+#define V_MASK_128_0(x) ((x) << S_MASK_128_0)
+#define F_MASK_128_0    V_MASK_128_0(1U)
+
+#define S_USER_MASK_ECC    0
+#define M_USER_MASK_ECC    0xffU
+#define V_USER_MASK_ECC(x) ((x) << S_USER_MASK_ECC)
+#define G_USER_MASK_ECC(x) (((x) >> S_USER_MASK_ECC) & M_USER_MASK_ECC)
+
+#define A_MC_P_BIST_NUM_ERR 0x41480
+#define A_MC_P_BIST_ERR_FIRST_ADDR 0x41484
+#define A_MC_P_BIST_STATUS_RDATA 0x41488
+#define A_MC_P_BIST_CRC_SEED 0x414d0
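+/*
+ * The A_MC_DDRPHY_DP18_* registers below appear to be the per-slice
+ * ("DP18", an 18-bit data PHY block) training and status interface:
+ * bit enables/directions, read/write leveling, eye measurement, and
+ * error masking for each rank pair.
+ */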
+#define A_MC_DDRPHY_DP18_DATA_BIT_ENABLE0 0x44000
+
+#define S_DATA_BIT_ENABLE_0_15    0
+#define M_DATA_BIT_ENABLE_0_15    0xffffU
+#define V_DATA_BIT_ENABLE_0_15(x) ((x) << S_DATA_BIT_ENABLE_0_15)
+#define G_DATA_BIT_ENABLE_0_15(x) (((x) >> S_DATA_BIT_ENABLE_0_15) & M_DATA_BIT_ENABLE_0_15)
+
+#define A_MC_DDRPHY_DP18_DATA_BIT_ENABLE1 0x44004
+
+#define S_DATA_BIT_ENABLE_16_23    8
+#define M_DATA_BIT_ENABLE_16_23    0xffU
+#define V_DATA_BIT_ENABLE_16_23(x) ((x) << S_DATA_BIT_ENABLE_16_23)
+#define G_DATA_BIT_ENABLE_16_23(x) (((x) >> S_DATA_BIT_ENABLE_16_23) & M_DATA_BIT_ENABLE_16_23)
+
+#define S_DFT_FORCE_OUTPUTS    7
+#define V_DFT_FORCE_OUTPUTS(x) ((x) << S_DFT_FORCE_OUTPUTS)
+#define F_DFT_FORCE_OUTPUTS    V_DFT_FORCE_OUTPUTS(1U)
+
+#define S_DFT_PRBS7_GEN_EN    6
+#define V_DFT_PRBS7_GEN_EN(x) ((x) << S_DFT_PRBS7_GEN_EN)
+#define F_DFT_PRBS7_GEN_EN    V_DFT_PRBS7_GEN_EN(1U)
+
+#define S_WRAPSEL    5
+#define V_WRAPSEL(x) ((x) << S_WRAPSEL)
+#define F_WRAPSEL    V_WRAPSEL(1U)
+
+#define S_MRS_CMD_DATA_N0    3
+#define V_MRS_CMD_DATA_N0(x) ((x) << S_MRS_CMD_DATA_N0)
+#define F_MRS_CMD_DATA_N0    V_MRS_CMD_DATA_N0(1U)
+
+#define S_MRS_CMD_DATA_N1    2
+#define V_MRS_CMD_DATA_N1(x) ((x) << S_MRS_CMD_DATA_N1)
+#define F_MRS_CMD_DATA_N1    V_MRS_CMD_DATA_N1(1U)
+
+#define S_MRS_CMD_DATA_N2    1
+#define V_MRS_CMD_DATA_N2(x) ((x) << S_MRS_CMD_DATA_N2)
+#define F_MRS_CMD_DATA_N2    V_MRS_CMD_DATA_N2(1U)
+
+#define S_MRS_CMD_DATA_N3    0
+#define V_MRS_CMD_DATA_N3(x) ((x) << S_MRS_CMD_DATA_N3)
+#define F_MRS_CMD_DATA_N3    V_MRS_CMD_DATA_N3(1U)
+
+#define S_DP18_WRAPSEL    5
+#define V_DP18_WRAPSEL(x) ((x) << S_DP18_WRAPSEL)
+#define F_DP18_WRAPSEL    V_DP18_WRAPSEL(1U)
+
+#define S_HW_VALUE    4
+#define V_HW_VALUE(x) ((x) << S_HW_VALUE)
+#define F_HW_VALUE    V_HW_VALUE(1U)
+
+#define A_MC_DDRPHY_DP18_DATA_BIT_DIR0 0x44008
+
+#define S_DATA_BIT_DIR_0_15    0
+#define M_DATA_BIT_DIR_0_15    0xffffU
+#define V_DATA_BIT_DIR_0_15(x) ((x) << S_DATA_BIT_DIR_0_15)
+#define G_DATA_BIT_DIR_0_15(x) (((x) >> S_DATA_BIT_DIR_0_15) & M_DATA_BIT_DIR_0_15)
+
+#define A_MC_DDRPHY_DP18_DATA_BIT_DIR1 0x4400c
+
+#define S_DATA_BIT_DIR_16_23    8
+#define M_DATA_BIT_DIR_16_23    0xffU
+#define V_DATA_BIT_DIR_16_23(x) ((x) << S_DATA_BIT_DIR_16_23)
+#define G_DATA_BIT_DIR_16_23(x) (((x) >> S_DATA_BIT_DIR_16_23) & M_DATA_BIT_DIR_16_23)
+
+#define S_WL_ADVANCE_DISABLE    7
+#define V_WL_ADVANCE_DISABLE(x) ((x) << S_WL_ADVANCE_DISABLE)
+#define F_WL_ADVANCE_DISABLE    V_WL_ADVANCE_DISABLE(1U)
+
+#define S_DISABLE_PING_PONG    6
+#define V_DISABLE_PING_PONG(x) ((x) << S_DISABLE_PING_PONG)
+#define F_DISABLE_PING_PONG    V_DISABLE_PING_PONG(1U)
+
+#define S_DELAY_PING_PONG_HALF    5
+#define V_DELAY_PING_PONG_HALF(x) ((x) << S_DELAY_PING_PONG_HALF)
+#define F_DELAY_PING_PONG_HALF    V_DELAY_PING_PONG_HALF(1U)
+
+#define S_ADVANCE_PING_PONG    4
+#define V_ADVANCE_PING_PONG(x) ((x) << S_ADVANCE_PING_PONG)
+#define F_ADVANCE_PING_PONG    V_ADVANCE_PING_PONG(1U)
+
+#define S_ATEST_MUX_CTL0    3
+#define V_ATEST_MUX_CTL0(x) ((x) << S_ATEST_MUX_CTL0)
+#define F_ATEST_MUX_CTL0    V_ATEST_MUX_CTL0(1U)
+
+#define S_ATEST_MUX_CTL1    2
+#define V_ATEST_MUX_CTL1(x) ((x) << S_ATEST_MUX_CTL1)
+#define F_ATEST_MUX_CTL1    V_ATEST_MUX_CTL1(1U)
+
+#define S_ATEST_MUX_CTL2    1
+#define V_ATEST_MUX_CTL2(x) ((x) << S_ATEST_MUX_CTL2)
+#define F_ATEST_MUX_CTL2    V_ATEST_MUX_CTL2(1U)
+
+#define S_ATEST_MUX_CTL3    0
+#define V_ATEST_MUX_CTL3(x) ((x) << S_ATEST_MUX_CTL3)
+#define F_ATEST_MUX_CTL3    V_ATEST_MUX_CTL3(1U)
+
+#define A_MC_DDRPHY_DP18_READ_CLOCK_RANK_PAIR 0x44010
+
+#define S_QUAD0_CLK16_BIT0    15
+#define V_QUAD0_CLK16_BIT0(x) ((x) << S_QUAD0_CLK16_BIT0)
+#define F_QUAD0_CLK16_BIT0    V_QUAD0_CLK16_BIT0(1U)
+
+#define S_QUAD1_CLK16_BIT1    14
+#define V_QUAD1_CLK16_BIT1(x) ((x) << S_QUAD1_CLK16_BIT1)
+#define F_QUAD1_CLK16_BIT1    V_QUAD1_CLK16_BIT1(1U)
+
+#define S_QUAD2_CLK16_BIT2    13
+#define V_QUAD2_CLK16_BIT2(x) ((x) << S_QUAD2_CLK16_BIT2)
+#define F_QUAD2_CLK16_BIT2    V_QUAD2_CLK16_BIT2(1U)
+
+#define S_QUAD3_CLK16_BIT3    12
+#define V_QUAD3_CLK16_BIT3(x) ((x) << S_QUAD3_CLK16_BIT3)
+#define F_QUAD3_CLK16_BIT3    V_QUAD3_CLK16_BIT3(1U)
+
+#define S_QUAD0_CLK18_BIT4    11
+#define V_QUAD0_CLK18_BIT4(x) ((x) << S_QUAD0_CLK18_BIT4)
+#define F_QUAD0_CLK18_BIT4    V_QUAD0_CLK18_BIT4(1U)
+
+#define S_QUAD1_CLK18_BIT5    10
+#define V_QUAD1_CLK18_BIT5(x) ((x) << S_QUAD1_CLK18_BIT5)
+#define F_QUAD1_CLK18_BIT5    V_QUAD1_CLK18_BIT5(1U)
+
+#define S_QUAD2_CLK20_BIT6    9
+#define V_QUAD2_CLK20_BIT6(x) ((x) << S_QUAD2_CLK20_BIT6)
+#define F_QUAD2_CLK20_BIT6    V_QUAD2_CLK20_BIT6(1U)
+
+#define S_QUAD3_CLK20_BIT7    8
+#define V_QUAD3_CLK20_BIT7(x) ((x) << S_QUAD3_CLK20_BIT7)
+#define F_QUAD3_CLK20_BIT7    V_QUAD3_CLK20_BIT7(1U)
+
+#define S_QUAD2_CLK22_BIT8    7
+#define V_QUAD2_CLK22_BIT8(x) ((x) << S_QUAD2_CLK22_BIT8)
+#define F_QUAD2_CLK22_BIT8    V_QUAD2_CLK22_BIT8(1U)
+
+#define S_QUAD3_CLK22_BIT9    6
+#define V_QUAD3_CLK22_BIT9(x) ((x) << S_QUAD3_CLK22_BIT9)
+#define F_QUAD3_CLK22_BIT9    V_QUAD3_CLK22_BIT9(1U)
+
+#define S_CLK16_SINGLE_ENDED_BIT10    5
+#define V_CLK16_SINGLE_ENDED_BIT10(x) ((x) << S_CLK16_SINGLE_ENDED_BIT10)
+#define F_CLK16_SINGLE_ENDED_BIT10    V_CLK16_SINGLE_ENDED_BIT10(1U)
+
+#define S_CLK18_SINGLE_ENDED_BIT11    4
+#define V_CLK18_SINGLE_ENDED_BIT11(x) ((x) << S_CLK18_SINGLE_ENDED_BIT11)
+#define F_CLK18_SINGLE_ENDED_BIT11    V_CLK18_SINGLE_ENDED_BIT11(1U)
+
+#define S_CLK20_SINGLE_ENDED_BIT12    3
+#define V_CLK20_SINGLE_ENDED_BIT12(x) ((x) << S_CLK20_SINGLE_ENDED_BIT12)
+#define F_CLK20_SINGLE_ENDED_BIT12    V_CLK20_SINGLE_ENDED_BIT12(1U)
+
+#define S_CLK22_SINGLE_ENDED_BIT13    2
+#define V_CLK22_SINGLE_ENDED_BIT13(x) ((x) << S_CLK22_SINGLE_ENDED_BIT13)
+#define F_CLK22_SINGLE_ENDED_BIT13    V_CLK22_SINGLE_ENDED_BIT13(1U)
+
+#define A_MC_DDRPHY_DP18_WRCLK_EN_RP 0x44014
+
+#define S_QUAD2_CLK18_BIT14    1
+#define V_QUAD2_CLK18_BIT14(x) ((x) << S_QUAD2_CLK18_BIT14)
+#define F_QUAD2_CLK18_BIT14    V_QUAD2_CLK18_BIT14(1U)
+
+#define S_QUAD3_CLK18_BIT15    0
+#define V_QUAD3_CLK18_BIT15(x) ((x) << S_QUAD3_CLK18_BIT15)
+#define F_QUAD3_CLK18_BIT15    V_QUAD3_CLK18_BIT15(1U)
+
+#define A_MC_DDRPHY_DP18_RX_PEAK_AMP 0x44018
+
+#define S_PEAK_AMP_CTL_SIDE0    13
+#define M_PEAK_AMP_CTL_SIDE0    0x7U
+#define V_PEAK_AMP_CTL_SIDE0(x) ((x) << S_PEAK_AMP_CTL_SIDE0)
+#define G_PEAK_AMP_CTL_SIDE0(x) (((x) >> S_PEAK_AMP_CTL_SIDE0) & M_PEAK_AMP_CTL_SIDE0)
+
+#define S_PEAK_AMP_CTL_SIDE1    9
+#define M_PEAK_AMP_CTL_SIDE1    0x7U
+#define V_PEAK_AMP_CTL_SIDE1(x) ((x) << S_PEAK_AMP_CTL_SIDE1)
+#define G_PEAK_AMP_CTL_SIDE1(x) (((x) >> S_PEAK_AMP_CTL_SIDE1) & M_PEAK_AMP_CTL_SIDE1)
+
+#define S_SXMCVREF_0_3    4
+#define M_SXMCVREF_0_3    0xfU
+#define V_SXMCVREF_0_3(x) ((x) << S_SXMCVREF_0_3)
+#define G_SXMCVREF_0_3(x) (((x) >> S_SXMCVREF_0_3) & M_SXMCVREF_0_3)
+
+#define S_SXPODVREF    3
+#define V_SXPODVREF(x) ((x) << S_SXPODVREF)
+#define F_SXPODVREF    V_SXPODVREF(1U)
+
+#define S_DISABLE_TERMINATION    2
+#define V_DISABLE_TERMINATION(x) ((x) << S_DISABLE_TERMINATION)
+#define F_DISABLE_TERMINATION    V_DISABLE_TERMINATION(1U)
+
+#define S_READ_CENTERING_MODE    0
+#define M_READ_CENTERING_MODE    0x3U
+#define V_READ_CENTERING_MODE(x) ((x) << S_READ_CENTERING_MODE)
+#define G_READ_CENTERING_MODE(x) (((x) >> S_READ_CENTERING_MODE) & M_READ_CENTERING_MODE)
+
+#define A_MC_DDRPHY_DP18_SYSCLK_PR 0x4401c
+
+#define S_SYSCLK_PHASE_ALIGN_RESET    6
+#define V_SYSCLK_PHASE_ALIGN_RESET(x) ((x) << S_SYSCLK_PHASE_ALIGN_RESET)
+#define F_SYSCLK_PHASE_ALIGN_RESET    V_SYSCLK_PHASE_ALIGN_RESET(1U)
+
+#define A_MC_DDRPHY_DP18_DFT_DIG_EYE 0x44020
+
+#define S_DIGITAL_EYE_EN    15
+#define V_DIGITAL_EYE_EN(x) ((x) << S_DIGITAL_EYE_EN)
+#define F_DIGITAL_EYE_EN    V_DIGITAL_EYE_EN(1U)
+
+#define S_BUMP    14
+#define V_BUMP(x) ((x) << S_BUMP)
+#define F_BUMP    V_BUMP(1U)
+
+#define S_TRIG_PERIOD    13
+#define V_TRIG_PERIOD(x) ((x) << S_TRIG_PERIOD)
+#define F_TRIG_PERIOD    V_TRIG_PERIOD(1U)
+
+#define S_CNTL_POL    12
+#define V_CNTL_POL(x) ((x) << S_CNTL_POL)
+#define F_CNTL_POL    V_CNTL_POL(1U)
+
+#define S_CNTL_SRC    8
+#define V_CNTL_SRC(x) ((x) << S_CNTL_SRC)
+#define F_CNTL_SRC    V_CNTL_SRC(1U)
+
+#define S_DIGITAL_EYE_VALUE    0
+#define M_DIGITAL_EYE_VALUE    0xffU
+#define V_DIGITAL_EYE_VALUE(x) ((x) << S_DIGITAL_EYE_VALUE)
+#define G_DIGITAL_EYE_VALUE(x) (((x) >> S_DIGITAL_EYE_VALUE) & M_DIGITAL_EYE_VALUE)
+
+#define A_MC_DDRPHY_DP18_DQS_RD_PHASE_SELECT_RANK_PAIR 0x44024
+
+#define S_DQSCLK_SELECT0    14
+#define M_DQSCLK_SELECT0    0x3U
+#define V_DQSCLK_SELECT0(x) ((x) << S_DQSCLK_SELECT0)
+#define G_DQSCLK_SELECT0(x) (((x) >> S_DQSCLK_SELECT0) & M_DQSCLK_SELECT0)
+
+#define S_RDCLK_SELECT0    12
+#define M_RDCLK_SELECT0    0x3U
+#define V_RDCLK_SELECT0(x) ((x) << S_RDCLK_SELECT0)
+#define G_RDCLK_SELECT0(x) (((x) >> S_RDCLK_SELECT0) & M_RDCLK_SELECT0)
+
+#define S_DQSCLK_SELECT1    10
+#define M_DQSCLK_SELECT1    0x3U
+#define V_DQSCLK_SELECT1(x) ((x) << S_DQSCLK_SELECT1)
+#define G_DQSCLK_SELECT1(x) (((x) >> S_DQSCLK_SELECT1) & M_DQSCLK_SELECT1)
+
+#define S_RDCLK_SELECT1    8
+#define M_RDCLK_SELECT1    0x3U
+#define V_RDCLK_SELECT1(x) ((x) << S_RDCLK_SELECT1)
+#define G_RDCLK_SELECT1(x) (((x) >> S_RDCLK_SELECT1) & M_RDCLK_SELECT1)
+
+#define S_DQSCLK_SELECT2    6
+#define M_DQSCLK_SELECT2    0x3U
+#define V_DQSCLK_SELECT2(x) ((x) << S_DQSCLK_SELECT2)
+#define G_DQSCLK_SELECT2(x) (((x) >> S_DQSCLK_SELECT2) & M_DQSCLK_SELECT2)
+
+#define S_RDCLK_SELECT2    4
+#define M_RDCLK_SELECT2    0x3U
+#define V_RDCLK_SELECT2(x) ((x) << S_RDCLK_SELECT2)
+#define G_RDCLK_SELECT2(x) (((x) >> S_RDCLK_SELECT2) & M_RDCLK_SELECT2)
+
+#define S_DQSCLK_SELECT3    2
+#define M_DQSCLK_SELECT3    0x3U
+#define V_DQSCLK_SELECT3(x) ((x) << S_DQSCLK_SELECT3)
+#define G_DQSCLK_SELECT3(x) (((x) >> S_DQSCLK_SELECT3) & M_DQSCLK_SELECT3)
+
+#define S_RDCLK_SELECT3    0
+#define M_RDCLK_SELECT3    0x3U
+#define V_RDCLK_SELECT3(x) ((x) << S_RDCLK_SELECT3)
+#define G_RDCLK_SELECT3(x) (((x) >> S_RDCLK_SELECT3) & M_RDCLK_SELECT3)
+
+#define A_MC_DDRPHY_DP18_DRIFT_LIMITS 0x44028
+
+#define S_MIN_RD_EYE_SIZE    8
+#define M_MIN_RD_EYE_SIZE    0x3fU
+#define V_MIN_RD_EYE_SIZE(x) ((x) << S_MIN_RD_EYE_SIZE)
+#define G_MIN_RD_EYE_SIZE(x) (((x) >> S_MIN_RD_EYE_SIZE) & M_MIN_RD_EYE_SIZE)
+
+#define S_MAX_DQS_DRIFT    0
+#define M_MAX_DQS_DRIFT    0x3fU
+#define V_MAX_DQS_DRIFT(x) ((x) << S_MAX_DQS_DRIFT)
+#define G_MAX_DQS_DRIFT(x) (((x) >> S_MAX_DQS_DRIFT) & M_MAX_DQS_DRIFT)
+
+#define A_MC_DDRPHY_DP18_DEBUG_SEL 0x4402c
+
+#define S_HS_PROBE_A_SEL    11
+#define M_HS_PROBE_A_SEL    0x1fU
+#define V_HS_PROBE_A_SEL(x) ((x) << S_HS_PROBE_A_SEL)
+#define G_HS_PROBE_A_SEL(x) (((x) >> S_HS_PROBE_A_SEL) & M_HS_PROBE_A_SEL)
+
+#define S_HS_PROBE_B_SEL    6
+#define M_HS_PROBE_B_SEL    0x1fU
+#define V_HS_PROBE_B_SEL(x) ((x) << S_HS_PROBE_B_SEL)
+#define G_HS_PROBE_B_SEL(x) (((x) >> S_HS_PROBE_B_SEL) & M_HS_PROBE_B_SEL)
+
+#define S_RD_DEBUG_SEL    3
+#define M_RD_DEBUG_SEL    0x7U
+#define V_RD_DEBUG_SEL(x) ((x) << S_RD_DEBUG_SEL)
+#define G_RD_DEBUG_SEL(x) (((x) >> S_RD_DEBUG_SEL) & M_RD_DEBUG_SEL)
+
+#define S_WR_DEBUG_SEL    0
+#define M_WR_DEBUG_SEL    0x7U
+#define V_WR_DEBUG_SEL(x) ((x) << S_WR_DEBUG_SEL)
+#define G_WR_DEBUG_SEL(x) (((x) >> S_WR_DEBUG_SEL) & M_WR_DEBUG_SEL)
+
+#define S_DP18_HS_PROBE_A_SEL    11
+#define M_DP18_HS_PROBE_A_SEL    0x1fU
+#define V_DP18_HS_PROBE_A_SEL(x) ((x) << S_DP18_HS_PROBE_A_SEL)
+#define G_DP18_HS_PROBE_A_SEL(x) (((x) >> S_DP18_HS_PROBE_A_SEL) & M_DP18_HS_PROBE_A_SEL)
+
+#define S_DP18_HS_PROBE_B_SEL    6
+#define M_DP18_HS_PROBE_B_SEL    0x1fU
+#define V_DP18_HS_PROBE_B_SEL(x) ((x) << S_DP18_HS_PROBE_B_SEL)
+#define G_DP18_HS_PROBE_B_SEL(x) (((x) >> S_DP18_HS_PROBE_B_SEL) & M_DP18_HS_PROBE_B_SEL)
+
+#define A_MC_DDRPHY_DP18_READ_DELAY_OFFSET0_RANK_PAIR 0x44030
+
+#define S_OFFSET_BITS1_7    8
+#define M_OFFSET_BITS1_7    0x7fU
+#define V_OFFSET_BITS1_7(x) ((x) << S_OFFSET_BITS1_7)
+#define G_OFFSET_BITS1_7(x) (((x) >> S_OFFSET_BITS1_7) & M_OFFSET_BITS1_7)
+
+#define S_OFFSET_BITS9_15    0
+#define M_OFFSET_BITS9_15    0x7fU
+#define V_OFFSET_BITS9_15(x) ((x) << S_OFFSET_BITS9_15)
+#define G_OFFSET_BITS9_15(x) (((x) >> S_OFFSET_BITS9_15) & M_OFFSET_BITS9_15)
+
+#define A_MC_DDRPHY_DP18_READ_DELAY_OFFSET1_RANK_PAIR 0x44034
+#define A_MC_DDRPHY_DP18_RD_LVL_STATUS0 0x44038
+
+#define S_LEADING_EDGE_NOT_FOUND_0    0
+#define M_LEADING_EDGE_NOT_FOUND_0    0xffffU
+#define V_LEADING_EDGE_NOT_FOUND_0(x) ((x) << S_LEADING_EDGE_NOT_FOUND_0)
+#define G_LEADING_EDGE_NOT_FOUND_0(x) (((x) >> S_LEADING_EDGE_NOT_FOUND_0) & M_LEADING_EDGE_NOT_FOUND_0)
+
+#define A_MC_DDRPHY_DP18_RD_LVL_STATUS1 0x4403c
+
+#define S_LEADING_EDGE_NOT_FOUND_1    8
+#define M_LEADING_EDGE_NOT_FOUND_1    0xffU
+#define V_LEADING_EDGE_NOT_FOUND_1(x) ((x) << S_LEADING_EDGE_NOT_FOUND_1)
+#define G_LEADING_EDGE_NOT_FOUND_1(x) (((x) >> S_LEADING_EDGE_NOT_FOUND_1) & M_LEADING_EDGE_NOT_FOUND_1)
+
+#define A_MC_DDRPHY_DP18_RD_LVL_STATUS2 0x44040
+
+#define S_TRAILING_EDGE_NOT_FOUND    0
+#define M_TRAILING_EDGE_NOT_FOUND    0xffffU
+#define V_TRAILING_EDGE_NOT_FOUND(x) ((x) << S_TRAILING_EDGE_NOT_FOUND)
+#define G_TRAILING_EDGE_NOT_FOUND(x) (((x) >> S_TRAILING_EDGE_NOT_FOUND) & M_TRAILING_EDGE_NOT_FOUND)
+
+#define A_MC_DDRPHY_DP18_RD_LVL_STATUS3 0x44044
+
+#define S_TRAILING_EDGE_NOT_FOUND_16_23    8
+#define M_TRAILING_EDGE_NOT_FOUND_16_23    0xffU
+#define V_TRAILING_EDGE_NOT_FOUND_16_23(x) ((x) << S_TRAILING_EDGE_NOT_FOUND_16_23)
+#define G_TRAILING_EDGE_NOT_FOUND_16_23(x) (((x) >> S_TRAILING_EDGE_NOT_FOUND_16_23) & M_TRAILING_EDGE_NOT_FOUND_16_23)
+
+#define A_MC_DDRPHY_DP18_RD_DIA_CONFIG5 0x44048
+
+#define S_DYN_POWER_CNTL_EN    15
+#define V_DYN_POWER_CNTL_EN(x) ((x) << S_DYN_POWER_CNTL_EN)
+#define F_DYN_POWER_CNTL_EN    V_DYN_POWER_CNTL_EN(1U)
+
+#define S_DYN_MCTERM_CNTL_EN    14
+#define V_DYN_MCTERM_CNTL_EN(x) ((x) << S_DYN_MCTERM_CNTL_EN)
+#define F_DYN_MCTERM_CNTL_EN    V_DYN_MCTERM_CNTL_EN(1U)
+
+#define S_DYN_RX_GATE_CNTL_EN    13
+#define V_DYN_RX_GATE_CNTL_EN(x) ((x) << S_DYN_RX_GATE_CNTL_EN)
+#define F_DYN_RX_GATE_CNTL_EN    V_DYN_RX_GATE_CNTL_EN(1U)
+
+#define S_CALGATE_ON    12
+#define V_CALGATE_ON(x) ((x) << S_CALGATE_ON)
+#define F_CALGATE_ON    V_CALGATE_ON(1U)
+
+#define S_PER_RDCLK_UPDATE_DIS    11
+#define V_PER_RDCLK_UPDATE_DIS(x) ((x) << S_PER_RDCLK_UPDATE_DIS)
+#define F_PER_RDCLK_UPDATE_DIS    V_PER_RDCLK_UPDATE_DIS(1U)
+
+#define S_DQS_ALIGN_BY_QUAD    4
+#define V_DQS_ALIGN_BY_QUAD(x) ((x) << S_DQS_ALIGN_BY_QUAD)
+#define F_DQS_ALIGN_BY_QUAD    V_DQS_ALIGN_BY_QUAD(1U)
+
+#define A_MC_DDRPHY_DP18_DQS_GATE_DELAY_RP 0x4404c
+
+#define S_DQS_GATE_DELAY_N0    12
+#define M_DQS_GATE_DELAY_N0    0x7U
+#define V_DQS_GATE_DELAY_N0(x) ((x) << S_DQS_GATE_DELAY_N0)
+#define G_DQS_GATE_DELAY_N0(x) (((x) >> S_DQS_GATE_DELAY_N0) & M_DQS_GATE_DELAY_N0)
+
+#define S_DQS_GATE_DELAY_N1    8
+#define M_DQS_GATE_DELAY_N1    0x7U
+#define V_DQS_GATE_DELAY_N1(x) ((x) << S_DQS_GATE_DELAY_N1)
+#define G_DQS_GATE_DELAY_N1(x) (((x) >> S_DQS_GATE_DELAY_N1) & M_DQS_GATE_DELAY_N1)
+
+#define S_DQS_GATE_DELAY_N2    4
+#define M_DQS_GATE_DELAY_N2    0x7U
+#define V_DQS_GATE_DELAY_N2(x) ((x) << S_DQS_GATE_DELAY_N2)
+#define G_DQS_GATE_DELAY_N2(x) (((x) >> S_DQS_GATE_DELAY_N2) & M_DQS_GATE_DELAY_N2)
+
+#define S_DQS_GATE_DELAY_N3    0
+#define M_DQS_GATE_DELAY_N3    0x7U
+#define V_DQS_GATE_DELAY_N3(x) ((x) << S_DQS_GATE_DELAY_N3)
+#define G_DQS_GATE_DELAY_N3(x) (((x) >> S_DQS_GATE_DELAY_N3) & M_DQS_GATE_DELAY_N3)
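+/*
+ * Four per-nibble gate delays are packed into one register.  A sketch
+ * of extracting nibble 2's delay, assuming t4_read_reg():
+ *
+ *	d2 = G_DQS_GATE_DELAY_N2(t4_read_reg(sc,
+ *	    A_MC_DDRPHY_DP18_DQS_GATE_DELAY_RP));
+ */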
+
+#define A_MC_DDRPHY_DP18_RD_STATUS0 0x44050
+
+#define S_NO_EYE_DETECTED    15
+#define V_NO_EYE_DETECTED(x) ((x) << S_NO_EYE_DETECTED)
+#define F_NO_EYE_DETECTED    V_NO_EYE_DETECTED(1U)
+
+#define S_LEADING_EDGE_FOUND    14
+#define V_LEADING_EDGE_FOUND(x) ((x) << S_LEADING_EDGE_FOUND)
+#define F_LEADING_EDGE_FOUND    V_LEADING_EDGE_FOUND(1U)
+
+#define S_TRAILING_EDGE_FOUND    13
+#define V_TRAILING_EDGE_FOUND(x) ((x) << S_TRAILING_EDGE_FOUND)
+#define F_TRAILING_EDGE_FOUND    V_TRAILING_EDGE_FOUND(1U)
+
+#define S_INCOMPLETE_RD_CAL_N0    12
+#define V_INCOMPLETE_RD_CAL_N0(x) ((x) << S_INCOMPLETE_RD_CAL_N0)
+#define F_INCOMPLETE_RD_CAL_N0    V_INCOMPLETE_RD_CAL_N0(1U)
+
+#define S_INCOMPLETE_RD_CAL_N1    11
+#define V_INCOMPLETE_RD_CAL_N1(x) ((x) << S_INCOMPLETE_RD_CAL_N1)
+#define F_INCOMPLETE_RD_CAL_N1    V_INCOMPLETE_RD_CAL_N1(1U)
+
+#define S_INCOMPLETE_RD_CAL_N2    10
+#define V_INCOMPLETE_RD_CAL_N2(x) ((x) << S_INCOMPLETE_RD_CAL_N2)
+#define F_INCOMPLETE_RD_CAL_N2    V_INCOMPLETE_RD_CAL_N2(1U)
+
+#define S_INCOMPLETE_RD_CAL_N3    9
+#define V_INCOMPLETE_RD_CAL_N3(x) ((x) << S_INCOMPLETE_RD_CAL_N3)
+#define F_INCOMPLETE_RD_CAL_N3    V_INCOMPLETE_RD_CAL_N3(1U)
+
+#define S_COARSE_PATTERN_ERR_N0    8
+#define V_COARSE_PATTERN_ERR_N0(x) ((x) << S_COARSE_PATTERN_ERR_N0)
+#define F_COARSE_PATTERN_ERR_N0    V_COARSE_PATTERN_ERR_N0(1U)
+
+#define S_COARSE_PATTERN_ERR_N1    7
+#define V_COARSE_PATTERN_ERR_N1(x) ((x) << S_COARSE_PATTERN_ERR_N1)
+#define F_COARSE_PATTERN_ERR_N1    V_COARSE_PATTERN_ERR_N1(1U)
+
+#define S_COARSE_PATTERN_ERR_N2    6
+#define V_COARSE_PATTERN_ERR_N2(x) ((x) << S_COARSE_PATTERN_ERR_N2)
+#define F_COARSE_PATTERN_ERR_N2    V_COARSE_PATTERN_ERR_N2(1U)
+
+#define S_COARSE_PATTERN_ERR_N3    5
+#define V_COARSE_PATTERN_ERR_N3(x) ((x) << S_COARSE_PATTERN_ERR_N3)
+#define F_COARSE_PATTERN_ERR_N3    V_COARSE_PATTERN_ERR_N3(1U)
+
+#define S_EYE_CLIPPING    4
+#define V_EYE_CLIPPING(x) ((x) << S_EYE_CLIPPING)
+#define F_EYE_CLIPPING    V_EYE_CLIPPING(1U)
+
+#define S_NO_DQS    3
+#define V_NO_DQS(x) ((x) << S_NO_DQS)
+#define F_NO_DQS    V_NO_DQS(1U)
+
+#define S_NO_LOCK    2
+#define V_NO_LOCK(x) ((x) << S_NO_LOCK)
+#define F_NO_LOCK    V_NO_LOCK(1U)
+
+#define S_DRIFT_ERROR    1
+#define V_DRIFT_ERROR(x) ((x) << S_DRIFT_ERROR)
+#define F_DRIFT_ERROR    V_DRIFT_ERROR(1U)
+
+#define S_MIN_EYE    0
+#define V_MIN_EYE(x) ((x) << S_MIN_EYE)
+#define F_MIN_EYE    V_MIN_EYE(1U)
+
+#define A_MC_DDRPHY_DP18_RD_ERROR_MASK0 0x44054
+
+#define S_NO_EYE_DETECTED_MASK    15
+#define V_NO_EYE_DETECTED_MASK(x) ((x) << S_NO_EYE_DETECTED_MASK)
+#define F_NO_EYE_DETECTED_MASK    V_NO_EYE_DETECTED_MASK(1U)
+
+#define S_LEADING_EDGE_FOUND_MASK    14
+#define V_LEADING_EDGE_FOUND_MASK(x) ((x) << S_LEADING_EDGE_FOUND_MASK)
+#define F_LEADING_EDGE_FOUND_MASK    V_LEADING_EDGE_FOUND_MASK(1U)
+
+#define S_TRAILING_EDGE_FOUND_MASK    13
+#define V_TRAILING_EDGE_FOUND_MASK(x) ((x) << S_TRAILING_EDGE_FOUND_MASK)
+#define F_TRAILING_EDGE_FOUND_MASK    V_TRAILING_EDGE_FOUND_MASK(1U)
+
+#define S_INCOMPLETE_RD_CAL_N0_MASK    12
+#define V_INCOMPLETE_RD_CAL_N0_MASK(x) ((x) << S_INCOMPLETE_RD_CAL_N0_MASK)
+#define F_INCOMPLETE_RD_CAL_N0_MASK    V_INCOMPLETE_RD_CAL_N0_MASK(1U)
+
+#define S_INCOMPLETE_RD_CAL_N1_MASK    11
+#define V_INCOMPLETE_RD_CAL_N1_MASK(x) ((x) << S_INCOMPLETE_RD_CAL_N1_MASK)
+#define F_INCOMPLETE_RD_CAL_N1_MASK    V_INCOMPLETE_RD_CAL_N1_MASK(1U)
+
+#define S_INCOMPLETE_RD_CAL_N2_MASK    10
+#define V_INCOMPLETE_RD_CAL_N2_MASK(x) ((x) << S_INCOMPLETE_RD_CAL_N2_MASK)
+#define F_INCOMPLETE_RD_CAL_N2_MASK    V_INCOMPLETE_RD_CAL_N2_MASK(1U)
+
+#define S_INCOMPLETE_RD_CAL_N3_MASK    9
+#define V_INCOMPLETE_RD_CAL_N3_MASK(x) ((x) << S_INCOMPLETE_RD_CAL_N3_MASK)
+#define F_INCOMPLETE_RD_CAL_N3_MASK    V_INCOMPLETE_RD_CAL_N3_MASK(1U)
+
+#define S_COARSE_PATTERN_ERR_N0_MASK    8
+#define V_COARSE_PATTERN_ERR_N0_MASK(x) ((x) << S_COARSE_PATTERN_ERR_N0_MASK)
+#define F_COARSE_PATTERN_ERR_N0_MASK    V_COARSE_PATTERN_ERR_N0_MASK(1U)
+
+#define S_COARSE_PATTERN_ERR_N1_MASK    7
+#define V_COARSE_PATTERN_ERR_N1_MASK(x) ((x) << S_COARSE_PATTERN_ERR_N1_MASK)
+#define F_COARSE_PATTERN_ERR_N1_MASK    V_COARSE_PATTERN_ERR_N1_MASK(1U)
+
+#define S_COARSE_PATTERN_ERR_N2_MASK    6
+#define V_COARSE_PATTERN_ERR_N2_MASK(x) ((x) << S_COARSE_PATTERN_ERR_N2_MASK)
+#define F_COARSE_PATTERN_ERR_N2_MASK    V_COARSE_PATTERN_ERR_N2_MASK(1U)
+
+#define S_COARSE_PATTERN_ERR_N3_MASK    5
+#define V_COARSE_PATTERN_ERR_N3_MASK(x) ((x) << S_COARSE_PATTERN_ERR_N3_MASK)
+#define F_COARSE_PATTERN_ERR_N3_MASK    V_COARSE_PATTERN_ERR_N3_MASK(1U)
+
+#define S_EYE_CLIPPING_MASK    4
+#define V_EYE_CLIPPING_MASK(x) ((x) << S_EYE_CLIPPING_MASK)
+#define F_EYE_CLIPPING_MASK    V_EYE_CLIPPING_MASK(1U)
+
+#define S_NO_DQS_MASK    3
+#define V_NO_DQS_MASK(x) ((x) << S_NO_DQS_MASK)
+#define F_NO_DQS_MASK    V_NO_DQS_MASK(1U)
+
+#define S_NO_LOCK_MASK    2
+#define V_NO_LOCK_MASK(x) ((x) << S_NO_LOCK_MASK)
+#define F_NO_LOCK_MASK    V_NO_LOCK_MASK(1U)
+
+#define S_DRIFT_ERROR_MASK    1
+#define V_DRIFT_ERROR_MASK(x) ((x) << S_DRIFT_ERROR_MASK)
+#define F_DRIFT_ERROR_MASK    V_DRIFT_ERROR_MASK(1U)
+
+#define S_MIN_EYE_MASK    0
+#define V_MIN_EYE_MASK(x) ((x) << S_MIN_EYE_MASK)
+#define F_MIN_EYE_MASK    V_MIN_EYE_MASK(1U)
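+/*
+ * RD_ERROR_MASK0 mirrors RD_STATUS0 bit for bit, so a mask value can
+ * be composed from the matching *_MASK flags.  Sketch, assuming
+ * t4_write_reg():
+ *
+ *	t4_write_reg(sc, A_MC_DDRPHY_DP18_RD_ERROR_MASK0,
+ *	    F_NO_DQS_MASK | F_NO_LOCK_MASK | F_DRIFT_ERROR_MASK);
+ */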
+
+#define A_MC_DDRPHY_DP18_WRCLK_CNTL 0x44058
+
+#define S_PRBS_WAIT    14
+#define M_PRBS_WAIT    0x3U
+#define V_PRBS_WAIT(x) ((x) << S_PRBS_WAIT)
+#define G_PRBS_WAIT(x) (((x) >> S_PRBS_WAIT) & M_PRBS_WAIT)
+
+#define S_PRBS_SYNC_EARLY    13
+#define V_PRBS_SYNC_EARLY(x) ((x) << S_PRBS_SYNC_EARLY)
+#define F_PRBS_SYNC_EARLY    V_PRBS_SYNC_EARLY(1U)
+
+#define S_RD_DELAY_EARLY    12
+#define V_RD_DELAY_EARLY(x) ((x) << S_RD_DELAY_EARLY)
+#define F_RD_DELAY_EARLY    V_RD_DELAY_EARLY(1U)
+
+#define S_SS_QUAD_CAL    10
+#define V_SS_QUAD_CAL(x) ((x) << S_SS_QUAD_CAL)
+#define F_SS_QUAD_CAL    V_SS_QUAD_CAL(1U)
+
+#define S_SS_QUAD    8
+#define M_SS_QUAD    0x3U
+#define V_SS_QUAD(x) ((x) << S_SS_QUAD)
+#define G_SS_QUAD(x) (((x) >> S_SS_QUAD) & M_SS_QUAD)
+
+#define S_SS_RD_DELAY    7
+#define V_SS_RD_DELAY(x) ((x) << S_SS_RD_DELAY)
+#define F_SS_RD_DELAY    V_SS_RD_DELAY(1U)
+
+#define S_FORCE_HI_Z    6
+#define V_FORCE_HI_Z(x) ((x) << S_FORCE_HI_Z)
+#define F_FORCE_HI_Z    V_FORCE_HI_Z(1U)
+
+#define A_MC_DDRPHY_DP18_WR_LVL_STATUS0 0x4405c
+
+#define S_CLK_LEVEL    14
+#define M_CLK_LEVEL    0x3U
+#define V_CLK_LEVEL(x) ((x) << S_CLK_LEVEL)
+#define G_CLK_LEVEL(x) (((x) >> S_CLK_LEVEL) & M_CLK_LEVEL)
+
+#define S_FINE_STEPPING    13
+#define V_FINE_STEPPING(x) ((x) << S_FINE_STEPPING)
+#define F_FINE_STEPPING    V_FINE_STEPPING(1U)
+
+#define S_DONE    12
+#define V_DONE(x) ((x) << S_DONE)
+#define F_DONE    V_DONE(1U)
+
+#define S_WL_ERR_CLK16_ST    11
+#define V_WL_ERR_CLK16_ST(x) ((x) << S_WL_ERR_CLK16_ST)
+#define F_WL_ERR_CLK16_ST    V_WL_ERR_CLK16_ST(1U)
+
+#define S_WL_ERR_CLK18_ST    10
+#define V_WL_ERR_CLK18_ST(x) ((x) << S_WL_ERR_CLK18_ST)
+#define F_WL_ERR_CLK18_ST    V_WL_ERR_CLK18_ST(1U)
+
+#define S_WL_ERR_CLK20_ST    9
+#define V_WL_ERR_CLK20_ST(x) ((x) << S_WL_ERR_CLK20_ST)
+#define F_WL_ERR_CLK20_ST    V_WL_ERR_CLK20_ST(1U)
+
+#define S_WL_ERR_CLK22_ST    8
+#define V_WL_ERR_CLK22_ST(x) ((x) << S_WL_ERR_CLK22_ST)
+#define F_WL_ERR_CLK22_ST    V_WL_ERR_CLK22_ST(1U)
+
+#define S_ZERO_DETECTED    7
+#define V_ZERO_DETECTED(x) ((x) << S_ZERO_DETECTED)
+#define F_ZERO_DETECTED    V_ZERO_DETECTED(1U)
+
+#define S_WR_LVL_DONE    12
+#define V_WR_LVL_DONE(x) ((x) << S_WR_LVL_DONE)
+#define F_WR_LVL_DONE    V_WR_LVL_DONE(1U)
+
+#define A_MC_DDRPHY_DP18_WR_CNTR_STATUS0 0x44060
+
+#define S_BIT_CENTERED    11
+#define M_BIT_CENTERED    0x1fU
+#define V_BIT_CENTERED(x) ((x) << S_BIT_CENTERED)
+#define G_BIT_CENTERED(x) (((x) >> S_BIT_CENTERED) & M_BIT_CENTERED)
+
+#define S_SMALL_STEP_LEFT    10
+#define V_SMALL_STEP_LEFT(x) ((x) << S_SMALL_STEP_LEFT)
+#define F_SMALL_STEP_LEFT    V_SMALL_STEP_LEFT(1U)
+
+#define S_BIG_STEP_RIGHT    9
+#define V_BIG_STEP_RIGHT(x) ((x) << S_BIG_STEP_RIGHT)
+#define F_BIG_STEP_RIGHT    V_BIG_STEP_RIGHT(1U)
+
+#define S_MATCH_STEP_RIGHT    8
+#define V_MATCH_STEP_RIGHT(x) ((x) << S_MATCH_STEP_RIGHT)
+#define F_MATCH_STEP_RIGHT    V_MATCH_STEP_RIGHT(1U)
+
+#define S_JUMP_BACK_RIGHT    7
+#define V_JUMP_BACK_RIGHT(x) ((x) << S_JUMP_BACK_RIGHT)
+#define F_JUMP_BACK_RIGHT    V_JUMP_BACK_RIGHT(1U)
+
+#define S_SMALL_STEP_RIGHT    6
+#define V_SMALL_STEP_RIGHT(x) ((x) << S_SMALL_STEP_RIGHT)
+#define F_SMALL_STEP_RIGHT    V_SMALL_STEP_RIGHT(1U)
+
+#define S_DDONE    5
+#define V_DDONE(x) ((x) << S_DDONE)
+#define F_DDONE    V_DDONE(1U)
+
+#define S_WR_CNTR_DONE    5
+#define V_WR_CNTR_DONE(x) ((x) << S_WR_CNTR_DONE)
+#define F_WR_CNTR_DONE    V_WR_CNTR_DONE(1U)
+
+#define A_MC_DDRPHY_DP18_WR_CNTR_STATUS1 0x44064
+
+#define S_FW_LEFT_SIDE    5
+#define M_FW_LEFT_SIDE    0x7ffU
+#define V_FW_LEFT_SIDE(x) ((x) << S_FW_LEFT_SIDE)
+#define G_FW_LEFT_SIDE(x) (((x) >> S_FW_LEFT_SIDE) & M_FW_LEFT_SIDE)
+
+#define A_MC_DDRPHY_DP18_WR_CNTR_STATUS2 0x44068
+
+#define S_FW_RIGHT_SIDE    5
+#define M_FW_RIGHT_SIDE    0x7ffU
+#define V_FW_RIGHT_SIDE(x) ((x) << S_FW_RIGHT_SIDE)
+#define G_FW_RIGHT_SIDE(x) (((x) >> S_FW_RIGHT_SIDE) & M_FW_RIGHT_SIDE)
+
+#define A_MC_DDRPHY_DP18_WR_ERROR0 0x4406c
+
+#define S_WL_ERR_CLK16    15
+#define V_WL_ERR_CLK16(x) ((x) << S_WL_ERR_CLK16)
+#define F_WL_ERR_CLK16    V_WL_ERR_CLK16(1U)
+
+#define S_WL_ERR_CLK18    14
+#define V_WL_ERR_CLK18(x) ((x) << S_WL_ERR_CLK18)
+#define F_WL_ERR_CLK18    V_WL_ERR_CLK18(1U)
+
+#define S_WL_ERR_CLK20    13
+#define V_WL_ERR_CLK20(x) ((x) << S_WL_ERR_CLK20)
+#define F_WL_ERR_CLK20    V_WL_ERR_CLK20(1U)
+
+#define S_WL_ERR_CLK22    12
+#define V_WL_ERR_CLK22(x) ((x) << S_WL_ERR_CLK22)
+#define F_WL_ERR_CLK22    V_WL_ERR_CLK22(1U)
+
+#define S_VALID_NS_BIG_L    7
+#define V_VALID_NS_BIG_L(x) ((x) << S_VALID_NS_BIG_L)
+#define F_VALID_NS_BIG_L    V_VALID_NS_BIG_L(1U)
+
+#define S_INVALID_NS_SMALL_L    6
+#define V_INVALID_NS_SMALL_L(x) ((x) << S_INVALID_NS_SMALL_L)
+#define F_INVALID_NS_SMALL_L    V_INVALID_NS_SMALL_L(1U)
+
+#define S_VALID_NS_BIG_R    5
+#define V_VALID_NS_BIG_R(x) ((x) << S_VALID_NS_BIG_R)
+#define F_VALID_NS_BIG_R    V_VALID_NS_BIG_R(1U)
+
+#define S_INVALID_NS_BIG_R    4
+#define V_INVALID_NS_BIG_R(x) ((x) << S_INVALID_NS_BIG_R)
+#define F_INVALID_NS_BIG_R    V_INVALID_NS_BIG_R(1U)
+
+#define S_VALID_NS_JUMP_BACK    3
+#define V_VALID_NS_JUMP_BACK(x) ((x) << S_VALID_NS_JUMP_BACK)
+#define F_VALID_NS_JUMP_BACK    V_VALID_NS_JUMP_BACK(1U)
+
+#define S_INVALID_NS_SMALL_R    2
+#define V_INVALID_NS_SMALL_R(x) ((x) << S_INVALID_NS_SMALL_R)
+#define F_INVALID_NS_SMALL_R    V_INVALID_NS_SMALL_R(1U)
+
+#define S_OFFSET_ERR    1
+#define V_OFFSET_ERR(x) ((x) << S_OFFSET_ERR)
+#define F_OFFSET_ERR    V_OFFSET_ERR(1U)
+
+#define A_MC_DDRPHY_DP18_WR_ERROR_MASK0 0x44070
+
+#define S_WL_ERR_CLK16_MASK    15
+#define V_WL_ERR_CLK16_MASK(x) ((x) << S_WL_ERR_CLK16_MASK)
+#define F_WL_ERR_CLK16_MASK    V_WL_ERR_CLK16_MASK(1U)
+
+#define S_WL_ERR_CLK18_MASK    14
+#define V_WL_ERR_CLK18_MASK(x) ((x) << S_WL_ERR_CLK18_MASK)
+#define F_WL_ERR_CLK18_MASK    V_WL_ERR_CLK18_MASK(1U)
+
+#define S_WL_ERR_CLK20_MASK    13
+#define V_WL_ERR_CLK20_MASK(x) ((x) << S_WL_ERR_CLK20_MASK)
+#define F_WL_ERR_CLK20_MASK    V_WL_ERR_CLK20_MASK(1U)
+
+#define S_WL_ERR_CLK22_MASK    12
+#define V_WL_ERR_CLK22_MASK(x) ((x) << S_WL_ERR_CLK22_MASK)
+#define F_WL_ERR_CLK22_MASK    V_WL_ERR_CLK22_MASK(1U)
+
+#define S_VALID_NS_BIG_L_MASK    7
+#define V_VALID_NS_BIG_L_MASK(x) ((x) << S_VALID_NS_BIG_L_MASK)
+#define F_VALID_NS_BIG_L_MASK    V_VALID_NS_BIG_L_MASK(1U)
+
+#define S_INVALID_NS_SMALL_L_MASK    6
+#define V_INVALID_NS_SMALL_L_MASK(x) ((x) << S_INVALID_NS_SMALL_L_MASK)
+#define F_INVALID_NS_SMALL_L_MASK    V_INVALID_NS_SMALL_L_MASK(1U)
+
+#define S_VALID_NS_BIG_R_MASK    5
+#define V_VALID_NS_BIG_R_MASK(x) ((x) << S_VALID_NS_BIG_R_MASK)
+#define F_VALID_NS_BIG_R_MASK    V_VALID_NS_BIG_R_MASK(1U)
+
+#define S_INVALID_NS_BIG_R_MASK    4
+#define V_INVALID_NS_BIG_R_MASK(x) ((x) << S_INVALID_NS_BIG_R_MASK)
+#define F_INVALID_NS_BIG_R_MASK    V_INVALID_NS_BIG_R_MASK(1U)
+
+#define S_VALID_NS_JUMP_BACK_MASK    3
+#define V_VALID_NS_JUMP_BACK_MASK(x) ((x) << S_VALID_NS_JUMP_BACK_MASK)
+#define F_VALID_NS_JUMP_BACK_MASK    V_VALID_NS_JUMP_BACK_MASK(1U)
+
+#define S_INVALID_NS_SMALL_R_MASK    2
+#define V_INVALID_NS_SMALL_R_MASK(x) ((x) << S_INVALID_NS_SMALL_R_MASK)
+#define F_INVALID_NS_SMALL_R_MASK    V_INVALID_NS_SMALL_R_MASK(1U)
+
+#define S_OFFSET_ERR_MASK    1
+#define V_OFFSET_ERR_MASK(x) ((x) << S_OFFSET_ERR_MASK)
+#define F_OFFSET_ERR_MASK    V_OFFSET_ERR_MASK(1U)
+
+#define S_DQS_REC_LOW_POWER    11
+#define V_DQS_REC_LOW_POWER(x) ((x) << S_DQS_REC_LOW_POWER)
+#define F_DQS_REC_LOW_POWER    V_DQS_REC_LOW_POWER(1U)
+
+#define S_DQ_REC_LOW_POWER    10
+#define V_DQ_REC_LOW_POWER(x) ((x) << S_DQ_REC_LOW_POWER)
+#define F_DQ_REC_LOW_POWER    V_DQ_REC_LOW_POWER(1U)
+
+#define S_ADVANCE_PR_VALUE    0
+#define V_ADVANCE_PR_VALUE(x) ((x) << S_ADVANCE_PR_VALUE)
+#define F_ADVANCE_PR_VALUE    V_ADVANCE_PR_VALUE(1U)
+
+#define A_MC_DDRPHY_DP18_DFT_WRAP_STATUS 0x44074
+
+#define S_CHECKER_RESET    14
+#define V_CHECKER_RESET(x) ((x) << S_CHECKER_RESET)
+#define F_CHECKER_RESET    V_CHECKER_RESET(1U)
+
+#define S_DP18_DFT_SYNC    6
+#define M_DP18_DFT_SYNC    0x3fU
+#define V_DP18_DFT_SYNC(x) ((x) << S_DP18_DFT_SYNC)
+#define G_DP18_DFT_SYNC(x) (((x) >> S_DP18_DFT_SYNC) & M_DP18_DFT_SYNC)
+
+#define S_ERROR    0
+#define M_ERROR    0x3fU
+#define V_ERROR(x) ((x) << S_ERROR)
+#define G_ERROR(x) (((x) >> S_ERROR) & M_ERROR)
+
+#define S_CHECKER_ENABLE    15
+#define V_CHECKER_ENABLE(x) ((x) << S_CHECKER_ENABLE)
+#define F_CHECKER_ENABLE    V_CHECKER_ENABLE(1U)
+
+#define S_DP18_DFT_ERROR    0
+#define M_DP18_DFT_ERROR    0x3fU
+#define V_DP18_DFT_ERROR(x) ((x) << S_DP18_DFT_ERROR)
+#define G_DP18_DFT_ERROR(x) (((x) >> S_DP18_DFT_ERROR) & M_DP18_DFT_ERROR)
+
+#define A_MC_DDRPHY_DP18_RD_DIA_CONFIG0 0x44078
+
+#define S_SYSCLK_RDCLK_OFFSET    8
+#define M_SYSCLK_RDCLK_OFFSET    0x7fU
+#define V_SYSCLK_RDCLK_OFFSET(x) ((x) << S_SYSCLK_RDCLK_OFFSET)
+#define G_SYSCLK_RDCLK_OFFSET(x) (((x) >> S_SYSCLK_RDCLK_OFFSET) & M_SYSCLK_RDCLK_OFFSET)
+
+#define S_SYSCLK_DQSCLK_OFFSET    0
+#define M_SYSCLK_DQSCLK_OFFSET    0x7fU
+#define V_SYSCLK_DQSCLK_OFFSET(x) ((x) << S_SYSCLK_DQSCLK_OFFSET)
+#define G_SYSCLK_DQSCLK_OFFSET(x) (((x) >> S_SYSCLK_DQSCLK_OFFSET) & M_SYSCLK_DQSCLK_OFFSET)
+
+#define S_T6_SYSCLK_DQSCLK_OFFSET    8
+#define M_T6_SYSCLK_DQSCLK_OFFSET    0x7fU
+#define V_T6_SYSCLK_DQSCLK_OFFSET(x) ((x) << S_T6_SYSCLK_DQSCLK_OFFSET)
+#define G_T6_SYSCLK_DQSCLK_OFFSET(x) (((x) >> S_T6_SYSCLK_DQSCLK_OFFSET) & M_T6_SYSCLK_DQSCLK_OFFSET)
+
+#define S_T6_SYSCLK_RDCLK_OFFSET    0
+#define M_T6_SYSCLK_RDCLK_OFFSET    0x7fU
+#define V_T6_SYSCLK_RDCLK_OFFSET(x) ((x) << S_T6_SYSCLK_RDCLK_OFFSET)
+#define G_T6_SYSCLK_RDCLK_OFFSET(x) (((x) >> S_T6_SYSCLK_RDCLK_OFFSET) & M_T6_SYSCLK_RDCLK_OFFSET)
+
+#define A_MC_DDRPHY_DP18_WRCLK_AUX_CNTL 0x4407c
+#define A_MC_DDRPHY_DP18_DQSCLK_PR0_RANK_PAIR 0x440c0
+
+#define S_DQSCLK_ROT_CLK_N0_N2    8
+#define M_DQSCLK_ROT_CLK_N0_N2    0x7fU
+#define V_DQSCLK_ROT_CLK_N0_N2(x) ((x) << S_DQSCLK_ROT_CLK_N0_N2)
+#define G_DQSCLK_ROT_CLK_N0_N2(x) (((x) >> S_DQSCLK_ROT_CLK_N0_N2) & M_DQSCLK_ROT_CLK_N0_N2)
+
+#define S_DQSCLK_ROT_CLK_N1_N3    0
+#define M_DQSCLK_ROT_CLK_N1_N3    0x7fU
+#define V_DQSCLK_ROT_CLK_N1_N3(x) ((x) << S_DQSCLK_ROT_CLK_N1_N3)
+#define G_DQSCLK_ROT_CLK_N1_N3(x) (((x) >> S_DQSCLK_ROT_CLK_N1_N3) & M_DQSCLK_ROT_CLK_N1_N3)
+
+#define A_MC_DDRPHY_DP18_DQSCLK_PR1_RANK_PAIR 0x440c4
+#define A_MC_DDRPHY_DP18_PATTERN_POS_0 0x440c8
+
+#define S_MEMINTD00_POS    14
+#define M_MEMINTD00_POS    0x3U
+#define V_MEMINTD00_POS(x) ((x) << S_MEMINTD00_POS)
+#define G_MEMINTD00_POS(x) (((x) >> S_MEMINTD00_POS) & M_MEMINTD00_POS)
+
+#define S_MEMINTD01_POS    12
+#define M_MEMINTD01_POS    0x3U
+#define V_MEMINTD01_POS(x) ((x) << S_MEMINTD01_POS)
+#define G_MEMINTD01_POS(x) (((x) >> S_MEMINTD01_POS) & M_MEMINTD01_POS)
+
+#define S_MEMINTD02_POS    10
+#define M_MEMINTD02_POS    0x3U
+#define V_MEMINTD02_POS(x) ((x) << S_MEMINTD02_POS)
+#define G_MEMINTD02_POS(x) (((x) >> S_MEMINTD02_POS) & M_MEMINTD02_POS)
+
+#define S_MEMINTD03_POS    8
+#define M_MEMINTD03_POS    0x3U
+#define V_MEMINTD03_POS(x) ((x) << S_MEMINTD03_POS)
+#define G_MEMINTD03_POS(x) (((x) >> S_MEMINTD03_POS) & M_MEMINTD03_POS)
+
+#define S_MEMINTD04_POS    6
+#define M_MEMINTD04_POS    0x3U
+#define V_MEMINTD04_POS(x) ((x) << S_MEMINTD04_POS)
+#define G_MEMINTD04_POS(x) (((x) >> S_MEMINTD04_POS) & M_MEMINTD04_POS)
+
+#define S_MEMINTD05_POS    4
+#define M_MEMINTD05_POS    0x3U
+#define V_MEMINTD05_POS(x) ((x) << S_MEMINTD05_POS)
+#define G_MEMINTD05_POS(x) (((x) >> S_MEMINTD05_POS) & M_MEMINTD05_POS)
+
+#define S_MEMINTD06_POS    2
+#define M_MEMINTD06_POS    0x3U
+#define V_MEMINTD06_POS(x) ((x) << S_MEMINTD06_POS)
+#define G_MEMINTD06_POS(x) (((x) >> S_MEMINTD06_POS) & M_MEMINTD06_POS)
+
+#define S_MEMINTD07_POS    0
+#define M_MEMINTD07_POS    0x3U
+#define V_MEMINTD07_POS(x) ((x) << S_MEMINTD07_POS)
+#define G_MEMINTD07_POS(x) (((x) >> S_MEMINTD07_POS) & M_MEMINTD07_POS)
+
+#define A_MC_DDRPHY_DP18_PATTERN_POS_1 0x440cc
+
+#define S_MEMINTD08_POS    14
+#define M_MEMINTD08_POS    0x3U
+#define V_MEMINTD08_POS(x) ((x) << S_MEMINTD08_POS)
+#define G_MEMINTD08_POS(x) (((x) >> S_MEMINTD08_POS) & M_MEMINTD08_POS)
+
+#define S_MEMINTD09_POS    12
+#define M_MEMINTD09_POS    0x3U
+#define V_MEMINTD09_POS(x) ((x) << S_MEMINTD09_POS)
+#define G_MEMINTD09_POS(x) (((x) >> S_MEMINTD09_POS) & M_MEMINTD09_POS)
+
+#define S_MEMINTD10_POS    10
+#define M_MEMINTD10_POS    0x3U
+#define V_MEMINTD10_POS(x) ((x) << S_MEMINTD10_POS)
+#define G_MEMINTD10_POS(x) (((x) >> S_MEMINTD10_POS) & M_MEMINTD10_POS)
+
+#define S_MEMINTD11_POS    8
+#define M_MEMINTD11_POS    0x3U
+#define V_MEMINTD11_POS(x) ((x) << S_MEMINTD11_POS)
+#define G_MEMINTD11_POS(x) (((x) >> S_MEMINTD11_POS) & M_MEMINTD11_POS)
+
+#define S_MEMINTD12_POS    6
+#define M_MEMINTD12_POS    0x3U
+#define V_MEMINTD12_POS(x) ((x) << S_MEMINTD12_POS)
+#define G_MEMINTD12_POS(x) (((x) >> S_MEMINTD12_POS) & M_MEMINTD12_POS)
+
+#define S_MEMINTD13_POS    4
+#define M_MEMINTD13_POS    0x3U
+#define V_MEMINTD13_POS(x) ((x) << S_MEMINTD13_POS)
+#define G_MEMINTD13_POS(x) (((x) >> S_MEMINTD13_POS) & M_MEMINTD13_POS)
+
+#define S_MEMINTD14_POS    2
+#define M_MEMINTD14_POS    0x3U
+#define V_MEMINTD14_POS(x) ((x) << S_MEMINTD14_POS)
+#define G_MEMINTD14_POS(x) (((x) >> S_MEMINTD14_POS) & M_MEMINTD14_POS)
+
+#define S_MEMINTD15_POS    0
+#define M_MEMINTD15_POS    0x3U
+#define V_MEMINTD15_POS(x) ((x) << S_MEMINTD15_POS)
+#define G_MEMINTD15_POS(x) (((x) >> S_MEMINTD15_POS) & M_MEMINTD15_POS)
+
+#define A_MC_DDRPHY_DP18_PATTERN_POS_2 0x440d0
+
+#define S_MEMINTD16_POS    14
+#define M_MEMINTD16_POS    0x3U
+#define V_MEMINTD16_POS(x) ((x) << S_MEMINTD16_POS)
+#define G_MEMINTD16_POS(x) (((x) >> S_MEMINTD16_POS) & M_MEMINTD16_POS)
+
+#define S_MEMINTD17_POS    12
+#define M_MEMINTD17_POS    0x3U
+#define V_MEMINTD17_POS(x) ((x) << S_MEMINTD17_POS)
+#define G_MEMINTD17_POS(x) (((x) >> S_MEMINTD17_POS) & M_MEMINTD17_POS)
+
+#define S_MEMINTD18_POS    10
+#define M_MEMINTD18_POS    0x3U
+#define V_MEMINTD18_POS(x) ((x) << S_MEMINTD18_POS)
+#define G_MEMINTD18_POS(x) (((x) >> S_MEMINTD18_POS) & M_MEMINTD18_POS)
+
+#define S_MEMINTD19_POS    8
+#define M_MEMINTD19_POS    0x3U
+#define V_MEMINTD19_POS(x) ((x) << S_MEMINTD19_POS)
+#define G_MEMINTD19_POS(x) (((x) >> S_MEMINTD19_POS) & M_MEMINTD19_POS)
+
+#define S_MEMINTD20_POS    6
+#define M_MEMINTD20_POS    0x3U
+#define V_MEMINTD20_POS(x) ((x) << S_MEMINTD20_POS)
+#define G_MEMINTD20_POS(x) (((x) >> S_MEMINTD20_POS) & M_MEMINTD20_POS)
+
+#define S_MEMINTD21_POS    4
+#define M_MEMINTD21_POS    0x3U
+#define V_MEMINTD21_POS(x) ((x) << S_MEMINTD21_POS)
+#define G_MEMINTD21_POS(x) (((x) >> S_MEMINTD21_POS) & M_MEMINTD21_POS)
+
+#define S_MEMINTD22_POS    2
+#define M_MEMINTD22_POS    0x3U
+#define V_MEMINTD22_POS(x) ((x) << S_MEMINTD22_POS)
+#define G_MEMINTD22_POS(x) (((x) >> S_MEMINTD22_POS) & M_MEMINTD22_POS)
+
+#define S_MEMINTD23_POS    0
+#define M_MEMINTD23_POS    0x3U
+#define V_MEMINTD23_POS(x) ((x) << S_MEMINTD23_POS)
+#define G_MEMINTD23_POS(x) (((x) >> S_MEMINTD23_POS) & M_MEMINTD23_POS)
+
+#define A_MC_DDRPHY_DP18_RD_DIA_CONFIG1 0x440d4
+
+#define S_DQS_ALIGN_SM    11
+#define M_DQS_ALIGN_SM    0x1fU
+#define V_DQS_ALIGN_SM(x) ((x) << S_DQS_ALIGN_SM)
+#define G_DQS_ALIGN_SM(x) (((x) >> S_DQS_ALIGN_SM) & M_DQS_ALIGN_SM)
+
+#define S_DQS_ALIGN_CNTR    7
+#define M_DQS_ALIGN_CNTR    0xfU
+#define V_DQS_ALIGN_CNTR(x) ((x) << S_DQS_ALIGN_CNTR)
+#define G_DQS_ALIGN_CNTR(x) (((x) >> S_DQS_ALIGN_CNTR) & M_DQS_ALIGN_CNTR)
+
+#define S_ITERATION_CNTR    6
+#define V_ITERATION_CNTR(x) ((x) << S_ITERATION_CNTR)
+#define F_ITERATION_CNTR    V_ITERATION_CNTR(1U)
+
+#define S_DQS_ALIGN_ITER_CNTR    0
+#define M_DQS_ALIGN_ITER_CNTR    0x3fU
+#define V_DQS_ALIGN_ITER_CNTR(x) ((x) << S_DQS_ALIGN_ITER_CNTR)
+#define G_DQS_ALIGN_ITER_CNTR(x) (((x) >> S_DQS_ALIGN_ITER_CNTR) & M_DQS_ALIGN_ITER_CNTR)
+
+#define A_MC_DDRPHY_DP18_RD_DIA_CONFIG2 0x440d8
+
+#define S_CALIBRATE_BIT    13
+#define M_CALIBRATE_BIT    0x7U
+#define V_CALIBRATE_BIT(x) ((x) << S_CALIBRATE_BIT)
+#define G_CALIBRATE_BIT(x) (((x) >> S_CALIBRATE_BIT) & M_CALIBRATE_BIT)
+
+#define S_DQS_ALIGN_QUAD    11
+#define M_DQS_ALIGN_QUAD    0x3U
+#define V_DQS_ALIGN_QUAD(x) ((x) << S_DQS_ALIGN_QUAD)
+#define G_DQS_ALIGN_QUAD(x) (((x) >> S_DQS_ALIGN_QUAD) & M_DQS_ALIGN_QUAD)
+
+#define S_DQS_QUAD_CONFIG    8
+#define M_DQS_QUAD_CONFIG    0x7U
+#define V_DQS_QUAD_CONFIG(x) ((x) << S_DQS_QUAD_CONFIG)
+#define G_DQS_QUAD_CONFIG(x) (((x) >> S_DQS_QUAD_CONFIG) & M_DQS_QUAD_CONFIG)
+
+#define S_OPERATE_MODE    4
+#define M_OPERATE_MODE    0xfU
+#define V_OPERATE_MODE(x) ((x) << S_OPERATE_MODE)
+#define G_OPERATE_MODE(x) (((x) >> S_OPERATE_MODE) & M_OPERATE_MODE)
+
+#define S_EN_DQS_OFFSET    3
+#define V_EN_DQS_OFFSET(x) ((x) << S_EN_DQS_OFFSET)
+#define F_EN_DQS_OFFSET    V_EN_DQS_OFFSET(1U)
+
+#define S_DQS_ALIGN_JITTER    2
+#define V_DQS_ALIGN_JITTER(x) ((x) << S_DQS_ALIGN_JITTER)
+#define F_DQS_ALIGN_JITTER    V_DQS_ALIGN_JITTER(1U)
+
+#define S_DIS_CLK_GATE    1
+#define V_DIS_CLK_GATE(x) ((x) << S_DIS_CLK_GATE)
+#define F_DIS_CLK_GATE    V_DIS_CLK_GATE(1U)
+
+#define S_MAX_DQS_ITER    0
+#define V_MAX_DQS_ITER(x) ((x) << S_MAX_DQS_ITER)
+#define F_MAX_DQS_ITER    V_MAX_DQS_ITER(1U)
+
+#define A_MC_DDRPHY_DP18_DQSCLK_OFFSET 0x440dc
+
+#define S_DQS_OFFSET    8
+#define M_DQS_OFFSET    0x7fU
+#define V_DQS_OFFSET(x) ((x) << S_DQS_OFFSET)
+#define G_DQS_OFFSET(x) (((x) >> S_DQS_OFFSET) & M_DQS_OFFSET)
+
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_0_RP 0x440e0
+
+#define S_WR_DELAY    6
+#define M_WR_DELAY    0x3ffU
+#define V_WR_DELAY(x) ((x) << S_WR_DELAY)
+#define G_WR_DELAY(x) (((x) >> S_WR_DELAY) & M_WR_DELAY)
+
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_1_RP 0x440e4
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_2_RP 0x440e8
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_3_RP 0x440ec
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_4_RP 0x440f0
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_5_RP 0x440f4
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_6_RP 0x440f8
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_7_RP 0x440fc
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_8_RP 0x44100
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_9_RP 0x44104
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_10_RP 0x44108
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_11_RP 0x4410c
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_12_RP 0x44110
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_13_RP 0x44114
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_14_RP 0x44118
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_15_RP 0x4411c
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_16_RP 0x44120
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_17_RP 0x44124
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_18_RP 0x44128
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_19_RP 0x4412c
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_20_RP 0x44130
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_21_RP 0x44134
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_22_RP 0x44138
+#define A_MC_DDRPHY_DP18_WR_DELAY_VALUE_23_RP 0x4413c
+#define A_MC_DDRPHY_DP18_READ_DELAY0_RANK_PAIR 0x44140
+
+#define S_RD_DELAY_BITS0_6    9
+#define M_RD_DELAY_BITS0_6    0x7fU
+#define V_RD_DELAY_BITS0_6(x) ((x) << S_RD_DELAY_BITS0_6)
+#define G_RD_DELAY_BITS0_6(x) (((x) >> S_RD_DELAY_BITS0_6) & M_RD_DELAY_BITS0_6)
+
+#define S_RD_DELAY_BITS8_14    1
+#define M_RD_DELAY_BITS8_14    0x7fU
+#define V_RD_DELAY_BITS8_14(x) ((x) << S_RD_DELAY_BITS8_14)
+#define G_RD_DELAY_BITS8_14(x) (((x) >> S_RD_DELAY_BITS8_14) & M_RD_DELAY_BITS8_14)
+
+#define A_MC_DDRPHY_DP18_READ_DELAY1_RANK_PAIR 0x44144
+#define A_MC_DDRPHY_DP18_READ_DELAY2_RANK_PAIR 0x44148
+#define A_MC_DDRPHY_DP18_READ_DELAY3_RANK_PAIR 0x4414c
+#define A_MC_DDRPHY_DP18_READ_DELAY4_RANK_PAIR 0x44150
+#define A_MC_DDRPHY_DP18_READ_DELAY5_RANK_PAIR 0x44154
+#define A_MC_DDRPHY_DP18_READ_DELAY6_RANK_PAIR 0x44158
+#define A_MC_DDRPHY_DP18_READ_DELAY7_RANK_PAIR 0x4415c
+#define A_MC_DDRPHY_DP18_READ_DELAY8_RANK_PAIR 0x44160
+#define A_MC_DDRPHY_DP18_READ_DELAY9_RANK_PAIR 0x44164
+#define A_MC_DDRPHY_DP18_READ_DELAY10_RANK_PAIR 0x44168
+#define A_MC_DDRPHY_DP18_READ_DELAY11_RANK_PAIR 0x4416c
+#define A_MC_DDRPHY_DP18_INITIAL_DQS_ALIGN0_RANK_PAIR 0x44170
+
+#define S_INITIAL_DQS_ROT_N0_N2    8
+#define M_INITIAL_DQS_ROT_N0_N2    0x7fU
+#define V_INITIAL_DQS_ROT_N0_N2(x) ((x) << S_INITIAL_DQS_ROT_N0_N2)
+#define G_INITIAL_DQS_ROT_N0_N2(x) (((x) >> S_INITIAL_DQS_ROT_N0_N2) & M_INITIAL_DQS_ROT_N0_N2)
+
+#define S_INITIAL_DQS_ROT_N1_N3    0
+#define M_INITIAL_DQS_ROT_N1_N3    0x7fU
+#define V_INITIAL_DQS_ROT_N1_N3(x) ((x) << S_INITIAL_DQS_ROT_N1_N3)
+#define G_INITIAL_DQS_ROT_N1_N3(x) (((x) >> S_INITIAL_DQS_ROT_N1_N3) & M_INITIAL_DQS_ROT_N1_N3)
+
+#define A_MC_DDRPHY_DP18_INITIAL_DQS_ALIGN1_RANK_PAIR 0x44174
+#define A_MC_DDRPHY_DP18_WRCLK_STATUS 0x44178
+
+#define S_WRCLK_CALIB_DONE    15
+#define V_WRCLK_CALIB_DONE(x) ((x) << S_WRCLK_CALIB_DONE)
+#define F_WRCLK_CALIB_DONE    V_WRCLK_CALIB_DONE(1U)
+
+#define S_VALUE_UPDATED    14
+#define V_VALUE_UPDATED(x) ((x) << S_VALUE_UPDATED)
+#define F_VALUE_UPDATED    V_VALUE_UPDATED(1U)
+
+#define S_FAIL_PASS_V    13
+#define V_FAIL_PASS_V(x) ((x) << S_FAIL_PASS_V)
+#define F_FAIL_PASS_V    V_FAIL_PASS_V(1U)
+
+#define S_PASS_FAIL_V    12
+#define V_PASS_FAIL_V(x) ((x) << S_PASS_FAIL_V)
+#define F_PASS_FAIL_V    V_PASS_FAIL_V(1U)
+
+#define S_FP_PF_EDGE_NF    11
+#define V_FP_PF_EDGE_NF(x) ((x) << S_FP_PF_EDGE_NF)
+#define F_FP_PF_EDGE_NF    V_FP_PF_EDGE_NF(1U)
+
+#define S_NON_SYMMETRIC    10
+#define V_NON_SYMMETRIC(x) ((x) << S_NON_SYMMETRIC)
+#define F_NON_SYMMETRIC    V_NON_SYMMETRIC(1U)
+
+#define S_FULL_RANGE    8
+#define V_FULL_RANGE(x) ((x) << S_FULL_RANGE)
+#define F_FULL_RANGE    V_FULL_RANGE(1U)
+
+#define S_QUAD3_EDGES    7
+#define V_QUAD3_EDGES(x) ((x) << S_QUAD3_EDGES)
+#define F_QUAD3_EDGES    V_QUAD3_EDGES(1U)
+
+#define S_QUAD2_EDGES    6
+#define V_QUAD2_EDGES(x) ((x) << S_QUAD2_EDGES)
+#define F_QUAD2_EDGES    V_QUAD2_EDGES(1U)
+
+#define S_QUAD1_EDGES    5
+#define V_QUAD1_EDGES(x) ((x) << S_QUAD1_EDGES)
+#define F_QUAD1_EDGES    V_QUAD1_EDGES(1U)
+
+#define S_QUAD0_EDGES    4
+#define V_QUAD0_EDGES(x) ((x) << S_QUAD0_EDGES)
+#define F_QUAD0_EDGES    V_QUAD0_EDGES(1U)
+
+#define S_QUAD3_CAVEAT    3
+#define V_QUAD3_CAVEAT(x) ((x) << S_QUAD3_CAVEAT)
+#define F_QUAD3_CAVEAT    V_QUAD3_CAVEAT(1U)
+
+#define S_QUAD2_CAVEAT    2
+#define V_QUAD2_CAVEAT(x) ((x) << S_QUAD2_CAVEAT)
+#define F_QUAD2_CAVEAT    V_QUAD2_CAVEAT(1U)
+
+#define S_QUAD1_CAVEAT    1
+#define V_QUAD1_CAVEAT(x) ((x) << S_QUAD1_CAVEAT)
+#define F_QUAD1_CAVEAT    V_QUAD1_CAVEAT(1U)
+
+#define S_QUAD0_CAVEAT    0
+#define V_QUAD0_CAVEAT(x) ((x) << S_QUAD0_CAVEAT)
+#define F_QUAD0_CAVEAT    V_QUAD0_CAVEAT(1U)
+
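+/*
+ * Illustrative poll loop (DELAY() is the standard kernel busy-wait; "sc"
+ * as in the sketch above): code waiting for write-clock calibration to
+ * complete would spin on the single-bit F_WRCLK_CALIB_DONE flag, e.g.
+ *
+ *	while (!(t4_read_reg(sc, A_MC_DDRPHY_DP18_WRCLK_STATUS) &
+ *	    F_WRCLK_CALIB_DONE))
+ *		DELAY(10);
+ */
+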
+#define A_MC_DDRPHY_DP18_WRCLK_EDGE 0x4417c
+
+#define S_FAIL_PASS_VALUE    8
+#define M_FAIL_PASS_VALUE    0x7fU
+#define V_FAIL_PASS_VALUE(x) ((x) << S_FAIL_PASS_VALUE)
+#define G_FAIL_PASS_VALUE(x) (((x) >> S_FAIL_PASS_VALUE) & M_FAIL_PASS_VALUE)
+
+#define S_PASS_FAIL_VALUE    0
+#define M_PASS_FAIL_VALUE    0xffU
+#define V_PASS_FAIL_VALUE(x) ((x) << S_PASS_FAIL_VALUE)
+#define G_PASS_FAIL_VALUE(x) (((x) >> S_PASS_FAIL_VALUE) & M_PASS_FAIL_VALUE)
+
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE0_RANK_PAIR 0x44180
+
+#define S_RD_EYE_SIZE_BITS2_7    8
+#define M_RD_EYE_SIZE_BITS2_7    0x3fU
+#define V_RD_EYE_SIZE_BITS2_7(x) ((x) << S_RD_EYE_SIZE_BITS2_7)
+#define G_RD_EYE_SIZE_BITS2_7(x) (((x) >> S_RD_EYE_SIZE_BITS2_7) & M_RD_EYE_SIZE_BITS2_7)
+
+#define S_RD_EYE_SIZE_BITS10_15    0
+#define M_RD_EYE_SIZE_BITS10_15    0x3fU
+#define V_RD_EYE_SIZE_BITS10_15(x) ((x) << S_RD_EYE_SIZE_BITS10_15)
+#define G_RD_EYE_SIZE_BITS10_15(x) (((x) >> S_RD_EYE_SIZE_BITS10_15) & M_RD_EYE_SIZE_BITS10_15)
+
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE1_RANK_PAIR 0x44184
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE2_RANK_PAIR 0x44188
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE3_RANK_PAIR 0x4418c
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE4_RANK_PAIR 0x44190
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE5_RANK_PAIR 0x44194
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE6_RANK_PAIR 0x44198
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE7_RANK_PAIR 0x4419c
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE8_RANK_PAIR 0x441a0
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE9_RANK_PAIR 0x441a4
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE10_RANK_PAIR 0x441a8
+#define A_MC_DDRPHY_DP18_READ_EYE_SIZE11_RANK_PAIR 0x441ac
+#define A_MC_DDRPHY_DP18_RD_DIA_CONFIG3 0x441b4
+
+#define S_DESIRED_EDGE_CNTR_TARGET_HIGH    8
+#define M_DESIRED_EDGE_CNTR_TARGET_HIGH    0xffU
+#define V_DESIRED_EDGE_CNTR_TARGET_HIGH(x) ((x) << S_DESIRED_EDGE_CNTR_TARGET_HIGH)
+#define G_DESIRED_EDGE_CNTR_TARGET_HIGH(x) (((x) >> S_DESIRED_EDGE_CNTR_TARGET_HIGH) & M_DESIRED_EDGE_CNTR_TARGET_HIGH)
+
+#define S_DESIRED_EDGE_CNTR_TARGET_LOW    0
+#define M_DESIRED_EDGE_CNTR_TARGET_LOW    0xffU
+#define V_DESIRED_EDGE_CNTR_TARGET_LOW(x) ((x) << S_DESIRED_EDGE_CNTR_TARGET_LOW)
+#define G_DESIRED_EDGE_CNTR_TARGET_LOW(x) (((x) >> S_DESIRED_EDGE_CNTR_TARGET_LOW) & M_DESIRED_EDGE_CNTR_TARGET_LOW)
+
+#define A_MC_DDRPHY_DP18_RD_DIA_CONFIG4 0x441b8
+
+#define S_APPROACH_ALIGNMENT    15
+#define V_APPROACH_ALIGNMENT(x) ((x) << S_APPROACH_ALIGNMENT)
+#define F_APPROACH_ALIGNMENT    V_APPROACH_ALIGNMENT(1U)
+
+#define A_MC_DDRPHY_DP18_DELAY_LINE_PWR_CTL 0x441bc
+
+#define S_QUAD0_PWR_CTL    12
+#define M_QUAD0_PWR_CTL    0xfU
+#define V_QUAD0_PWR_CTL(x) ((x) << S_QUAD0_PWR_CTL)
+#define G_QUAD0_PWR_CTL(x) (((x) >> S_QUAD0_PWR_CTL) & M_QUAD0_PWR_CTL)
+
+#define S_QUAD1_PWR_CTL    8
+#define M_QUAD1_PWR_CTL    0xfU
+#define V_QUAD1_PWR_CTL(x) ((x) << S_QUAD1_PWR_CTL)
+#define G_QUAD1_PWR_CTL(x) (((x) >> S_QUAD1_PWR_CTL) & M_QUAD1_PWR_CTL)
+
+#define S_QUAD2_PWR_CTL    4
+#define M_QUAD2_PWR_CTL    0xfU
+#define V_QUAD2_PWR_CTL(x) ((x) << S_QUAD2_PWR_CTL)
+#define G_QUAD2_PWR_CTL(x) (((x) >> S_QUAD2_PWR_CTL) & M_QUAD2_PWR_CTL)
+
+#define S_QUAD3_PWR_CTL    0
+#define M_QUAD3_PWR_CTL    0xfU
+#define V_QUAD3_PWR_CTL(x) ((x) << S_QUAD3_PWR_CTL)
+#define G_QUAD3_PWR_CTL(x) (((x) >> S_QUAD3_PWR_CTL) & M_QUAD3_PWR_CTL)
+
+#define A_MC_DDRPHY_DP18_READ_TIMING_REFERENCE0 0x441c0
+
+#define S_REFERENCE_BITS1_7    8
+#define M_REFERENCE_BITS1_7    0x7fU
+#define V_REFERENCE_BITS1_7(x) ((x) << S_REFERENCE_BITS1_7)
+#define G_REFERENCE_BITS1_7(x) (((x) >> S_REFERENCE_BITS1_7) & M_REFERENCE_BITS1_7)
+
+#define S_REFERENCE_BITS9_15    0
+#define M_REFERENCE_BITS9_15    0x7fU
+#define V_REFERENCE_BITS9_15(x) ((x) << S_REFERENCE_BITS9_15)
+#define G_REFERENCE_BITS9_15(x) (((x) >> S_REFERENCE_BITS9_15) & M_REFERENCE_BITS9_15)
+
+#define A_MC_DDRPHY_DP18_READ_TIMING_REFERENCE1 0x441c4
+#define A_MC_DDRPHY_DP18_READ_DQS_TIMING_REFERENCE 0x441c8
+
+#define S_REFERENCE    8
+#define M_REFERENCE    0x7fU
+#define V_REFERENCE(x) ((x) << S_REFERENCE)
+#define G_REFERENCE(x) (((x) >> S_REFERENCE) & M_REFERENCE)
+
+#define A_MC_DDRPHY_DP18_SYSCLK_PR_VALUE 0x441cc
+#define A_MC_DDRPHY_DP18_WRCLK_PR 0x441d0
+#define A_MC_DDRPHY_DP18_IO_TX_CONFIG0 0x441d4
+
+#define S_INTERP_SIG_SLEW    12
+#define M_INTERP_SIG_SLEW    0xfU
+#define V_INTERP_SIG_SLEW(x) ((x) << S_INTERP_SIG_SLEW)
+#define G_INTERP_SIG_SLEW(x) (((x) >> S_INTERP_SIG_SLEW) & M_INTERP_SIG_SLEW)
+
+#define S_POST_CURSOR    8
+#define M_POST_CURSOR    0xfU
+#define V_POST_CURSOR(x) ((x) << S_POST_CURSOR)
+#define G_POST_CURSOR(x) (((x) >> S_POST_CURSOR) & M_POST_CURSOR)
+
+#define S_SLEW_CTL    4
+#define M_SLEW_CTL    0xfU
+#define V_SLEW_CTL(x) ((x) << S_SLEW_CTL)
+#define G_SLEW_CTL(x) (((x) >> S_SLEW_CTL) & M_SLEW_CTL)
+
+#define A_MC_DDRPHY_DP18_PLL_CONFIG0 0x441d8
+#define A_MC_DDRPHY_DP18_PLL_CONFIG1 0x441dc
+
+#define S_CE0DLTVCCA    7
+#define V_CE0DLTVCCA(x) ((x) << S_CE0DLTVCCA)
+#define F_CE0DLTVCCA    V_CE0DLTVCCA(1U)
+
+#define S_CE0DLTVCCD1    4
+#define V_CE0DLTVCCD1(x) ((x) << S_CE0DLTVCCD1)
+#define F_CE0DLTVCCD1    V_CE0DLTVCCD1(1U)
+
+#define S_CE0DLTVCCD2    3
+#define V_CE0DLTVCCD2(x) ((x) << S_CE0DLTVCCD2)
+#define F_CE0DLTVCCD2    V_CE0DLTVCCD2(1U)
+
+#define S_S0INSDLYTAP    2
+#define V_S0INSDLYTAP(x) ((x) << S_S0INSDLYTAP)
+#define F_S0INSDLYTAP    V_S0INSDLYTAP(1U)
+
+#define S_S1INSDLYTAP    1
+#define V_S1INSDLYTAP(x) ((x) << S_S1INSDLYTAP)
+#define F_S1INSDLYTAP    V_S1INSDLYTAP(1U)
+
+#define A_MC_DDRPHY_DP18_IO_TX_NFET_SLICE 0x441e0
+
+#define S_EN_SLICE_N_WR    8
+#define M_EN_SLICE_N_WR    0xffU
+#define V_EN_SLICE_N_WR(x) ((x) << S_EN_SLICE_N_WR)
+#define G_EN_SLICE_N_WR(x) (((x) >> S_EN_SLICE_N_WR) & M_EN_SLICE_N_WR)
+
+#define A_MC_DDRPHY_DP18_IO_TX_PFET_SLICE 0x441e4
+#define A_MC_DDRPHY_DP18_IO_TX_NFET_TERM 0x441e8
+
+#define S_EN_TERM_N_WR    8
+#define M_EN_TERM_N_WR    0xffU
+#define V_EN_TERM_N_WR(x) ((x) << S_EN_TERM_N_WR)
+#define G_EN_TERM_N_WR(x) (((x) >> S_EN_TERM_N_WR) & M_EN_TERM_N_WR)
+
+#define S_EN_TERM_N_WR_FFE    4
+#define M_EN_TERM_N_WR_FFE    0xfU
+#define V_EN_TERM_N_WR_FFE(x) ((x) << S_EN_TERM_N_WR_FFE)
+#define G_EN_TERM_N_WR_FFE(x) (((x) >> S_EN_TERM_N_WR_FFE) & M_EN_TERM_N_WR_FFE)
+
+#define A_MC_DDRPHY_DP18_IO_TX_PFET_TERM 0x441ec
+
+#define S_EN_TERM_P_WR    8
+#define M_EN_TERM_P_WR    0xffU
+#define V_EN_TERM_P_WR(x) ((x) << S_EN_TERM_P_WR)
+#define G_EN_TERM_P_WR(x) (((x) >> S_EN_TERM_P_WR) & M_EN_TERM_P_WR)
+
+#define S_EN_TERM_P_WR_FFE    4
+#define M_EN_TERM_P_WR_FFE    0xfU
+#define V_EN_TERM_P_WR_FFE(x) ((x) << S_EN_TERM_P_WR_FFE)
+#define G_EN_TERM_P_WR_FFE(x) (((x) >> S_EN_TERM_P_WR_FFE) & M_EN_TERM_P_WR_FFE)
+
+#define A_MC_DDRPHY_DP18_DATA_BIT_DISABLE0_RP 0x441f0
+
+#define S_DATA_BIT_DISABLE_0_15    0
+#define M_DATA_BIT_DISABLE_0_15    0xffffU
+#define V_DATA_BIT_DISABLE_0_15(x) ((x) << S_DATA_BIT_DISABLE_0_15)
+#define G_DATA_BIT_DISABLE_0_15(x) (((x) >> S_DATA_BIT_DISABLE_0_15) & M_DATA_BIT_DISABLE_0_15)
+
+#define A_MC_DDRPHY_DP18_DATA_BIT_DISABLE1_RP 0x441f4
+
+#define S_DATA_BIT_DISABLE_16_23    8
+#define M_DATA_BIT_DISABLE_16_23    0xffU
+#define V_DATA_BIT_DISABLE_16_23(x) ((x) << S_DATA_BIT_DISABLE_16_23)
+#define G_DATA_BIT_DISABLE_16_23(x) (((x) >> S_DATA_BIT_DISABLE_16_23) & M_DATA_BIT_DISABLE_16_23)
+
+#define A_MC_DDRPHY_DP18_DQ_WR_OFFSET_RP 0x441f8
+
+#define S_DQ_WR_OFFSET_N0    12
+#define M_DQ_WR_OFFSET_N0    0xfU
+#define V_DQ_WR_OFFSET_N0(x) ((x) << S_DQ_WR_OFFSET_N0)
+#define G_DQ_WR_OFFSET_N0(x) (((x) >> S_DQ_WR_OFFSET_N0) & M_DQ_WR_OFFSET_N0)
+
+#define S_DQ_WR_OFFSET_N1    8
+#define M_DQ_WR_OFFSET_N1    0xfU
+#define V_DQ_WR_OFFSET_N1(x) ((x) << S_DQ_WR_OFFSET_N1)
+#define G_DQ_WR_OFFSET_N1(x) (((x) >> S_DQ_WR_OFFSET_N1) & M_DQ_WR_OFFSET_N1)
+
+#define S_DQ_WR_OFFSET_N2    4
+#define M_DQ_WR_OFFSET_N2    0xfU
+#define V_DQ_WR_OFFSET_N2(x) ((x) << S_DQ_WR_OFFSET_N2)
+#define G_DQ_WR_OFFSET_N2(x) (((x) >> S_DQ_WR_OFFSET_N2) & M_DQ_WR_OFFSET_N2)
+
+#define S_DQ_WR_OFFSET_N3    0
+#define M_DQ_WR_OFFSET_N3    0xfU
+#define V_DQ_WR_OFFSET_N3(x) ((x) << S_DQ_WR_OFFSET_N3)
+#define G_DQ_WR_OFFSET_N3(x) (((x) >> S_DQ_WR_OFFSET_N3) & M_DQ_WR_OFFSET_N3)
+
+#define A_MC_DDRPHY_DP18_POWERDOWN_1 0x441fc
+
+#define S_EYEDAC_PD    13
+#define V_EYEDAC_PD(x) ((x) << S_EYEDAC_PD)
+#define F_EYEDAC_PD    V_EYEDAC_PD(1U)
+
+#define S_ANALOG_OUTPUT_STAB    9
+#define V_ANALOG_OUTPUT_STAB(x) ((x) << S_ANALOG_OUTPUT_STAB)
+#define F_ANALOG_OUTPUT_STAB    V_ANALOG_OUTPUT_STAB(1U)
+
+#define S_DP18_RX_PD    2
+#define M_DP18_RX_PD    0x3U
+#define V_DP18_RX_PD(x) ((x) << S_DP18_RX_PD)
+#define G_DP18_RX_PD(x) (((x) >> S_DP18_RX_PD) & M_DP18_RX_PD)
+
+#define S_DELAY_LINE_CTL_OVERRIDE    4
+#define V_DELAY_LINE_CTL_OVERRIDE(x) ((x) << S_DELAY_LINE_CTL_OVERRIDE)
+#define F_DELAY_LINE_CTL_OVERRIDE    V_DELAY_LINE_CTL_OVERRIDE(1U)
+
+#define S_VCC_REG_PD    0
+#define V_VCC_REG_PD(x) ((x) << S_VCC_REG_PD)
+#define F_VCC_REG_PD    V_VCC_REG_PD(1U)
+
+#define A_MC_ADR_DDRPHY_ADR_BIT_ENABLE 0x45000
+
+#define S_BIT_ENABLE_0_11    4
+#define M_BIT_ENABLE_0_11    0xfffU
+#define V_BIT_ENABLE_0_11(x) ((x) << S_BIT_ENABLE_0_11)
+#define G_BIT_ENABLE_0_11(x) (((x) >> S_BIT_ENABLE_0_11) & M_BIT_ENABLE_0_11)
+
+#define S_BIT_ENABLE_12_15    0
+#define M_BIT_ENABLE_12_15    0xfU
+#define V_BIT_ENABLE_12_15(x) ((x) << S_BIT_ENABLE_12_15)
+#define G_BIT_ENABLE_12_15(x) (((x) >> S_BIT_ENABLE_12_15) & M_BIT_ENABLE_12_15)
+
+#define A_MC_ADR_DDRPHY_ADR_DIFFPAIR_ENABLE 0x45004
+
+#define S_DI_ADR0_ADR1    15
+#define V_DI_ADR0_ADR1(x) ((x) << S_DI_ADR0_ADR1)
+#define F_DI_ADR0_ADR1    V_DI_ADR0_ADR1(1U)
+
+#define S_DI_ADR2_ADR3    14
+#define V_DI_ADR2_ADR3(x) ((x) << S_DI_ADR2_ADR3)
+#define F_DI_ADR2_ADR3    V_DI_ADR2_ADR3(1U)
+
+#define S_DI_ADR4_ADR5    13
+#define V_DI_ADR4_ADR5(x) ((x) << S_DI_ADR4_ADR5)
+#define F_DI_ADR4_ADR5    V_DI_ADR4_ADR5(1U)
+
+#define S_DI_ADR6_ADR7    12
+#define V_DI_ADR6_ADR7(x) ((x) << S_DI_ADR6_ADR7)
+#define F_DI_ADR6_ADR7    V_DI_ADR6_ADR7(1U)
+
+#define S_DI_ADR8_ADR9    11
+#define V_DI_ADR8_ADR9(x) ((x) << S_DI_ADR8_ADR9)
+#define F_DI_ADR8_ADR9    V_DI_ADR8_ADR9(1U)
+
+#define S_DI_ADR10_ADR11    10
+#define V_DI_ADR10_ADR11(x) ((x) << S_DI_ADR10_ADR11)
+#define F_DI_ADR10_ADR11    V_DI_ADR10_ADR11(1U)
+
+#define S_DI_ADR12_ADR13    9
+#define V_DI_ADR12_ADR13(x) ((x) << S_DI_ADR12_ADR13)
+#define F_DI_ADR12_ADR13    V_DI_ADR12_ADR13(1U)
+
+#define S_DI_ADR14_ADR15    8
+#define V_DI_ADR14_ADR15(x) ((x) << S_DI_ADR14_ADR15)
+#define F_DI_ADR14_ADR15    V_DI_ADR14_ADR15(1U)
+
+#define A_MC_ADR_DDRPHY_ADR_DELAY0 0x45010
+
+#define S_ADR_DELAY_BITS1_7    8
+#define M_ADR_DELAY_BITS1_7    0x7fU
+#define V_ADR_DELAY_BITS1_7(x) ((x) << S_ADR_DELAY_BITS1_7)
+#define G_ADR_DELAY_BITS1_7(x) (((x) >> S_ADR_DELAY_BITS1_7) & M_ADR_DELAY_BITS1_7)
+
+#define S_ADR_DELAY_BITS9_15    0
+#define M_ADR_DELAY_BITS9_15    0x7fU
+#define V_ADR_DELAY_BITS9_15(x) ((x) << S_ADR_DELAY_BITS9_15)
+#define G_ADR_DELAY_BITS9_15(x) (((x) >> S_ADR_DELAY_BITS9_15) & M_ADR_DELAY_BITS9_15)
+
+#define A_MC_ADR_DDRPHY_ADR_DELAY1 0x45014
+#define A_MC_ADR_DDRPHY_ADR_DELAY2 0x45018
+#define A_MC_ADR_DDRPHY_ADR_DELAY3 0x4501c
+#define A_MC_ADR_DDRPHY_ADR_DELAY4 0x45020
+#define A_MC_ADR_DDRPHY_ADR_DELAY5 0x45024
+#define A_MC_ADR_DDRPHY_ADR_DELAY6 0x45028
+#define A_MC_ADR_DDRPHY_ADR_DELAY7 0x4502c
+#define A_MC_ADR_DDRPHY_ADR_DFT_WRAP_STATUS_CONTROL 0x45030
+
+#define S_ADR_TEST_LANE_PAIR_FAIL    8
+#define M_ADR_TEST_LANE_PAIR_FAIL    0xffU
+#define V_ADR_TEST_LANE_PAIR_FAIL(x) ((x) << S_ADR_TEST_LANE_PAIR_FAIL)
+#define G_ADR_TEST_LANE_PAIR_FAIL(x) (((x) >> S_ADR_TEST_LANE_PAIR_FAIL) & M_ADR_TEST_LANE_PAIR_FAIL)
+
+#define S_ADR_TEST_DATA_EN    7
+#define V_ADR_TEST_DATA_EN(x) ((x) << S_ADR_TEST_DATA_EN)
+#define F_ADR_TEST_DATA_EN    V_ADR_TEST_DATA_EN(1U)
+
+#define S_DADR_TEST_MODE    5
+#define M_DADR_TEST_MODE    0x3U
+#define V_DADR_TEST_MODE(x) ((x) << S_DADR_TEST_MODE)
+#define G_DADR_TEST_MODE(x) (((x) >> S_DADR_TEST_MODE) & M_DADR_TEST_MODE)
+
+#define S_ADR_TEST_4TO1_MODE    4
+#define V_ADR_TEST_4TO1_MODE(x) ((x) << S_ADR_TEST_4TO1_MODE)
+#define F_ADR_TEST_4TO1_MODE    V_ADR_TEST_4TO1_MODE(1U)
+
+#define S_ADR_TEST_RESET    3
+#define V_ADR_TEST_RESET(x) ((x) << S_ADR_TEST_RESET)
+#define F_ADR_TEST_RESET    V_ADR_TEST_RESET(1U)
+
+#define S_ADR_TEST_GEN_EN    2
+#define V_ADR_TEST_GEN_EN(x) ((x) << S_ADR_TEST_GEN_EN)
+#define F_ADR_TEST_GEN_EN    V_ADR_TEST_GEN_EN(1U)
+
+#define S_ADR_TEST_CLEAR_ERROR    1
+#define V_ADR_TEST_CLEAR_ERROR(x) ((x) << S_ADR_TEST_CLEAR_ERROR)
+#define F_ADR_TEST_CLEAR_ERROR    V_ADR_TEST_CLEAR_ERROR(1U)
+
+#define S_ADR_TEST_CHECK_EN    0
+#define V_ADR_TEST_CHECK_EN(x) ((x) << S_ADR_TEST_CHECK_EN)
+#define F_ADR_TEST_CHECK_EN    V_ADR_TEST_CHECK_EN(1U)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN0 0x45040
+
+#define S_EN_SLICE_N_WR_0    8
+#define M_EN_SLICE_N_WR_0    0xffU
+#define V_EN_SLICE_N_WR_0(x) ((x) << S_EN_SLICE_N_WR_0)
+#define G_EN_SLICE_N_WR_0(x) (((x) >> S_EN_SLICE_N_WR_0) & M_EN_SLICE_N_WR_0)
+
+#define S_EN_SLICE_N_WR_FFE    4
+#define M_EN_SLICE_N_WR_FFE    0xfU
+#define V_EN_SLICE_N_WR_FFE(x) ((x) << S_EN_SLICE_N_WR_FFE)
+#define G_EN_SLICE_N_WR_FFE(x) (((x) >> S_EN_SLICE_N_WR_FFE) & M_EN_SLICE_N_WR_FFE)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN1 0x45044
+
+#define S_EN_SLICE_N_WR_1    8
+#define M_EN_SLICE_N_WR_1    0xffU
+#define V_EN_SLICE_N_WR_1(x) ((x) << S_EN_SLICE_N_WR_1)
+#define G_EN_SLICE_N_WR_1(x) (((x) >> S_EN_SLICE_N_WR_1) & M_EN_SLICE_N_WR_1)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN2 0x45048
+
+#define S_EN_SLICE_N_WR_2    8
+#define M_EN_SLICE_N_WR_2    0xffU
+#define V_EN_SLICE_N_WR_2(x) ((x) << S_EN_SLICE_N_WR_2)
+#define G_EN_SLICE_N_WR_2(x) (((x) >> S_EN_SLICE_N_WR_2) & M_EN_SLICE_N_WR_2)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN3 0x4504c
+
+#define S_EN_SLICE_N_WR_3    8
+#define M_EN_SLICE_N_WR_3    0xffU
+#define V_EN_SLICE_N_WR_3(x) ((x) << S_EN_SLICE_N_WR_3)
+#define G_EN_SLICE_N_WR_3(x) (((x) >> S_EN_SLICE_N_WR_3) & M_EN_SLICE_N_WR_3)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN0 0x45050
+
+#define S_EN_SLICE_P_WR    8
+#define M_EN_SLICE_P_WR    0xffU
+#define V_EN_SLICE_P_WR(x) ((x) << S_EN_SLICE_P_WR)
+#define G_EN_SLICE_P_WR(x) (((x) >> S_EN_SLICE_P_WR) & M_EN_SLICE_P_WR)
+
+#define S_EN_SLICE_P_WR_FFE    4
+#define M_EN_SLICE_P_WR_FFE    0xfU
+#define V_EN_SLICE_P_WR_FFE(x) ((x) << S_EN_SLICE_P_WR_FFE)
+#define G_EN_SLICE_P_WR_FFE(x) (((x) >> S_EN_SLICE_P_WR_FFE) & M_EN_SLICE_P_WR_FFE)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN1 0x45054
+#define A_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN2 0x45058
+#define A_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN3 0x4505c
+#define A_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE 0x45060
+
+#define S_POST_CURSOR0    12
+#define M_POST_CURSOR0    0xfU
+#define V_POST_CURSOR0(x) ((x) << S_POST_CURSOR0)
+#define G_POST_CURSOR0(x) (((x) >> S_POST_CURSOR0) & M_POST_CURSOR0)
+
+#define S_POST_CURSOR1    8
+#define M_POST_CURSOR1    0xfU
+#define V_POST_CURSOR1(x) ((x) << S_POST_CURSOR1)
+#define G_POST_CURSOR1(x) (((x) >> S_POST_CURSOR1) & M_POST_CURSOR1)
+
+#define S_POST_CURSOR2    4
+#define M_POST_CURSOR2    0xfU
+#define V_POST_CURSOR2(x) ((x) << S_POST_CURSOR2)
+#define G_POST_CURSOR2(x) (((x) >> S_POST_CURSOR2) & M_POST_CURSOR2)
+
+#define S_POST_CURSOR3    0
+#define M_POST_CURSOR3    0xfU
+#define V_POST_CURSOR3(x) ((x) << S_POST_CURSOR3)
+#define G_POST_CURSOR3(x) (((x) >> S_POST_CURSOR3) & M_POST_CURSOR3)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE 0x45068
+
+#define S_SLEW_CTL0    12
+#define M_SLEW_CTL0    0xfU
+#define V_SLEW_CTL0(x) ((x) << S_SLEW_CTL0)
+#define G_SLEW_CTL0(x) (((x) >> S_SLEW_CTL0) & M_SLEW_CTL0)
+
+#define S_SLEW_CTL1    8
+#define M_SLEW_CTL1    0xfU
+#define V_SLEW_CTL1(x) ((x) << S_SLEW_CTL1)
+#define G_SLEW_CTL1(x) (((x) >> S_SLEW_CTL1) & M_SLEW_CTL1)
+
+#define S_SLEW_CTL2    4
+#define M_SLEW_CTL2    0xfU
+#define V_SLEW_CTL2(x) ((x) << S_SLEW_CTL2)
+#define G_SLEW_CTL2(x) (((x) >> S_SLEW_CTL2) & M_SLEW_CTL2)
+
+#define S_SLEW_CTL3    0
+#define M_SLEW_CTL3    0xfU
+#define V_SLEW_CTL3(x) ((x) << S_SLEW_CTL3)
+#define G_SLEW_CTL3(x) (((x) >> S_SLEW_CTL3) & M_SLEW_CTL3)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP0 0x45080
+
+#define S_SLICE_SEL_REG_BITS0_1    14
+#define M_SLICE_SEL_REG_BITS0_1    0x3U
+#define V_SLICE_SEL_REG_BITS0_1(x) ((x) << S_SLICE_SEL_REG_BITS0_1)
+#define G_SLICE_SEL_REG_BITS0_1(x) (((x) >> S_SLICE_SEL_REG_BITS0_1) & M_SLICE_SEL_REG_BITS0_1)
+
+#define S_SLICE_SEL_REG_BITS2_3    12
+#define M_SLICE_SEL_REG_BITS2_3    0x3U
+#define V_SLICE_SEL_REG_BITS2_3(x) ((x) << S_SLICE_SEL_REG_BITS2_3)
+#define G_SLICE_SEL_REG_BITS2_3(x) (((x) >> S_SLICE_SEL_REG_BITS2_3) & M_SLICE_SEL_REG_BITS2_3)
+
+#define S_SLICE_SEL_REG_BITS4_5    10
+#define M_SLICE_SEL_REG_BITS4_5    0x3U
+#define V_SLICE_SEL_REG_BITS4_5(x) ((x) << S_SLICE_SEL_REG_BITS4_5)
+#define G_SLICE_SEL_REG_BITS4_5(x) (((x) >> S_SLICE_SEL_REG_BITS4_5) & M_SLICE_SEL_REG_BITS4_5)
+
+#define S_SLICE_SEL_REG_BITS6_7    8
+#define M_SLICE_SEL_REG_BITS6_7    0x3U
+#define V_SLICE_SEL_REG_BITS6_7(x) ((x) << S_SLICE_SEL_REG_BITS6_7)
+#define G_SLICE_SEL_REG_BITS6_7(x) (((x) >> S_SLICE_SEL_REG_BITS6_7) & M_SLICE_SEL_REG_BITS6_7)
+
+#define S_SLICE_SEL_REG_BITS8_9    6
+#define M_SLICE_SEL_REG_BITS8_9    0x3U
+#define V_SLICE_SEL_REG_BITS8_9(x) ((x) << S_SLICE_SEL_REG_BITS8_9)
+#define G_SLICE_SEL_REG_BITS8_9(x) (((x) >> S_SLICE_SEL_REG_BITS8_9) & M_SLICE_SEL_REG_BITS8_9)
+
+#define S_SLICE_SEL_REG_BITS10_11    4
+#define M_SLICE_SEL_REG_BITS10_11    0x3U
+#define V_SLICE_SEL_REG_BITS10_11(x) ((x) << S_SLICE_SEL_REG_BITS10_11)
+#define G_SLICE_SEL_REG_BITS10_11(x) (((x) >> S_SLICE_SEL_REG_BITS10_11) & M_SLICE_SEL_REG_BITS10_11)
+
+#define S_SLICE_SEL_REG_BITS12_13    2
+#define M_SLICE_SEL_REG_BITS12_13    0x3U
+#define V_SLICE_SEL_REG_BITS12_13(x) ((x) << S_SLICE_SEL_REG_BITS12_13)
+#define G_SLICE_SEL_REG_BITS12_13(x) (((x) >> S_SLICE_SEL_REG_BITS12_13) & M_SLICE_SEL_REG_BITS12_13)
+
+#define S_SLICE_SEL_REG_BITS14_15    0
+#define M_SLICE_SEL_REG_BITS14_15    0x3U
+#define V_SLICE_SEL_REG_BITS14_15(x) ((x) << S_SLICE_SEL_REG_BITS14_15)
+#define G_SLICE_SEL_REG_BITS14_15(x) (((x) >> S_SLICE_SEL_REG_BITS14_15) & M_SLICE_SEL_REG_BITS14_15)
+
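+/*
+ * Sketch (the per-pair select values sel0..sel7 are hypothetical): these
+ * map registers pack eight 2-bit lane-pair selects into one 16-bit value,
+ * so a full map is built by OR-ing the V_ macros together:
+ *
+ *	uint32_t map = V_SLICE_SEL_REG_BITS0_1(sel0) |
+ *	    V_SLICE_SEL_REG_BITS2_3(sel1) |
+ *	    V_SLICE_SEL_REG_BITS4_5(sel2) |
+ *	    V_SLICE_SEL_REG_BITS6_7(sel3) |
+ *	    V_SLICE_SEL_REG_BITS8_9(sel4) |
+ *	    V_SLICE_SEL_REG_BITS10_11(sel5) |
+ *	    V_SLICE_SEL_REG_BITS12_13(sel6) |
+ *	    V_SLICE_SEL_REG_BITS14_15(sel7);
+ *	t4_write_reg(sc, A_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP0, map);
+ */
+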
+#define A_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP1 0x45084
+#define A_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE_MAP0 0x450a0
+
+#define S_POST_CUR_SEL_BITS0_1    14
+#define M_POST_CUR_SEL_BITS0_1    0x3U
+#define V_POST_CUR_SEL_BITS0_1(x) ((x) << S_POST_CUR_SEL_BITS0_1)
+#define G_POST_CUR_SEL_BITS0_1(x) (((x) >> S_POST_CUR_SEL_BITS0_1) & M_POST_CUR_SEL_BITS0_1)
+
+#define S_POST_CUR_SEL_BITS2_3    12
+#define M_POST_CUR_SEL_BITS2_3    0x3U
+#define V_POST_CUR_SEL_BITS2_3(x) ((x) << S_POST_CUR_SEL_BITS2_3)
+#define G_POST_CUR_SEL_BITS2_3(x) (((x) >> S_POST_CUR_SEL_BITS2_3) & M_POST_CUR_SEL_BITS2_3)
+
+#define S_POST_CUR_SEL_BITS4_5    10
+#define M_POST_CUR_SEL_BITS4_5    0x3U
+#define V_POST_CUR_SEL_BITS4_5(x) ((x) << S_POST_CUR_SEL_BITS4_5)
+#define G_POST_CUR_SEL_BITS4_5(x) (((x) >> S_POST_CUR_SEL_BITS4_5) & M_POST_CUR_SEL_BITS4_5)
+
+#define S_POST_CUR_SEL_BITS6_7    8
+#define M_POST_CUR_SEL_BITS6_7    0x3U
+#define V_POST_CUR_SEL_BITS6_7(x) ((x) << S_POST_CUR_SEL_BITS6_7)
+#define G_POST_CUR_SEL_BITS6_7(x) (((x) >> S_POST_CUR_SEL_BITS6_7) & M_POST_CUR_SEL_BITS6_7)
+
+#define S_POST_CUR_SEL_BITS8_9    6
+#define M_POST_CUR_SEL_BITS8_9    0x3U
+#define V_POST_CUR_SEL_BITS8_9(x) ((x) << S_POST_CUR_SEL_BITS8_9)
+#define G_POST_CUR_SEL_BITS8_9(x) (((x) >> S_POST_CUR_SEL_BITS8_9) & M_POST_CUR_SEL_BITS8_9)
+
+#define S_POST_CUR_SEL_BITS10_11    4
+#define M_POST_CUR_SEL_BITS10_11    0x3U
+#define V_POST_CUR_SEL_BITS10_11(x) ((x) << S_POST_CUR_SEL_BITS10_11)
+#define G_POST_CUR_SEL_BITS10_11(x) (((x) >> S_POST_CUR_SEL_BITS10_11) & M_POST_CUR_SEL_BITS10_11)
+
+#define S_POST_CUR_SEL_BITS12_13    2
+#define M_POST_CUR_SEL_BITS12_13    0x3U
+#define V_POST_CUR_SEL_BITS12_13(x) ((x) << S_POST_CUR_SEL_BITS12_13)
+#define G_POST_CUR_SEL_BITS12_13(x) (((x) >> S_POST_CUR_SEL_BITS12_13) & M_POST_CUR_SEL_BITS12_13)
+
+#define S_POST_CUR_SEL_BITS14_15    0
+#define M_POST_CUR_SEL_BITS14_15    0x3U
+#define V_POST_CUR_SEL_BITS14_15(x) ((x) << S_POST_CUR_SEL_BITS14_15)
+#define G_POST_CUR_SEL_BITS14_15(x) (((x) >> S_POST_CUR_SEL_BITS14_15) & M_POST_CUR_SEL_BITS14_15)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE_MAP1 0x450a4
+#define A_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE_MAP0 0x450a8
+
+#define S_SLEW_CTL_SEL_BITS0_1    14
+#define M_SLEW_CTL_SEL_BITS0_1    0x3U
+#define V_SLEW_CTL_SEL_BITS0_1(x) ((x) << S_SLEW_CTL_SEL_BITS0_1)
+#define G_SLEW_CTL_SEL_BITS0_1(x) (((x) >> S_SLEW_CTL_SEL_BITS0_1) & M_SLEW_CTL_SEL_BITS0_1)
+
+#define S_SLEW_CTL_SEL_BITS2_3    12
+#define M_SLEW_CTL_SEL_BITS2_3    0x3U
+#define V_SLEW_CTL_SEL_BITS2_3(x) ((x) << S_SLEW_CTL_SEL_BITS2_3)
+#define G_SLEW_CTL_SEL_BITS2_3(x) (((x) >> S_SLEW_CTL_SEL_BITS2_3) & M_SLEW_CTL_SEL_BITS2_3)
+
+#define S_SLEW_CTL_SEL_BITS4_5    10
+#define M_SLEW_CTL_SEL_BITS4_5    0x3U
+#define V_SLEW_CTL_SEL_BITS4_5(x) ((x) << S_SLEW_CTL_SEL_BITS4_5)
+#define G_SLEW_CTL_SEL_BITS4_5(x) (((x) >> S_SLEW_CTL_SEL_BITS4_5) & M_SLEW_CTL_SEL_BITS4_5)
+
+#define S_SLEW_CTL_SEL_BITS6_7    8
+#define M_SLEW_CTL_SEL_BITS6_7    0x3U
+#define V_SLEW_CTL_SEL_BITS6_7(x) ((x) << S_SLEW_CTL_SEL_BITS6_7)
+#define G_SLEW_CTL_SEL_BITS6_7(x) (((x) >> S_SLEW_CTL_SEL_BITS6_7) & M_SLEW_CTL_SEL_BITS6_7)
+
+#define S_SLEW_CTL_SEL_BITS8_9    6
+#define M_SLEW_CTL_SEL_BITS8_9    0x3U
+#define V_SLEW_CTL_SEL_BITS8_9(x) ((x) << S_SLEW_CTL_SEL_BITS8_9)
+#define G_SLEW_CTL_SEL_BITS8_9(x) (((x) >> S_SLEW_CTL_SEL_BITS8_9) & M_SLEW_CTL_SEL_BITS8_9)
+
+#define S_SLEW_CTL_SEL_BITS10_11    4
+#define M_SLEW_CTL_SEL_BITS10_11    0x3U
+#define V_SLEW_CTL_SEL_BITS10_11(x) ((x) << S_SLEW_CTL_SEL_BITS10_11)
+#define G_SLEW_CTL_SEL_BITS10_11(x) (((x) >> S_SLEW_CTL_SEL_BITS10_11) & M_SLEW_CTL_SEL_BITS10_11)
+
+#define S_SLEW_CTL_SEL_BITS12_13    2
+#define M_SLEW_CTL_SEL_BITS12_13    0x3U
+#define V_SLEW_CTL_SEL_BITS12_13(x) ((x) << S_SLEW_CTL_SEL_BITS12_13)
+#define G_SLEW_CTL_SEL_BITS12_13(x) (((x) >> S_SLEW_CTL_SEL_BITS12_13) & M_SLEW_CTL_SEL_BITS12_13)
+
+#define S_SLEW_CTL_SEL_BITS14_15    0
+#define M_SLEW_CTL_SEL_BITS14_15    0x3U
+#define V_SLEW_CTL_SEL_BITS14_15(x) ((x) << S_SLEW_CTL_SEL_BITS14_15)
+#define G_SLEW_CTL_SEL_BITS14_15(x) (((x) >> S_SLEW_CTL_SEL_BITS14_15) & M_SLEW_CTL_SEL_BITS14_15)
+
+#define A_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE_MAP1 0x450ac
+#define A_MC_ADR_DDRPHY_ADR_POWERDOWN_2 0x450b0
+
+#define S_ADR_LANE_0_11_PD    4
+#define M_ADR_LANE_0_11_PD    0xfffU
+#define V_ADR_LANE_0_11_PD(x) ((x) << S_ADR_LANE_0_11_PD)
+#define G_ADR_LANE_0_11_PD(x) (((x) >> S_ADR_LANE_0_11_PD) & M_ADR_LANE_0_11_PD)
+
+#define S_ADR_LANE_12_15_PD    0
+#define M_ADR_LANE_12_15_PD    0xfU
+#define V_ADR_LANE_12_15_PD(x) ((x) << S_ADR_LANE_12_15_PD)
+#define G_ADR_LANE_12_15_PD(x) (((x) >> S_ADR_LANE_12_15_PD) & M_ADR_LANE_12_15_PD)
+
+#define A_T6_MC_ADR_DDRPHY_ADR_BIT_ENABLE 0x45800
+#define A_T6_MC_ADR_DDRPHY_ADR_DIFFPAIR_ENABLE 0x45804
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY0 0x45810
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY1 0x45814
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY2 0x45818
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY3 0x4581c
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY4 0x45820
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY5 0x45824
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY6 0x45828
+#define A_T6_MC_ADR_DDRPHY_ADR_DELAY7 0x4582c
+#define A_T6_MC_ADR_DDRPHY_ADR_DFT_WRAP_STATUS_CONTROL 0x45830
+
+#define S_ADR_TEST_MODE    5
+#define M_ADR_TEST_MODE    0x3U
+#define V_ADR_TEST_MODE(x) ((x) << S_ADR_TEST_MODE)
+#define G_ADR_TEST_MODE(x) (((x) >> S_ADR_TEST_MODE) & M_ADR_TEST_MODE)
+
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN0 0x45840
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN1 0x45844
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN2 0x45848
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN3 0x4584c
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN0 0x45850
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN1 0x45854
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN2 0x45858
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN3 0x4585c
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE 0x45860
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE 0x45868
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP0 0x45880
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP1 0x45884
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE_MAP0 0x458a0
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE_MAP1 0x458a4
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE_MAP0 0x458a8
+#define A_T6_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE_MAP1 0x458ac
+#define A_T6_MC_ADR_DDRPHY_ADR_POWERDOWN_2 0x458b0
+#define A_MC_DDRPHY_ADR_PLL_VREG_CONFIG_0 0x460c0
+
+#define S_PLL_TUNE_0_2    13
+#define M_PLL_TUNE_0_2    0x7U
+#define V_PLL_TUNE_0_2(x) ((x) << S_PLL_TUNE_0_2)
+#define G_PLL_TUNE_0_2(x) (((x) >> S_PLL_TUNE_0_2) & M_PLL_TUNE_0_2)
+
+#define S_PLL_TUNECP_0_2    10
+#define M_PLL_TUNECP_0_2    0x7U
+#define V_PLL_TUNECP_0_2(x) ((x) << S_PLL_TUNECP_0_2)
+#define G_PLL_TUNECP_0_2(x) (((x) >> S_PLL_TUNECP_0_2) & M_PLL_TUNECP_0_2)
+
+#define S_PLL_TUNEF_0_5    4
+#define M_PLL_TUNEF_0_5    0x3fU
+#define V_PLL_TUNEF_0_5(x) ((x) << S_PLL_TUNEF_0_5)
+#define G_PLL_TUNEF_0_5(x) (((x) >> S_PLL_TUNEF_0_5) & M_PLL_TUNEF_0_5)
+
+#define S_PLL_TUNEVCO_0_1    2
+#define M_PLL_TUNEVCO_0_1    0x3U
+#define V_PLL_TUNEVCO_0_1(x) ((x) << S_PLL_TUNEVCO_0_1)
+#define G_PLL_TUNEVCO_0_1(x) (((x) >> S_PLL_TUNEVCO_0_1) & M_PLL_TUNEVCO_0_1)
+
+#define S_PLL_PLLXTR_0_1    0
+#define M_PLL_PLLXTR_0_1    0x3U
+#define V_PLL_PLLXTR_0_1(x) ((x) << S_PLL_PLLXTR_0_1)
+#define G_PLL_PLLXTR_0_1(x) (((x) >> S_PLL_PLLXTR_0_1) & M_PLL_PLLXTR_0_1)
+
+#define A_MC_DDRPHY_AD32S_PLL_VREG_CONFIG_0 0x460c0
+#define A_MC_DDRPHY_ADR_PLL_VREG_CONFIG_1 0x460c4
+
+#define S_PLL_TUNETDIV_0_2    13
+#define M_PLL_TUNETDIV_0_2    0x7U
+#define V_PLL_TUNETDIV_0_2(x) ((x) << S_PLL_TUNETDIV_0_2)
+#define G_PLL_TUNETDIV_0_2(x) (((x) >> S_PLL_TUNETDIV_0_2) & M_PLL_TUNETDIV_0_2)
+
+#define S_PLL_TUNEMDIV_0_1    11
+#define M_PLL_TUNEMDIV_0_1    0x3U
+#define V_PLL_TUNEMDIV_0_1(x) ((x) << S_PLL_TUNEMDIV_0_1)
+#define G_PLL_TUNEMDIV_0_1(x) (((x) >> S_PLL_TUNEMDIV_0_1) & M_PLL_TUNEMDIV_0_1)
+
+#define S_PLL_TUNEATST    10
+#define V_PLL_TUNEATST(x) ((x) << S_PLL_TUNEATST)
+#define F_PLL_TUNEATST    V_PLL_TUNEATST(1U)
+
+#define S_VREG_RANGE_0_1    8
+#define M_VREG_RANGE_0_1    0x3U
+#define V_VREG_RANGE_0_1(x) ((x) << S_VREG_RANGE_0_1)
+#define G_VREG_RANGE_0_1(x) (((x) >> S_VREG_RANGE_0_1) & M_VREG_RANGE_0_1)
+
+#define S_VREG_VREGSPARE    7
+#define V_VREG_VREGSPARE(x) ((x) << S_VREG_VREGSPARE)
+#define F_VREG_VREGSPARE    V_VREG_VREGSPARE(1U)
+
+#define S_VREG_VCCTUNE_0_1    5
+#define M_VREG_VCCTUNE_0_1    0x3U
+#define V_VREG_VCCTUNE_0_1(x) ((x) << S_VREG_VCCTUNE_0_1)
+#define G_VREG_VCCTUNE_0_1(x) (((x) >> S_VREG_VCCTUNE_0_1) & M_VREG_VCCTUNE_0_1)
+
+#define S_INTERP_SIG_SLEW_0_3    1
+#define M_INTERP_SIG_SLEW_0_3    0xfU
+#define V_INTERP_SIG_SLEW_0_3(x) ((x) << S_INTERP_SIG_SLEW_0_3)
+#define G_INTERP_SIG_SLEW_0_3(x) (((x) >> S_INTERP_SIG_SLEW_0_3) & M_INTERP_SIG_SLEW_0_3)
+
+#define S_ANALOG_WRAPON    0
+#define V_ANALOG_WRAPON(x) ((x) << S_ANALOG_WRAPON)
+#define F_ANALOG_WRAPON    V_ANALOG_WRAPON(1U)
+
+#define A_MC_DDRPHY_AD32S_PLL_VREG_CONFIG_1 0x460c4
+#define A_MC_DDRPHY_ADR_SYSCLK_CNTL_PR 0x460c8
+
+#define S_SYSCLK_ENABLE    15
+#define V_SYSCLK_ENABLE(x) ((x) << S_SYSCLK_ENABLE)
+#define F_SYSCLK_ENABLE    V_SYSCLK_ENABLE(1U)
+
+#define S_SYSCLK_ROT_OVERRIDE    8
+#define M_SYSCLK_ROT_OVERRIDE    0x7fU
+#define V_SYSCLK_ROT_OVERRIDE(x) ((x) << S_SYSCLK_ROT_OVERRIDE)
+#define G_SYSCLK_ROT_OVERRIDE(x) (((x) >> S_SYSCLK_ROT_OVERRIDE) & M_SYSCLK_ROT_OVERRIDE)
+
+#define S_SYSCLK_ROT_OVERRIDE_EN    7
+#define V_SYSCLK_ROT_OVERRIDE_EN(x) ((x) << S_SYSCLK_ROT_OVERRIDE_EN)
+#define F_SYSCLK_ROT_OVERRIDE_EN    V_SYSCLK_ROT_OVERRIDE_EN(1U)
+
+#define S_SYSCLK_PHASE_ALIGN_RESET    6
+#define V_SYSCLK_PHASE_ALIGN_RESET(x) ((x) << S_SYSCLK_PHASE_ALIGN_RESET)
+#define F_SYSCLK_PHASE_ALIGN_RESET    V_SYSCLK_PHASE_ALIGN_RESET(1U)
+
+#define S_SYSCLK_PHASE_CNTL_EN    5
+#define V_SYSCLK_PHASE_CNTL_EN(x) ((x) << S_SYSCLK_PHASE_CNTL_EN)
+#define F_SYSCLK_PHASE_CNTL_EN    V_SYSCLK_PHASE_CNTL_EN(1U)
+
+#define S_SYSCLK_PHASE_DEFAULT_EN    4
+#define V_SYSCLK_PHASE_DEFAULT_EN(x) ((x) << S_SYSCLK_PHASE_DEFAULT_EN)
+#define F_SYSCLK_PHASE_DEFAULT_EN    V_SYSCLK_PHASE_DEFAULT_EN(1U)
+
+#define S_SYSCLK_POS_EDGE_ALIGN    3
+#define V_SYSCLK_POS_EDGE_ALIGN(x) ((x) << S_SYSCLK_POS_EDGE_ALIGN)
+#define F_SYSCLK_POS_EDGE_ALIGN    V_SYSCLK_POS_EDGE_ALIGN(1U)
+
+#define S_CONTINUOUS_UPDATE    2
+#define V_CONTINUOUS_UPDATE(x) ((x) << S_CONTINUOUS_UPDATE)
+#define F_CONTINUOUS_UPDATE    V_CONTINUOUS_UPDATE(1U)
+
+#define S_CE0DLTVCC    0
+#define M_CE0DLTVCC    0x3U
+#define V_CE0DLTVCC(x) ((x) << S_CE0DLTVCC)
+#define G_CE0DLTVCC(x) (((x) >> S_CE0DLTVCC) & M_CE0DLTVCC)
+
+#define A_MC_DDRPHY_AD32S_SYSCLK_CNTL_PR 0x460c8
+#define A_MC_DDRPHY_ADR_MCCLK_WRCLK_PR_STATIC_OFFSET 0x460cc
+
+#define S_TSYS_WRCLK    8
+#define M_TSYS_WRCLK    0x7fU
+#define V_TSYS_WRCLK(x) ((x) << S_TSYS_WRCLK)
+#define G_TSYS_WRCLK(x) (((x) >> S_TSYS_WRCLK) & M_TSYS_WRCLK)
+
+#define A_MC_DDRPHY_AD32S_MCCLK_WRCLK_PR_STATIC_OFFSET 0x460cc
+#define A_MC_DDRPHY_ADR_SYSCLK_PR_VALUE_RO 0x460d0
+
+#define S_SLEW_LATE_SAMPLE    15
+#define V_SLEW_LATE_SAMPLE(x) ((x) << S_SLEW_LATE_SAMPLE)
+#define F_SLEW_LATE_SAMPLE    V_SLEW_LATE_SAMPLE(1U)
+
+#define S_SYSCLK_ROT    8
+#define M_SYSCLK_ROT    0x7fU
+#define V_SYSCLK_ROT(x) ((x) << S_SYSCLK_ROT)
+#define G_SYSCLK_ROT(x) (((x) >> S_SYSCLK_ROT) & M_SYSCLK_ROT)
+
+#define S_BB_LOCK    7
+#define V_BB_LOCK(x) ((x) << S_BB_LOCK)
+#define F_BB_LOCK    V_BB_LOCK(1U)
+
+#define S_SLEW_EARLY_SAMPLE    6
+#define V_SLEW_EARLY_SAMPLE(x) ((x) << S_SLEW_EARLY_SAMPLE)
+#define F_SLEW_EARLY_SAMPLE    V_SLEW_EARLY_SAMPLE(1U)
+
+#define S_SLEW_DONE_STATUS    4
+#define M_SLEW_DONE_STATUS    0x3U
+#define V_SLEW_DONE_STATUS(x) ((x) << S_SLEW_DONE_STATUS)
+#define G_SLEW_DONE_STATUS(x) (((x) >> S_SLEW_DONE_STATUS) & M_SLEW_DONE_STATUS)
+
+#define S_SLEW_CNTL    0
+#define M_SLEW_CNTL    0xfU
+#define V_SLEW_CNTL(x) ((x) << S_SLEW_CNTL)
+#define G_SLEW_CNTL(x) (((x) >> S_SLEW_CNTL) & M_SLEW_CNTL)
+
+#define A_MC_DDRPHY_AD32S_SYSCLK_PR_VALUE_RO 0x460d0
+#define A_MC_DDRPHY_ADR_GMTEST_ATEST_CNTL 0x460d4
+
+#define S_FLUSH    15
+#define V_FLUSH(x) ((x) << S_FLUSH)
+#define F_FLUSH    V_FLUSH(1U)
+
+#define S_GIANT_MUX_TEST_EN    14
+#define V_GIANT_MUX_TEST_EN(x) ((x) << S_GIANT_MUX_TEST_EN)
+#define F_GIANT_MUX_TEST_EN    V_GIANT_MUX_TEST_EN(1U)
+
+#define S_GIANT_MUX_TEST_VAL    13
+#define V_GIANT_MUX_TEST_VAL(x) ((x) << S_GIANT_MUX_TEST_VAL)
+#define F_GIANT_MUX_TEST_VAL    V_GIANT_MUX_TEST_VAL(1U)
+
+#define S_HS_PROBE_A_SEL_    8
+#define M_HS_PROBE_A_SEL_    0xfU
+#define V_HS_PROBE_A_SEL_(x) ((x) << S_HS_PROBE_A_SEL_)
+#define G_HS_PROBE_A_SEL_(x) (((x) >> S_HS_PROBE_A_SEL_) & M_HS_PROBE_A_SEL_)
+
+#define S_HS_PROBE_B_SEL_    4
+#define M_HS_PROBE_B_SEL_    0xfU
+#define V_HS_PROBE_B_SEL_(x) ((x) << S_HS_PROBE_B_SEL_)
+#define G_HS_PROBE_B_SEL_(x) (((x) >> S_HS_PROBE_B_SEL_) & M_HS_PROBE_B_SEL_)
+
+#define S_ATEST1CTL0    3
+#define V_ATEST1CTL0(x) ((x) << S_ATEST1CTL0)
+#define F_ATEST1CTL0    V_ATEST1CTL0(1U)
+
+#define S_ATEST1CTL1    2
+#define V_ATEST1CTL1(x) ((x) << S_ATEST1CTL1)
+#define F_ATEST1CTL1    V_ATEST1CTL1(1U)
+
+#define S_ATEST1CTL2    1
+#define V_ATEST1CTL2(x) ((x) << S_ATEST1CTL2)
+#define F_ATEST1CTL2    V_ATEST1CTL2(1U)
+
+#define S_ATEST1CTL3    0
+#define V_ATEST1CTL3(x) ((x) << S_ATEST1CTL3)
+#define F_ATEST1CTL3    V_ATEST1CTL3(1U)
+
+#define A_MC_DDRPHY_AD32S_OUTPUT_FORCE_ATEST_CNTL 0x460d4
+
+#define S_FORCE_EN    14
+#define V_FORCE_EN(x) ((x) << S_FORCE_EN)
+#define F_FORCE_EN    V_FORCE_EN(1U)
+
+#define S_AD32S_HS_PROBE_A_SEL    8
+#define M_AD32S_HS_PROBE_A_SEL    0xfU
+#define V_AD32S_HS_PROBE_A_SEL(x) ((x) << S_AD32S_HS_PROBE_A_SEL)
+#define G_AD32S_HS_PROBE_A_SEL(x) (((x) >> S_AD32S_HS_PROBE_A_SEL) & M_AD32S_HS_PROBE_A_SEL)
+
+#define S_AD32S_HS_PROBE_B_SEL    4
+#define M_AD32S_HS_PROBE_B_SEL    0xfU
+#define V_AD32S_HS_PROBE_B_SEL(x) ((x) << S_AD32S_HS_PROBE_B_SEL)
+#define G_AD32S_HS_PROBE_B_SEL(x) (((x) >> S_AD32S_HS_PROBE_B_SEL) & M_AD32S_HS_PROBE_B_SEL)
+
+#define A_MC_DDRPHY_ADR_GIANT_MUX_RESULTS_A0 0x460d8
+
+#define S_GIANT_MUX_TEST_RESULTS    0
+#define M_GIANT_MUX_TEST_RESULTS    0xffffU
+#define V_GIANT_MUX_TEST_RESULTS(x) ((x) << S_GIANT_MUX_TEST_RESULTS)
+#define G_GIANT_MUX_TEST_RESULTS(x) (((x) >> S_GIANT_MUX_TEST_RESULTS) & M_GIANT_MUX_TEST_RESULTS)
+
+#define A_MC_DDRPHY_AD32S_OUTPUT_DRIVER_FORCE_VALUE0 0x460d8
+
+#define S_OUTPUT_DRIVER_FORCE_VALUE    0
+#define M_OUTPUT_DRIVER_FORCE_VALUE    0xffffU
+#define V_OUTPUT_DRIVER_FORCE_VALUE(x) ((x) << S_OUTPUT_DRIVER_FORCE_VALUE)
+#define G_OUTPUT_DRIVER_FORCE_VALUE(x) (((x) >> S_OUTPUT_DRIVER_FORCE_VALUE) & M_OUTPUT_DRIVER_FORCE_VALUE)
+
+#define A_MC_DDRPHY_ADR_GIANT_MUX_RESULTS_A1 0x460dc
+#define A_MC_DDRPHY_AD32S_OUTPUT_DRIVER_FORCE_VALUE1 0x460dc
+#define A_MC_DDRPHY_ADR_POWERDOWN_1 0x460e0
+
+#define S_MASTER_PD_CNTL    15
+#define V_MASTER_PD_CNTL(x) ((x) << S_MASTER_PD_CNTL)
+#define F_MASTER_PD_CNTL    V_MASTER_PD_CNTL(1U)
+
+#define S_ANALOG_INPUT_STAB2    14
+#define V_ANALOG_INPUT_STAB2(x) ((x) << S_ANALOG_INPUT_STAB2)
+#define F_ANALOG_INPUT_STAB2    V_ANALOG_INPUT_STAB2(1U)
+
+#define S_ANALOG_INPUT_STAB1    8
+#define V_ANALOG_INPUT_STAB1(x) ((x) << S_ANALOG_INPUT_STAB1)
+#define F_ANALOG_INPUT_STAB1    V_ANALOG_INPUT_STAB1(1U)
+
+#define S_SYSCLK_CLK_GATE    6
+#define M_SYSCLK_CLK_GATE    0x3U
+#define V_SYSCLK_CLK_GATE(x) ((x) << S_SYSCLK_CLK_GATE)
+#define G_SYSCLK_CLK_GATE(x) (((x) >> S_SYSCLK_CLK_GATE) & M_SYSCLK_CLK_GATE)
+
+#define S_WR_FIFO_STAB    5
+#define V_WR_FIFO_STAB(x) ((x) << S_WR_FIFO_STAB)
+#define F_WR_FIFO_STAB    V_WR_FIFO_STAB(1U)
+
+#define S_ADR_RX_PD    4
+#define V_ADR_RX_PD(x) ((x) << S_ADR_RX_PD)
+#define F_ADR_RX_PD    V_ADR_RX_PD(1U)
+
+#define S_TX_TRISTATE_CNTL    1
+#define V_TX_TRISTATE_CNTL(x) ((x) << S_TX_TRISTATE_CNTL)
+#define F_TX_TRISTATE_CNTL    V_TX_TRISTATE_CNTL(1U)
+
+#define S_DVCC_REG_PD    0
+#define V_DVCC_REG_PD(x) ((x) << S_DVCC_REG_PD)
+#define F_DVCC_REG_PD    V_DVCC_REG_PD(1U)
+
+#define A_MC_DDRPHY_AD32S_POWERDOWN_1 0x460e0
+#define A_MC_DDRPHY_ADR_SLEW_CAL_CNTL 0x460e4
+
+#define S_SLEW_CAL_ENABLE    15
+#define V_SLEW_CAL_ENABLE(x) ((x) << S_SLEW_CAL_ENABLE)
+#define F_SLEW_CAL_ENABLE    V_SLEW_CAL_ENABLE(1U)
+
+#define S_SLEW_CAL_START    14
+#define V_SLEW_CAL_START(x) ((x) << S_SLEW_CAL_START)
+#define F_SLEW_CAL_START    V_SLEW_CAL_START(1U)
+
+#define S_SLEW_CAL_OVERRIDE_EN    12
+#define V_SLEW_CAL_OVERRIDE_EN(x) ((x) << S_SLEW_CAL_OVERRIDE_EN)
+#define F_SLEW_CAL_OVERRIDE_EN    V_SLEW_CAL_OVERRIDE_EN(1U)
+
+#define S_SLEW_CAL_OVERRIDE    8
+#define M_SLEW_CAL_OVERRIDE    0xfU
+#define V_SLEW_CAL_OVERRIDE(x) ((x) << S_SLEW_CAL_OVERRIDE)
+#define G_SLEW_CAL_OVERRIDE(x) (((x) >> S_SLEW_CAL_OVERRIDE) & M_SLEW_CAL_OVERRIDE)
+
+#define S_SLEW_TARGET_PR_OFFSET    0
+#define M_SLEW_TARGET_PR_OFFSET    0x1fU
+#define V_SLEW_TARGET_PR_OFFSET(x) ((x) << S_SLEW_TARGET_PR_OFFSET)
+#define G_SLEW_TARGET_PR_OFFSET(x) (((x) >> S_SLEW_TARGET_PR_OFFSET) & M_SLEW_TARGET_PR_OFFSET)
+
+#define A_MC_DDRPHY_AD32S_SLEW_CAL_CNTL 0x460e4
+#define A_MC_DDRPHY_PC_DP18_PLL_LOCK_STATUS 0x47000
+
+#define S_DP18_PLL_LOCK    1
+#define M_DP18_PLL_LOCK    0x7fffU
+#define V_DP18_PLL_LOCK(x) ((x) << S_DP18_PLL_LOCK)
+#define G_DP18_PLL_LOCK(x) (((x) >> S_DP18_PLL_LOCK) & M_DP18_PLL_LOCK)
+
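+/*
+ * Illustrative check: G_DP18_PLL_LOCK() extracts the 15-bit per-DP18 lock
+ * vector, so "all DP18 PLLs locked" reads back as M_DP18_PLL_LOCK:
+ *
+ *	if (G_DP18_PLL_LOCK(t4_read_reg(sc,
+ *	    A_MC_DDRPHY_PC_DP18_PLL_LOCK_STATUS)) != M_DP18_PLL_LOCK)
+ *		return (EIO);
+ */
+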
+#define A_MC_DDRPHY_PC_AD32S_PLL_LOCK_STATUS 0x47004
+
+#define S_AD32S_PLL_LOCK    14
+#define M_AD32S_PLL_LOCK    0x3U
+#define V_AD32S_PLL_LOCK(x) ((x) << S_AD32S_PLL_LOCK)
+#define G_AD32S_PLL_LOCK(x) (((x) >> S_AD32S_PLL_LOCK) & M_AD32S_PLL_LOCK)
+
+#define A_MC_DDRPHY_PC_RANK_PAIR0 0x47008
+
+#define S_RANK_PAIR0_PRI    13
+#define M_RANK_PAIR0_PRI    0x7U
+#define V_RANK_PAIR0_PRI(x) ((x) << S_RANK_PAIR0_PRI)
+#define G_RANK_PAIR0_PRI(x) (((x) >> S_RANK_PAIR0_PRI) & M_RANK_PAIR0_PRI)
+
+#define S_RANK_PAIR0_PRI_V    12
+#define V_RANK_PAIR0_PRI_V(x) ((x) << S_RANK_PAIR0_PRI_V)
+#define F_RANK_PAIR0_PRI_V    V_RANK_PAIR0_PRI_V(1U)
+
+#define S_RANK_PAIR0_SEC    9
+#define M_RANK_PAIR0_SEC    0x7U
+#define V_RANK_PAIR0_SEC(x) ((x) << S_RANK_PAIR0_SEC)
+#define G_RANK_PAIR0_SEC(x) (((x) >> S_RANK_PAIR0_SEC) & M_RANK_PAIR0_SEC)
+
+#define S_RANK_PAIR0_SEC_V    8
+#define V_RANK_PAIR0_SEC_V(x) ((x) << S_RANK_PAIR0_SEC_V)
+#define F_RANK_PAIR0_SEC_V    V_RANK_PAIR0_SEC_V(1U)
+
+#define S_RANK_PAIR1_PRI    5
+#define M_RANK_PAIR1_PRI    0x7U
+#define V_RANK_PAIR1_PRI(x) ((x) << S_RANK_PAIR1_PRI)
+#define G_RANK_PAIR1_PRI(x) (((x) >> S_RANK_PAIR1_PRI) & M_RANK_PAIR1_PRI)
+
+#define S_RANK_PAIR1_PRI_V    4
+#define V_RANK_PAIR1_PRI_V(x) ((x) << S_RANK_PAIR1_PRI_V)
+#define F_RANK_PAIR1_PRI_V    V_RANK_PAIR1_PRI_V(1U)
+
+#define S_RANK_PAIR1_SEC    1
+#define M_RANK_PAIR1_SEC    0x7U
+#define V_RANK_PAIR1_SEC(x) ((x) << S_RANK_PAIR1_SEC)
+#define G_RANK_PAIR1_SEC(x) (((x) >> S_RANK_PAIR1_SEC) & M_RANK_PAIR1_SEC)
+
+#define S_RANK_PAIR1_SEC_V    0
+#define V_RANK_PAIR1_SEC_V(x) ((x) << S_RANK_PAIR1_SEC_V)
+#define F_RANK_PAIR1_SEC_V    V_RANK_PAIR1_SEC_V(1U)
+
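+/*
+ * Sketch (the "pri" local is hypothetical): each rank-pair field carries a
+ * companion valid bit, so a decode should gate on the _V flag before
+ * trusting the field value:
+ *
+ *	uint32_t rp = t4_read_reg(sc, A_MC_DDRPHY_PC_RANK_PAIR0);
+ *	int pri = -1;
+ *	if (rp & F_RANK_PAIR0_PRI_V)
+ *		pri = G_RANK_PAIR0_PRI(rp);
+ */
+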
+#define A_MC_DDRPHY_PC_RANK_PAIR1 0x4700c
+
+#define S_RANK_PAIR2_PRI    13
+#define M_RANK_PAIR2_PRI    0x7U
+#define V_RANK_PAIR2_PRI(x) ((x) << S_RANK_PAIR2_PRI)
+#define G_RANK_PAIR2_PRI(x) (((x) >> S_RANK_PAIR2_PRI) & M_RANK_PAIR2_PRI)
+
+#define S_RANK_PAIR2_PRI_V    12
+#define V_RANK_PAIR2_PRI_V(x) ((x) << S_RANK_PAIR2_PRI_V)
+#define F_RANK_PAIR2_PRI_V    V_RANK_PAIR2_PRI_V(1U)
+
+#define S_RANK_PAIR2_SEC    9
+#define M_RANK_PAIR2_SEC    0x7U
+#define V_RANK_PAIR2_SEC(x) ((x) << S_RANK_PAIR2_SEC)
+#define G_RANK_PAIR2_SEC(x) (((x) >> S_RANK_PAIR2_SEC) & M_RANK_PAIR2_SEC)
+
+#define S_RANK_PAIR2_SEC_V    8
+#define V_RANK_PAIR2_SEC_V(x) ((x) << S_RANK_PAIR2_SEC_V)
+#define F_RANK_PAIR2_SEC_V    V_RANK_PAIR2_SEC_V(1U)
+
+#define S_RANK_PAIR3_PRI    5
+#define M_RANK_PAIR3_PRI    0x7U
+#define V_RANK_PAIR3_PRI(x) ((x) << S_RANK_PAIR3_PRI)
+#define G_RANK_PAIR3_PRI(x) (((x) >> S_RANK_PAIR3_PRI) & M_RANK_PAIR3_PRI)
+
+#define S_RANK_PAIR3_PRI_V    4
+#define V_RANK_PAIR3_PRI_V(x) ((x) << S_RANK_PAIR3_PRI_V)
+#define F_RANK_PAIR3_PRI_V    V_RANK_PAIR3_PRI_V(1U)
+
+#define S_RANK_PAIR3_SEC    1
+#define M_RANK_PAIR3_SEC    0x7U
+#define V_RANK_PAIR3_SEC(x) ((x) << S_RANK_PAIR3_SEC)
+#define G_RANK_PAIR3_SEC(x) (((x) >> S_RANK_PAIR3_SEC) & M_RANK_PAIR3_SEC)
+
+#define S_RANK_PAIR3_SEC_V    0
+#define V_RANK_PAIR3_SEC_V(x) ((x) << S_RANK_PAIR3_SEC_V)
+#define F_RANK_PAIR3_SEC_V    V_RANK_PAIR3_SEC_V(1U)
+
+#define A_MC_DDRPHY_PC_BASE_CNTR0 0x47010
+
+#define S_PERIODIC_BASE_CNTR0    0
+#define M_PERIODIC_BASE_CNTR0    0xffffU
+#define V_PERIODIC_BASE_CNTR0(x) ((x) << S_PERIODIC_BASE_CNTR0)
+#define G_PERIODIC_BASE_CNTR0(x) (((x) >> S_PERIODIC_BASE_CNTR0) & M_PERIODIC_BASE_CNTR0)
+
+#define A_MC_DDRPHY_PC_RELOAD_VALUE0 0x47014
+
+#define S_PERIODIC_CAL_REQ_EN    15
+#define V_PERIODIC_CAL_REQ_EN(x) ((x) << S_PERIODIC_CAL_REQ_EN)
+#define F_PERIODIC_CAL_REQ_EN    V_PERIODIC_CAL_REQ_EN(1U)
+
+#define S_PERIODIC_RELOAD_VALUE0    0
+#define M_PERIODIC_RELOAD_VALUE0    0x7fffU
+#define V_PERIODIC_RELOAD_VALUE0(x) ((x) << S_PERIODIC_RELOAD_VALUE0)
+#define G_PERIODIC_RELOAD_VALUE0(x) (((x) >> S_PERIODIC_RELOAD_VALUE0) & M_PERIODIC_RELOAD_VALUE0)
+
+#define A_MC_DDRPHY_PC_BASE_CNTR1 0x47018
+
+#define S_PERIODIC_BASE_CNTR1    0
+#define M_PERIODIC_BASE_CNTR1    0xffffU
+#define V_PERIODIC_BASE_CNTR1(x) ((x) << S_PERIODIC_BASE_CNTR1)
+#define G_PERIODIC_BASE_CNTR1(x) (((x) >> S_PERIODIC_BASE_CNTR1) & M_PERIODIC_BASE_CNTR1)
+
+#define A_MC_DDRPHY_PC_CAL_TIMER 0x4701c
+
+#define S_PERIODIC_CAL_TIMER    0
+#define M_PERIODIC_CAL_TIMER    0xffffU
+#define V_PERIODIC_CAL_TIMER(x) ((x) << S_PERIODIC_CAL_TIMER)
+#define G_PERIODIC_CAL_TIMER(x) (((x) >> S_PERIODIC_CAL_TIMER) & M_PERIODIC_CAL_TIMER)
+
+#define A_MC_DDRPHY_PC_CAL_TIMER_RELOAD_VALUE 0x47020
+
+#define S_PERIODIC_TIMER_RELOAD_VALUE    0
+#define M_PERIODIC_TIMER_RELOAD_VALUE    0xffffU
+#define V_PERIODIC_TIMER_RELOAD_VALUE(x) ((x) << S_PERIODIC_TIMER_RELOAD_VALUE)
+#define G_PERIODIC_TIMER_RELOAD_VALUE(x) (((x) >> S_PERIODIC_TIMER_RELOAD_VALUE) & M_PERIODIC_TIMER_RELOAD_VALUE)
+
+#define A_MC_DDRPHY_PC_ZCAL_TIMER 0x47024
+
+#define S_PERIODIC_ZCAL_TIMER    0
+#define M_PERIODIC_ZCAL_TIMER    0xffffU
+#define V_PERIODIC_ZCAL_TIMER(x) ((x) << S_PERIODIC_ZCAL_TIMER)
+#define G_PERIODIC_ZCAL_TIMER(x) (((x) >> S_PERIODIC_ZCAL_TIMER) & M_PERIODIC_ZCAL_TIMER)
+
+#define A_MC_DDRPHY_PC_ZCAL_TIMER_RELOAD_VALUE 0x47028
+#define A_MC_DDRPHY_PC_PER_CAL_CONFIG 0x4702c
+
+#define S_PER_ENA_RANK_PAIR    12
+#define M_PER_ENA_RANK_PAIR    0xfU
+#define V_PER_ENA_RANK_PAIR(x) ((x) << S_PER_ENA_RANK_PAIR)
+#define G_PER_ENA_RANK_PAIR(x) (((x) >> S_PER_ENA_RANK_PAIR) & M_PER_ENA_RANK_PAIR)
+
+#define S_PER_ENA_ZCAL    11
+#define V_PER_ENA_ZCAL(x) ((x) << S_PER_ENA_ZCAL)
+#define F_PER_ENA_ZCAL    V_PER_ENA_ZCAL(1U)
+
+#define S_PER_ENA_SYSCLK_ALIGN    10
+#define V_PER_ENA_SYSCLK_ALIGN(x) ((x) << S_PER_ENA_SYSCLK_ALIGN)
+#define F_PER_ENA_SYSCLK_ALIGN    V_PER_ENA_SYSCLK_ALIGN(1U)
+
+#define S_ENA_PER_READ_CTR    9
+#define V_ENA_PER_READ_CTR(x) ((x) << S_ENA_PER_READ_CTR)
+#define F_ENA_PER_READ_CTR    V_ENA_PER_READ_CTR(1U)
+
+#define S_ENA_PER_RDCLK_ALIGN    8
+#define V_ENA_PER_RDCLK_ALIGN(x) ((x) << S_ENA_PER_RDCLK_ALIGN)
+#define F_ENA_PER_RDCLK_ALIGN    V_ENA_PER_RDCLK_ALIGN(1U)
+
+#define S_ENA_PER_DQS_ALIGN    7
+#define V_ENA_PER_DQS_ALIGN(x) ((x) << S_ENA_PER_DQS_ALIGN)
+#define F_ENA_PER_DQS_ALIGN    V_ENA_PER_DQS_ALIGN(1U)
+
+#define S_PER_NEXT_RANK_PAIR    5
+#define M_PER_NEXT_RANK_PAIR    0x3U
+#define V_PER_NEXT_RANK_PAIR(x) ((x) << S_PER_NEXT_RANK_PAIR)
+#define G_PER_NEXT_RANK_PAIR(x) (((x) >> S_PER_NEXT_RANK_PAIR) & M_PER_NEXT_RANK_PAIR)
+
+#define S_FAST_SIM_PER_CNTR    4
+#define V_FAST_SIM_PER_CNTR(x) ((x) << S_FAST_SIM_PER_CNTR)
+#define F_FAST_SIM_PER_CNTR    V_FAST_SIM_PER_CNTR(1U)
+
+#define S_START_INIT_CAL    3
+#define V_START_INIT_CAL(x) ((x) << S_START_INIT_CAL)
+#define F_START_INIT_CAL    V_START_INIT_CAL(1U)
+
+#define S_START_PER_CAL    2
+#define V_START_PER_CAL(x) ((x) << S_START_PER_CAL)
+#define F_START_PER_CAL    V_START_PER_CAL(1U)
+
+#define S_ABORT_ON_ERR_EN    1
+#define V_ABORT_ON_ERR_EN(x) ((x) << S_ABORT_ON_ERR_EN)
+#define F_ABORT_ON_ERR_EN    V_ABORT_ON_ERR_EN(1U)
+
+#define S_ENA_PER_RD_CTR    9
+#define V_ENA_PER_RD_CTR(x) ((x) << S_ENA_PER_RD_CTR)
+#define F_ENA_PER_RD_CTR    V_ENA_PER_RD_CTR(1U)
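+
+/*
+ * Illustrative sketch (editorial): per the field names above, a periodic
+ * calibration pass on rank pair 0 with periodic DQS alignment enabled
+ * could be started by composing the enable bits with the start bit
+ * (exact field semantics are assumed from the names):
+ *
+ *	t4_write_reg(sc, A_MC_DDRPHY_PC_PER_CAL_CONFIG,
+ *	    V_PER_ENA_RANK_PAIR(1) | F_ENA_PER_DQS_ALIGN | F_START_PER_CAL);
+ */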
+
+#define A_MC_DDRPHY_PC_CONFIG0 0x47030
+
+#define S_PROTOCOL_DDR    12
+#define M_PROTOCOL_DDR    0xfU
+#define V_PROTOCOL_DDR(x) ((x) << S_PROTOCOL_DDR)
+#define G_PROTOCOL_DDR(x) (((x) >> S_PROTOCOL_DDR) & M_PROTOCOL_DDR)
+
+#define S_DATA_MUX4_1MODE    11
+#define V_DATA_MUX4_1MODE(x) ((x) << S_DATA_MUX4_1MODE)
+#define F_DATA_MUX4_1MODE    V_DATA_MUX4_1MODE(1U)
+
+#define S_DDR4_CMD_SIG_REDUCTION    9
+#define V_DDR4_CMD_SIG_REDUCTION(x) ((x) << S_DDR4_CMD_SIG_REDUCTION)
+#define F_DDR4_CMD_SIG_REDUCTION    V_DDR4_CMD_SIG_REDUCTION(1U)
+
+#define S_SYSCLK_2X_MEMINTCLKO    8
+#define V_SYSCLK_2X_MEMINTCLKO(x) ((x) << S_SYSCLK_2X_MEMINTCLKO)
+#define F_SYSCLK_2X_MEMINTCLKO    V_SYSCLK_2X_MEMINTCLKO(1U)
+
+#define S_RANK_OVERRIDE    7
+#define V_RANK_OVERRIDE(x) ((x) << S_RANK_OVERRIDE)
+#define F_RANK_OVERRIDE    V_RANK_OVERRIDE(1U)
+
+#define S_RANK_OVERRIDE_VALUE    4
+#define M_RANK_OVERRIDE_VALUE    0x7U
+#define V_RANK_OVERRIDE_VALUE(x) ((x) << S_RANK_OVERRIDE_VALUE)
+#define G_RANK_OVERRIDE_VALUE(x) (((x) >> S_RANK_OVERRIDE_VALUE) & M_RANK_OVERRIDE_VALUE)
+
+#define S_LOW_LATENCY    3
+#define V_LOW_LATENCY(x) ((x) << S_LOW_LATENCY)
+#define F_LOW_LATENCY    V_LOW_LATENCY(1U)
+
+#define S_DDR4_BANK_REFRESH    2
+#define V_DDR4_BANK_REFRESH(x) ((x) << S_DDR4_BANK_REFRESH)
+#define F_DDR4_BANK_REFRESH    V_DDR4_BANK_REFRESH(1U)
+
+#define S_DDR4_VLEVEL_BANK_GROUP    1
+#define V_DDR4_VLEVEL_BANK_GROUP(x) ((x) << S_DDR4_VLEVEL_BANK_GROUP)
+#define F_DDR4_VLEVEL_BANK_GROUP    V_DDR4_VLEVEL_BANK_GROUP(1U)
+
+#define S_DDRPHY_PROTOCOL    12
+#define M_DDRPHY_PROTOCOL    0xfU
+#define V_DDRPHY_PROTOCOL(x) ((x) << S_DDRPHY_PROTOCOL)
+#define G_DDRPHY_PROTOCOL(x) (((x) >> S_DDRPHY_PROTOCOL) & M_DDRPHY_PROTOCOL)
+
+#define S_SPAM_EN    10
+#define V_SPAM_EN(x) ((x) << S_SPAM_EN)
+#define F_SPAM_EN    V_SPAM_EN(1U)
+
+#define S_DDR4_IPW_LOOP_DIS    2
+#define V_DDR4_IPW_LOOP_DIS(x) ((x) << S_DDR4_IPW_LOOP_DIS)
+#define F_DDR4_IPW_LOOP_DIS    V_DDR4_IPW_LOOP_DIS(1U)
+
+#define A_MC_DDRPHY_PC_CONFIG1 0x47034
+
+#define S_WRITE_LATENCY_OFFSET    12
+#define M_WRITE_LATENCY_OFFSET    0xfU
+#define V_WRITE_LATENCY_OFFSET(x) ((x) << S_WRITE_LATENCY_OFFSET)
+#define G_WRITE_LATENCY_OFFSET(x) (((x) >> S_WRITE_LATENCY_OFFSET) & M_WRITE_LATENCY_OFFSET)
+
+#define S_READ_LATENCY_OFFSET    8
+#define M_READ_LATENCY_OFFSET    0xfU
+#define V_READ_LATENCY_OFFSET(x) ((x) << S_READ_LATENCY_OFFSET)
+#define G_READ_LATENCY_OFFSET(x) (((x) >> S_READ_LATENCY_OFFSET) & M_READ_LATENCY_OFFSET)
+
+#define S_MEMCTL_CIC_FAST    7
+#define V_MEMCTL_CIC_FAST(x) ((x) << S_MEMCTL_CIC_FAST)
+#define F_MEMCTL_CIC_FAST    V_MEMCTL_CIC_FAST(1U)
+
+#define S_MEMCTL_CTRN_IGNORE    6
+#define V_MEMCTL_CTRN_IGNORE(x) ((x) << S_MEMCTL_CTRN_IGNORE)
+#define F_MEMCTL_CTRN_IGNORE    V_MEMCTL_CTRN_IGNORE(1U)
+
+#define S_DISABLE_MEMCTL_CAL    5
+#define V_DISABLE_MEMCTL_CAL(x) ((x) << S_DISABLE_MEMCTL_CAL)
+#define F_DISABLE_MEMCTL_CAL    V_DISABLE_MEMCTL_CAL(1U)
+
+#define S_MEMCTL_CIS_IGNORE    6
+#define V_MEMCTL_CIS_IGNORE(x) ((x) << S_MEMCTL_CIS_IGNORE)
+#define F_MEMCTL_CIS_IGNORE    V_MEMCTL_CIS_IGNORE(1U)
+
+#define S_MEMORY_TYPE    2
+#define M_MEMORY_TYPE    0x7U
+#define V_MEMORY_TYPE(x) ((x) << S_MEMORY_TYPE)
+#define G_MEMORY_TYPE(x) (((x) >> S_MEMORY_TYPE) & M_MEMORY_TYPE)
+
+#define S_DDR4_PDA_MODE    1
+#define V_DDR4_PDA_MODE(x) ((x) << S_DDR4_PDA_MODE)
+#define F_DDR4_PDA_MODE    V_DDR4_PDA_MODE(1U)
+
+#define A_MC_DDRPHY_PC_RESETS 0x47038
+
+#define S_PLL_RESET    15
+#define V_PLL_RESET(x) ((x) << S_PLL_RESET)
+#define F_PLL_RESET    V_PLL_RESET(1U)
+
+#define S_SYSCLK_RESET    14
+#define V_SYSCLK_RESET(x) ((x) << S_SYSCLK_RESET)
+#define F_SYSCLK_RESET    V_SYSCLK_RESET(1U)
+
+#define A_MC_DDRPHY_PC_PER_ZCAL_CONFIG 0x4703c
+
+#define S_PER_ZCAL_ENA_RANK    8
+#define M_PER_ZCAL_ENA_RANK    0xffU
+#define V_PER_ZCAL_ENA_RANK(x) ((x) << S_PER_ZCAL_ENA_RANK)
+#define G_PER_ZCAL_ENA_RANK(x) (((x) >> S_PER_ZCAL_ENA_RANK) & M_PER_ZCAL_ENA_RANK)
+
+#define S_PER_ZCAL_NEXT_RANK    5
+#define M_PER_ZCAL_NEXT_RANK    0x7U
+#define V_PER_ZCAL_NEXT_RANK(x) ((x) << S_PER_ZCAL_NEXT_RANK)
+#define G_PER_ZCAL_NEXT_RANK(x) (((x) >> S_PER_ZCAL_NEXT_RANK) & M_PER_ZCAL_NEXT_RANK)
+
+#define S_START_PER_ZCAL    4
+#define V_START_PER_ZCAL(x) ((x) << S_START_PER_ZCAL)
+#define F_START_PER_ZCAL    V_START_PER_ZCAL(1U)
+
+#define A_MC_DDRPHY_PC_RANK_GROUP 0x47044
+
+#define S_ADDR_MIRROR_RP0_PRI    15
+#define V_ADDR_MIRROR_RP0_PRI(x) ((x) << S_ADDR_MIRROR_RP0_PRI)
+#define F_ADDR_MIRROR_RP0_PRI    V_ADDR_MIRROR_RP0_PRI(1U)
+
+#define S_ADDR_MIRROR_RP0_SEC    14
+#define V_ADDR_MIRROR_RP0_SEC(x) ((x) << S_ADDR_MIRROR_RP0_SEC)
+#define F_ADDR_MIRROR_RP0_SEC    V_ADDR_MIRROR_RP0_SEC(1U)
+
+#define S_ADDR_MIRROR_RP1_PRI    13
+#define V_ADDR_MIRROR_RP1_PRI(x) ((x) << S_ADDR_MIRROR_RP1_PRI)
+#define F_ADDR_MIRROR_RP1_PRI    V_ADDR_MIRROR_RP1_PRI(1U)
+
+#define S_ADDR_MIRROR_RP1_SEC    12
+#define V_ADDR_MIRROR_RP1_SEC(x) ((x) << S_ADDR_MIRROR_RP1_SEC)
+#define F_ADDR_MIRROR_RP1_SEC    V_ADDR_MIRROR_RP1_SEC(1U)
+
+#define S_ADDR_MIRROR_RP2_PRI    11
+#define V_ADDR_MIRROR_RP2_PRI(x) ((x) << S_ADDR_MIRROR_RP2_PRI)
+#define F_ADDR_MIRROR_RP2_PRI    V_ADDR_MIRROR_RP2_PRI(1U)
+
+#define S_ADDR_MIRROR_RP2_SEC    10
+#define V_ADDR_MIRROR_RP2_SEC(x) ((x) << S_ADDR_MIRROR_RP2_SEC)
+#define F_ADDR_MIRROR_RP2_SEC    V_ADDR_MIRROR_RP2_SEC(1U)
+
+#define S_ADDR_MIRROR_RP3_PRI    9
+#define V_ADDR_MIRROR_RP3_PRI(x) ((x) << S_ADDR_MIRROR_RP3_PRI)
+#define F_ADDR_MIRROR_RP3_PRI    V_ADDR_MIRROR_RP3_PRI(1U)
+
+#define S_ADDR_MIRROR_RP3_SEC    8
+#define V_ADDR_MIRROR_RP3_SEC(x) ((x) << S_ADDR_MIRROR_RP3_SEC)
+#define F_ADDR_MIRROR_RP3_SEC    V_ADDR_MIRROR_RP3_SEC(1U)
+
+#define S_RANK_GROUPING    6
+#define M_RANK_GROUPING    0x3U
+#define V_RANK_GROUPING(x) ((x) << S_RANK_GROUPING)
+#define G_RANK_GROUPING(x) (((x) >> S_RANK_GROUPING) & M_RANK_GROUPING)
+
+#define S_ADDR_MIRROR_A3_A4    5
+#define V_ADDR_MIRROR_A3_A4(x) ((x) << S_ADDR_MIRROR_A3_A4)
+#define F_ADDR_MIRROR_A3_A4    V_ADDR_MIRROR_A3_A4(1U)
+
+#define S_ADDR_MIRROR_A5_A6    4
+#define V_ADDR_MIRROR_A5_A6(x) ((x) << S_ADDR_MIRROR_A5_A6)
+#define F_ADDR_MIRROR_A5_A6    V_ADDR_MIRROR_A5_A6(1U)
+
+#define S_ADDR_MIRROR_A7_A8    3
+#define V_ADDR_MIRROR_A7_A8(x) ((x) << S_ADDR_MIRROR_A7_A8)
+#define F_ADDR_MIRROR_A7_A8    V_ADDR_MIRROR_A7_A8(1U)
+
+#define S_ADDR_MIRROR_A11_A13    2
+#define V_ADDR_MIRROR_A11_A13(x) ((x) << S_ADDR_MIRROR_A11_A13)
+#define F_ADDR_MIRROR_A11_A13    V_ADDR_MIRROR_A11_A13(1U)
+
+#define S_ADDR_MIRROR_BA0_BA1    1
+#define V_ADDR_MIRROR_BA0_BA1(x) ((x) << S_ADDR_MIRROR_BA0_BA1)
+#define F_ADDR_MIRROR_BA0_BA1    V_ADDR_MIRROR_BA0_BA1(1U)
+
+#define S_ADDR_MIRROR_BG0_BG1    0
+#define V_ADDR_MIRROR_BG0_BG1(x) ((x) << S_ADDR_MIRROR_BG0_BG1)
+#define F_ADDR_MIRROR_BG0_BG1    V_ADDR_MIRROR_BG0_BG1(1U)
+
+#define A_MC_DDRPHY_PC_ERROR_STATUS0 0x47048
+
+#define S_RC_ERROR    15
+#define V_RC_ERROR(x) ((x) << S_RC_ERROR)
+#define F_RC_ERROR    V_RC_ERROR(1U)
+
+#define S_WC_ERROR    14
+#define V_WC_ERROR(x) ((x) << S_WC_ERROR)
+#define F_WC_ERROR    V_WC_ERROR(1U)
+
+#define S_SEQ_ERROR    13
+#define V_SEQ_ERROR(x) ((x) << S_SEQ_ERROR)
+#define F_SEQ_ERROR    V_SEQ_ERROR(1U)
+
+#define S_CC_ERROR    12
+#define V_CC_ERROR(x) ((x) << S_CC_ERROR)
+#define F_CC_ERROR    V_CC_ERROR(1U)
+
+#define S_APB_ERROR    11
+#define V_APB_ERROR(x) ((x) << S_APB_ERROR)
+#define F_APB_ERROR    V_APB_ERROR(1U)
+
+#define S_PC_ERROR    10
+#define V_PC_ERROR(x) ((x) << S_PC_ERROR)
+#define F_PC_ERROR    V_PC_ERROR(1U)
+
+#define A_MC_DDRPHY_PC_ERROR_MASK0 0x4704c
+
+#define S_RC_ERROR_MASK    15
+#define V_RC_ERROR_MASK(x) ((x) << S_RC_ERROR_MASK)
+#define F_RC_ERROR_MASK    V_RC_ERROR_MASK(1U)
+
+#define S_WC_ERROR_MASK    14
+#define V_WC_ERROR_MASK(x) ((x) << S_WC_ERROR_MASK)
+#define F_WC_ERROR_MASK    V_WC_ERROR_MASK(1U)
+
+#define S_SEQ_ERROR_MASK    13
+#define V_SEQ_ERROR_MASK(x) ((x) << S_SEQ_ERROR_MASK)
+#define F_SEQ_ERROR_MASK    V_SEQ_ERROR_MASK(1U)
+
+#define S_CC_ERROR_MASK    12
+#define V_CC_ERROR_MASK(x) ((x) << S_CC_ERROR_MASK)
+#define F_CC_ERROR_MASK    V_CC_ERROR_MASK(1U)
+
+#define S_APB_ERROR_MASK    11
+#define V_APB_ERROR_MASK(x) ((x) << S_APB_ERROR_MASK)
+#define F_APB_ERROR_MASK    V_APB_ERROR_MASK(1U)
+
+#define S_PC_ERROR_MASK    10
+#define V_PC_ERROR_MASK(x) ((x) << S_PC_ERROR_MASK)
+#define F_PC_ERROR_MASK    V_PC_ERROR_MASK(1U)
+
+#define A_MC_DDRPHY_PC_IO_PVT_FET_CONTROL 0x47050
+
+#define S_PVTP    11
+#define M_PVTP    0x1fU
+#define V_PVTP(x) ((x) << S_PVTP)
+#define G_PVTP(x) (((x) >> S_PVTP) & M_PVTP)
+
+#define S_PVTN    6
+#define M_PVTN    0x1fU
+#define V_PVTN(x) ((x) << S_PVTN)
+#define G_PVTN(x) (((x) >> S_PVTN) & M_PVTN)
+
+#define S_PVT_OVERRIDE    5
+#define V_PVT_OVERRIDE(x) ((x) << S_PVT_OVERRIDE)
+#define F_PVT_OVERRIDE    V_PVT_OVERRIDE(1U)
+
+#define S_ENABLE_ZCAL    4
+#define V_ENABLE_ZCAL(x) ((x) << S_ENABLE_ZCAL)
+#define F_ENABLE_ZCAL    V_ENABLE_ZCAL(1U)
+
+#define A_MC_DDRPHY_PC_VREF_DRV_CONTROL 0x47054
+
+#define S_VREFDQ0DSGN    15
+#define V_VREFDQ0DSGN(x) ((x) << S_VREFDQ0DSGN)
+#define F_VREFDQ0DSGN    V_VREFDQ0DSGN(1U)
+
+#define S_VREFDQ0D    11
+#define M_VREFDQ0D    0xfU
+#define V_VREFDQ0D(x) ((x) << S_VREFDQ0D)
+#define G_VREFDQ0D(x) (((x) >> S_VREFDQ0D) & M_VREFDQ0D)
+
+#define S_VREFDQ1DSGN    10
+#define V_VREFDQ1DSGN(x) ((x) << S_VREFDQ1DSGN)
+#define F_VREFDQ1DSGN    V_VREFDQ1DSGN(1U)
+
+#define S_VREFDQ1D    6
+#define M_VREFDQ1D    0xfU
+#define V_VREFDQ1D(x) ((x) << S_VREFDQ1D)
+#define G_VREFDQ1D(x) (((x) >> S_VREFDQ1D) & M_VREFDQ1D)
+
+#define S_EN_ANALOG_PD    3
+#define V_EN_ANALOG_PD(x) ((x) << S_EN_ANALOG_PD)
+#define F_EN_ANALOG_PD    V_EN_ANALOG_PD(1U)
+
+#define S_ANALOG_PD_DLY    2
+#define V_ANALOG_PD_DLY(x) ((x) << S_ANALOG_PD_DLY)
+#define F_ANALOG_PD_DLY    V_ANALOG_PD_DLY(1U)
+
+#define S_ANALOG_PD_DIV    0
+#define M_ANALOG_PD_DIV    0x3U
+#define V_ANALOG_PD_DIV(x) ((x) << S_ANALOG_PD_DIV)
+#define G_ANALOG_PD_DIV(x) (((x) >> S_ANALOG_PD_DIV) & M_ANALOG_PD_DIV)
+
+#define A_MC_DDRPHY_PC_INIT_CAL_CONFIG0 0x47058
+
+#define S_ENA_WR_LEVEL    15
+#define V_ENA_WR_LEVEL(x) ((x) << S_ENA_WR_LEVEL)
+#define F_ENA_WR_LEVEL    V_ENA_WR_LEVEL(1U)
+
+#define S_ENA_INITIAL_PAT_WR    14
+#define V_ENA_INITIAL_PAT_WR(x) ((x) << S_ENA_INITIAL_PAT_WR)
+#define F_ENA_INITIAL_PAT_WR    V_ENA_INITIAL_PAT_WR(1U)
+
+#define S_ENA_DQS_ALIGN    13
+#define V_ENA_DQS_ALIGN(x) ((x) << S_ENA_DQS_ALIGN)
+#define F_ENA_DQS_ALIGN    V_ENA_DQS_ALIGN(1U)
+
+#define S_ENA_RDCLK_ALIGN    12
+#define V_ENA_RDCLK_ALIGN(x) ((x) << S_ENA_RDCLK_ALIGN)
+#define F_ENA_RDCLK_ALIGN    V_ENA_RDCLK_ALIGN(1U)
+
+#define S_ENA_READ_CTR    11
+#define V_ENA_READ_CTR(x) ((x) << S_ENA_READ_CTR)
+#define F_ENA_READ_CTR    V_ENA_READ_CTR(1U)
+
+#define S_ENA_WRITE_CTR    10
+#define V_ENA_WRITE_CTR(x) ((x) << S_ENA_WRITE_CTR)
+#define F_ENA_WRITE_CTR    V_ENA_WRITE_CTR(1U)
+
+#define S_ENA_INITIAL_COARSE_WR    9
+#define V_ENA_INITIAL_COARSE_WR(x) ((x) << S_ENA_INITIAL_COARSE_WR)
+#define F_ENA_INITIAL_COARSE_WR    V_ENA_INITIAL_COARSE_WR(1U)
+
+#define S_ENA_COARSE_RD    8
+#define V_ENA_COARSE_RD(x) ((x) << S_ENA_COARSE_RD)
+#define F_ENA_COARSE_RD    V_ENA_COARSE_RD(1U)
+
+#define S_ENA_CUSTOM_RD    7
+#define V_ENA_CUSTOM_RD(x) ((x) << S_ENA_CUSTOM_RD)
+#define F_ENA_CUSTOM_RD    V_ENA_CUSTOM_RD(1U)
+
+#define S_ENA_CUSTOM_WR    6
+#define V_ENA_CUSTOM_WR(x) ((x) << S_ENA_CUSTOM_WR)
+#define F_ENA_CUSTOM_WR    V_ENA_CUSTOM_WR(1U)
+
+#define S_ABORT_ON_CAL_ERROR    5
+#define V_ABORT_ON_CAL_ERROR(x) ((x) << S_ABORT_ON_CAL_ERROR)
+#define F_ABORT_ON_CAL_ERROR    V_ABORT_ON_CAL_ERROR(1U)
+
+#define S_ENA_DIGITAL_EYE    4
+#define V_ENA_DIGITAL_EYE(x) ((x) << S_ENA_DIGITAL_EYE)
+#define F_ENA_DIGITAL_EYE    V_ENA_DIGITAL_EYE(1U)
+
+#define S_ENA_RANK_PAIR    0
+#define M_ENA_RANK_PAIR    0xfU
+#define V_ENA_RANK_PAIR(x) ((x) << S_ENA_RANK_PAIR)
+#define G_ENA_RANK_PAIR(x) (((x) >> S_ENA_RANK_PAIR) & M_ENA_RANK_PAIR)
+
+#define A_MC_DDRPHY_PC_INIT_CAL_CONFIG1 0x4705c
+
+#define S_REFRESH_COUNT    12
+#define M_REFRESH_COUNT    0xfU
+#define V_REFRESH_COUNT(x) ((x) << S_REFRESH_COUNT)
+#define G_REFRESH_COUNT(x) (((x) >> S_REFRESH_COUNT) & M_REFRESH_COUNT)
+
+#define S_REFRESH_CONTROL    10
+#define M_REFRESH_CONTROL    0x3U
+#define V_REFRESH_CONTROL(x) ((x) << S_REFRESH_CONTROL)
+#define G_REFRESH_CONTROL(x) (((x) >> S_REFRESH_CONTROL) & M_REFRESH_CONTROL)
+
+#define S_REFRESH_ALL_RANKS    9
+#define V_REFRESH_ALL_RANKS(x) ((x) << S_REFRESH_ALL_RANKS)
+#define F_REFRESH_ALL_RANKS    V_REFRESH_ALL_RANKS(1U)
+
+#define S_REFRESH_INTERVAL    0
+#define M_REFRESH_INTERVAL    0x7fU
+#define V_REFRESH_INTERVAL(x) ((x) << S_REFRESH_INTERVAL)
+#define G_REFRESH_INTERVAL(x) (((x) >> S_REFRESH_INTERVAL) & M_REFRESH_INTERVAL)
+
+#define A_MC_DDRPHY_PC_INIT_CAL_ERROR 0x47060
+
+#define S_ERROR_WR_LEVEL    15
+#define V_ERROR_WR_LEVEL(x) ((x) << S_ERROR_WR_LEVEL)
+#define F_ERROR_WR_LEVEL    V_ERROR_WR_LEVEL(1U)
+
+#define S_ERROR_INITIAL_PAT_WRITE    14
+#define V_ERROR_INITIAL_PAT_WRITE(x) ((x) << S_ERROR_INITIAL_PAT_WRITE)
+#define F_ERROR_INITIAL_PAT_WRITE    V_ERROR_INITIAL_PAT_WRITE(1U)
+
+#define S_ERROR_DQS_ALIGN    13
+#define V_ERROR_DQS_ALIGN(x) ((x) << S_ERROR_DQS_ALIGN)
+#define F_ERROR_DQS_ALIGN    V_ERROR_DQS_ALIGN(1U)
+
+#define S_ERROR_RDCLK_ALIGN    12
+#define V_ERROR_RDCLK_ALIGN(x) ((x) << S_ERROR_RDCLK_ALIGN)
+#define F_ERROR_RDCLK_ALIGN    V_ERROR_RDCLK_ALIGN(1U)
+
+#define S_ERROR_READ_CTR    11
+#define V_ERROR_READ_CTR(x) ((x) << S_ERROR_READ_CTR)
+#define F_ERROR_READ_CTR    V_ERROR_READ_CTR(1U)
+
+#define S_ERROR_WRITE_CTR    10
+#define V_ERROR_WRITE_CTR(x) ((x) << S_ERROR_WRITE_CTR)
+#define F_ERROR_WRITE_CTR    V_ERROR_WRITE_CTR(1U)
+
+#define S_ERROR_INITIAL_COARSE_WR    9
+#define V_ERROR_INITIAL_COARSE_WR(x) ((x) << S_ERROR_INITIAL_COARSE_WR)
+#define F_ERROR_INITIAL_COARSE_WR    V_ERROR_INITIAL_COARSE_WR(1U)
+
+#define S_ERROR_COARSE_RD    8
+#define V_ERROR_COARSE_RD(x) ((x) << S_ERROR_COARSE_RD)
+#define F_ERROR_COARSE_RD    V_ERROR_COARSE_RD(1U)
+
+#define S_ERROR_CUSTOM_RD    7
+#define V_ERROR_CUSTOM_RD(x) ((x) << S_ERROR_CUSTOM_RD)
+#define F_ERROR_CUSTOM_RD    V_ERROR_CUSTOM_RD(1U)
+
+#define S_ERROR_CUSTOM_WR    6
+#define V_ERROR_CUSTOM_WR(x) ((x) << S_ERROR_CUSTOM_WR)
+#define F_ERROR_CUSTOM_WR    V_ERROR_CUSTOM_WR(1U)
+
+#define S_ERROR_DIGITAL_EYE    5
+#define V_ERROR_DIGITAL_EYE(x) ((x) << S_ERROR_DIGITAL_EYE)
+#define F_ERROR_DIGITAL_EYE    V_ERROR_DIGITAL_EYE(1U)
+
+#define S_ERROR_RANK_PAIR    0
+#define M_ERROR_RANK_PAIR    0xfU
+#define V_ERROR_RANK_PAIR(x) ((x) << S_ERROR_RANK_PAIR)
+#define G_ERROR_RANK_PAIR(x) (((x) >> S_ERROR_RANK_PAIR) & M_ERROR_RANK_PAIR)
+
+#define A_MC_DDRPHY_PC_INIT_CAL_STATUS 0x47064
+
+#define S_INIT_CAL_COMPLETE    12
+#define M_INIT_CAL_COMPLETE    0xfU
+#define V_INIT_CAL_COMPLETE(x) ((x) << S_INIT_CAL_COMPLETE)
+#define G_INIT_CAL_COMPLETE(x) (((x) >> S_INIT_CAL_COMPLETE) & M_INIT_CAL_COMPLETE)
+
+#define S_PER_CAL_ABORT    6
+#define V_PER_CAL_ABORT(x) ((x) << S_PER_CAL_ABORT)
+#define F_PER_CAL_ABORT    V_PER_CAL_ABORT(1U)
+
+#define A_MC_DDRPHY_PC_INIT_CAL_MASK 0x47068
+
+#define S_ERROR_WR_LEVEL_MASK    15
+#define V_ERROR_WR_LEVEL_MASK(x) ((x) << S_ERROR_WR_LEVEL_MASK)
+#define F_ERROR_WR_LEVEL_MASK    V_ERROR_WR_LEVEL_MASK(1U)
+
+#define S_ERROR_INITIAL_PAT_WRITE_MASK    14
+#define V_ERROR_INITIAL_PAT_WRITE_MASK(x) ((x) << S_ERROR_INITIAL_PAT_WRITE_MASK)
+#define F_ERROR_INITIAL_PAT_WRITE_MASK    V_ERROR_INITIAL_PAT_WRITE_MASK(1U)
+
+#define S_ERROR_DQS_ALIGN_MASK    13
+#define V_ERROR_DQS_ALIGN_MASK(x) ((x) << S_ERROR_DQS_ALIGN_MASK)
+#define F_ERROR_DQS_ALIGN_MASK    V_ERROR_DQS_ALIGN_MASK(1U)
+
+#define S_ERROR_RDCLK_ALIGN_MASK    12
+#define V_ERROR_RDCLK_ALIGN_MASK(x) ((x) << S_ERROR_RDCLK_ALIGN_MASK)
+#define F_ERROR_RDCLK_ALIGN_MASK    V_ERROR_RDCLK_ALIGN_MASK(1U)
+
+#define S_ERROR_READ_CTR_MASK    11
+#define V_ERROR_READ_CTR_MASK(x) ((x) << S_ERROR_READ_CTR_MASK)
+#define F_ERROR_READ_CTR_MASK    V_ERROR_READ_CTR_MASK(1U)
+
+#define S_ERROR_WRITE_CTR_MASK    10
+#define V_ERROR_WRITE_CTR_MASK(x) ((x) << S_ERROR_WRITE_CTR_MASK)
+#define F_ERROR_WRITE_CTR_MASK    V_ERROR_WRITE_CTR_MASK(1U)
+
+#define S_ERROR_INITIAL_COARSE_WR_MASK    9
+#define V_ERROR_INITIAL_COARSE_WR_MASK(x) ((x) << S_ERROR_INITIAL_COARSE_WR_MASK)
+#define F_ERROR_INITIAL_COARSE_WR_MASK    V_ERROR_INITIAL_COARSE_WR_MASK(1U)
+
+#define S_ERROR_COARSE_RD_MASK    8
+#define V_ERROR_COARSE_RD_MASK(x) ((x) << S_ERROR_COARSE_RD_MASK)
+#define F_ERROR_COARSE_RD_MASK    V_ERROR_COARSE_RD_MASK(1U)
+
+#define S_ERROR_CUSTOM_RD_MASK    7
+#define V_ERROR_CUSTOM_RD_MASK(x) ((x) << S_ERROR_CUSTOM_RD_MASK)
+#define F_ERROR_CUSTOM_RD_MASK    V_ERROR_CUSTOM_RD_MASK(1U)
+
+#define S_ERROR_CUSTOM_WR_MASK    6
+#define V_ERROR_CUSTOM_WR_MASK(x) ((x) << S_ERROR_CUSTOM_WR_MASK)
+#define F_ERROR_CUSTOM_WR_MASK    V_ERROR_CUSTOM_WR_MASK(1U)
+
+#define S_ERROR_DIGITAL_EYE_MASK    5
+#define V_ERROR_DIGITAL_EYE_MASK(x) ((x) << S_ERROR_DIGITAL_EYE_MASK)
+#define F_ERROR_DIGITAL_EYE_MASK    V_ERROR_DIGITAL_EYE_MASK(1U)
+
+#define A_MC_DDRPHY_PC_IO_PVT_FET_STATUS 0x4706c
+#define A_MC_DDRPHY_PC_MR0_PRI_RP 0x47070
+
+#define S_MODEREGISTER0VALUE    0
+#define M_MODEREGISTER0VALUE    0xffffU
+#define V_MODEREGISTER0VALUE(x) ((x) << S_MODEREGISTER0VALUE)
+#define G_MODEREGISTER0VALUE(x) (((x) >> S_MODEREGISTER0VALUE) & M_MODEREGISTER0VALUE)
+
+#define A_MC_DDRPHY_PC_MR1_PRI_RP 0x47074
+
+#define S_MODEREGISTER1VALUE    0
+#define M_MODEREGISTER1VALUE    0xffffU
+#define V_MODEREGISTER1VALUE(x) ((x) << S_MODEREGISTER1VALUE)
+#define G_MODEREGISTER1VALUE(x) (((x) >> S_MODEREGISTER1VALUE) & M_MODEREGISTER1VALUE)
+
+#define A_MC_DDRPHY_PC_MR2_PRI_RP 0x47078
+
+#define S_MODEREGISTER2VALUE    0
+#define M_MODEREGISTER2VALUE    0xffffU
+#define V_MODEREGISTER2VALUE(x) ((x) << S_MODEREGISTER2VALUE)
+#define G_MODEREGISTER2VALUE(x) (((x) >> S_MODEREGISTER2VALUE) & M_MODEREGISTER2VALUE)
+
+#define A_MC_DDRPHY_PC_MR3_PRI_RP 0x4707c
+
+#define S_MODEREGISTER3VALUE    0
+#define M_MODEREGISTER3VALUE    0xffffU
+#define V_MODEREGISTER3VALUE(x) ((x) << S_MODEREGISTER3VALUE)
+#define G_MODEREGISTER3VALUE(x) (((x) >> S_MODEREGISTER3VALUE) & M_MODEREGISTER3VALUE)
+
+#define A_MC_DDRPHY_PC_MR0_SEC_RP 0x47080
+#define A_MC_DDRPHY_PC_MR1_SEC_RP 0x47084
+#define A_MC_DDRPHY_PC_MR2_SEC_RP 0x47088
+#define A_MC_DDRPHY_PC_MR3_SEC_RP 0x4708c
+
+#define S_MODE_REGISTER_3_VALUE    0
+#define M_MODE_REGISTER_3_VALUE    0xffffU
+#define V_MODE_REGISTER_3_VALUE(x) ((x) << S_MODE_REGISTER_3_VALUE)
+#define G_MODE_REGISTER_3_VALUE(x) (((x) >> S_MODE_REGISTER_3_VALUE) & M_MODE_REGISTER_3_VALUE)
+
+#define A_MC_DDRPHY_SEQ_RD_WR_DATA0 0x47200
+
+#define S_DRD_WR_DATA_REG    0
+#define M_DRD_WR_DATA_REG    0xffffU
+#define V_DRD_WR_DATA_REG(x) ((x) << S_DRD_WR_DATA_REG)
+#define G_DRD_WR_DATA_REG(x) (((x) >> S_DRD_WR_DATA_REG) & M_DRD_WR_DATA_REG)
+
+#define A_MC_DDRPHY_SEQ_RD_WR_DATA1 0x47204
+#define A_MC_DDRPHY_SEQ_CONFIG0 0x47208
+
+#define S_MPR_PATTERN_BIT    15
+#define V_MPR_PATTERN_BIT(x) ((x) << S_MPR_PATTERN_BIT)
+#define F_MPR_PATTERN_BIT    V_MPR_PATTERN_BIT(1U)
+
+#define S_TWO_CYCLE_ADDR_EN    14
+#define V_TWO_CYCLE_ADDR_EN(x) ((x) << S_TWO_CYCLE_ADDR_EN)
+#define F_TWO_CYCLE_ADDR_EN    V_TWO_CYCLE_ADDR_EN(1U)
+
+#define S_MR_MASK_EN    10
+#define M_MR_MASK_EN    0xfU
+#define V_MR_MASK_EN(x) ((x) << S_MR_MASK_EN)
+#define G_MR_MASK_EN(x) (((x) >> S_MR_MASK_EN) & M_MR_MASK_EN)
+
+#define S_PARITY_DLY    9
+#define V_PARITY_DLY(x) ((x) << S_PARITY_DLY)
+#define F_PARITY_DLY    V_PARITY_DLY(1U)
+
+#define S_FORCE_RESERVED    7
+#define V_FORCE_RESERVED(x) ((x) << S_FORCE_RESERVED)
+#define F_FORCE_RESERVED    V_FORCE_RESERVED(1U)
+
+#define S_HALT_ROTATION    6
+#define V_HALT_ROTATION(x) ((x) << S_HALT_ROTATION)
+#define F_HALT_ROTATION    V_HALT_ROTATION(1U)
+
+#define S_FORCE_MPR    5
+#define V_FORCE_MPR(x) ((x) << S_FORCE_MPR)
+#define F_FORCE_MPR    V_FORCE_MPR(1U)
+
+#define S_IPW_SIDEAB_SEL    2
+#define V_IPW_SIDEAB_SEL(x) ((x) << S_IPW_SIDEAB_SEL)
+#define F_IPW_SIDEAB_SEL    V_IPW_SIDEAB_SEL(1U)
+
+#define S_PARITY_A17_MASK    1
+#define V_PARITY_A17_MASK(x) ((x) << S_PARITY_A17_MASK)
+#define F_PARITY_A17_MASK    V_PARITY_A17_MASK(1U)
+
+#define S_X16_DEVICE    0
+#define V_X16_DEVICE(x) ((x) << S_X16_DEVICE)
+#define F_X16_DEVICE    V_X16_DEVICE(1U)
+
+#define A_MC_DDRPHY_SEQ_RESERVED_ADDR0 0x4720c
+#define A_MC_DDRPHY_SEQ_RESERVED_ADDR1 0x47210
+#define A_MC_DDRPHY_SEQ_RESERVED_ADDR2 0x47214
+#define A_MC_DDRPHY_SEQ_RESERVED_ADDR3 0x47218
+#define A_MC_DDRPHY_SEQ_RESERVED_ADDR4 0x4721c
+#define A_MC_DDRPHY_SEQ_ERROR_STATUS0 0x47220
+
+#define S_MULTIPLE_REQ_ERROR    15
+#define V_MULTIPLE_REQ_ERROR(x) ((x) << S_MULTIPLE_REQ_ERROR)
+#define F_MULTIPLE_REQ_ERROR    V_MULTIPLE_REQ_ERROR(1U)
+
+#define S_INVALID_REQTYPE_ERROR    14
+#define V_INVALID_REQTYPE_ERROR(x) ((x) << S_INVALID_REQTYPE_ERROR)
+#define F_INVALID_REQTYPE_ERROR    V_INVALID_REQTYPE_ERROR(1U)
+
+#define S_EARLY_REQ_ERROR    13
+#define V_EARLY_REQ_ERROR(x) ((x) << S_EARLY_REQ_ERROR)
+#define F_EARLY_REQ_ERROR    V_EARLY_REQ_ERROR(1U)
+
+#define S_MULTIPLE_REQ_SOURCE    10
+#define M_MULTIPLE_REQ_SOURCE    0x7U
+#define V_MULTIPLE_REQ_SOURCE(x) ((x) << S_MULTIPLE_REQ_SOURCE)
+#define G_MULTIPLE_REQ_SOURCE(x) (((x) >> S_MULTIPLE_REQ_SOURCE) & M_MULTIPLE_REQ_SOURCE)
+
+#define S_INVALID_REQTYPE    6
+#define M_INVALID_REQTYPE    0xfU
+#define V_INVALID_REQTYPE(x) ((x) << S_INVALID_REQTYPE)
+#define G_INVALID_REQTYPE(x) (((x) >> S_INVALID_REQTYPE) & M_INVALID_REQTYPE)
+
+#define S_INVALID_REQ_SOURCE    3
+#define M_INVALID_REQ_SOURCE    0x7U
+#define V_INVALID_REQ_SOURCE(x) ((x) << S_INVALID_REQ_SOURCE)
+#define G_INVALID_REQ_SOURCE(x) (((x) >> S_INVALID_REQ_SOURCE) & M_INVALID_REQ_SOURCE)
+
+#define S_EARLY_REQ_SOURCE    0
+#define M_EARLY_REQ_SOURCE    0x7U
+#define V_EARLY_REQ_SOURCE(x) ((x) << S_EARLY_REQ_SOURCE)
+#define G_EARLY_REQ_SOURCE(x) (((x) >> S_EARLY_REQ_SOURCE) & M_EARLY_REQ_SOURCE)
+
+#define A_MC_DDRPHY_SEQ_ERROR_MASK0 0x47224
+
+#define S_MULT_REQ_ERR_MASK    15
+#define V_MULT_REQ_ERR_MASK(x) ((x) << S_MULT_REQ_ERR_MASK)
+#define F_MULT_REQ_ERR_MASK    V_MULT_REQ_ERR_MASK(1U)
+
+#define S_INVALID_REQTYPE_ERR_MASK    14
+#define V_INVALID_REQTYPE_ERR_MASK(x) ((x) << S_INVALID_REQTYPE_ERR_MASK)
+#define F_INVALID_REQTYPE_ERR_MASK    V_INVALID_REQTYPE_ERR_MASK(1U)
+
+#define S_EARLY_REQ_ERR_MASK    13
+#define V_EARLY_REQ_ERR_MASK(x) ((x) << S_EARLY_REQ_ERR_MASK)
+#define F_EARLY_REQ_ERR_MASK    V_EARLY_REQ_ERR_MASK(1U)
+
+#define A_MC_DDRPHY_SEQ_ODT_WR_CONFIG0 0x47228
+
+#define S_ODT_WR_VALUES_BITS0_7    8
+#define M_ODT_WR_VALUES_BITS0_7    0xffU
+#define V_ODT_WR_VALUES_BITS0_7(x) ((x) << S_ODT_WR_VALUES_BITS0_7)
+#define G_ODT_WR_VALUES_BITS0_7(x) (((x) >> S_ODT_WR_VALUES_BITS0_7) & M_ODT_WR_VALUES_BITS0_7)
+
+#define S_ODT_WR_VALUES_BITS8_15    0
+#define M_ODT_WR_VALUES_BITS8_15    0xffU
+#define V_ODT_WR_VALUES_BITS8_15(x) ((x) << S_ODT_WR_VALUES_BITS8_15)
+#define G_ODT_WR_VALUES_BITS8_15(x) (((x) >> S_ODT_WR_VALUES_BITS8_15) & M_ODT_WR_VALUES_BITS8_15)
+
+#define A_MC_DDRPHY_SEQ_ODT_WR_CONFIG1 0x4722c
+#define A_MC_DDRPHY_SEQ_ODT_WR_CONFIG2 0x47230
+#define A_MC_DDRPHY_SEQ_ODT_WR_CONFIG3 0x47234
+#define A_MC_DDRPHY_SEQ_ODT_RD_CONFIG0 0x47238
+
+#define S_ODT_RD_VALUES_X2    8
+#define M_ODT_RD_VALUES_X2    0xffU
+#define V_ODT_RD_VALUES_X2(x) ((x) << S_ODT_RD_VALUES_X2)
+#define G_ODT_RD_VALUES_X2(x) (((x) >> S_ODT_RD_VALUES_X2) & M_ODT_RD_VALUES_X2)
+
+#define S_ODT_RD_VALUES_X2PLUS1    0
+#define M_ODT_RD_VALUES_X2PLUS1    0xffU
+#define V_ODT_RD_VALUES_X2PLUS1(x) ((x) << S_ODT_RD_VALUES_X2PLUS1)
+#define G_ODT_RD_VALUES_X2PLUS1(x) (((x) >> S_ODT_RD_VALUES_X2PLUS1) & M_ODT_RD_VALUES_X2PLUS1)
+
+#define A_MC_DDRPHY_SEQ_ODT_RD_CONFIG1 0x4723c
+#define A_MC_DDRPHY_SEQ_ODT_RD_CONFIG2 0x47240
+#define A_MC_DDRPHY_SEQ_ODT_RD_CONFIG3 0x47244
+#define A_MC_DDRPHY_SEQ_MEM_TIMING_PARAM0 0x47248
+
+#define S_TMOD_CYCLES    12
+#define M_TMOD_CYCLES    0xfU
+#define V_TMOD_CYCLES(x) ((x) << S_TMOD_CYCLES)
+#define G_TMOD_CYCLES(x) (((x) >> S_TMOD_CYCLES) & M_TMOD_CYCLES)
+
+#define S_TRCD_CYCLES    8
+#define M_TRCD_CYCLES    0xfU
+#define V_TRCD_CYCLES(x) ((x) << S_TRCD_CYCLES)
+#define G_TRCD_CYCLES(x) (((x) >> S_TRCD_CYCLES) & M_TRCD_CYCLES)
+
+#define S_TRP_CYCLES    4
+#define M_TRP_CYCLES    0xfU
+#define V_TRP_CYCLES(x) ((x) << S_TRP_CYCLES)
+#define G_TRP_CYCLES(x) (((x) >> S_TRP_CYCLES) & M_TRP_CYCLES)
+
+#define S_TRFC_CYCLES    0
+#define M_TRFC_CYCLES    0xfU
+#define V_TRFC_CYCLES(x) ((x) << S_TRFC_CYCLES)
+#define G_TRFC_CYCLES(x) (((x) >> S_TRFC_CYCLES) & M_TRFC_CYCLES)
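+
+/*
+ * Illustrative sketch (editorial): multi-field registers like this one are
+ * typically programmed by OR-ing the V_ macros together; the cycle counts
+ * below are hypothetical values, each within its 4-bit mask:
+ *
+ *	t4_write_reg(sc, A_MC_DDRPHY_SEQ_MEM_TIMING_PARAM0,
+ *	    V_TMOD_CYCLES(12) | V_TRCD_CYCLES(9) | V_TRP_CYCLES(9) |
+ *	    V_TRFC_CYCLES(15));
+ */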
+
+#define A_MC_DDRPHY_SEQ_MEM_TIMING_PARAM1 0x4724c
+
+#define S_TZQINIT_CYCLES    12
+#define M_TZQINIT_CYCLES    0xfU
+#define V_TZQINIT_CYCLES(x) ((x) << S_TZQINIT_CYCLES)
+#define G_TZQINIT_CYCLES(x) (((x) >> S_TZQINIT_CYCLES) & M_TZQINIT_CYCLES)
+
+#define S_TZQCS_CYCLES    8
+#define M_TZQCS_CYCLES    0xfU
+#define V_TZQCS_CYCLES(x) ((x) << S_TZQCS_CYCLES)
+#define G_TZQCS_CYCLES(x) (((x) >> S_TZQCS_CYCLES) & M_TZQCS_CYCLES)
+
+#define S_TWLDQSEN_CYCLES    4
+#define M_TWLDQSEN_CYCLES    0xfU
+#define V_TWLDQSEN_CYCLES(x) ((x) << S_TWLDQSEN_CYCLES)
+#define G_TWLDQSEN_CYCLES(x) (((x) >> S_TWLDQSEN_CYCLES) & M_TWLDQSEN_CYCLES)
+
+#define S_TWRMRD_CYCLES    0
+#define M_TWRMRD_CYCLES    0xfU
+#define V_TWRMRD_CYCLES(x) ((x) << S_TWRMRD_CYCLES)
+#define G_TWRMRD_CYCLES(x) (((x) >> S_TWRMRD_CYCLES) & M_TWRMRD_CYCLES)
+
+#define A_MC_DDRPHY_SEQ_MEM_TIMING_PARAM2 0x47250
+
+#define S_TODTLON_OFF_CYCLES    12
+#define M_TODTLON_OFF_CYCLES    0xfU
+#define V_TODTLON_OFF_CYCLES(x) ((x) << S_TODTLON_OFF_CYCLES)
+#define G_TODTLON_OFF_CYCLES(x) (((x) >> S_TODTLON_OFF_CYCLES) & M_TODTLON_OFF_CYCLES)
+
+#define S_TRC_CYCLES    8
+#define M_TRC_CYCLES    0xfU
+#define V_TRC_CYCLES(x) ((x) << S_TRC_CYCLES)
+#define G_TRC_CYCLES(x) (((x) >> S_TRC_CYCLES) & M_TRC_CYCLES)
+
+#define S_TMRSC_CYCLES    4
+#define M_TMRSC_CYCLES    0xfU
+#define V_TMRSC_CYCLES(x) ((x) << S_TMRSC_CYCLES)
+#define G_TMRSC_CYCLES(x) (((x) >> S_TMRSC_CYCLES) & M_TMRSC_CYCLES)
+
+#define S_MRS_CMD_SPACE    0
+#define M_MRS_CMD_SPACE    0xfU
+#define V_MRS_CMD_SPACE(x) ((x) << S_MRS_CMD_SPACE)
+#define G_MRS_CMD_SPACE(x) (((x) >> S_MRS_CMD_SPACE) & M_MRS_CMD_SPACE)
+
+#define A_MC_DDRPHY_RC_CONFIG0 0x47400
+
+#define S_GLOBAL_PHY_OFFSET    12
+#define M_GLOBAL_PHY_OFFSET    0xfU
+#define V_GLOBAL_PHY_OFFSET(x) ((x) << S_GLOBAL_PHY_OFFSET)
+#define G_GLOBAL_PHY_OFFSET(x) (((x) >> S_GLOBAL_PHY_OFFSET) & M_GLOBAL_PHY_OFFSET)
+
+#define S_ADVANCE_RD_VALID    11
+#define V_ADVANCE_RD_VALID(x) ((x) << S_ADVANCE_RD_VALID)
+#define F_ADVANCE_RD_VALID    V_ADVANCE_RD_VALID(1U)
+
+#define S_SINGLE_BIT_MPR_RP0    6
+#define V_SINGLE_BIT_MPR_RP0(x) ((x) << S_SINGLE_BIT_MPR_RP0)
+#define F_SINGLE_BIT_MPR_RP0    V_SINGLE_BIT_MPR_RP0(1U)
+
+#define S_SINGLE_BIT_MPR_RP1    5
+#define V_SINGLE_BIT_MPR_RP1(x) ((x) << S_SINGLE_BIT_MPR_RP1)
+#define F_SINGLE_BIT_MPR_RP1    V_SINGLE_BIT_MPR_RP1(1U)
+
+#define S_SINGLE_BIT_MPR_RP2    4
+#define V_SINGLE_BIT_MPR_RP2(x) ((x) << S_SINGLE_BIT_MPR_RP2)
+#define F_SINGLE_BIT_MPR_RP2    V_SINGLE_BIT_MPR_RP2(1U)
+
+#define S_SINGLE_BIT_MPR_RP3    3
+#define V_SINGLE_BIT_MPR_RP3(x) ((x) << S_SINGLE_BIT_MPR_RP3)
+#define F_SINGLE_BIT_MPR_RP3    V_SINGLE_BIT_MPR_RP3(1U)
+
+#define S_ALIGN_ON_EVEN_CYCLES    2
+#define V_ALIGN_ON_EVEN_CYCLES(x) ((x) << S_ALIGN_ON_EVEN_CYCLES)
+#define F_ALIGN_ON_EVEN_CYCLES    V_ALIGN_ON_EVEN_CYCLES(1U)
+
+#define S_PERFORM_RDCLK_ALIGN    1
+#define V_PERFORM_RDCLK_ALIGN(x) ((x) << S_PERFORM_RDCLK_ALIGN)
+#define F_PERFORM_RDCLK_ALIGN    V_PERFORM_RDCLK_ALIGN(1U)
+
+#define S_STAGGERED_PATTERN    0
+#define V_STAGGERED_PATTERN(x) ((x) << S_STAGGERED_PATTERN)
+#define F_STAGGERED_PATTERN    V_STAGGERED_PATTERN(1U)
+
+#define S_ERS_MODE    10
+#define V_ERS_MODE(x) ((x) << S_ERS_MODE)
+#define F_ERS_MODE    V_ERS_MODE(1U)
+
+#define A_MC_DDRPHY_RC_CONFIG1 0x47404
+
+#define S_OUTER_LOOP_CNT    2
+#define M_OUTER_LOOP_CNT    0x3fffU
+#define V_OUTER_LOOP_CNT(x) ((x) << S_OUTER_LOOP_CNT)
+#define G_OUTER_LOOP_CNT(x) (((x) >> S_OUTER_LOOP_CNT) & M_OUTER_LOOP_CNT)
+
+#define A_MC_DDRPHY_RC_CONFIG2 0x47408
+
+#define S_CONSEQ_PASS    11
+#define M_CONSEQ_PASS    0x1fU
+#define V_CONSEQ_PASS(x) ((x) << S_CONSEQ_PASS)
+#define G_CONSEQ_PASS(x) (((x) >> S_CONSEQ_PASS) & M_CONSEQ_PASS)
+
+#define S_BURST_WINDOW    5
+#define M_BURST_WINDOW    0x3U
+#define V_BURST_WINDOW(x) ((x) << S_BURST_WINDOW)
+#define G_BURST_WINDOW(x) (((x) >> S_BURST_WINDOW) & M_BURST_WINDOW)
+
+#define S_ALLOW_RD_FIFO_AUTO_RESET    4
+#define V_ALLOW_RD_FIFO_AUTO_RESET(x) ((x) << S_ALLOW_RD_FIFO_AUTO_RESET)
+#define F_ALLOW_RD_FIFO_AUTO_RESET    V_ALLOW_RD_FIFO_AUTO_RESET(1U)
+
+#define S_DIS_LOW_PWR_PER_CAL    3
+#define V_DIS_LOW_PWR_PER_CAL(x) ((x) << S_DIS_LOW_PWR_PER_CAL)
+#define F_DIS_LOW_PWR_PER_CAL    V_DIS_LOW_PWR_PER_CAL(1U)
+
+#define A_MC_DDRPHY_RC_ERROR_STATUS0 0x47414
+
+#define S_RD_CNTL_ERROR    15
+#define V_RD_CNTL_ERROR(x) ((x) << S_RD_CNTL_ERROR)
+#define F_RD_CNTL_ERROR    V_RD_CNTL_ERROR(1U)
+
+#define A_MC_DDRPHY_RC_ERROR_MASK0 0x47418
+
+#define S_RD_CNTL_ERROR_MASK    15
+#define V_RD_CNTL_ERROR_MASK(x) ((x) << S_RD_CNTL_ERROR_MASK)
+#define F_RD_CNTL_ERROR_MASK    V_RD_CNTL_ERROR_MASK(1U)
+
+#define A_MC_DDRPHY_RC_CONFIG3 0x4741c
+
+#define S_FINE_CAL_STEP_SIZE    13
+#define M_FINE_CAL_STEP_SIZE    0x7U
+#define V_FINE_CAL_STEP_SIZE(x) ((x) << S_FINE_CAL_STEP_SIZE)
+#define G_FINE_CAL_STEP_SIZE(x) (((x) >> S_FINE_CAL_STEP_SIZE) & M_FINE_CAL_STEP_SIZE)
+
+#define S_COARSE_CAL_STEP_SIZE    9
+#define M_COARSE_CAL_STEP_SIZE    0xfU
+#define V_COARSE_CAL_STEP_SIZE(x) ((x) << S_COARSE_CAL_STEP_SIZE)
+#define G_COARSE_CAL_STEP_SIZE(x) (((x) >> S_COARSE_CAL_STEP_SIZE) & M_COARSE_CAL_STEP_SIZE)
+
+#define S_DQ_SEL_QUAD    7
+#define M_DQ_SEL_QUAD    0x3U
+#define V_DQ_SEL_QUAD(x) ((x) << S_DQ_SEL_QUAD)
+#define G_DQ_SEL_QUAD(x) (((x) >> S_DQ_SEL_QUAD) & M_DQ_SEL_QUAD)
+
+#define S_DQ_SEL_LANE    4
+#define M_DQ_SEL_LANE    0x7U
+#define V_DQ_SEL_LANE(x) ((x) << S_DQ_SEL_LANE)
+#define G_DQ_SEL_LANE(x) (((x) >> S_DQ_SEL_LANE) & M_DQ_SEL_LANE)
+
+#define A_MC_DDRPHY_RC_PERIODIC 0x47420
+#define A_MC_DDRPHY_WC_CONFIG0 0x47600
+
+#define S_TWLO_TWLOE    8
+#define M_TWLO_TWLOE    0xffU
+#define V_TWLO_TWLOE(x) ((x) << S_TWLO_TWLOE)
+#define G_TWLO_TWLOE(x) (((x) >> S_TWLO_TWLOE) & M_TWLO_TWLOE)
+
+#define S_WL_ONE_DQS_PULSE    7
+#define V_WL_ONE_DQS_PULSE(x) ((x) << S_WL_ONE_DQS_PULSE)
+#define F_WL_ONE_DQS_PULSE    V_WL_ONE_DQS_PULSE(1U)
+
+#define S_FW_WR_RD    1
+#define M_FW_WR_RD    0x3fU
+#define V_FW_WR_RD(x) ((x) << S_FW_WR_RD)
+#define G_FW_WR_RD(x) (((x) >> S_FW_WR_RD) & M_FW_WR_RD)
+
+#define S_CUSTOM_INIT_WRITE    0
+#define V_CUSTOM_INIT_WRITE(x) ((x) << S_CUSTOM_INIT_WRITE)
+#define F_CUSTOM_INIT_WRITE    V_CUSTOM_INIT_WRITE(1U)
+
+#define A_MC_DDRPHY_WC_CONFIG1 0x47604
+
+#define S_BIG_STEP    12
+#define M_BIG_STEP    0xfU
+#define V_BIG_STEP(x) ((x) << S_BIG_STEP)
+#define G_BIG_STEP(x) (((x) >> S_BIG_STEP) & M_BIG_STEP)
+
+#define S_SMALL_STEP    9
+#define M_SMALL_STEP    0x7U
+#define V_SMALL_STEP(x) ((x) << S_SMALL_STEP)
+#define G_SMALL_STEP(x) (((x) >> S_SMALL_STEP) & M_SMALL_STEP)
+
+#define S_WR_PRE_DLY    3
+#define M_WR_PRE_DLY    0x3fU
+#define V_WR_PRE_DLY(x) ((x) << S_WR_PRE_DLY)
+#define G_WR_PRE_DLY(x) (((x) >> S_WR_PRE_DLY) & M_WR_PRE_DLY)
+
+#define A_MC_DDRPHY_WC_CONFIG2 0x47608
+
+#define S_NUM_VALID_SAMPLES    12
+#define M_NUM_VALID_SAMPLES    0xfU
+#define V_NUM_VALID_SAMPLES(x) ((x) << S_NUM_VALID_SAMPLES)
+#define G_NUM_VALID_SAMPLES(x) (((x) >> S_NUM_VALID_SAMPLES) & M_NUM_VALID_SAMPLES)
+
+#define S_FW_RD_WR    6
+#define M_FW_RD_WR    0x3fU
+#define V_FW_RD_WR(x) ((x) << S_FW_RD_WR)
+#define G_FW_RD_WR(x) (((x) >> S_FW_RD_WR) & M_FW_RD_WR)
+
+#define S_EN_RESET_WR_DELAY_WL    0
+#define V_EN_RESET_WR_DELAY_WL(x) ((x) << S_EN_RESET_WR_DELAY_WL)
+#define F_EN_RESET_WR_DELAY_WL    V_EN_RESET_WR_DELAY_WL(1U)
+
+#define S_TWR_MPR    2
+#define M_TWR_MPR    0xfU
+#define V_TWR_MPR(x) ((x) << S_TWR_MPR)
+#define G_TWR_MPR(x) (((x) >> S_TWR_MPR) & M_TWR_MPR)
+
+#define A_MC_DDRPHY_WC_ERROR_STATUS0 0x4760c
+
+#define S_WR_CNTL_ERROR    15
+#define V_WR_CNTL_ERROR(x) ((x) << S_WR_CNTL_ERROR)
+#define F_WR_CNTL_ERROR    V_WR_CNTL_ERROR(1U)
+
+#define A_MC_DDRPHY_WC_ERROR_MASK0 0x47610
+
+#define S_WR_CNTL_ERROR_MASK    15
+#define V_WR_CNTL_ERROR_MASK(x) ((x) << S_WR_CNTL_ERROR_MASK)
+#define F_WR_CNTL_ERROR_MASK    V_WR_CNTL_ERROR_MASK(1U)
+
+#define A_MC_DDRPHY_WC_CONFIG3 0x47614
+
+#define S_DDR4_MRS_CMD_DQ_EN    15
+#define V_DDR4_MRS_CMD_DQ_EN(x) ((x) << S_DDR4_MRS_CMD_DQ_EN)
+#define F_DDR4_MRS_CMD_DQ_EN    V_DDR4_MRS_CMD_DQ_EN(1U)
+
+#define S_MRS_CMD_DQ_ON    9
+#define M_MRS_CMD_DQ_ON    0x3fU
+#define V_MRS_CMD_DQ_ON(x) ((x) << S_MRS_CMD_DQ_ON)
+#define G_MRS_CMD_DQ_ON(x) (((x) >> S_MRS_CMD_DQ_ON) & M_MRS_CMD_DQ_ON)
+
+#define S_MRS_CMD_DQ_OFF    3
+#define M_MRS_CMD_DQ_OFF    0x3fU
+#define V_MRS_CMD_DQ_OFF(x) ((x) << S_MRS_CMD_DQ_OFF)
+#define G_MRS_CMD_DQ_OFF(x) (((x) >> S_MRS_CMD_DQ_OFF) & M_MRS_CMD_DQ_OFF)
+
+#define A_MC_DDRPHY_WC_WRCLK_CNTL 0x47618
+
+#define S_WRCLK_CAL_START    15
+#define V_WRCLK_CAL_START(x) ((x) << S_WRCLK_CAL_START)
+#define F_WRCLK_CAL_START    V_WRCLK_CAL_START(1U)
+
+#define S_WRCLK_CAL_DONE    14
+#define V_WRCLK_CAL_DONE(x) ((x) << S_WRCLK_CAL_DONE)
+#define F_WRCLK_CAL_DONE    V_WRCLK_CAL_DONE(1U)
+
+#define A_MC_DDRPHY_APB_CONFIG0 0x47800
+
+#define S_DISABLE_PARITY_CHECKER    15
+#define V_DISABLE_PARITY_CHECKER(x) ((x) << S_DISABLE_PARITY_CHECKER)
+#define F_DISABLE_PARITY_CHECKER    V_DISABLE_PARITY_CHECKER(1U)
+
+#define S_GENERATE_EVEN_PARITY    14
+#define V_GENERATE_EVEN_PARITY(x) ((x) << S_GENERATE_EVEN_PARITY)
+#define F_GENERATE_EVEN_PARITY    V_GENERATE_EVEN_PARITY(1U)
+
+#define S_FORCE_ON_CLK_GATE    13
+#define V_FORCE_ON_CLK_GATE(x) ((x) << S_FORCE_ON_CLK_GATE)
+#define F_FORCE_ON_CLK_GATE    V_FORCE_ON_CLK_GATE(1U)
+
+#define S_DEBUG_BUS_SEL_LO    12
+#define V_DEBUG_BUS_SEL_LO(x) ((x) << S_DEBUG_BUS_SEL_LO)
+#define F_DEBUG_BUS_SEL_LO    V_DEBUG_BUS_SEL_LO(1U)
+
+#define S_DEBUG_BUS_SEL_HI    8
+#define M_DEBUG_BUS_SEL_HI    0xfU
+#define V_DEBUG_BUS_SEL_HI(x) ((x) << S_DEBUG_BUS_SEL_HI)
+#define G_DEBUG_BUS_SEL_HI(x) (((x) >> S_DEBUG_BUS_SEL_HI) & M_DEBUG_BUS_SEL_HI)
+
+#define A_MC_DDRPHY_APB_ERROR_STATUS0 0x47804
+
+#define S_INVALID_ADDRESS    15
+#define V_INVALID_ADDRESS(x) ((x) << S_INVALID_ADDRESS)
+#define F_INVALID_ADDRESS    V_INVALID_ADDRESS(1U)
+
+#define S_WR_PAR_ERR    14
+#define V_WR_PAR_ERR(x) ((x) << S_WR_PAR_ERR)
+#define F_WR_PAR_ERR    V_WR_PAR_ERR(1U)
+
+#define A_MC_DDRPHY_APB_ERROR_MASK0 0x47808
+
+#define S_INVALID_ADDRESS_MASK    15
+#define V_INVALID_ADDRESS_MASK(x) ((x) << S_INVALID_ADDRESS_MASK)
+#define F_INVALID_ADDRESS_MASK    V_INVALID_ADDRESS_MASK(1U)
+
+#define S_WR_PAR_ERR_MASK    14
+#define V_WR_PAR_ERR_MASK(x) ((x) << S_WR_PAR_ERR_MASK)
+#define F_WR_PAR_ERR_MASK    V_WR_PAR_ERR_MASK(1U)
+
+#define A_MC_DDRPHY_APB_DP18_POPULATION 0x4780c
+
+#define S_DP18_0_POPULATED    15
+#define V_DP18_0_POPULATED(x) ((x) << S_DP18_0_POPULATED)
+#define F_DP18_0_POPULATED    V_DP18_0_POPULATED(1U)
+
+#define S_DP18_1_POPULATED    14
+#define V_DP18_1_POPULATED(x) ((x) << S_DP18_1_POPULATED)
+#define F_DP18_1_POPULATED    V_DP18_1_POPULATED(1U)
+
+#define S_DP18_2_POPULATED    13
+#define V_DP18_2_POPULATED(x) ((x) << S_DP18_2_POPULATED)
+#define F_DP18_2_POPULATED    V_DP18_2_POPULATED(1U)
+
+#define S_DP18_3_POPULATED    12
+#define V_DP18_3_POPULATED(x) ((x) << S_DP18_3_POPULATED)
+#define F_DP18_3_POPULATED    V_DP18_3_POPULATED(1U)
+
+#define S_DP18_4_POPULATED    11
+#define V_DP18_4_POPULATED(x) ((x) << S_DP18_4_POPULATED)
+#define F_DP18_4_POPULATED    V_DP18_4_POPULATED(1U)
+
+#define S_DP18_5_POPULATED    10
+#define V_DP18_5_POPULATED(x) ((x) << S_DP18_5_POPULATED)
+#define F_DP18_5_POPULATED    V_DP18_5_POPULATED(1U)
+
+#define S_DP18_6_POPULATED    9
+#define V_DP18_6_POPULATED(x) ((x) << S_DP18_6_POPULATED)
+#define F_DP18_6_POPULATED    V_DP18_6_POPULATED(1U)
+
+#define S_DP18_7_POPULATED    8
+#define V_DP18_7_POPULATED(x) ((x) << S_DP18_7_POPULATED)
+#define F_DP18_7_POPULATED    V_DP18_7_POPULATED(1U)
+
+#define S_DP18_8_POPULATED    7
+#define V_DP18_8_POPULATED(x) ((x) << S_DP18_8_POPULATED)
+#define F_DP18_8_POPULATED    V_DP18_8_POPULATED(1U)
+
+#define S_DP18_9_POPULATED    6
+#define V_DP18_9_POPULATED(x) ((x) << S_DP18_9_POPULATED)
+#define F_DP18_9_POPULATED    V_DP18_9_POPULATED(1U)
+
+#define S_DP18_10_POPULATED    5
+#define V_DP18_10_POPULATED(x) ((x) << S_DP18_10_POPULATED)
+#define F_DP18_10_POPULATED    V_DP18_10_POPULATED(1U)
+
+#define S_DP18_11_POPULATED    4
+#define V_DP18_11_POPULATED(x) ((x) << S_DP18_11_POPULATED)
+#define F_DP18_11_POPULATED    V_DP18_11_POPULATED(1U)
+
+#define S_DP18_12_POPULATED    3
+#define V_DP18_12_POPULATED(x) ((x) << S_DP18_12_POPULATED)
+#define F_DP18_12_POPULATED    V_DP18_12_POPULATED(1U)
+
+#define S_DP18_13_POPULATED    2
+#define V_DP18_13_POPULATED(x) ((x) << S_DP18_13_POPULATED)
+#define F_DP18_13_POPULATED    V_DP18_13_POPULATED(1U)
+
+#define S_DP18_14_POPULATED    1
+#define V_DP18_14_POPULATED(x) ((x) << S_DP18_14_POPULATED)
+#define F_DP18_14_POPULATED    V_DP18_14_POPULATED(1U)
+
+#define A_MC_DDRPHY_APB_ADR_POPULATION 0x47810
+
+#define S_ADR16_0_POPULATED    15
+#define V_ADR16_0_POPULATED(x) ((x) << S_ADR16_0_POPULATED)
+#define F_ADR16_0_POPULATED    V_ADR16_0_POPULATED(1U)
+
+#define S_ADR16_1_POPULATED    14
+#define V_ADR16_1_POPULATED(x) ((x) << S_ADR16_1_POPULATED)
+#define F_ADR16_1_POPULATED    V_ADR16_1_POPULATED(1U)
+
+#define S_ADR16_2_POPULATED    13
+#define V_ADR16_2_POPULATED(x) ((x) << S_ADR16_2_POPULATED)
+#define F_ADR16_2_POPULATED    V_ADR16_2_POPULATED(1U)
+
+#define S_ADR16_3_POPULATED    12
+#define V_ADR16_3_POPULATED(x) ((x) << S_ADR16_3_POPULATED)
+#define F_ADR16_3_POPULATED    V_ADR16_3_POPULATED(1U)
+
+#define S_ADR12_0_POPULATED    7
+#define V_ADR12_0_POPULATED(x) ((x) << S_ADR12_0_POPULATED)
+#define F_ADR12_0_POPULATED    V_ADR12_0_POPULATED(1U)
+
+#define S_ADR12_1_POPULATED    6
+#define V_ADR12_1_POPULATED(x) ((x) << S_ADR12_1_POPULATED)
+#define F_ADR12_1_POPULATED    V_ADR12_1_POPULATED(1U)
+
+#define S_ADR12_2_POPULATED    5
+#define V_ADR12_2_POPULATED(x) ((x) << S_ADR12_2_POPULATED)
+#define F_ADR12_2_POPULATED    V_ADR12_2_POPULATED(1U)
+
+#define S_ADR12_3_POPULATED    4
+#define V_ADR12_3_POPULATED(x) ((x) << S_ADR12_3_POPULATED)
+#define F_ADR12_3_POPULATED    V_ADR12_3_POPULATED(1U)
+
+#define A_MC_DDRPHY_APB_ATEST_MUX_SEL 0x47814
+
+#define S_ATEST_CNTL    10
+#define M_ATEST_CNTL    0x3fU
+#define V_ATEST_CNTL(x) ((x) << S_ATEST_CNTL)
+#define G_ATEST_CNTL(x) (((x) >> S_ATEST_CNTL) & M_ATEST_CNTL)
+
+#define A_MC_DDRPHY_APB_MTCTL_REG0 0x47820
+
+#define S_MT_DATA_MUX4_1MODE    15
+#define V_MT_DATA_MUX4_1MODE(x) ((x) << S_MT_DATA_MUX4_1MODE)
+#define F_MT_DATA_MUX4_1MODE    V_MT_DATA_MUX4_1MODE(1U)
+
+#define S_MT_PLL_RESET    14
+#define V_MT_PLL_RESET(x) ((x) << S_MT_PLL_RESET)
+#define F_MT_PLL_RESET    V_MT_PLL_RESET(1U)
+
+#define S_MT_SYSCLK_RESET    13
+#define V_MT_SYSCLK_RESET(x) ((x) << S_MT_SYSCLK_RESET)
+#define F_MT_SYSCLK_RESET    V_MT_SYSCLK_RESET(1U)
+
+#define S_MT_GLOBAL_PHY_OFFSET    9
+#define M_MT_GLOBAL_PHY_OFFSET    0xfU
+#define V_MT_GLOBAL_PHY_OFFSET(x) ((x) << S_MT_GLOBAL_PHY_OFFSET)
+#define G_MT_GLOBAL_PHY_OFFSET(x) (((x) >> S_MT_GLOBAL_PHY_OFFSET) & M_MT_GLOBAL_PHY_OFFSET)
+
+#define S_MT_DQ_SEL_QUAD    7
+#define M_MT_DQ_SEL_QUAD    0x3U
+#define V_MT_DQ_SEL_QUAD(x) ((x) << S_MT_DQ_SEL_QUAD)
+#define G_MT_DQ_SEL_QUAD(x) (((x) >> S_MT_DQ_SEL_QUAD) & M_MT_DQ_SEL_QUAD)
+
+#define S_MT_PERFORM_RDCLK_ALIGN    6
+#define V_MT_PERFORM_RDCLK_ALIGN(x) ((x) << S_MT_PERFORM_RDCLK_ALIGN)
+#define F_MT_PERFORM_RDCLK_ALIGN    V_MT_PERFORM_RDCLK_ALIGN(1U)
+
+#define S_MT_ALIGN_ON_EVEN_CYCLES    5
+#define V_MT_ALIGN_ON_EVEN_CYCLES(x) ((x) << S_MT_ALIGN_ON_EVEN_CYCLES)
+#define F_MT_ALIGN_ON_EVEN_CYCLES    V_MT_ALIGN_ON_EVEN_CYCLES(1U)
+
+#define S_MT_WRCLK_CAL_START    4
+#define V_MT_WRCLK_CAL_START(x) ((x) << S_MT_WRCLK_CAL_START)
+#define F_MT_WRCLK_CAL_START    V_MT_WRCLK_CAL_START(1U)
+
+#define A_MC_DDRPHY_APB_MTCTL_REG1 0x47824
+
+#define S_MT_WPRD_ENABLE    15
+#define V_MT_WPRD_ENABLE(x) ((x) << S_MT_WPRD_ENABLE)
+#define F_MT_WPRD_ENABLE    V_MT_WPRD_ENABLE(1U)
+
+#define S_MT_PVTP    10
+#define M_MT_PVTP    0x1fU
+#define V_MT_PVTP(x) ((x) << S_MT_PVTP)
+#define G_MT_PVTP(x) (((x) >> S_MT_PVTP) & M_MT_PVTP)
+
+#define S_MT_PVTN    5
+#define M_MT_PVTN    0x1fU
+#define V_MT_PVTN(x) ((x) << S_MT_PVTN)
+#define G_MT_PVTN(x) (((x) >> S_MT_PVTN) & M_MT_PVTN)
+
+#define A_MC_DDRPHY_APB_MTSTAT_REG0 0x47828
+#define A_MC_DDRPHY_APB_MTSTAT_REG1 0x4782c
+
+#define S_MT_ADR32_PLL_LOCK_SUM    1
+#define V_MT_ADR32_PLL_LOCK_SUM(x) ((x) << S_MT_ADR32_PLL_LOCK_SUM)
+#define F_MT_ADR32_PLL_LOCK_SUM    V_MT_ADR32_PLL_LOCK_SUM(1U)
+
+#define S_MT_DP18_PLL_LOCK_SUM    0
+#define V_MT_DP18_PLL_LOCK_SUM(x) ((x) << S_MT_DP18_PLL_LOCK_SUM)
+#define F_MT_DP18_PLL_LOCK_SUM    V_MT_DP18_PLL_LOCK_SUM(1U)
+
+/* registers for module MC_1 */
+#define MC_1_BASE_ADDR 0x48000
+
+/* registers for module EDC_T50 */
+#define EDC_T50_BASE_ADDR 0x50000
+
+#define A_EDC_H_REF 0x50000
+
+#define S_EDC_SLEEPSTATUS    31
+#define V_EDC_SLEEPSTATUS(x) ((x) << S_EDC_SLEEPSTATUS)
+#define F_EDC_SLEEPSTATUS    V_EDC_SLEEPSTATUS(1U)
+
+#define S_EDC_SLEEPREQ    30
+#define V_EDC_SLEEPREQ(x) ((x) << S_EDC_SLEEPREQ)
+#define F_EDC_SLEEPREQ    V_EDC_SLEEPREQ(1U)
+
+#define S_PING_PONG    29
+#define V_PING_PONG(x) ((x) << S_PING_PONG)
+#define F_PING_PONG    V_PING_PONG(1U)
+
+#define A_EDC_H_BIST_CMD 0x50004
+#define A_EDC_H_BIST_CMD_ADDR 0x50008
+#define A_EDC_H_BIST_CMD_LEN 0x5000c
+#define A_EDC_H_BIST_DATA_PATTERN 0x50010
+#define A_EDC_H_BIST_USER_WDATA0 0x50014
+#define A_EDC_H_BIST_USER_WDATA1 0x50018
+#define A_EDC_H_BIST_USER_WDATA2 0x5001c
+#define A_EDC_H_BIST_NUM_ERR 0x50020
+#define A_EDC_H_BIST_ERR_FIRST_ADDR 0x50024
+#define A_EDC_H_BIST_STATUS_RDATA 0x50028
+#define A_EDC_H_PAR_ENABLE 0x50070
+
+#define S_PERR_PAR_ENABLE    0
+#define V_PERR_PAR_ENABLE(x) ((x) << S_PERR_PAR_ENABLE)
+#define F_PERR_PAR_ENABLE    V_PERR_PAR_ENABLE(1U)
+
+#define A_EDC_H_INT_ENABLE 0x50074
+#define A_EDC_H_INT_CAUSE 0x50078
+
+#define S_ECC_UE_INT0_CAUSE    5
+#define V_ECC_UE_INT0_CAUSE(x) ((x) << S_ECC_UE_INT0_CAUSE)
+#define F_ECC_UE_INT0_CAUSE    V_ECC_UE_INT0_CAUSE(1U)
+
+#define S_ECC_CE_INT0_CAUSE    4
+#define V_ECC_CE_INT0_CAUSE(x) ((x) << S_ECC_CE_INT0_CAUSE)
+#define F_ECC_CE_INT0_CAUSE    V_ECC_CE_INT0_CAUSE(1U)
+
+#define S_PERR_INT0_CAUSE    3
+#define V_PERR_INT0_CAUSE(x) ((x) << S_PERR_INT0_CAUSE)
+#define F_PERR_INT0_CAUSE    V_PERR_INT0_CAUSE(1U)
+
+#define A_EDC_H_ECC_STATUS 0x5007c
+#define A_EDC_H_ECC_ERR_SEL 0x50080
+
+#define S_CFG    0
+#define M_CFG    0x3U
+#define V_CFG(x) ((x) << S_CFG)
+#define G_CFG(x) (((x) >> S_CFG) & M_CFG)
+
+#define A_EDC_H_ECC_ERR_ADDR 0x50084
+
+#define S_ECC_ADDR    0
+#define M_ECC_ADDR    0x7fffffU
+#define V_ECC_ADDR(x) ((x) << S_ECC_ADDR)
+#define G_ECC_ADDR(x) (((x) >> S_ECC_ADDR) & M_ECC_ADDR)
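+
+/*
+ * Illustrative sketch (editorial): after an EDC ECC interrupt, the failing
+ * address could be recovered with the extract macro (usage assumed from
+ * the field name):
+ *
+ *	u_int addr = G_ECC_ADDR(t4_read_reg(sc, A_EDC_H_ECC_ERR_ADDR));
+ */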
+
+#define A_EDC_H_ECC_ERR_DATA_RDATA 0x50090
+#define A_EDC_H_BIST_CRC_SEED 0x50400
+
+/* registers for module EDC_T51 */
+#define EDC_T51_BASE_ADDR 0x50800
+
+/* registers for module HMA_T5 */
+#define HMA_T5_BASE_ADDR 0x51000
+
+#define A_HMA_TABLE_ACCESS 0x51000
+
+#define S_TRIG    31
+#define V_TRIG(x) ((x) << S_TRIG)
+#define F_TRIG    V_TRIG(1U)
+
+#define S_RW    30
+#define V_RW(x) ((x) << S_RW)
+#define F_RW    V_RW(1U)
+
+#define S_L_SEL    0
+#define M_L_SEL    0xfU
+#define V_L_SEL(x) ((x) << S_L_SEL)
+#define G_L_SEL(x) (((x) >> S_L_SEL) & M_L_SEL)
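+
+/*
+ * Illustrative sketch (editorial): the HMA table appears to be accessed
+ * indirectly -- compose a command in A_HMA_TABLE_ACCESS (F_TRIG to start
+ * the access, F_RW for direction, V_L_SEL() to pick a table line), then
+ * read back the A_HMA_TABLE_LINE* registers.  The polarity of F_RW is an
+ * assumption here:
+ *
+ *	t4_write_reg(sc, A_HMA_TABLE_ACCESS, F_TRIG | V_L_SEL(idx));
+ *	line0 = t4_read_reg(sc, A_HMA_TABLE_LINE0);
+ */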
+
+#define A_HMA_TABLE_LINE0 0x51004
+
+#define S_CLIENT_EN    0
+#define M_CLIENT_EN    0x1fffU
+#define V_CLIENT_EN(x) ((x) << S_CLIENT_EN)
+#define G_CLIENT_EN(x) (((x) >> S_CLIENT_EN) & M_CLIENT_EN)
+
+#define A_HMA_TABLE_LINE1 0x51008
+#define A_HMA_TABLE_LINE2 0x5100c
+#define A_HMA_TABLE_LINE3 0x51010
+#define A_HMA_TABLE_LINE4 0x51014
+#define A_HMA_TABLE_LINE5 0x51018
+
+#define S_FID    16
+#define M_FID    0x7ffU
+#define V_FID(x) ((x) << S_FID)
+#define G_FID(x) (((x) >> S_FID) & M_FID)
+
+#define S_NOS    15
+#define V_NOS(x) ((x) << S_NOS)
+#define F_NOS    V_NOS(1U)
+
+#define S_RO    14
+#define V_RO(x) ((x) << S_RO)
+#define F_RO    V_RO(1U)
+
+#define A_HMA_COOKIE 0x5101c
+
+#define S_C_REQ    31
+#define V_C_REQ(x) ((x) << S_C_REQ)
+#define F_C_REQ    V_C_REQ(1U)
+
+#define S_C_FID    18
+#define M_C_FID    0x7ffU
+#define V_C_FID(x) ((x) << S_C_FID)
+#define G_C_FID(x) (((x) >> S_C_FID) & M_C_FID)
+
+#define S_C_VAL    8
+#define M_C_VAL    0x3ffU
+#define V_C_VAL(x) ((x) << S_C_VAL)
+#define G_C_VAL(x) (((x) >> S_C_VAL) & M_C_VAL)
+
+#define S_C_SEL    0
+#define M_C_SEL    0xfU
+#define V_C_SEL(x) ((x) << S_C_SEL)
+#define G_C_SEL(x) (((x) >> S_C_SEL) & M_C_SEL)
+
+#define A_HMA_PAR_ENABLE 0x51300
+#define A_HMA_INT_ENABLE 0x51304
+#define A_HMA_INT_CAUSE 0x51308
+
+/* registers for module EDC_T60 */
+#define EDC_T60_BASE_ADDR 0x50000
+
+#define S_QDR_CLKPHASE    24
+#define M_QDR_CLKPHASE    0x7U
+#define V_QDR_CLKPHASE(x) ((x) << S_QDR_CLKPHASE)
+#define G_QDR_CLKPHASE(x) (((x) >> S_QDR_CLKPHASE) & M_QDR_CLKPHASE)
+
+#define S_MAXOPSPERTRC    21
+#define M_MAXOPSPERTRC    0x7U
+#define V_MAXOPSPERTRC(x) ((x) << S_MAXOPSPERTRC)
+#define G_MAXOPSPERTRC(x) (((x) >> S_MAXOPSPERTRC) & M_MAXOPSPERTRC)
+
+#define S_NUMPIPESTAGES    19
+#define M_NUMPIPESTAGES    0x3U
+#define V_NUMPIPESTAGES(x) ((x) << S_NUMPIPESTAGES)
+#define G_NUMPIPESTAGES(x) (((x) >> S_NUMPIPESTAGES) & M_NUMPIPESTAGES)
+
+#define A_EDC_H_DBG_MA_CMD_INTF 0x50300
+
+#define S_MCMDADDR    12
+#define M_MCMDADDR    0xfffffU
+#define V_MCMDADDR(x) ((x) << S_MCMDADDR)
+#define G_MCMDADDR(x) (((x) >> S_MCMDADDR) & M_MCMDADDR)
+
+#define S_MCMDLEN    5
+#define M_MCMDLEN    0x7fU
+#define V_MCMDLEN(x) ((x) << S_MCMDLEN)
+#define G_MCMDLEN(x) (((x) >> S_MCMDLEN) & M_MCMDLEN)
+
+#define S_MCMDNRE    4
+#define V_MCMDNRE(x) ((x) << S_MCMDNRE)
+#define F_MCMDNRE    V_MCMDNRE(1U)
+
+#define S_MCMDNRB    3
+#define V_MCMDNRB(x) ((x) << S_MCMDNRB)
+#define F_MCMDNRB    V_MCMDNRB(1U)
+
+#define S_MCMDWR    2
+#define V_MCMDWR(x) ((x) << S_MCMDWR)
+#define F_MCMDWR    V_MCMDWR(1U)
+
+#define S_MCMDRDY    1
+#define V_MCMDRDY(x) ((x) << S_MCMDRDY)
+#define F_MCMDRDY    V_MCMDRDY(1U)
+
+#define S_MCMDVLD    0
+#define V_MCMDVLD(x) ((x) << S_MCMDVLD)
+#define F_MCMDVLD    V_MCMDVLD(1U)
+
+#define A_EDC_H_DBG_MA_WDATA_INTF 0x50304
+
+#define S_MWDATAVLD    31
+#define V_MWDATAVLD(x) ((x) << S_MWDATAVLD)
+#define F_MWDATAVLD    V_MWDATAVLD(1U)
+
+#define S_MWDATARDY    30
+#define V_MWDATARDY(x) ((x) << S_MWDATARDY)
+#define F_MWDATARDY    V_MWDATARDY(1U)
+
+#define S_MWDATA    0
+#define M_MWDATA    0x3fffffffU
+#define V_MWDATA(x) ((x) << S_MWDATA)
+#define G_MWDATA(x) (((x) >> S_MWDATA) & M_MWDATA)
+
+#define A_EDC_H_DBG_MA_RDATA_INTF 0x50308
+
+#define S_MRSPVLD    31
+#define V_MRSPVLD(x) ((x) << S_MRSPVLD)
+#define F_MRSPVLD    V_MRSPVLD(1U)
+
+#define S_MRSPRDY    30
+#define V_MRSPRDY(x) ((x) << S_MRSPRDY)
+#define F_MRSPRDY    V_MRSPRDY(1U)
+
+#define S_MRSPDATA    0
+#define M_MRSPDATA    0x3fffffffU
+#define V_MRSPDATA(x) ((x) << S_MRSPDATA)
+#define G_MRSPDATA(x) (((x) >> S_MRSPDATA) & M_MRSPDATA)
+
+#define A_EDC_H_DBG_BIST_CMD_INTF 0x5030c
+
+#define S_BCMDADDR    9
+#define M_BCMDADDR    0x7fffffU
+#define V_BCMDADDR(x) ((x) << S_BCMDADDR)
+#define G_BCMDADDR(x) (((x) >> S_BCMDADDR) & M_BCMDADDR)
+
+#define S_BCMDLEN    3
+#define M_BCMDLEN    0x3fU
+#define V_BCMDLEN(x) ((x) << S_BCMDLEN)
+#define G_BCMDLEN(x) (((x) >> S_BCMDLEN) & M_BCMDLEN)
+
+#define S_BCMDWR    2
+#define V_BCMDWR(x) ((x) << S_BCMDWR)
+#define F_BCMDWR    V_BCMDWR(1U)
+
+#define S_BCMDRDY    1
+#define V_BCMDRDY(x) ((x) << S_BCMDRDY)
+#define F_BCMDRDY    V_BCMDRDY(1U)
+
+#define S_BCMDVLD    0
+#define V_BCMDVLD(x) ((x) << S_BCMDVLD)
+#define F_BCMDVLD    V_BCMDVLD(1U)
+
+#define A_EDC_H_DBG_BIST_WDATA_INTF 0x50310
+
+#define S_BWDATAVLD    31
+#define V_BWDATAVLD(x) ((x) << S_BWDATAVLD)
+#define F_BWDATAVLD    V_BWDATAVLD(1U)
+
+#define S_BWDATARDY    30
+#define V_BWDATARDY(x) ((x) << S_BWDATARDY)
+#define F_BWDATARDY    V_BWDATARDY(1U)
+
+#define S_BWDATA    0
+#define M_BWDATA    0x3fffffffU
+#define V_BWDATA(x) ((x) << S_BWDATA)
+#define G_BWDATA(x) (((x) >> S_BWDATA) & M_BWDATA)
+
+#define A_EDC_H_DBG_BIST_RDATA_INTF 0x50314
+
+#define S_BRSPVLD    31
+#define V_BRSPVLD(x) ((x) << S_BRSPVLD)
+#define F_BRSPVLD    V_BRSPVLD(1U)
+
+#define S_BRSPRDY    30
+#define V_BRSPRDY(x) ((x) << S_BRSPRDY)
+#define F_BRSPRDY    V_BRSPRDY(1U)
+
+#define S_BRSPDATA    0
+#define M_BRSPDATA    0x3fffffffU
+#define V_BRSPDATA(x) ((x) << S_BRSPDATA)
+#define G_BRSPDATA(x) (((x) >> S_BRSPDATA) & M_BRSPDATA)
+
+#define A_EDC_H_DBG_EDRAM_CMD_INTF 0x50318
+
+#define S_EDRAMADDR    16
+#define M_EDRAMADDR    0xffffU
+#define V_EDRAMADDR(x) ((x) << S_EDRAMADDR)
+#define G_EDRAMADDR(x) (((x) >> S_EDRAMADDR) & M_EDRAMADDR)
+
+#define S_EDRAMDWSN    8
+#define M_EDRAMDWSN    0xffU
+#define V_EDRAMDWSN(x) ((x) << S_EDRAMDWSN)
+#define G_EDRAMDWSN(x) (((x) >> S_EDRAMDWSN) & M_EDRAMDWSN)
+
+#define S_EDRAMCRA    5
+#define M_EDRAMCRA    0x7U
+#define V_EDRAMCRA(x) ((x) << S_EDRAMCRA)
+#define G_EDRAMCRA(x) (((x) >> S_EDRAMCRA) & M_EDRAMCRA)
+
+#define S_EDRAMREFENLO    4
+#define V_EDRAMREFENLO(x) ((x) << S_EDRAMREFENLO)
+#define F_EDRAMREFENLO    V_EDRAMREFENLO(1U)
+
+#define S_EDRAM1WRENLO    3
+#define V_EDRAM1WRENLO(x) ((x) << S_EDRAM1WRENLO)
+#define F_EDRAM1WRENLO    V_EDRAM1WRENLO(1U)
+
+#define S_EDRAM1RDENLO    2
+#define V_EDRAM1RDENLO(x) ((x) << S_EDRAM1RDENLO)
+#define F_EDRAM1RDENLO    V_EDRAM1RDENLO(1U)
+
+#define S_EDRAM0WRENLO    1
+#define V_EDRAM0WRENLO(x) ((x) << S_EDRAM0WRENLO)
+#define F_EDRAM0WRENLO    V_EDRAM0WRENLO(1U)
+
+#define S_EDRAM0RDENLO    0
+#define V_EDRAM0RDENLO(x) ((x) << S_EDRAM0RDENLO)
+#define F_EDRAM0RDENLO    V_EDRAM0RDENLO(1U)
+
+#define A_EDC_H_DBG_EDRAM_WDATA_INTF 0x5031c
+
+#define S_EDRAMWDATA    9
+#define M_EDRAMWDATA    0x7fffffU
+#define V_EDRAMWDATA(x) ((x) << S_EDRAMWDATA)
+#define G_EDRAMWDATA(x) (((x) >> S_EDRAMWDATA) & M_EDRAMWDATA)
+
+#define S_EDRAMWBYTEEN    0
+#define M_EDRAMWBYTEEN    0x1ffU
+#define V_EDRAMWBYTEEN(x) ((x) << S_EDRAMWBYTEEN)
+#define G_EDRAMWBYTEEN(x) (((x) >> S_EDRAMWBYTEEN) & M_EDRAMWBYTEEN)
+
+#define A_EDC_H_DBG_EDRAM0_RDATA_INTF 0x50320
+#define A_EDC_H_DBG_EDRAM1_RDATA_INTF 0x50324
+#define A_EDC_H_DBG_MA_WR_REQ_CNT 0x50328
+#define A_EDC_H_DBG_MA_WR_EXP_DAT_CYC_CNT 0x5032c
+#define A_EDC_H_DBG_MA_WR_DAT_CYC_CNT 0x50330
+#define A_EDC_H_DBG_MA_RD_REQ_CNT 0x50334
+#define A_EDC_H_DBG_MA_RD_EXP_DAT_CYC_CNT 0x50338
+#define A_EDC_H_DBG_MA_RD_DAT_CYC_CNT 0x5033c
+#define A_EDC_H_DBG_BIST_WR_REQ_CNT 0x50340
+#define A_EDC_H_DBG_BIST_WR_EXP_DAT_CYC_CNT 0x50344
+#define A_EDC_H_DBG_BIST_WR_DAT_CYC_CNT 0x50348
+#define A_EDC_H_DBG_BIST_RD_REQ_CNT 0x5034c
+#define A_EDC_H_DBG_BIST_RD_EXP_DAT_CYC_CNT 0x50350
+#define A_EDC_H_DBG_BIST_RD_DAT_CYC_CNT 0x50354
+#define A_EDC_H_DBG_EDRAM0_WR_REQ_CNT 0x50358
+#define A_EDC_H_DBG_EDRAM0_RD_REQ_CNT 0x5035c
+#define A_EDC_H_DBG_EDRAM0_RMW_CNT 0x50360
+#define A_EDC_H_DBG_EDRAM1_WR_REQ_CNT 0x50364
+#define A_EDC_H_DBG_EDRAM1_RD_REQ_CNT 0x50368
+#define A_EDC_H_DBG_EDRAM1_RMW_CNT 0x5036c
+#define A_EDC_H_DBG_EDRAM_REF_BURST_CNT 0x50370
+#define A_EDC_H_DBG_FIFO_STATUS 0x50374
+
+#define S_RDTAG_NOTFULL    17
+#define V_RDTAG_NOTFULL(x) ((x) << S_RDTAG_NOTFULL)
+#define F_RDTAG_NOTFULL    V_RDTAG_NOTFULL(1U)
+
+#define S_RDTAG_NOTEMPTY    16
+#define V_RDTAG_NOTEMPTY(x) ((x) << S_RDTAG_NOTEMPTY)
+#define F_RDTAG_NOTEMPTY    V_RDTAG_NOTEMPTY(1U)
+
+#define S_INP_CMDQ_NOTFULL_ARB    15
+#define V_INP_CMDQ_NOTFULL_ARB(x) ((x) << S_INP_CMDQ_NOTFULL_ARB)
+#define F_INP_CMDQ_NOTFULL_ARB    V_INP_CMDQ_NOTFULL_ARB(1U)
+
+#define S_INP_CMDQ_NOTEMPTY    14
+#define V_INP_CMDQ_NOTEMPTY(x) ((x) << S_INP_CMDQ_NOTEMPTY)
+#define F_INP_CMDQ_NOTEMPTY    V_INP_CMDQ_NOTEMPTY(1U)
+
+#define S_INP_WRDQ_WRRDY    13
+#define V_INP_WRDQ_WRRDY(x) ((x) << S_INP_WRDQ_WRRDY)
+#define F_INP_WRDQ_WRRDY    V_INP_WRDQ_WRRDY(1U)
+
+#define S_INP_WRDQ_NOTEMPTY    12
+#define V_INP_WRDQ_NOTEMPTY(x) ((x) << S_INP_WRDQ_NOTEMPTY)
+#define F_INP_WRDQ_NOTEMPTY    V_INP_WRDQ_NOTEMPTY(1U)
+
+#define S_INP_BEQ_WRRDY_OPEN    11
+#define V_INP_BEQ_WRRDY_OPEN(x) ((x) << S_INP_BEQ_WRRDY_OPEN)
+#define F_INP_BEQ_WRRDY_OPEN    V_INP_BEQ_WRRDY_OPEN(1U)
+
+#define S_INP_BEQ_NOTEMPTY    10
+#define V_INP_BEQ_NOTEMPTY(x) ((x) << S_INP_BEQ_NOTEMPTY)
+#define F_INP_BEQ_NOTEMPTY    V_INP_BEQ_NOTEMPTY(1U)
+
+#define S_RDDQ_NOTFULL_OPEN    9
+#define V_RDDQ_NOTFULL_OPEN(x) ((x) << S_RDDQ_NOTFULL_OPEN)
+#define F_RDDQ_NOTFULL_OPEN    V_RDDQ_NOTFULL_OPEN(1U)
+
+#define S_RDDQ_RDCNT    4
+#define M_RDDQ_RDCNT    0x1fU
+#define V_RDDQ_RDCNT(x) ((x) << S_RDDQ_RDCNT)
+#define G_RDDQ_RDCNT(x) (((x) >> S_RDDQ_RDCNT) & M_RDDQ_RDCNT)
+
+#define S_RDSIDEQ_NOTFULL    3
+#define V_RDSIDEQ_NOTFULL(x) ((x) << S_RDSIDEQ_NOTFULL)
+#define F_RDSIDEQ_NOTFULL    V_RDSIDEQ_NOTFULL(1U)
+
+#define S_RDSIDEQ_NOTEMPTY    2
+#define V_RDSIDEQ_NOTEMPTY(x) ((x) << S_RDSIDEQ_NOTEMPTY)
+#define F_RDSIDEQ_NOTEMPTY    V_RDSIDEQ_NOTEMPTY(1U)
+
+#define S_STG_CMDQ_NOTEMPTY    1
+#define V_STG_CMDQ_NOTEMPTY(x) ((x) << S_STG_CMDQ_NOTEMPTY)
+#define F_STG_CMDQ_NOTEMPTY    V_STG_CMDQ_NOTEMPTY(1U)
+
+#define S_STG_WRDQ_NOTEMPTY    0
+#define V_STG_WRDQ_NOTEMPTY(x) ((x) << S_STG_WRDQ_NOTEMPTY)
+#define F_STG_WRDQ_NOTEMPTY    V_STG_WRDQ_NOTEMPTY(1U)
+
+#define A_EDC_H_DBG_FSM_STATE 0x50378
+
+#define S_CMDSPLITFSM    3
+#define V_CMDSPLITFSM(x) ((x) << S_CMDSPLITFSM)
+#define F_CMDSPLITFSM    V_CMDSPLITFSM(1U)
+
+#define S_CMDFSM    0
+#define M_CMDFSM    0x7U
+#define V_CMDFSM(x) ((x) << S_CMDFSM)
+#define G_CMDFSM(x) (((x) >> S_CMDFSM) & M_CMDFSM)
+
+#define A_EDC_H_DBG_STALL_CYCLES 0x5037c
+
+#define S_STALL_RMW    19
+#define V_STALL_RMW(x) ((x) << S_STALL_RMW)
+#define F_STALL_RMW    V_STALL_RMW(1U)
+
+#define S_STALL_EDC_CMD    18
+#define V_STALL_EDC_CMD(x) ((x) << S_STALL_EDC_CMD)
+#define F_STALL_EDC_CMD    V_STALL_EDC_CMD(1U)
+
+#define S_DEAD_CYCLE0    17
+#define V_DEAD_CYCLE0(x) ((x) << S_DEAD_CYCLE0)
+#define F_DEAD_CYCLE0    V_DEAD_CYCLE0(1U)
+
+#define S_DEAD_CYCLE1    16
+#define V_DEAD_CYCLE1(x) ((x) << S_DEAD_CYCLE1)
+#define F_DEAD_CYCLE1    V_DEAD_CYCLE1(1U)
+
+#define S_DEAD_CYCLE0_BBI    15
+#define V_DEAD_CYCLE0_BBI(x) ((x) << S_DEAD_CYCLE0_BBI)
+#define F_DEAD_CYCLE0_BBI    V_DEAD_CYCLE0_BBI(1U)
+
+#define S_DEAD_CYCLE1_BBI    14
+#define V_DEAD_CYCLE1_BBI(x) ((x) << S_DEAD_CYCLE1_BBI)
+#define F_DEAD_CYCLE1_BBI    V_DEAD_CYCLE1_BBI(1U)
+
+#define S_DEAD_CYCLE0_MAX_OP    13
+#define V_DEAD_CYCLE0_MAX_OP(x) ((x) << S_DEAD_CYCLE0_MAX_OP)
+#define F_DEAD_CYCLE0_MAX_OP    V_DEAD_CYCLE0_MAX_OP(1U)
+
+#define S_DEAD_CYCLE1_MAX_OP    12
+#define V_DEAD_CYCLE1_MAX_OP(x) ((x) << S_DEAD_CYCLE1_MAX_OP)
+#define F_DEAD_CYCLE1_MAX_OP    V_DEAD_CYCLE1_MAX_OP(1U)
+
+#define S_DEAD_CYCLE0_PRE_REF    11
+#define V_DEAD_CYCLE0_PRE_REF(x) ((x) << S_DEAD_CYCLE0_PRE_REF)
+#define F_DEAD_CYCLE0_PRE_REF    V_DEAD_CYCLE0_PRE_REF(1U)
+
+#define S_DEAD_CYCLE1_PRE_REF    10
+#define V_DEAD_CYCLE1_PRE_REF(x) ((x) << S_DEAD_CYCLE1_PRE_REF)
+#define F_DEAD_CYCLE1_PRE_REF    V_DEAD_CYCLE1_PRE_REF(1U)
+
+#define S_DEAD_CYCLE0_POST_REF    9
+#define V_DEAD_CYCLE0_POST_REF(x) ((x) << S_DEAD_CYCLE0_POST_REF)
+#define F_DEAD_CYCLE0_POST_REF    V_DEAD_CYCLE0_POST_REF(1U)
+
+#define S_DEAD_CYCLE1_POST_REF    8
+#define V_DEAD_CYCLE1_POST_REF(x) ((x) << S_DEAD_CYCLE1_POST_REF)
+#define F_DEAD_CYCLE1_POST_REF    V_DEAD_CYCLE1_POST_REF(1U)
+
+#define S_DEAD_CYCLE0_RMW    7
+#define V_DEAD_CYCLE0_RMW(x) ((x) << S_DEAD_CYCLE0_RMW)
+#define F_DEAD_CYCLE0_RMW    V_DEAD_CYCLE0_RMW(1U)
+
+#define S_DEAD_CYCLE1_RMW    6
+#define V_DEAD_CYCLE1_RMW(x) ((x) << S_DEAD_CYCLE1_RMW)
+#define F_DEAD_CYCLE1_RMW    V_DEAD_CYCLE1_RMW(1U)
+
+#define S_DEAD_CYCLE0_BBI_RMW    5
+#define V_DEAD_CYCLE0_BBI_RMW(x) ((x) << S_DEAD_CYCLE0_BBI_RMW)
+#define F_DEAD_CYCLE0_BBI_RMW    V_DEAD_CYCLE0_BBI_RMW(1U)
+
+#define S_DEAD_CYCLE1_BBI_RMW    4
+#define V_DEAD_CYCLE1_BBI_RMW(x) ((x) << S_DEAD_CYCLE1_BBI_RMW)
+#define F_DEAD_CYCLE1_BBI_RMW    V_DEAD_CYCLE1_BBI_RMW(1U)
+
+#define S_DEAD_CYCLE0_PRE_REF_RMW    3
+#define V_DEAD_CYCLE0_PRE_REF_RMW(x) ((x) << S_DEAD_CYCLE0_PRE_REF_RMW)
+#define F_DEAD_CYCLE0_PRE_REF_RMW    V_DEAD_CYCLE0_PRE_REF_RMW(1U)
+
+#define S_DEAD_CYCLE1_PRE_REF_RMW    2
+#define V_DEAD_CYCLE1_PRE_REF_RMW(x) ((x) << S_DEAD_CYCLE1_PRE_REF_RMW)
+#define F_DEAD_CYCLE1_PRE_REF_RMW    V_DEAD_CYCLE1_PRE_REF_RMW(1U)
+
+#define S_DEAD_CYCLE0_POST_REF_RMW    1
+#define V_DEAD_CYCLE0_POST_REF_RMW(x) ((x) << S_DEAD_CYCLE0_POST_REF_RMW)
+#define F_DEAD_CYCLE0_POST_REF_RMW    V_DEAD_CYCLE0_POST_REF_RMW(1U)
+
+#define S_DEAD_CYCLE1_POST_REF_RMW    0
+#define V_DEAD_CYCLE1_POST_REF_RMW(x) ((x) << S_DEAD_CYCLE1_POST_REF_RMW)
+#define F_DEAD_CYCLE1_POST_REF_RMW    V_DEAD_CYCLE1_POST_REF_RMW(1U)
+
+#define A_EDC_H_DBG_CMD_QUEUE 0x50380
+
+#define S_ECMDNRE    31
+#define V_ECMDNRE(x) ((x) << S_ECMDNRE)
+#define F_ECMDNRE    V_ECMDNRE(1U)
+
+#define S_ECMDNRB    30
+#define V_ECMDNRB(x) ((x) << S_ECMDNRB)
+#define F_ECMDNRB    V_ECMDNRB(1U)
+
+#define S_ECMDWR    29
+#define V_ECMDWR(x) ((x) << S_ECMDWR)
+#define F_ECMDWR    V_ECMDWR(1U)
+
+#define S_ECMDLEN    22
+#define M_ECMDLEN    0x7fU
+#define V_ECMDLEN(x) ((x) << S_ECMDLEN)
+#define G_ECMDLEN(x) (((x) >> S_ECMDLEN) & M_ECMDLEN)
+
+#define S_ECMDADDR    0
+#define M_ECMDADDR    0x3fffffU
+#define V_ECMDADDR(x) ((x) << S_ECMDADDR)
+#define G_ECMDADDR(x) (((x) >> S_ECMDADDR) & M_ECMDADDR)
+
+#define A_EDC_H_DBG_REFRESH 0x50384
+
+#define S_REFDONE    12
+#define V_REFDONE(x) ((x) << S_REFDONE)
+#define F_REFDONE    V_REFDONE(1U)
+
+#define S_REFCNTEXPR    11
+#define V_REFCNTEXPR(x) ((x) << S_REFCNTEXPR)
+#define F_REFCNTEXPR    V_REFCNTEXPR(1U)
+
+#define S_REFPTR    8
+#define M_REFPTR    0x7U
+#define V_REFPTR(x) ((x) << S_REFPTR)
+#define G_REFPTR(x) (((x) >> S_REFPTR) & M_REFPTR)
+
+#define S_REFCNT    0
+#define M_REFCNT    0xffU
+#define V_REFCNT(x) ((x) << S_REFCNT)
+#define G_REFCNT(x) (((x) >> S_REFCNT) & M_REFCNT)
+
+/* registers for module EDC_T61 */
+#define EDC_T61_BASE_ADDR 0x50800
+
+/* registers for module HMA_T6 */
+#define HMA_T6_BASE_ADDR 0x51000
+
+#define S_TPH    12
+#define M_TPH    0x3U
+#define V_TPH(x) ((x) << S_TPH)
+#define G_TPH(x) (((x) >> S_TPH) & M_TPH)
+
+#define S_TPH_V    11
+#define V_TPH_V(x) ((x) << S_TPH_V)
+#define F_TPH_V    V_TPH_V(1U)
+
+#define S_DCA    0
+#define M_DCA    0x7ffU
+#define V_DCA(x) ((x) << S_DCA)
+#define G_DCA(x) (((x) >> S_DCA) & M_DCA)
+
+#define A_HMA_CFG 0x51020
+
+#define S_OP_MODE    31
+#define V_OP_MODE(x) ((x) << S_OP_MODE)
+#define F_OP_MODE    V_OP_MODE(1U)
+
+#define A_HMA_TLB_ACCESS 0x51028
+
+#define S_INV_ALL    29
+#define V_INV_ALL(x) ((x) << S_INV_ALL)
+#define F_INV_ALL    V_INV_ALL(1U)
+
+#define S_LOCK_ENTRY    28
+#define V_LOCK_ENTRY(x) ((x) << S_LOCK_ENTRY)
+#define F_LOCK_ENTRY    V_LOCK_ENTRY(1U)
+
+#define S_E_SEL    0
+#define M_E_SEL    0x1fU
+#define V_E_SEL(x) ((x) << S_E_SEL)
+#define G_E_SEL(x) (((x) >> S_E_SEL) & M_E_SEL)
+
+#define A_HMA_TLB_BITS 0x5102c
+
+#define S_VA    12
+#define M_VA    0xfffffU
+#define V_VA(x) ((x) << S_VA)
+#define G_VA(x) (((x) >> S_VA) & M_VA)
+
+#define S_VALID_E    4
+#define V_VALID_E(x) ((x) << S_VALID_E)
+#define F_VALID_E    V_VALID_E(1U)
+
+#define S_LOCK_HMA    3
+#define V_LOCK_HMA(x) ((x) << S_LOCK_HMA)
+#define F_LOCK_HMA    V_LOCK_HMA(1U)
+
+#define S_T6_USED    2
+#define V_T6_USED(x) ((x) << S_T6_USED)
+#define F_T6_USED    V_T6_USED(1U)
+
+#define S_REGION    0
+#define M_REGION    0x3U
+#define V_REGION(x) ((x) << S_REGION)
+#define G_REGION(x) (((x) >> S_REGION) & M_REGION)
+
+#define A_HMA_TLB_DESC_0_H 0x51030
+#define A_HMA_TLB_DESC_0_L 0x51034
+#define A_HMA_TLB_DESC_1_H 0x51038
+#define A_HMA_TLB_DESC_1_L 0x5103c
+#define A_HMA_TLB_DESC_2_H 0x51040
+#define A_HMA_TLB_DESC_2_L 0x51044
+#define A_HMA_TLB_DESC_3_H 0x51048
+#define A_HMA_TLB_DESC_3_L 0x5104c
+#define A_HMA_TLB_DESC_4_H 0x51050
+#define A_HMA_TLB_DESC_4_L 0x51054
+#define A_HMA_TLB_DESC_5_H 0x51058
+#define A_HMA_TLB_DESC_5_L 0x5105c
+#define A_HMA_TLB_DESC_6_H 0x51060
+#define A_HMA_TLB_DESC_6_L 0x51064
+#define A_HMA_TLB_DESC_7_H 0x51068
+#define A_HMA_TLB_DESC_7_L 0x5106c
+#define A_HMA_REG0_MIN 0x51070
+
+#define S_ADDR0_MIN    12
+#define M_ADDR0_MIN    0xfffffU
+#define V_ADDR0_MIN(x) ((x) << S_ADDR0_MIN)
+#define G_ADDR0_MIN(x) (((x) >> S_ADDR0_MIN) & M_ADDR0_MIN)
+
+#define A_HMA_REG0_MAX 0x51074
+
+#define S_ADDR0_MAX    12
+#define M_ADDR0_MAX    0xfffffU
+#define V_ADDR0_MAX(x) ((x) << S_ADDR0_MAX)
+#define G_ADDR0_MAX(x) (((x) >> S_ADDR0_MAX) & M_ADDR0_MAX)
+
+#define A_HMA_REG0_MASK 0x51078
+
+#define S_PAGE_SIZE0    12
+#define M_PAGE_SIZE0    0xfffffU
+#define V_PAGE_SIZE0(x) ((x) << S_PAGE_SIZE0)
+#define G_PAGE_SIZE0(x) (((x) >> S_PAGE_SIZE0) & M_PAGE_SIZE0)
+
+#define A_HMA_REG0_BASE 0x5107c
+#define A_HMA_REG1_MIN 0x51080
+
+#define S_ADDR1_MIN    12
+#define M_ADDR1_MIN    0xfffffU
+#define V_ADDR1_MIN(x) ((x) << S_ADDR1_MIN)
+#define G_ADDR1_MIN(x) (((x) >> S_ADDR1_MIN) & M_ADDR1_MIN)
+
+#define A_HMA_REG1_MAX 0x51084
+
+#define S_ADDR1_MAX    12
+#define M_ADDR1_MAX    0xfffffU
+#define V_ADDR1_MAX(x) ((x) << S_ADDR1_MAX)
+#define G_ADDR1_MAX(x) (((x) >> S_ADDR1_MAX) & M_ADDR1_MAX)
+
+#define A_HMA_REG1_MASK 0x51088
+
+#define S_PAGE_SIZE1    12
+#define M_PAGE_SIZE1    0xfffffU
+#define V_PAGE_SIZE1(x) ((x) << S_PAGE_SIZE1)
+#define G_PAGE_SIZE1(x) (((x) >> S_PAGE_SIZE1) & M_PAGE_SIZE1)
+
+#define A_HMA_REG1_BASE 0x5108c
+#define A_HMA_REG2_MIN 0x51090
+
+#define S_ADDR2_MIN    12
+#define M_ADDR2_MIN    0xfffffU
+#define V_ADDR2_MIN(x) ((x) << S_ADDR2_MIN)
+#define G_ADDR2_MIN(x) (((x) >> S_ADDR2_MIN) & M_ADDR2_MIN)
+
+#define A_HMA_REG2_MAX 0x51094
+
+#define S_ADDR2_MAX    12
+#define M_ADDR2_MAX    0xfffffU
+#define V_ADDR2_MAX(x) ((x) << S_ADDR2_MAX)
+#define G_ADDR2_MAX(x) (((x) >> S_ADDR2_MAX) & M_ADDR2_MAX)
+
+#define A_HMA_REG2_MASK 0x51098
+
+#define S_PAGE_SIZE2    12
+#define M_PAGE_SIZE2    0xfffffU
+#define V_PAGE_SIZE2(x) ((x) << S_PAGE_SIZE2)
+#define G_PAGE_SIZE2(x) (((x) >> S_PAGE_SIZE2) & M_PAGE_SIZE2)
+
+#define A_HMA_REG2_BASE 0x5109c
+#define A_HMA_REG3_MIN 0x510a0
+
+#define S_ADDR3_MIN    12
+#define M_ADDR3_MIN    0xfffffU
+#define V_ADDR3_MIN(x) ((x) << S_ADDR3_MIN)
+#define G_ADDR3_MIN(x) (((x) >> S_ADDR3_MIN) & M_ADDR3_MIN)
+
+#define A_HMA_REG3_MAX 0x510a4
+
+#define S_ADDR3_MAX    12
+#define M_ADDR3_MAX    0xfffffU
+#define V_ADDR3_MAX(x) ((x) << S_ADDR3_MAX)
+#define G_ADDR3_MAX(x) (((x) >> S_ADDR3_MAX) & M_ADDR3_MAX)
+
+#define A_HMA_REG3_MASK 0x510a8
+
+#define S_PAGE_SIZE3    12
+#define M_PAGE_SIZE3    0xfffffU
+#define V_PAGE_SIZE3(x) ((x) << S_PAGE_SIZE3)
+#define G_PAGE_SIZE3(x) (((x) >> S_PAGE_SIZE3) & M_PAGE_SIZE3)
+
+#define A_HMA_REG3_BASE 0x510ac
+#define A_HMA_SW_SYNC 0x510b0
+
+#define S_ENTER_SYNC    31
+#define V_ENTER_SYNC(x) ((x) << S_ENTER_SYNC)
+#define F_ENTER_SYNC    V_ENTER_SYNC(1U)
+
+#define S_EXIT_SYNC    30
+#define V_EXIT_SYNC(x) ((x) << S_EXIT_SYNC)
+#define F_EXIT_SYNC    V_EXIT_SYNC(1U)
+
+#define S_IDTF_INT_ENABLE    5
+#define V_IDTF_INT_ENABLE(x) ((x) << S_IDTF_INT_ENABLE)
+#define F_IDTF_INT_ENABLE    V_IDTF_INT_ENABLE(1U)
+
+#define S_OTF_INT_ENABLE    4
+#define V_OTF_INT_ENABLE(x) ((x) << S_OTF_INT_ENABLE)
+#define F_OTF_INT_ENABLE    V_OTF_INT_ENABLE(1U)
+
+#define S_RTF_INT_ENABLE    3
+#define V_RTF_INT_ENABLE(x) ((x) << S_RTF_INT_ENABLE)
+#define F_RTF_INT_ENABLE    V_RTF_INT_ENABLE(1U)
+
+#define S_PCIEMST_INT_ENABLE    2
+#define V_PCIEMST_INT_ENABLE(x) ((x) << S_PCIEMST_INT_ENABLE)
+#define F_PCIEMST_INT_ENABLE    V_PCIEMST_INT_ENABLE(1U)
+
+#define S_MAMST_INT_ENABLE    1
+#define V_MAMST_INT_ENABLE(x) ((x) << S_MAMST_INT_ENABLE)
+#define F_MAMST_INT_ENABLE    V_MAMST_INT_ENABLE(1U)
+
+#define S_IDTF_INT_CAUSE    5
+#define V_IDTF_INT_CAUSE(x) ((x) << S_IDTF_INT_CAUSE)
+#define F_IDTF_INT_CAUSE    V_IDTF_INT_CAUSE(1U)
+
+#define S_OTF_INT_CAUSE    4
+#define V_OTF_INT_CAUSE(x) ((x) << S_OTF_INT_CAUSE)
+#define F_OTF_INT_CAUSE    V_OTF_INT_CAUSE(1U)
+
+#define S_RTF_INT_CAUSE    3
+#define V_RTF_INT_CAUSE(x) ((x) << S_RTF_INT_CAUSE)
+#define F_RTF_INT_CAUSE    V_RTF_INT_CAUSE(1U)
+
+#define S_PCIEMST_INT_CAUSE    2
+#define V_PCIEMST_INT_CAUSE(x) ((x) << S_PCIEMST_INT_CAUSE)
+#define F_PCIEMST_INT_CAUSE    V_PCIEMST_INT_CAUSE(1U)
+
+#define S_MAMST_INT_CAUSE    1
+#define V_MAMST_INT_CAUSE(x) ((x) << S_MAMST_INT_CAUSE)
+#define F_MAMST_INT_CAUSE    V_MAMST_INT_CAUSE(1U)
+
+#define A_HMA_MA_MST_ERR 0x5130c
+#define A_HMA_RTF_ERR 0x51310
+#define A_HMA_OTF_ERR 0x51314
+#define A_HMA_IDTF_ERR 0x51318
+#define A_HMA_EXIT_TF 0x5131c
+
+#define S_RTF    30
+#define V_RTF(x) ((x) << S_RTF)
+#define F_RTF    V_RTF(1U)
+
+#define S_OTF    29
+#define V_OTF(x) ((x) << S_OTF)
+#define F_OTF    V_OTF(1U)
+
+#define S_IDTF    28
+#define V_IDTF(x) ((x) << S_IDTF)
+#define F_IDTF    V_IDTF(1U)
+
+#define A_HMA_LOCAL_DEBUG_CFG 0x51320
+#define A_HMA_LOCAL_DEBUG_RPT 0x51324
+#define A_HMA_DEBUG_FSM_0 0xa000
+
+#define S_EDC_FSM    18
+#define M_EDC_FSM    0x1fU
+#define V_EDC_FSM(x) ((x) << S_EDC_FSM)
+#define G_EDC_FSM(x) (((x) >> S_EDC_FSM) & M_EDC_FSM)
+
+#define S_RAS_FSM_SLV    15
+#define M_RAS_FSM_SLV    0x7U
+#define V_RAS_FSM_SLV(x) ((x) << S_RAS_FSM_SLV)
+#define G_RAS_FSM_SLV(x) (((x) >> S_RAS_FSM_SLV) & M_RAS_FSM_SLV)
+
+#define S_FC_FSM    10
+#define M_FC_FSM    0x1fU
+#define V_FC_FSM(x) ((x) << S_FC_FSM)
+#define G_FC_FSM(x) (((x) >> S_FC_FSM) & M_FC_FSM)
+
+#define S_COOKIE_ARB_FSM    8
+#define M_COOKIE_ARB_FSM    0x3U
+#define V_COOKIE_ARB_FSM(x) ((x) << S_COOKIE_ARB_FSM)
+#define G_COOKIE_ARB_FSM(x) (((x) >> S_COOKIE_ARB_FSM) & M_COOKIE_ARB_FSM)
+
+#define S_PCIE_CHUNK_FSM    6
+#define M_PCIE_CHUNK_FSM    0x3U
+#define V_PCIE_CHUNK_FSM(x) ((x) << S_PCIE_CHUNK_FSM)
+#define G_PCIE_CHUNK_FSM(x) (((x) >> S_PCIE_CHUNK_FSM) & M_PCIE_CHUNK_FSM)
+
+#define S_WTRANSFER_FSM    4
+#define M_WTRANSFER_FSM    0x3U
+#define V_WTRANSFER_FSM(x) ((x) << S_WTRANSFER_FSM)
+#define G_WTRANSFER_FSM(x) (((x) >> S_WTRANSFER_FSM) & M_WTRANSFER_FSM)
+
+#define S_WD_FSM    2
+#define M_WD_FSM    0x3U
+#define V_WD_FSM(x) ((x) << S_WD_FSM)
+#define G_WD_FSM(x) (((x) >> S_WD_FSM) & M_WD_FSM)
+
+#define S_RD_FSM    0
+#define M_RD_FSM    0x3U
+#define V_RD_FSM(x) ((x) << S_RD_FSM)
+#define G_RD_FSM(x) (((x) >> S_RD_FSM) & M_RD_FSM)
+
+#define A_HMA_DEBUG_FSM_1 0xa001
+
+#define S_SYNC_FSM    11
+#define M_SYNC_FSM    0x3ffU
+#define V_SYNC_FSM(x) ((x) << S_SYNC_FSM)
+#define G_SYNC_FSM(x) (((x) >> S_SYNC_FSM) & M_SYNC_FSM)
+
+#define S_OCHK_FSM    9
+#define M_OCHK_FSM    0x3U
+#define V_OCHK_FSM(x) ((x) << S_OCHK_FSM)
+#define G_OCHK_FSM(x) (((x) >> S_OCHK_FSM) & M_OCHK_FSM)
+
+#define S_TLB_FSM    5
+#define M_TLB_FSM    0xfU
+#define V_TLB_FSM(x) ((x) << S_TLB_FSM)
+#define G_TLB_FSM(x) (((x) >> S_TLB_FSM) & M_TLB_FSM)
+
+#define S_PIO_FSM    0
+#define M_PIO_FSM    0x1fU
+#define V_PIO_FSM(x) ((x) << S_PIO_FSM)
+#define G_PIO_FSM(x) (((x) >> S_PIO_FSM) & M_PIO_FSM)
+
+#define A_HMA_DEBUG_PCIE_INTF 0xa002
+
+#define S_T6_H_REQVLD    28
+#define V_T6_H_REQVLD(x) ((x) << S_T6_H_REQVLD)
+#define F_T6_H_REQVLD    V_T6_H_REQVLD(1U)
+
+#define S_H_REQFULL    27
+#define V_H_REQFULL(x) ((x) << S_H_REQFULL)
+#define F_H_REQFULL    V_H_REQFULL(1U)
+
+#define S_H_REQSOP    26
+#define V_H_REQSOP(x) ((x) << S_H_REQSOP)
+#define F_H_REQSOP    V_H_REQSOP(1U)
+
+#define S_H_REQEOP    25
+#define V_H_REQEOP(x) ((x) << S_H_REQEOP)
+#define F_H_REQEOP    V_H_REQEOP(1U)
+
+#define S_T6_H_RSPVLD    24
+#define V_T6_H_RSPVLD(x) ((x) << S_T6_H_RSPVLD)
+#define F_T6_H_RSPVLD    V_T6_H_RSPVLD(1U)
+
+#define S_H_RSPFULL    23
+#define V_H_RSPFULL(x) ((x) << S_H_RSPFULL)
+#define F_H_RSPFULL    V_H_RSPFULL(1U)
+
+#define S_H_RSPSOP    22
+#define V_H_RSPSOP(x) ((x) << S_H_RSPSOP)
+#define F_H_RSPSOP    V_H_RSPSOP(1U)
+
+#define S_H_RSPEOP    21
+#define V_H_RSPEOP(x) ((x) << S_H_RSPEOP)
+#define F_H_RSPEOP    V_H_RSPEOP(1U)
+
+#define S_H_RSPERR    20
+#define V_H_RSPERR(x) ((x) << S_H_RSPERR)
+#define F_H_RSPERR    V_H_RSPERR(1U)
+
+#define S_PCIE_CMD_AVAIL    19
+#define V_PCIE_CMD_AVAIL(x) ((x) << S_PCIE_CMD_AVAIL)
+#define F_PCIE_CMD_AVAIL    V_PCIE_CMD_AVAIL(1U)
+
+#define S_PCIE_CMD_RDY    18
+#define V_PCIE_CMD_RDY(x) ((x) << S_PCIE_CMD_RDY)
+#define F_PCIE_CMD_RDY    V_PCIE_CMD_RDY(1U)
+
+#define S_PCIE_WNR    17
+#define V_PCIE_WNR(x) ((x) << S_PCIE_WNR)
+#define F_PCIE_WNR    V_PCIE_WNR(1U)
+
+#define S_PCIE_LEN    9
+#define M_PCIE_LEN    0xffU
+#define V_PCIE_LEN(x) ((x) << S_PCIE_LEN)
+#define G_PCIE_LEN(x) (((x) >> S_PCIE_LEN) & M_PCIE_LEN)
+
+#define S_PCIE_TRWDAT_RDY    8
+#define V_PCIE_TRWDAT_RDY(x) ((x) << S_PCIE_TRWDAT_RDY)
+#define F_PCIE_TRWDAT_RDY    V_PCIE_TRWDAT_RDY(1U)
+
+#define S_PCIE_TRWDAT_AVAIL    7
+#define V_PCIE_TRWDAT_AVAIL(x) ((x) << S_PCIE_TRWDAT_AVAIL)
+#define F_PCIE_TRWDAT_AVAIL    V_PCIE_TRWDAT_AVAIL(1U)
+
+#define S_PCIE_TRWSOP    6
+#define V_PCIE_TRWSOP(x) ((x) << S_PCIE_TRWSOP)
+#define F_PCIE_TRWSOP    V_PCIE_TRWSOP(1U)
+
+#define S_PCIE_TRWEOP    5
+#define V_PCIE_TRWEOP(x) ((x) << S_PCIE_TRWEOP)
+#define F_PCIE_TRWEOP    V_PCIE_TRWEOP(1U)
+
+#define S_PCIE_TRRDAT_RDY    4
+#define V_PCIE_TRRDAT_RDY(x) ((x) << S_PCIE_TRRDAT_RDY)
+#define F_PCIE_TRRDAT_RDY    V_PCIE_TRRDAT_RDY(1U)
+
+#define S_PCIE_TRRDAT_AVAIL    3
+#define V_PCIE_TRRDAT_AVAIL(x) ((x) << S_PCIE_TRRDAT_AVAIL)
+#define F_PCIE_TRRDAT_AVAIL    V_PCIE_TRRDAT_AVAIL(1U)
+
+#define S_PCIE_TRRSOP    2
+#define V_PCIE_TRRSOP(x) ((x) << S_PCIE_TRRSOP)
+#define F_PCIE_TRRSOP    V_PCIE_TRRSOP(1U)
+
+#define S_PCIE_TRREOP    1
+#define V_PCIE_TRREOP(x) ((x) << S_PCIE_TRREOP)
+#define F_PCIE_TRREOP    V_PCIE_TRREOP(1U)
+
+#define S_PCIE_TRRERR    0
+#define V_PCIE_TRRERR(x) ((x) << S_PCIE_TRRERR)
+#define F_PCIE_TRRERR    V_PCIE_TRRERR(1U)
+
+#define A_HMA_DEBUG_PCIE_ADDR_INTERNAL_LO 0xa003
+#define A_HMA_DEBUG_PCIE_ADDR_INTERNAL_HI 0xa004
+#define A_HMA_DEBUG_PCIE_REQ_DATA_EXTERNAL 0xa005
+
+#define S_REQDATA2    24
+#define M_REQDATA2    0xffU
+#define V_REQDATA2(x) ((x) << S_REQDATA2)
+#define G_REQDATA2(x) (((x) >> S_REQDATA2) & M_REQDATA2)
+
+#define S_REQDATA1    21
+#define M_REQDATA1    0x7U
+#define V_REQDATA1(x) ((x) << S_REQDATA1)
+#define G_REQDATA1(x) (((x) >> S_REQDATA1) & M_REQDATA1)
+
+#define S_REQDATA0    0
+#define M_REQDATA0    0x1fffffU
+#define V_REQDATA0(x) ((x) << S_REQDATA0)
+#define G_REQDATA0(x) (((x) >> S_REQDATA0) & M_REQDATA0)
+
+#define A_HMA_DEBUG_PCIE_RSP_DATA_EXTERNAL 0xa006
+
+#define S_RSPDATA3    24
+#define M_RSPDATA3    0xffU
+#define V_RSPDATA3(x) ((x) << S_RSPDATA3)
+#define G_RSPDATA3(x) (((x) >> S_RSPDATA3) & M_RSPDATA3)
+
+#define S_RSPDATA2    16
+#define M_RSPDATA2    0xffU
+#define V_RSPDATA2(x) ((x) << S_RSPDATA2)
+#define G_RSPDATA2(x) (((x) >> S_RSPDATA2) & M_RSPDATA2)
+
+#define S_RSPDATA1    8
+#define M_RSPDATA1    0xffU
+#define V_RSPDATA1(x) ((x) << S_RSPDATA1)
+#define G_RSPDATA1(x) (((x) >> S_RSPDATA1) & M_RSPDATA1)
+
+#define S_RSPDATA0    0
+#define M_RSPDATA0    0xffU
+#define V_RSPDATA0(x) ((x) << S_RSPDATA0)
+#define G_RSPDATA0(x) (((x) >> S_RSPDATA0) & M_RSPDATA0)
+
+#define A_HMA_DEBUG_MA_SLV_CTL 0xa007
+
+#define S_MA_CMD_AVAIL    19
+#define V_MA_CMD_AVAIL(x) ((x) << S_MA_CMD_AVAIL)
+#define F_MA_CMD_AVAIL    V_MA_CMD_AVAIL(1U)
+
+#define S_MA_CLNT    15
+#define M_MA_CLNT    0xfU
+#define V_MA_CLNT(x) ((x) << S_MA_CLNT)
+#define G_MA_CLNT(x) (((x) >> S_MA_CLNT) & M_MA_CLNT)
+
+#define S_MA_WNR    14
+#define V_MA_WNR(x) ((x) << S_MA_WNR)
+#define F_MA_WNR    V_MA_WNR(1U)
+
+#define S_MA_LEN    6
+#define M_MA_LEN    0xffU
+#define V_MA_LEN(x) ((x) << S_MA_LEN)
+#define G_MA_LEN(x) (((x) >> S_MA_LEN) & M_MA_LEN)
+
+#define S_MA_MST_RD    5
+#define V_MA_MST_RD(x) ((x) << S_MA_MST_RD)
+#define F_MA_MST_RD    V_MA_MST_RD(1U)
+
+#define S_MA_MST_VLD    4
+#define V_MA_MST_VLD(x) ((x) << S_MA_MST_VLD)
+#define F_MA_MST_VLD    V_MA_MST_VLD(1U)
+
+#define S_MA_MST_ERR    3
+#define V_MA_MST_ERR(x) ((x) << S_MA_MST_ERR)
+#define F_MA_MST_ERR    V_MA_MST_ERR(1U)
+
+#define S_MAS_TLB_REQ    2
+#define V_MAS_TLB_REQ(x) ((x) << S_MAS_TLB_REQ)
+#define F_MAS_TLB_REQ    V_MAS_TLB_REQ(1U)
+
+#define S_MAS_TLB_ACK    1
+#define V_MAS_TLB_ACK(x) ((x) << S_MAS_TLB_ACK)
+#define F_MAS_TLB_ACK    V_MAS_TLB_ACK(1U)
+
+#define S_MAS_TLB_ERR    0
+#define V_MAS_TLB_ERR(x) ((x) << S_MAS_TLB_ERR)
+#define F_MAS_TLB_ERR    V_MAS_TLB_ERR(1U)
+
+#define A_HMA_DEBUG_MA_SLV_ADDR_INTERNAL 0xa008
+#define A_HMA_DEBUG_TLB_HIT_ENTRY 0xa009
+#define A_HMA_DEBUG_TLB_HIT_CNT 0xa00a
+#define A_HMA_DEBUG_TLB_MISS_CNT 0xa00b
+#define A_HMA_DEBUG_PAGE_TBL_LKP_CTL 0xa00c
+
+#define S_LKP_REQ_VLD    4
+#define V_LKP_REQ_VLD(x) ((x) << S_LKP_REQ_VLD)
+#define F_LKP_REQ_VLD    V_LKP_REQ_VLD(1U)
+
+#define S_LKP_DESC_SEL    1
+#define M_LKP_DESC_SEL    0x7U
+#define V_LKP_DESC_SEL(x) ((x) << S_LKP_DESC_SEL)
+#define G_LKP_DESC_SEL(x) (((x) >> S_LKP_DESC_SEL) & M_LKP_DESC_SEL)
+
+#define S_LKP_RSP_VLD    0
+#define V_LKP_RSP_VLD(x) ((x) << S_LKP_RSP_VLD)
+#define F_LKP_RSP_VLD    V_LKP_RSP_VLD(1U)
+
+#define A_HMA_DEBUG_PAGE_TBL_LKP_REQ_ADDR 0xa00d
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_0 0xa00e
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_1 0xa00f
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_2 0xa010
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_3 0xa011
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_4 0xa012
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_5 0xa013
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_6 0xa014
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_7 0xa015
+#define A_HMA_DEBUG_PHYS_DESC_INTERNAL_LO 0xa016
+#define A_HMA_DEBUG_PCIE_RD_REQ_CNT_LO 0xa017
+#define A_HMA_DEBUG_PCIE_RD_REQ_CNT_HI 0xa018
+#define A_HMA_DEBUG_PCIE_WR_REQ_CNT_LO 0xa019
+#define A_HMA_DEBUG_PCIE_WR_REQ_CNT_HI 0xa01a
+#define A_HMA_DEBUG_PCIE_RD_DATA_CYC_CNT_LO 0xa01b
+#define A_HMA_DEBUG_PCIE_RD_DATA_CYC_CNT_HI 0xa01c
+#define A_HMA_DEBUG_PCIE_WR_DATA_CYC_CNT_LO 0xa01d
+#define A_HMA_DEBUG_PCIE_WR_DATA_CYC_CNT_HI 0xa01e
+#define A_HMA_DEBUG_PCIE_SOP_EOP_CNT 0xa01f
+
+#define S_WR_EOP_CNT    16
+#define M_WR_EOP_CNT    0xffU
+#define V_WR_EOP_CNT(x) ((x) << S_WR_EOP_CNT)
+#define G_WR_EOP_CNT(x) (((x) >> S_WR_EOP_CNT) & M_WR_EOP_CNT)
+
+#define S_RD_SOP_CNT    8
+#define M_RD_SOP_CNT    0xffU
+#define V_RD_SOP_CNT(x) ((x) << S_RD_SOP_CNT)
+#define G_RD_SOP_CNT(x) (((x) >> S_RD_SOP_CNT) & M_RD_SOP_CNT)
+
+#define S_RD_EOP_CNT    0
+#define M_RD_EOP_CNT    0xffU
+#define V_RD_EOP_CNT(x) ((x) << S_RD_EOP_CNT)
+#define G_RD_EOP_CNT(x) (((x) >> S_RD_EOP_CNT) & M_RD_EOP_CNT)
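
All of the definitions above follow the generated naming convention used
throughout t4_regs.h: for a register field FOO, S_FOO is its bit offset,
M_FOO its unshifted mask, V_FOO(x) shifts a value into place, G_FOO(x)
extracts the field from a register word, and F_FOO is the ready-made mask
for a single-bit flag.  A minimal, self-contained sketch of the pattern,
reusing the CMDFSM and CMDSPLITFSM fields of A_EDC_H_DBG_FSM_STATE defined
above (the register read is simulated, since this is only an illustration):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Copies of the generated definitions above, for a standalone demo. */
    #define S_CMDSPLITFSM    3
    #define V_CMDSPLITFSM(x) ((x) << S_CMDSPLITFSM)
    #define F_CMDSPLITFSM    V_CMDSPLITFSM(1U)

    #define S_CMDFSM    0
    #define M_CMDFSM    0x7U
    #define V_CMDFSM(x) ((x) << S_CMDFSM)
    #define G_CMDFSM(x) (((x) >> S_CMDFSM) & M_CMDFSM)

    int main(void)
    {
        /* Pretend this came from a 32-bit read of A_EDC_H_DBG_FSM_STATE. */
        uint32_t reg = V_CMDFSM(5) | F_CMDSPLITFSM;

        assert(G_CMDFSM(reg) == 5);     /* G_ undoes V_ */
        assert(reg & F_CMDSPLITFSM);    /* F_ tests a one-bit flag */
        printf("command FSM state: %u\n", G_CMDFSM(reg));
        return 0;
    }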

Modified: trunk/sys/dev/cxgbe/common/t4_regs_values.h
===================================================================
--- trunk/sys/dev/cxgbe/common/t4_regs_values.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/t4_regs_values.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2011 Chelsio Communications, Inc.
+ * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/common/t4_regs_values.h 218792 2011-02-18 08:00:26Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/common/t4_regs_values.h 308304 2016-11-04 18:45:06Z jhb $
  *
  */
 
@@ -82,6 +83,16 @@
 #define X_INGPCIEBOUNDARY_2048B		6
 #define X_INGPCIEBOUNDARY_4096B		7
 
+#define X_T6_INGPADBOUNDARY_SHIFT	3
+#define X_T6_INGPADBOUNDARY_8B		0
+#define X_T6_INGPADBOUNDARY_16B		1
+#define X_T6_INGPADBOUNDARY_32B		2
+#define X_T6_INGPADBOUNDARY_64B		3
+#define X_T6_INGPADBOUNDARY_128B	4
+#define X_T6_INGPADBOUNDARY_256B	5
+#define X_T6_INGPADBOUNDARY_512B	6
+#define X_T6_INGPADBOUNDARY_1024B	7
+
 #define X_INGPADBOUNDARY_SHIFT		5
 #define X_INGPADBOUNDARY_32B		0
 #define X_INGPADBOUNDARY_64B		1
@@ -102,6 +113,17 @@
 #define X_EGRPCIEBOUNDARY_2048B		6
 #define X_EGRPCIEBOUNDARY_4096B		7
 
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT		5	// *most* of the values ...
+#define X_INGPACKBOUNDARY_16B		0	// Note weird value!
+#define X_INGPACKBOUNDARY_64B		1
+#define X_INGPACKBOUNDARY_128B		2
+#define X_INGPACKBOUNDARY_256B		3
+#define X_INGPACKBOUNDARY_512B		4
+#define X_INGPACKBOUNDARY_1024B		5
+#define X_INGPACKBOUNDARY_2048B		6
+#define X_INGPACKBOUNDARY_4096B		7
+
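The boundary encodings above are log2-based: a padding boundary code x means
2^(x + shift) bytes, with shift 5 on T4/T5 and 3 on T6, while the packing
boundary uses shift 5 for every code except 0, which means 16 bytes rather
than 32 (the "weird value" noted above).  A small sketch of the decode,
mirroring the logic t4vf_get_sge_params() applies later in this commit:

    #include <stdio.h>

    #define X_INGPACKBOUNDARY_SHIFT   5
    #define X_INGPACKBOUNDARY_16B     0
    #define X_INGPADBOUNDARY_SHIFT    5
    #define X_T6_INGPADBOUNDARY_SHIFT 3

    /* Bytes implied by an INGPACKBOUNDARY code; code 0 is the odd 16B case. */
    static unsigned int pack_boundary_bytes(unsigned int code)
    {
        if (code == X_INGPACKBOUNDARY_16B)
            return 16;
        return 1u << (code + X_INGPACKBOUNDARY_SHIFT);
    }

    /* Bytes implied by an INGPADBOUNDARY code; T6 uses the smaller shift. */
    static unsigned int pad_boundary_bytes(unsigned int code, int is_t6)
    {
        return 1u << (code +
            (is_t6 ? X_T6_INGPADBOUNDARY_SHIFT : X_INGPADBOUNDARY_SHIFT));
    }

    int main(void)
    {
        printf("pack code 0 -> %u bytes\n", pack_boundary_bytes(0));    /* 16 */
        printf("pack code 1 -> %u bytes\n", pack_boundary_bytes(1));    /* 64 */
        printf("T6 pad code 3 -> %u bytes\n", pad_boundary_bytes(3, 1)); /* 64 */
        return 0;
    }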
 /* GTS register */
 #define SGE_TIMERREGS			6
 #define X_TIMERREG_COUNTER0		0
@@ -178,6 +200,52 @@
 #define X_RSPD_TYPE_INTR		2
 
 /*
+ * Context field definitions.  This is by no means a complete list of SGE
+ * Context fields.  In the vast majority of cases the firmware initializes
+ * things the way they need to be set up.  But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Contexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE		19
+#define M_CONMCTXT_CNGTPMODE		0x3
+#define V_CONMCTXT_CNGTPMODE(x)		((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x)  \
+	(((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP		0
+#define M_CONMCTXT_CNGCHMAP		0xffff
+#define V_CONMCTXT_CNGCHMAP(x)		((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x)   \
+	(((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_DISABLE	0
+#define X_CONMCTXT_CNGTPMODE_QUEUE	1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL	2
+#define X_CONMCTXT_CNGTPMODE_BOTH	3
+
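To select one of the modes above, a driver composes the context word with the
V_ macros and hands it to the firmware; a minimal sketch of just the packing
step (the FW_PARAMS plumbing that actually delivers the value is not shown):

    #include <assert.h>
    #include <stdint.h>

    #define S_CONMCTXT_CNGTPMODE    19
    #define M_CONMCTXT_CNGTPMODE    0x3
    #define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE)
    #define V_CONMCTXT_CNGCHMAP(x)  ((x) << 0)
    #define X_CONMCTXT_CNGTPMODE_CHANNEL 2

    int main(void)
    {
        /* Channel-based congestion tracking, channels 0 and 1 mapped. */
        uint32_t val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_CHANNEL) |
            V_CONMCTXT_CNGCHMAP(0x3);

        assert(((val >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE) ==
            X_CONMCTXT_CNGTPMODE_CHANNEL);
        return 0;
    }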
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64.  For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE		128
+#define SGE_UDB_KDOORBELL	8
+#define SGE_UDB_GTS		20
+#define SGE_UDB_WCDOORBELL	64
+
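Concretely, with 128-byte doorbells the registers for the queue mapped at
user-doorbell slot n sit at fixed offsets inside that slot.  A short sketch
of the address arithmetic implied by the constants above (the helper names
are illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    #define SGE_UDB_SIZE       128
    #define SGE_UDB_KDOORBELL  8
    #define SGE_UDB_GTS        20

    /* Byte offset of the Simple Doorbell for the queue in UDB slot 'slot'. */
    static uint32_t udb_kdoorbell_off(uint32_t slot)
    {
        return slot * SGE_UDB_SIZE + SGE_UDB_KDOORBELL;
    }

    /* Byte offset of the Going To Sleep register for the same slot. */
    static uint32_t udb_gts_off(uint32_t slot)
    {
        return slot * SGE_UDB_SIZE + SGE_UDB_GTS;
    }

    int main(void)
    {
        printf("slot 0: doorbell 0x%x, GTS 0x%x\n",
            udb_kdoorbell_off(0), udb_gts_off(0));
        printf("slot 1: doorbell 0x%x, GTS 0x%x\n",
            udb_kdoorbell_off(1), udb_gts_off(1));
        return 0;
    }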
+/*
  * CIM definitions.
  * ================
  */
@@ -188,5 +256,62 @@
 #define X_MBOWNER_NONE			0
 #define X_MBOWNER_FW			1
 #define X_MBOWNER_PL			2
+#define X_MBOWNER_FW_DEFERRED		3
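The ownership values above drive the usual mailbox handshake: software polls
the mailbox control register until the owner field reports PL (driver)
ownership, with NONE meaning the mailbox is free and FW meaning the firmware
still holds it.  A schematic poll loop follows; the G_MBOWNER decode is only
illustrative here (the real accessor lives elsewhere in t4_regs.h), and the
register read is stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    #define X_MBOWNER_NONE 0
    #define X_MBOWNER_FW   1
    #define X_MBOWNER_PL   2

    /* Illustrative field decode; not the real t4_regs.h definition. */
    #define G_MBOWNER(x) ((x) & 0x3)

    /* Stub read: firmware holds the mailbox for two polls, then we do. */
    static uint32_t read_mbox_ctl(void)
    {
        static int polls;
        return (++polls < 3) ? X_MBOWNER_FW : X_MBOWNER_PL;
    }

    int main(void)
    {
        int attempts = 10;

        while (attempts-- > 0) {
            if (G_MBOWNER(read_mbox_ctl()) == X_MBOWNER_PL) {
                printf("mailbox acquired\n");
                return 0;   /* safe to write the command now */
            }
        }
        return 1;   /* timed out; firmware never released the mailbox */
    }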
 
+/*
+ * PCI-E definitions.
+ * ==================
+ */
+
+#define X_WINDOW_SHIFT			10
+#define X_PCIEOFST_SHIFT		10
+
+/*
+ * TP definitions.
+ * ===============
+ */
+
+/*
+ * TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters.  Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present.  These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define S_FT_FIRST			S_FCOE
+#define S_FT_LAST			S_FRAGMENTATION
+
+#define W_FT_FCOE			1
+#define W_FT_PORT			3
+#define W_FT_VNIC_ID			17
+#define W_FT_VLAN			17
+#define W_FT_TOS			8
+#define W_FT_PROTOCOL			8
+#define W_FT_ETHERTYPE			16
+#define W_FT_MACMATCH			9
+#define W_FT_MPSHITTYPE			3
+#define W_FT_FRAGMENTATION		1
+
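These widths matter because the compressed tuple has a fixed hardware budget;
the firmware config files in this commit note that the widths of the enabled
fields must sum to no more than 36 bits.  For example, the default filterMode
used in those files (fragmentation, mpshittype, protocol, vlan, port, fcoe)
costs 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, which fits.  A tiny checker along
those lines:

    #include <stdio.h>

    #define W_FT_FCOE          1
    #define W_FT_PORT          3
    #define W_FT_PROTOCOL      8
    #define W_FT_VLAN          17
    #define W_FT_MPSHITTYPE    3
    #define W_FT_FRAGMENTATION 1

    int main(void)
    {
        /* Default filterMode from the firmware config files below. */
        int width = W_FT_FRAGMENTATION + W_FT_MPSHITTYPE + W_FT_PROTOCOL +
            W_FT_VLAN + W_FT_PORT + W_FT_FCOE;

        printf("compressed tuple width: %d bits (budget 36)\n", width);
        return width <= 36 ? 0 : 1;
    }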
+/*
+ * Some of the Compressed Filter Tuple fields have internal structure.  These
+ * bit shifts/masks describe those structures.  All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define S_FT_VLAN_VLD			16
+#define V_FT_VLAN_VLD(x)		((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD			V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF			0
+#define M_FT_VNID_ID_VF			0x7fU
+#define V_FT_VNID_ID_VF(x)		((x) << S_FT_VNID_ID_VF)
+#define G_FT_VNID_ID_VF(x)		(((x) >> S_FT_VNID_ID_VF) & M_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF			7
+#define M_FT_VNID_ID_PF			0x7U
+#define V_FT_VNID_ID_PF(x)		((x) << S_FT_VNID_ID_PF)
+#define G_FT_VNID_ID_PF(x)		(((x) >> S_FT_VNID_ID_PF) & M_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD		16
+#define V_FT_VNID_ID_VLD(x)		((x) << S_FT_VNID_ID_VLD)
+#define F_FT_VNID_ID_VLD		V_FT_VNID_ID_VLD(1U)
+
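Putting the sub-fields together: within the 17-bit VNIC_ID filter field,
bits 6:0 carry the VF number, bits 9:7 the PF number, and bit 16 a valid
flag.  A short decode sketch using the masks above:

    #include <assert.h>
    #include <stdint.h>

    #define S_FT_VNID_ID_VF   0
    #define M_FT_VNID_ID_VF   0x7fU
    #define S_FT_VNID_ID_PF   7
    #define M_FT_VNID_ID_PF   0x7U
    #define S_FT_VNID_ID_VLD  16

    int main(void)
    {
        /* A valid VNIC id for PF 4, VF 9. */
        uint32_t vnid = (1u << S_FT_VNID_ID_VLD) |
            (4u << S_FT_VNID_ID_PF) | (9u << S_FT_VNID_ID_VF);

        assert(((vnid >> S_FT_VNID_ID_VF) & M_FT_VNID_ID_VF) == 9);
        assert(((vnid >> S_FT_VNID_ID_PF) & M_FT_VNID_ID_PF) == 4);
        assert((vnid >> S_FT_VNID_ID_VLD) & 1u);
        return 0;
    }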
 #endif /* __T4_REGS_VALUES_H__ */

Modified: trunk/sys/dev/cxgbe/common/t4_tcb.h
===================================================================
--- trunk/sys/dev/cxgbe/common/t4_tcb.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/common/t4_tcb.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2011 Chelsio Communications, Inc.
+ * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/common/t4_tcb.h 218792 2011-02-18 08:00:26Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/common/t4_tcb.h 308304 2016-11-04 18:45:06Z jhb $
  *
  */
 
@@ -332,12 +333,19 @@
 #define M_TCB_PDU_HDR_LEN    0xffULL
 #define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN)
 
-/* 1023:1001 */
+/* 1019:1001 */
 #define W_TCB_AUX1_SLUSH1    31
 #define S_TCB_AUX1_SLUSH1    9
-#define M_TCB_AUX1_SLUSH1    0x7fffffULL
+#define M_TCB_AUX1_SLUSH1    0x7ffffULL
 #define V_TCB_AUX1_SLUSH1(x) ((x) << S_TCB_AUX1_SLUSH1)
 
+/* 1023:1020 */
+#define W_TCB_ULP_EXT    31
+#define S_TCB_ULP_EXT    28
+#define M_TCB_ULP_EXT    0xfULL
+#define V_TCB_ULP_EXT(x) ((x) << S_TCB_ULP_EXT)
+
+
 /* 840:832 */
 #define W_TCB_IRS_ULP    26
 #define S_TCB_IRS_ULP    0

Added: trunk/sys/dev/cxgbe/common/t4vf_hw.c
===================================================================
--- trunk/sys/dev/cxgbe/common/t4vf_hw.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/common/t4vf_hw.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,385 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/common/t4vf_hw.c 309560 2016-12-05 20:43:25Z jhb $");
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_regs_values.h"
+
+#undef msleep
+#define msleep(x) do { \
+	if (cold) \
+		DELAY((x) * 1000); \
+	else \
+		pause("t4hw", (x) * hz / 1000); \
+} while (0)
+
+/*
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's).  Return an error if it doesn't
+ * become ready ...
+ */
+int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+	const u32 whoami = VF_PL_REG(A_PL_VF_WHOAMI);
+	const u32 notready1 = 0xffffffff;
+	const u32 notready2 = 0xeeeeeeee;
+	u32 val;
+
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	msleep(500);
+	val = t4_read_reg(adapter, whoami);
+	if (val != notready1 && val != notready2)
+		return 0;
+	else
+		return -EIO;
+}
+
+
+/**
+ *      t4vf_fw_reset - issue a reset to FW
+ *      @adapter: the adapter
+ *
+ *	Issues a reset command to FW.  For a Physical Function this would
+ *	result in the Firmware resetting all of its state.  For a Virtual
+ *	Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+				      F_FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ *	t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
+ *	@adapter: the adapter
+ *
+ *	Retrieves various core SGE parameters in the form of hardware SGE
+ *	register values.  The caller is responsible for decoding these as
+ *	needed.  The SGE parameters are stored in @adapter->params.sge.
+ */
+int t4vf_get_sge_params(struct adapter *adapter)
+{
+	struct sge_params *sp = &adapter->params.sge;
+	u32 params[7], vals[7];
+	u32 whoami;
+	unsigned int pf, s_hps;
+	int i, v;
+
+	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+	params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+	params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+	params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+	params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+	params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+	v = t4vf_query_params(adapter, 7, params, vals);
+	if (v != FW_SUCCESS)
+		return v;
+
+	sp->sge_control = vals[0];
+	sp->counter_val[0] = G_THRESHOLD_0(vals[6]);
+	sp->counter_val[1] = G_THRESHOLD_1(vals[6]);
+	sp->counter_val[2] = G_THRESHOLD_2(vals[6]);
+	sp->counter_val[3] = G_THRESHOLD_3(vals[6]);
+	sp->timer_val[0] = core_ticks_to_us(adapter, G_TIMERVALUE0(vals[2]));
+	sp->timer_val[1] = core_ticks_to_us(adapter, G_TIMERVALUE1(vals[2]));
+	sp->timer_val[2] = core_ticks_to_us(adapter, G_TIMERVALUE2(vals[3]));
+	sp->timer_val[3] = core_ticks_to_us(adapter, G_TIMERVALUE3(vals[3]));
+	sp->timer_val[4] = core_ticks_to_us(adapter, G_TIMERVALUE4(vals[4]));
+	sp->timer_val[5] = core_ticks_to_us(adapter, G_TIMERVALUE5(vals[4]));
+
+	sp->fl_starve_threshold = G_EGRTHRESHOLD(vals[5]) * 2 + 1;
+	if (is_t4(adapter))
+		sp->fl_starve_threshold2 = sp->fl_starve_threshold;
+	else if (is_t5(adapter))
+		sp->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(vals[5]) * 2 + 1;
+	else
+		sp->fl_starve_threshold2 = G_T6_EGRTHRESHOLDPACKING(vals[5]) * 2 + 1;
+
+	/*
+	 * We need the Queues/Page and Host Page Size for our VF.
+	 * This is based on the PF from which we're instantiated.
+	 */
+	whoami = t4_read_reg(adapter, VF_PL_REG(A_PL_VF_WHOAMI));
+	pf = G_SOURCEPF(whoami);
+
+	s_hps = (S_HOSTPAGESIZEPF0 +
+	    (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * pf);
+	sp->page_shift = ((vals[1] >> s_hps) & M_HOSTPAGESIZEPF0) + 10;
+
+	for (i = 0; i < SGE_FLBUF_SIZES; i++) {
+		params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		    V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0 + (4 * i)));
+		v = t4vf_query_params(adapter, 1, params, vals);
+		if (v != FW_SUCCESS)
+			return v;
+
+		sp->sge_fl_buffer_size[i] = vals[0];
+	}
+
+	/*
+	 * T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately with the Padding Boundary in SGE_CONTROL and Packing
+	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
+	 * SGE_CONTROL2 in order to determine how ingress packet data will be
+	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
+	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+	 * failure grabbing it we throw an error since we can't figure out the
+	 * right value.
+	 */
+	sp->spg_len = sp->sge_control & F_EGRSTATUSPAGESIZE ? 128 : 64;
+	sp->fl_pktshift = G_PKTSHIFT(sp->sge_control);
+	if (chip_id(adapter) <= CHELSIO_T5) {
+		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(sp->sge_control) +
+		    X_INGPADBOUNDARY_SHIFT);
+	} else {
+		sp->pad_boundary = 1 << (G_INGPADBOUNDARY(sp->sge_control) +
+		    X_T6_INGPADBOUNDARY_SHIFT);
+	}
+	if (is_t4(adapter))
+		sp->pack_boundary = sp->pad_boundary;
+	else {
+		params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+			     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+		v = t4vf_query_params(adapter, 1, params, vals);
+		if (v != FW_SUCCESS) {
+			CH_ERR(adapter, "Unable to get SGE Control2; "
+			       "probably old firmware.\n");
+			return v;
+		}
+		if (G_INGPACKBOUNDARY(vals[0]) == 0)
+			sp->pack_boundary = 16;
+		else
+			sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(vals[0]) +
+			    5);
+	}
+
+	/*
+	 * For T5 and later we want to use the new BAR2 Doorbells.
+	 * Unfortunately, older firmware didn't allow these registers to be
+	 * read.
+	 */
+	if (!is_t4(adapter)) {
+		unsigned int s_qpp;
+
+		params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+			     V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+		params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+			     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+		v = t4vf_query_params(adapter, 2, params, vals);
+		if (v != FW_SUCCESS) {
+			CH_WARN(adapter, "Unable to get VF SGE Queues/Page; "
+				"probably old firmware.\n");
+			return v;
+		}
+
+		s_qpp = (S_QUEUESPERPAGEPF0 +
+			 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * pf);
+		sp->eq_s_qpp = ((vals[0] >> s_qpp) & M_QUEUESPERPAGEPF0);
+		sp->iq_s_qpp = ((vals[1] >> s_qpp) & M_QUEUESPERPAGEPF0);
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ *	@adapter: the adapter
+ *
+ *	Retrieves global RSS mode and parameters with which we have to live
+ *	and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+	struct rss_params *rss = &adapter->params.rss;
+	struct fw_rss_glb_config_cmd cmd, rpl;
+	int v;
+
+	/*
+	 * Execute an RSS Global Configuration read command to retrieve
+	 * our RSS configuration.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+				      F_FW_CMD_REQUEST |
+				      F_FW_CMD_READ);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v != FW_SUCCESS)
+		return v;
+
+	/*
+	 * Translate the big-endian RSS Global Configuration into our
+	 * cpu-endian format based on the RSS mode.  We also do first level
+	 * filtering at this point to weed out modes which don't support
+	 * VF Drivers ...
+	 */
+	rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE(
+			be32_to_cpu(rpl.u.manual.mode_pkd));
+	switch (rss->mode) {
+	case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+		u32 word = be32_to_cpu(
+				rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+		rss->u.basicvirtual.synmapen =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+		rss->u.basicvirtual.syn4tupenipv6 =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+		rss->u.basicvirtual.syn2tupenipv6 =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+		rss->u.basicvirtual.syn4tupenipv4 =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+		rss->u.basicvirtual.syn2tupenipv4 =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+
+		rss->u.basicvirtual.ofdmapen =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+
+		rss->u.basicvirtual.tnlmapen =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+		rss->u.basicvirtual.tnlalllookup =
+			((word  & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+
+		rss->u.basicvirtual.hashtoeplitz =
+			((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+		/* we need at least Tunnel Map Enable to be set */
+		if (!rss->u.basicvirtual.tnlmapen)
+			return -EINVAL;
+		break;
+	}
+
+	default:
+		/* all unknown/unsupported RSS modes result in an error */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ *	t4vf_get_vfres - retrieve VF resource limits
+ *	@adapter: the adapter
+ *
+ *	Retrieves configured resource limits and capabilities for a virtual
+ *	function.  The results are stored in @adapter->vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+	struct vf_resources *vfres = &adapter->params.vfres;
+	struct fw_pfvf_cmd cmd, rpl;
+	int v;
+	u32 word;
+
+	/*
+	 * Execute PFVF Read command to get VF resource limits; bail out early
+	 * with error on command failure.
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+				    F_FW_CMD_REQUEST |
+				    F_FW_CMD_READ);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+	if (v != FW_SUCCESS)
+		return v;
+
+	/*
+	 * Extract VF resource limits and return success.
+	 */
+	word = be32_to_cpu(rpl.niqflint_niq);
+	vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+	vfres->niq = G_FW_PFVF_CMD_NIQ(word);
+
+	word = be32_to_cpu(rpl.type_to_neq);
+	vfres->neq = G_FW_PFVF_CMD_NEQ(word);
+	vfres->pmask = G_FW_PFVF_CMD_PMASK(word);
+
+	word = be32_to_cpu(rpl.tc_to_nexactf);
+	vfres->tc = G_FW_PFVF_CMD_TC(word);
+	vfres->nvi = G_FW_PFVF_CMD_NVI(word);
+	vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
+
+	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+	vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
+	vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
+	vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
+
+	return 0;
+}
+
+/**
+ *	t4vf_prep_adapter - prepare the adapter for operation
+ *	@adapter: the adapter
+ *
+ *	Waits for the device to become ready, derives the chip type from the
+ *	PCI device ID, and installs default parameter values that can be used
+ *	for debugging before the firmware is reachable.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+	int err;
+
+	/*
+	 * Wait for the device to become ready before proceeding ...
+	 */
+	err = t4vf_wait_dev_ready(adapter);
+	if (err)
+		return err;
+
+	adapter->params.chipid = pci_get_device(adapter->dev) >> 12;
+	if (adapter->params.chipid >= 0xa) {
+		adapter->params.chipid -= (0xa - 0x4);
+		adapter->params.fpga = 1;
+	}
+
+	/*
+	 * Default port and clock for debugging in case we can't reach
+	 * firmware.
+	 */
+	adapter->params.nports = 1;
+	adapter->params.vfres.pmask = 1;
+	adapter->params.vpd.cclk = 50000;
+
+	adapter->chip_params = t4_get_chip_params(chip_id(adapter));
+	if (adapter->chip_params == NULL)
+		return -EINVAL;
+
+	return 0;
+}


Property changes on: trunk/sys/dev/cxgbe/common/t4vf_hw.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
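
One practical consequence of the values t4vf_get_sge_params() collects above:
page_shift and eq_s_qpp together locate a queue's BAR2 doorbell.  Under the
usual layout, each doorbell page holds 2^eq_s_qpp egress queues, so queue qid
lives on page qid >> eq_s_qpp, and its user doorbell sits within that page at
128-byte granularity using the SGE_UDB_* constants from t4_regs_values.h.  A
hedged sketch of the arithmetic (the helper name is illustrative; the exact
layout is ultimately firmware-defined):

    #include <stdint.h>
    #include <stdio.h>

    #define SGE_UDB_SIZE      128	/* from t4_regs_values.h above */
    #define SGE_UDB_KDOORBELL 8

    /*
     * BAR2 byte offset of the Simple Doorbell for egress queue 'qid',
     * given page_shift and eq_s_qpp as filled in by t4vf_get_sge_params().
     */
    static uint64_t eq_doorbell_offset(unsigned int qid,
        unsigned int page_shift, unsigned int eq_s_qpp)
    {
        uint64_t page = qid >> eq_s_qpp;              /* doorbell page index */
        uint64_t slot = qid & ((1u << eq_s_qpp) - 1); /* queue within page */

        return (page << page_shift) + slot * SGE_UDB_SIZE + SGE_UDB_KDOORBELL;
    }

    int main(void)
    {
        /* e.g. 4K pages (page_shift 12), 2 queues per page (eq_s_qpp 1) */
        printf("qid 5 doorbell at 0x%llx\n",
            (unsigned long long)eq_doorbell_offset(5, 12, 1));
        return 0;
    }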
Modified: trunk/sys/dev/cxgbe/firmware/t4fw_cfg.txt
===================================================================
--- trunk/sys/dev/cxgbe/firmware/t4fw_cfg.txt	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/firmware/t4fw_cfg.txt	2018-05-28 00:17:55 UTC (rev 10120)
@@ -10,46 +10,72 @@
 
 [global]
 	rss_glb_config_mode = basicvirtual
-	rss_glb_config_options = tnlmapen, hashtoeplitz, tnlalllkp
+	rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
 
 	sge_timer_value = 1, 5, 10, 50, 100, 200	# usecs
 
-	# TP_SHIFT_CNT
-	reg[0x7dc0] = 0x64f8849
+	# enable TP_OUT_CONFIG.IPIDSPLITMODE
+	reg[0x7d04] = 0x00010000/0x00010000
 
+	# disable TP_PARA_REG3.RxFragEn
+	reg[0x7d6c] = 0x00000000/0x00007000
+
+	reg[0x7dc0] = 0x0e2f8849		# TP_SHIFT_CNT
+
 	filterMode = fragmentation, mpshittype, protocol, vlan, port, fcoe
+	filterMask = protocol, fcoe
 
-	# TP rx and tx payload memory (% of the total EDRAM + DDR3).
-	tp_pmrx = 38
-	tp_pmtx = 60
+	tp_pmrx = 36, 512
 	tp_pmrx_pagesize = 64K
+
+	# TP number of RX channels (0 = auto)
+	tp_nrxch = 0
+
+	tp_pmtx = 46, 512
 	tp_pmtx_pagesize = 64K
 
+	# TP number of TX channels (0 = auto)
+	tp_ntxch = 0
+
+	# TP OFLD MTUs
+	tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+	# cluster, lan, or wan.
+	tp_tcptuning = lan
+
 # PFs 0-3.  These get 8 MSI/8 MSI-X vectors each.  VFs are supported by
 # these 4 PFs only.  Not used here at all.
 [function "0"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "0/*"]
 	nvi = 1
+	rssnvi = 0
 
 [function "1"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "1/*"]
 	nvi = 1
+	rssnvi = 0
 
 [function "2"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "2/*"]
 	nvi = 1
+	rssnvi = 0
 
 [function "3"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "3/*"]
 	nvi = 1
+	rssnvi = 0
 
 # PF4 is the resource-rich PF that the bus/nexus driver attaches to.
 # It gets 32 MSI/128 MSI-X vectors.
@@ -57,18 +83,23 @@
 	wx_caps = all
 	r_caps = all
 	nvi = 32
-	niqflint = 256
-	nethctrl = 128
-	neq = 256
+	rssnvi = 8
+	niqflint = 512
+	nethctrl = 1024
+	neq = 2048
 	nexactf = 328
 	cmask = all
 	pmask = all
 
 	# driver will mask off features it won't use
-	protocol = ofld
+	protocol = ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu
 
 	tp_l2t = 4096
 	tp_ddp = 2
+	tp_ddp_iscsi = 2
+	tp_stag = 2
+	tp_pbl = 5
+	tp_rq = 7
 
 	# TCAM has 8K cells; each region must start at a multiple of 128 cell.
 	# Each entry in these categories takes 4 cells each.  nhash will use the
@@ -83,11 +114,13 @@
 # Not used right now.
 [function "5"]
 	nvi = 1
+	rssnvi = 0
 
 # PF6 is the FCoE Controller PF. It gets 32 MSI/40 MSI-X vectors.
 # Not used right now.
 [function "6"]
 	nvi = 1
+	rssnvi = 0
 
 # The following function, 1023, is not an actual PCIE function but is used to
 # configure and reserve firmware internal resources that come from the global
@@ -96,6 +129,7 @@
 	wx_caps = all
 	r_caps = all
 	nvi = 4
+	rssnvi = 0
 	cmask = all
 	pmask = all
 	nexactf = 8
@@ -137,7 +171,7 @@
 
 [fini]
 	version = 0x1
-	checksum = 0xfdebb6ef
+	checksum = 0x76b034e0
 #
-# $FreeBSD: stable/9/sys/dev/cxgbe/firmware/t4fw_cfg.txt 247434 2013-02-28 00:44:54Z np $
+# $FreeBSD: stable/10/sys/dev/cxgbe/firmware/t4fw_cfg.txt 308313 2016-11-04 20:38:26Z jhb $
 #


Property changes on: trunk/sys/dev/cxgbe/firmware/t4fw_cfg.txt
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt
===================================================================
--- trunk/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,6 +1,6 @@
 # Chelsio T4 Factory Default configuration file.
 #
-# Copyright (C) 2010-2012 Chelsio Communications.  All rights reserved.
+# Copyright (C) 2010-2017 Chelsio Communications.  All rights reserved.
 #
 #   DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES.  MODIFICATION OF
 #   THIS FILE WILL RESULT IN A NON-FUNCTIONAL T4 ADAPTER AND MAY RESULT
@@ -109,28 +109,49 @@
 	reg[0x10a8] = 0x2000/0x2000	# SGE_DOORBELL_CONTROL
 	sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
 
-	reg[0x7dc0] = 0x64f8849		# TP_SHIFT_CNT
+	# enable TP_OUT_CONFIG.IPIDSPLITMODE
+	reg[0x7d04] = 0x00010000/0x00010000
 
-	# Selection of tuples for LE filter lookup, fields (and widths which
-	# must sum to <= 36): { IP Fragment (1), MPS Match Type (3),
-	# IP Protocol (8), [Inner] VLAN (17), Port (3), FCoE (1) }
-	#
+	# disable TP_PARA_REG3.RxFragEn
+	reg[0x7d6c] = 0x00000000/0x00007000
+
+	reg[0x7dc0] = 0x0e2f8849		# TP_SHIFT_CNT
+
+	# TP_VLAN_PRI_MAP to select filter tuples
+	# filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+	#		  protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 4 Data Book
 	filterMode = fragmentation, mpshittype, protocol, vlan, port, fcoe
 
+	# filter tuples enforced in LE active region (equal to or subset of filterMode)
+	filterMask = protocol, fcoe
+
 	# Percentage of dynamic memory (in either the EDRAM or external MEM)
 	# to use for TP RX payload
-	tp_pmrx = 30
+	tp_pmrx = 34
 
 	# TP RX payload page size
 	tp_pmrx_pagesize = 64K
 
+	# TP number of RX channels
+	tp_nrxch = 0		# 0 (auto) = 1
+
 	# Percentage of dynamic memory (in either the EDRAM or external MEM)
 	# to use for TP TX payload
-	tp_pmtx = 50
+	tp_pmtx = 32
 
 	# TP TX payload page size
 	tp_pmtx_pagesize = 64K
 
+	# TP number of TX channels
+	tp_ntxch = 0		# 0 (auto) = equal number of ports
+
+	# TP OFLD MTUs
+	tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+	# ULPRX iSCSI Page Sizes
+	reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
 # Some "definitions" to make the rest of this a bit more readable.  We support
 # 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
 # per function per port ...
@@ -355,11 +376,11 @@
 	nhash = 12288		# number of hash region entries
 	protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu
 	tp_l2t = 3072
-	tp_ddp = 2
+	tp_ddp = 3
 	tp_ddp_iscsi = 2
-	tp_stag = 2
-	tp_pbl = 5
-	tp_rq = 7
+	tp_stag = 3
+	tp_pbl = 10
+	tp_rq = 13
 
 # We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
 # need to have Virtual Interfaces on each of the four ports with up to NCPUS
@@ -377,7 +398,7 @@
 	pmask = all		# access to all four ports ...
 	nserver = 16
 	nhash = 2048
-	tp_l2t = 1024
+	tp_l2t = 1020
 	protocol = iscsi_initiator_fofld
 	tp_ddp_iscsi = 2
 	iscsi_ntask = 2048
@@ -398,8 +419,9 @@
 	cmask = all		# access to all channels
 	pmask = all		# access to all four ports ...
 	nhash = 2048
+	tp_l2t = 4
 	protocol = fcoe_initiator
-	tp_ddp = 2
+	tp_ddp = 1
 	fcoe_nfcf = 16
 	fcoe_nvnp = 32
 	fcoe_nssn = 1024
@@ -481,6 +503,8 @@
 # dwm:		minimum delta between high and low watermark (in units of 100
 #		Bytes)
 #
+#
+
 [port "0"]
 	dcb = ppp, dcbx		# configure for DCB PPP and enable DCBX offload
 	bg_mem = 25
@@ -488,6 +512,9 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
 [port "1"]
 	dcb = ppp, dcbx
@@ -496,6 +523,9 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
 [port "2"]
 	dcb = ppp, dcbx
@@ -504,6 +534,9 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
 [port "3"]
 	dcb = ppp, dcbx
@@ -512,10 +545,13 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
 [fini]
-	version = 0x1425000b
-	checksum = 0x7690f7a5
+	version = 0x01000028
+	checksum = 0x5ceab421
 
 # Total resources used by above allocations:
 #   Virtual Interfaces: 104
@@ -525,5 +561,5 @@
 #   MSI-X Vectors: 736
 #   Virtual Functions: 64
 #
-# $FreeBSD: stable/9/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt 237925 2012-07-01 13:43:30Z np $
+# $FreeBSD: stable/10/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt 319270 2017-05-31 00:16:43Z np $
 #


Property changes on: trunk/sys/dev/cxgbe/firmware/t4fw_cfg_uwire.txt
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/dev/cxgbe/firmware/t4fw_interface.h
===================================================================
--- trunk/sys/dev/cxgbe/firmware/t4fw_interface.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/firmware/t4fw_interface.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2012 Chelsio Communications, Inc.
+ * Copyright (c) 2012-2017 Chelsio Communications, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/firmware/t4fw_interface.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/firmware/t4fw_interface.h 331719 2018-03-29 01:20:58Z np $
  *
  */
 
@@ -73,19 +74,37 @@
 	FW_SCSI_OVER_FLOW_ERR   = 140,	/* */
 	FW_SCSI_DDP_ERR		= 141,	/* DDP error*/
 	FW_SCSI_TASK_ERR	= 142,	/* No SCSI tasks available */
+	FW_SCSI_IO_BLOCK	= 143,	/* IO is going to be blocked due to resource failure */
 };
 
 /******************************************************************************
+ *   M E M O R Y   T Y P E s
+ ******************************/
+
+enum fw_memtype {
+	FW_MEMTYPE_EDC0		= 0x0,
+	FW_MEMTYPE_EDC1		= 0x1,
+	FW_MEMTYPE_EXTMEM	= 0x2,
+	FW_MEMTYPE_FLASH	= 0x4,
+	FW_MEMTYPE_INTERNAL	= 0x5,
+	FW_MEMTYPE_EXTMEM1	= 0x6,
+	FW_MEMTYPE_HMA          = 0x7,
+};
+
+/******************************************************************************
  *   W O R K   R E Q U E S T s
  ********************************/
 
 enum fw_wr_opcodes {
+	FW_FRAG_WR		= 0x1d,
 	FW_FILTER_WR		= 0x02,
 	FW_ULPTX_WR		= 0x04,
 	FW_TP_WR		= 0x05,
 	FW_ETH_TX_PKT_WR	= 0x08,
+	FW_ETH_TX_PKT2_WR	= 0x44,
 	FW_ETH_TX_PKTS_WR	= 0x09,
-	FW_ETH_TX_UO_WR		= 0x1c,
+	FW_ETH_TX_PKTS2_WR	= 0x78,
+	FW_ETH_TX_EO_WR		= 0x1c,
 	FW_EQ_FLUSH_WR		= 0x1b,
 	FW_OFLD_CONNECTION_WR	= 0x2f,
 	FW_FLOWC_WR		= 0x0a,
@@ -99,6 +118,7 @@
 	FW_RI_RECV_WR		= 0x17,
 	FW_RI_BIND_MW_WR	= 0x18,
 	FW_RI_FR_NSMR_WR	= 0x19,
+	FW_RI_FR_NSMR_TPTE_WR	= 0x20,
 	FW_RI_INV_LSTAG_WR	= 0x1a,
 	FW_RI_SEND_IMMEDIATE_WR	= 0x15,
 	FW_RI_ATOMIC_WR		= 0x16,
@@ -118,7 +138,18 @@
 	FW_SCSI_TGT_RSP_WR	= 0x37,
 	FW_POFCOE_TCB_WR	= 0x42,
 	FW_POFCOE_ULPTX_WR	= 0x43,
-	FW_LASTC2E_WR		= 0x70
+	FW_ISCSI_TX_DATA_WR	= 0x45,
+	FW_PTP_TX_PKT_WR        = 0x46,
+	FW_TLSTX_DATA_WR	= 0x68,
+	FW_CRYPTO_LOOKASIDE_WR	= 0x6d,
+	FW_COISCSI_TGT_WR	= 0x70,
+	FW_COISCSI_TGT_CONN_WR	= 0x71,
+	FW_COISCSI_TGT_XMIT_WR	= 0x72,
+	FW_COISCSI_STATS_WR	 = 0x73,
+	FW_ISNS_WR		= 0x75,
+	FW_ISNS_XMIT_WR		= 0x76,
+	FW_FILTER2_WR		= 0x77,
+	FW_LASTC2E_WR		= 0x80
 };
 
 /*
@@ -203,6 +234,24 @@
 #define V_FW_WR_LEN16(x)	((x) << S_FW_WR_LEN16)
 #define G_FW_WR_LEN16(x)	(((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16)
 
+struct fw_frag_wr {
+	__be32 op_to_fragoff16;
+	__be32 flowid_len16;
+	__be64 r4;
+};
+
+#define S_FW_FRAG_WR_EOF	15
+#define M_FW_FRAG_WR_EOF	0x1
+#define V_FW_FRAG_WR_EOF(x)	((x) << S_FW_FRAG_WR_EOF)
+#define G_FW_FRAG_WR_EOF(x)	(((x) >> S_FW_FRAG_WR_EOF) & M_FW_FRAG_WR_EOF)
+#define F_FW_FRAG_WR_EOF	V_FW_FRAG_WR_EOF(1U)
+
+#define S_FW_FRAG_WR_FRAGOFF16		8
+#define M_FW_FRAG_WR_FRAGOFF16		0x7f
+#define V_FW_FRAG_WR_FRAGOFF16(x)	((x) << S_FW_FRAG_WR_FRAGOFF16)
+#define G_FW_FRAG_WR_FRAGOFF16(x)	\
+    (((x) >> S_FW_FRAG_WR_FRAGOFF16) & M_FW_FRAG_WR_FRAGOFF16)
+
 /* valid filter configurations for compressed tuple
  * Encodings: TPL - Compressed TUPLE for filter in addition to 4-tuple
  * FR - FRAGMENT, FC - FCoE, MT - MPS MATCH TYPE, M - MPS MATCH,
@@ -257,6 +306,17 @@
 	FW_FILTER_WR_EINVAL,
 };
 
+enum fw_filter_wr_nat_mode {
+	FW_FILTER_WR_NATMODE_NONE = 0,
+	FW_FILTER_WR_NATMODE_DIP ,
+	FW_FILTER_WR_NATMODE_DIP,
+	FW_FILTER_WR_NATMODE_DIPDPSIP,
+	FW_FILTER_WR_NATMODE_DIPDPSP,
+	FW_FILTER_WR_NATMODE_SIPSP,
+	FW_FILTER_WR_NATMODE_DIPSIPSP,
+	FW_FILTER_WR_NATMODE_FOURTUPLE,
+};
+
 struct fw_filter_wr {
 	__be32 op_pkd;
 	__be32 len16_pkd;
@@ -289,6 +349,51 @@
 	__u8   sma[6];
 };
 
+struct fw_filter2_wr {
+	__be32 op_pkd;
+	__be32 len16_pkd;
+	__be64 r3;
+	__be32 tid_to_iq;
+	__be32 del_filter_to_l2tix;
+	__be16 ethtype;
+	__be16 ethtypem;
+	__u8   frag_to_ovlan_vldm;
+	__u8   smac_sel;
+	__be16 rx_chan_rx_rpl_iq;
+	__be32 maci_to_matchtypem;
+	__u8   ptcl;
+	__u8   ptclm;
+	__u8   ttyp;
+	__u8   ttypm;
+	__be16 ivlan;
+	__be16 ivlanm;
+	__be16 ovlan;
+	__be16 ovlanm;
+	__u8   lip[16];
+	__u8   lipm[16];
+	__u8   fip[16];
+	__u8   fipm[16];
+	__be16 lp;
+	__be16 lpm;
+	__be16 fp;
+	__be16 fpm;
+	__be16 r7;
+	__u8   sma[6];
+	__be16 r8;
+	__u8   filter_type_swapmac;
+	__u8   natmode_to_ulp_type;
+	__be16 newlport;
+	__be16 newfport;
+	__u8   newlip[16];
+	__u8   newfip[16];
+	__be32 natseqcheck;
+	__be32 r9;
+	__be64 r10;
+	__be64 r11;
+	__be64 r12;
+	__be64 r13;
+};
+
 #define S_FW_FILTER_WR_TID	12
 #define M_FW_FILTER_WR_TID	0xfffff
 #define V_FW_FILTER_WR_TID(x)	((x) << S_FW_FILTER_WR_TID)
@@ -473,6 +578,39 @@
 #define G_FW_FILTER_WR_RX_RPL_IQ(x)	\
     (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ)
 
+#define S_FW_FILTER2_WR_FILTER_TYPE	1
+#define M_FW_FILTER2_WR_FILTER_TYPE	0x1
+#define V_FW_FILTER2_WR_FILTER_TYPE(x)	((x) << S_FW_FILTER2_WR_FILTER_TYPE)
+#define G_FW_FILTER2_WR_FILTER_TYPE(x)	\
+    (((x) >> S_FW_FILTER2_WR_FILTER_TYPE) & M_FW_FILTER2_WR_FILTER_TYPE)
+#define F_FW_FILTER2_WR_FILTER_TYPE	V_FW_FILTER2_WR_FILTER_TYPE(1U)
+
+#define S_FW_FILTER2_WR_SWAPMAC		0
+#define M_FW_FILTER2_WR_SWAPMAC		0x1
+#define V_FW_FILTER2_WR_SWAPMAC(x)	((x) << S_FW_FILTER2_WR_SWAPMAC)
+#define G_FW_FILTER2_WR_SWAPMAC(x)	\
+    (((x) >> S_FW_FILTER2_WR_SWAPMAC) & M_FW_FILTER2_WR_SWAPMAC)
+#define F_FW_FILTER2_WR_SWAPMAC		V_FW_FILTER2_WR_SWAPMAC(1U)
+
+#define S_FW_FILTER2_WR_NATMODE		5
+#define M_FW_FILTER2_WR_NATMODE		0x7
+#define V_FW_FILTER2_WR_NATMODE(x)	((x) << S_FW_FILTER2_WR_NATMODE)
+#define G_FW_FILTER2_WR_NATMODE(x)	\
+    (((x) >> S_FW_FILTER2_WR_NATMODE) & M_FW_FILTER2_WR_NATMODE)
+
+#define S_FW_FILTER2_WR_NATFLAGCHECK	4
+#define M_FW_FILTER2_WR_NATFLAGCHECK	0x1
+#define V_FW_FILTER2_WR_NATFLAGCHECK(x)	((x) << S_FW_FILTER2_WR_NATFLAGCHECK)
+#define G_FW_FILTER2_WR_NATFLAGCHECK(x)	\
+    (((x) >> S_FW_FILTER2_WR_NATFLAGCHECK) & M_FW_FILTER2_WR_NATFLAGCHECK)
+#define F_FW_FILTER2_WR_NATFLAGCHECK	V_FW_FILTER2_WR_NATFLAGCHECK(1U)
+
+#define S_FW_FILTER2_WR_ULP_TYPE	0
+#define M_FW_FILTER2_WR_ULP_TYPE	0xf
+#define V_FW_FILTER2_WR_ULP_TYPE(x)	((x) << S_FW_FILTER2_WR_ULP_TYPE)
+#define G_FW_FILTER2_WR_ULP_TYPE(x)	\
+    (((x) >> S_FW_FILTER2_WR_ULP_TYPE) & M_FW_FILTER2_WR_ULP_TYPE)
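
As a worked example, the natmode_to_ulp_type byte of fw_filter2_wr could be
composed as below; the NAT mode names spell out which fields get rewritten
(DIP = destination IP, DP = destination port, SIP/SP = source IP/port).
Hypothetical helper, grounded only in the accessors above:

static inline uint8_t
example_filter2_natbyte(enum fw_filter_wr_nat_mode mode, int flagcheck,
    u_int ulp_type)
{
	/* NATMODE: bits 5-7, NATFLAGCHECK: bit 4, ULP_TYPE: bits 0-3 */
	return (V_FW_FILTER2_WR_NATMODE(mode) |
	    V_FW_FILTER2_WR_NATFLAGCHECK(flagcheck != 0) |
	    V_FW_FILTER2_WR_ULP_TYPE(ulp_type));
}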
+
 #define S_FW_FILTER_WR_MACI	23
 #define M_FW_FILTER_WR_MACI	0x1ff
 #define V_FW_FILTER_WR_MACI(x)	((x) << S_FW_FILTER_WR_MACI)
@@ -547,6 +685,64 @@
 #define G_FW_ETH_TX_PKT_WR_IMMDLEN(x)	\
     (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN)
 
+struct fw_eth_tx_pkt2_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be32 r3;
+	__be32 L4ChkDisable_to_IpHdrLen;
+};
+
+#define S_FW_ETH_TX_PKT2_WR_IMMDLEN	0
+#define M_FW_ETH_TX_PKT2_WR_IMMDLEN	0x1ff
+#define V_FW_ETH_TX_PKT2_WR_IMMDLEN(x)	((x) << S_FW_ETH_TX_PKT2_WR_IMMDLEN)
+#define G_FW_ETH_TX_PKT2_WR_IMMDLEN(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_IMMDLEN) & M_FW_ETH_TX_PKT2_WR_IMMDLEN)
+
+#define S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE	31
+#define M_FW_ETH_TX_PKT2_WR_L4CHKDISABLE	0x1
+#define V_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(x)	\
+    ((x) << S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE)
+#define G_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_L4CHKDISABLE) & \
+     M_FW_ETH_TX_PKT2_WR_L4CHKDISABLE)
+#define F_FW_ETH_TX_PKT2_WR_L4CHKDISABLE	\
+    V_FW_ETH_TX_PKT2_WR_L4CHKDISABLE(1U)
+
+#define S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE	30
+#define M_FW_ETH_TX_PKT2_WR_L3CHKDISABLE	0x1
+#define V_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(x)	\
+    ((x) << S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE)
+#define G_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_L3CHKDISABLE) & \
+     M_FW_ETH_TX_PKT2_WR_L3CHKDISABLE)
+#define F_FW_ETH_TX_PKT2_WR_L3CHKDISABLE	\
+    V_FW_ETH_TX_PKT2_WR_L3CHKDISABLE(1U)
+
+#define S_FW_ETH_TX_PKT2_WR_IVLAN	28
+#define M_FW_ETH_TX_PKT2_WR_IVLAN	0x1
+#define V_FW_ETH_TX_PKT2_WR_IVLAN(x)	((x) << S_FW_ETH_TX_PKT2_WR_IVLAN)
+#define G_FW_ETH_TX_PKT2_WR_IVLAN(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_IVLAN) & M_FW_ETH_TX_PKT2_WR_IVLAN)
+#define F_FW_ETH_TX_PKT2_WR_IVLAN	V_FW_ETH_TX_PKT2_WR_IVLAN(1U)
+
+#define S_FW_ETH_TX_PKT2_WR_IVLANTAG	12
+#define M_FW_ETH_TX_PKT2_WR_IVLANTAG	0xffff
+#define V_FW_ETH_TX_PKT2_WR_IVLANTAG(x)	((x) << S_FW_ETH_TX_PKT2_WR_IVLANTAG)
+#define G_FW_ETH_TX_PKT2_WR_IVLANTAG(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_IVLANTAG) & M_FW_ETH_TX_PKT2_WR_IVLANTAG)
+
+#define S_FW_ETH_TX_PKT2_WR_CHKTYPE	8
+#define M_FW_ETH_TX_PKT2_WR_CHKTYPE	0xf
+#define V_FW_ETH_TX_PKT2_WR_CHKTYPE(x)	((x) << S_FW_ETH_TX_PKT2_WR_CHKTYPE)
+#define G_FW_ETH_TX_PKT2_WR_CHKTYPE(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_CHKTYPE) & M_FW_ETH_TX_PKT2_WR_CHKTYPE)
+
+#define S_FW_ETH_TX_PKT2_WR_IPHDRLEN	0
+#define M_FW_ETH_TX_PKT2_WR_IPHDRLEN	0xff
+#define V_FW_ETH_TX_PKT2_WR_IPHDRLEN(x)	((x) << S_FW_ETH_TX_PKT2_WR_IPHDRLEN)
+#define G_FW_ETH_TX_PKT2_WR_IPHDRLEN(x)	\
+    (((x) >> S_FW_ETH_TX_PKT2_WR_IPHDRLEN) & M_FW_ETH_TX_PKT2_WR_IPHDRLEN)
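
The final word of fw_eth_tx_pkt2_wr packs checksum control, VLAN insertion,
and the IP header length; an illustrative (not authoritative) composition:

static inline uint32_t
example_pkt2_csum_word(int l4_dis, int l3_dis, int insert_vlan,
    uint16_t vlantag, u_int chktype, u_int iphdrlen)
{
	uint32_t w = V_FW_ETH_TX_PKT2_WR_CHKTYPE(chktype) |
	    V_FW_ETH_TX_PKT2_WR_IPHDRLEN(iphdrlen);

	if (l4_dis)
		w |= F_FW_ETH_TX_PKT2_WR_L4CHKDISABLE;
	if (l3_dis)
		w |= F_FW_ETH_TX_PKT2_WR_L3CHKDISABLE;
	if (insert_vlan)
		w |= F_FW_ETH_TX_PKT2_WR_IVLAN |
		    V_FW_ETH_TX_PKT2_WR_IVLANTAG(vlantag);
	return (w);	/* caller stores htobe32(w) */
}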
+
 struct fw_eth_tx_pkts_wr {
 	__be32 op_pkd;
 	__be32 equiq_to_len16;
@@ -556,21 +752,105 @@
 	__u8   type;
 };
 
-struct fw_eth_tx_uo_wr {
+#define S_FW_PTP_TX_PKT_WR_IMMDLEN      0
+#define M_FW_PTP_TX_PKT_WR_IMMDLEN      0x1ff
+#define V_FW_PTP_TX_PKT_WR_IMMDLEN(x)   ((x) << S_FW_PTP_TX_PKT_WR_IMMDLEN)
+#define G_FW_PTP_TX_PKT_WR_IMMDLEN(x)   \
+    (((x) >> S_FW_PTP_TX_PKT_WR_IMMDLEN) & M_FW_PTP_TX_PKT_WR_IMMDLEN)
+
+struct fw_eth_tx_pkt_ptp_wr {
 	__be32 op_immdlen;
 	__be32 equiq_to_len16;
 	__be64 r3;
-	__u8   r4;
-	__u8   ethlen;
-	__be16 iplen;
-	__u8   udplen;
-	__u8   rtplen;
-	__be16 r5;
-	__be16 mss;
-	__be16 schedpktsize;
-	__be32 length;
 };
 
+enum fw_eth_tx_eo_type {
+	FW_ETH_TX_EO_TYPE_UDPSEG,
+	FW_ETH_TX_EO_TYPE_TCPSEG,
+	FW_ETH_TX_EO_TYPE_NVGRESEG,
+	FW_ETH_TX_EO_TYPE_VXLANSEG,
+	FW_ETH_TX_EO_TYPE_GENEVESEG,
+};
+
+struct fw_eth_tx_eo_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be64 r3;
+	union fw_eth_tx_eo {
+		struct fw_eth_tx_eo_udpseg {
+			__u8   type;
+			__u8   ethlen;
+			__be16 iplen;
+			__u8   udplen;
+			__u8   rtplen;
+			__be16 r4;
+			__be16 mss;
+			__be16 schedpktsize;
+			__be32 plen;
+		} udpseg;
+		struct fw_eth_tx_eo_tcpseg {
+			__u8   type;
+			__u8   ethlen;
+			__be16 iplen;
+			__u8   tcplen;
+			__u8   tsclk_tsoff;
+			__be16 r4;
+			__be16 mss;
+			__be16 r5;
+			__be32 plen;
+		} tcpseg;
+		struct fw_eth_tx_eo_nvgreseg {
+			__u8   type;
+			__u8   iphdroffout;
+			__be16 grehdroff;
+			__be16 iphdroffin;
+			__be16 tcphdroffin;
+			__be16 mss;
+			__be16 r4;
+			__be32 plen;
+		} nvgreseg;
+		struct fw_eth_tx_eo_vxlanseg {
+			__u8   type;
+			__u8   iphdroffout;
+			__be16 vxlanhdroff;
+			__be16 iphdroffin;
+			__be16 tcphdroffin;
+			__be16 mss;
+			__be16 r4;
+			__be32 plen;
+		} vxlanseg;
+		struct fw_eth_tx_eo_geneveseg {
+			__u8   type;
+			__u8   iphdroffout;
+			__be16 genevehdroff;
+			__be16 iphdroffin;
+			__be16 tcphdroffin;
+			__be16 mss;
+			__be16 r4;
+			__be32 plen;
+		} geneveseg;
+	} u;
+};
+
+#define S_FW_ETH_TX_EO_WR_IMMDLEN	0
+#define M_FW_ETH_TX_EO_WR_IMMDLEN	0x1ff
+#define V_FW_ETH_TX_EO_WR_IMMDLEN(x)	((x) << S_FW_ETH_TX_EO_WR_IMMDLEN)
+#define G_FW_ETH_TX_EO_WR_IMMDLEN(x)	\
+    (((x) >> S_FW_ETH_TX_EO_WR_IMMDLEN) & M_FW_ETH_TX_EO_WR_IMMDLEN)
+
+#define S_FW_ETH_TX_EO_WR_TSCLK		6
+#define M_FW_ETH_TX_EO_WR_TSCLK		0x3
+#define V_FW_ETH_TX_EO_WR_TSCLK(x)	((x) << S_FW_ETH_TX_EO_WR_TSCLK)
+#define G_FW_ETH_TX_EO_WR_TSCLK(x)	\
+    (((x) >> S_FW_ETH_TX_EO_WR_TSCLK) & M_FW_ETH_TX_EO_WR_TSCLK)
+
+#define S_FW_ETH_TX_EO_WR_TSOFF		0
+#define M_FW_ETH_TX_EO_WR_TSOFF		0x3f
+#define V_FW_ETH_TX_EO_WR_TSOFF(x)	((x) << S_FW_ETH_TX_EO_WR_TSOFF)
+#define G_FW_ETH_TX_EO_WR_TSOFF(x)	\
+    (((x) >> S_FW_ETH_TX_EO_WR_TSOFF) & M_FW_ETH_TX_EO_WR_TSOFF)
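
The new EO work request replaces the old fw_eth_tx_uo_wr and extends it to
TCP, NVGRE, VXLAN, and GENEVE segmentation. Filling the TCP variant might
look like this sketch (made-up helper; timestamp clock and offset left zero):

static inline void
example_eo_tcpseg(struct fw_eth_tx_eo_wr *wr, uint8_t ethlen, uint16_t iplen,
    uint8_t tcplen, uint16_t mss, uint32_t plen)
{
	wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
	wr->u.tcpseg.ethlen = ethlen;
	wr->u.tcpseg.iplen = htobe16(iplen);
	wr->u.tcpseg.tcplen = tcplen;
	wr->u.tcpseg.tsclk_tsoff = V_FW_ETH_TX_EO_WR_TSCLK(0) |
	    V_FW_ETH_TX_EO_WR_TSOFF(0);
	wr->u.tcpseg.mss = htobe16(mss);
	wr->u.tcpseg.plen = htobe32(plen);
}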
+
 struct fw_eq_flush_wr {
 	__u8   opcode;
 	__u8   r1[3];
@@ -703,13 +983,13 @@
 	FW_FLOWC_MNEM_TCPSTATE_TIMEWAIT	= 10, /* not expected */
 };
 
-enum fw_flowc_mnem_uostate {
-	FW_FLOWC_MNEM_UOSTATE_CLOSED	= 0, /* illegal */
-	FW_FLOWC_MNEM_UOSTATE_ESTABLISHED = 1, /* default */
-	FW_FLOWC_MNEM_UOSTATE_CLOSING	= 2, /* graceful close, after sending
+enum fw_flowc_mnem_eostate {
+	FW_FLOWC_MNEM_EOSTATE_CLOSED	= 0, /* illegal */
+	FW_FLOWC_MNEM_EOSTATE_ESTABLISHED = 1, /* default */
+	FW_FLOWC_MNEM_EOSTATE_CLOSING	= 2, /* graceful close, after sending
 					      * outstanding payload
 					      */
-	FW_FLOWC_MNEM_UOSTATE_ABORTING	= 3, /* immediate close, after
+	FW_FLOWC_MNEM_EOSTATE_ABORTING	= 3, /* immediate close, after
 					      * discarding outstanding payload
 					      */
 };
@@ -725,9 +1005,13 @@
 	FW_FLOWC_MNEM_MSS		= 7,
 	FW_FLOWC_MNEM_TXDATAPLEN_MAX	= 8,
 	FW_FLOWC_MNEM_TCPSTATE		= 9,
-	FW_FLOWC_MNEM_UOSTATE		= 10,
+	FW_FLOWC_MNEM_EOSTATE		= 10,
 	FW_FLOWC_MNEM_SCHEDCLASS	= 11,
 	FW_FLOWC_MNEM_DCBPRIO		= 12,
+	FW_FLOWC_MNEM_SND_SCALE		= 13,
+	FW_FLOWC_MNEM_RCV_SCALE		= 14,
+	FW_FLOWC_MNEM_ULP_MODE		= 15,
+	FW_FLOWC_MNEM_MAX		= 16,
 };
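
A FLOWC work request carries (mnemonic, value) pairs in fw_flowc_mnemval
entries (struct below). Supplying the new ULP_MODE mnemonic could look like
this fragment (illustrative; ulp_mode is chosen by the caller):

	struct fw_flowc_mnemval mv;

	mv.mnemonic = FW_FLOWC_MNEM_ULP_MODE;
	mv.val = htobe32(ulp_mode);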
 
 struct fw_flowc_mnemval {
@@ -754,72 +1038,98 @@
 	__be32 op_to_immdlen;
 	__be32 flowid_len16;
 	__be32 plen;
-	__be32 tunnel_to_proxy;
+	__be32 lsodisable_to_flags;
 };
 
-#define S_FW_OFLD_TX_DATA_WR_TUNNEL	19
-#define M_FW_OFLD_TX_DATA_WR_TUNNEL	0x1
-#define V_FW_OFLD_TX_DATA_WR_TUNNEL(x)	((x) << S_FW_OFLD_TX_DATA_WR_TUNNEL)
-#define G_FW_OFLD_TX_DATA_WR_TUNNEL(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_TUNNEL) & M_FW_OFLD_TX_DATA_WR_TUNNEL)
-#define F_FW_OFLD_TX_DATA_WR_TUNNEL	V_FW_OFLD_TX_DATA_WR_TUNNEL(1U)
+#define S_FW_OFLD_TX_DATA_WR_LSODISABLE		31
+#define M_FW_OFLD_TX_DATA_WR_LSODISABLE		0x1
+#define V_FW_OFLD_TX_DATA_WR_LSODISABLE(x)	\
+    ((x) << S_FW_OFLD_TX_DATA_WR_LSODISABLE)
+#define G_FW_OFLD_TX_DATA_WR_LSODISABLE(x)	\
+    (((x) >> S_FW_OFLD_TX_DATA_WR_LSODISABLE) & \
+     M_FW_OFLD_TX_DATA_WR_LSODISABLE)
+#define F_FW_OFLD_TX_DATA_WR_LSODISABLE	V_FW_OFLD_TX_DATA_WR_LSODISABLE(1U)
 
-#define S_FW_OFLD_TX_DATA_WR_SAVE	18
-#define M_FW_OFLD_TX_DATA_WR_SAVE	0x1
-#define V_FW_OFLD_TX_DATA_WR_SAVE(x)	((x) << S_FW_OFLD_TX_DATA_WR_SAVE)
-#define G_FW_OFLD_TX_DATA_WR_SAVE(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_SAVE) & M_FW_OFLD_TX_DATA_WR_SAVE)
-#define F_FW_OFLD_TX_DATA_WR_SAVE	V_FW_OFLD_TX_DATA_WR_SAVE(1U)
+#define S_FW_OFLD_TX_DATA_WR_ALIGNPLD		30
+#define M_FW_OFLD_TX_DATA_WR_ALIGNPLD		0x1
+#define V_FW_OFLD_TX_DATA_WR_ALIGNPLD(x)	\
+    ((x) << S_FW_OFLD_TX_DATA_WR_ALIGNPLD)
+#define G_FW_OFLD_TX_DATA_WR_ALIGNPLD(x)	\
+    (((x) >> S_FW_OFLD_TX_DATA_WR_ALIGNPLD) & M_FW_OFLD_TX_DATA_WR_ALIGNPLD)
+#define F_FW_OFLD_TX_DATA_WR_ALIGNPLD	V_FW_OFLD_TX_DATA_WR_ALIGNPLD(1U)
 
-#define S_FW_OFLD_TX_DATA_WR_FLUSH	17
-#define M_FW_OFLD_TX_DATA_WR_FLUSH	0x1
-#define V_FW_OFLD_TX_DATA_WR_FLUSH(x)	((x) << S_FW_OFLD_TX_DATA_WR_FLUSH)
-#define G_FW_OFLD_TX_DATA_WR_FLUSH(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_FLUSH) & M_FW_OFLD_TX_DATA_WR_FLUSH)
-#define F_FW_OFLD_TX_DATA_WR_FLUSH	V_FW_OFLD_TX_DATA_WR_FLUSH(1U)
+#define S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE	29
+#define M_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE	0x1
+#define V_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(x)	\
+    ((x) << S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)
+#define G_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(x)	\
+    (((x) >> S_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE) & \
+     M_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE)
+#define F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE	\
+    V_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE(1U)
 
-#define S_FW_OFLD_TX_DATA_WR_URGENT	16
-#define M_FW_OFLD_TX_DATA_WR_URGENT	0x1
-#define V_FW_OFLD_TX_DATA_WR_URGENT(x)	((x) << S_FW_OFLD_TX_DATA_WR_URGENT)
-#define G_FW_OFLD_TX_DATA_WR_URGENT(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_URGENT) & M_FW_OFLD_TX_DATA_WR_URGENT)
-#define F_FW_OFLD_TX_DATA_WR_URGENT	V_FW_OFLD_TX_DATA_WR_URGENT(1U)
+#define S_FW_OFLD_TX_DATA_WR_FLAGS	0
+#define M_FW_OFLD_TX_DATA_WR_FLAGS	0xfffffff
+#define V_FW_OFLD_TX_DATA_WR_FLAGS(x)	((x) << S_FW_OFLD_TX_DATA_WR_FLAGS)
+#define G_FW_OFLD_TX_DATA_WR_FLAGS(x)	\
+    (((x) >> S_FW_OFLD_TX_DATA_WR_FLAGS) & M_FW_OFLD_TX_DATA_WR_FLAGS)
 
-#define S_FW_OFLD_TX_DATA_WR_MORE	15
-#define M_FW_OFLD_TX_DATA_WR_MORE	0x1
-#define V_FW_OFLD_TX_DATA_WR_MORE(x)	((x) << S_FW_OFLD_TX_DATA_WR_MORE)
-#define G_FW_OFLD_TX_DATA_WR_MORE(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_MORE) & M_FW_OFLD_TX_DATA_WR_MORE)
-#define F_FW_OFLD_TX_DATA_WR_MORE	V_FW_OFLD_TX_DATA_WR_MORE(1U)
 
-#define S_FW_OFLD_TX_DATA_WR_SHOVE	14
-#define M_FW_OFLD_TX_DATA_WR_SHOVE	0x1
-#define V_FW_OFLD_TX_DATA_WR_SHOVE(x)	((x) << S_FW_OFLD_TX_DATA_WR_SHOVE)
-#define G_FW_OFLD_TX_DATA_WR_SHOVE(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_SHOVE) & M_FW_OFLD_TX_DATA_WR_SHOVE)
-#define F_FW_OFLD_TX_DATA_WR_SHOVE	V_FW_OFLD_TX_DATA_WR_SHOVE(1U)
+/* Use fw_ofld_tx_data_wr structure */
+#define S_FW_ISCSI_TX_DATA_WR_FLAGS_HI		10
+#define M_FW_ISCSI_TX_DATA_WR_FLAGS_HI		0x3fffff
+#define V_FW_ISCSI_TX_DATA_WR_FLAGS_HI(x)	\
+    ((x) << S_FW_ISCSI_TX_DATA_WR_FLAGS_HI)
+#define G_FW_ISCSI_TX_DATA_WR_FLAGS_HI(x)	\
+    (((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_HI) & M_FW_ISCSI_TX_DATA_WR_FLAGS_HI)
 
-#define S_FW_OFLD_TX_DATA_WR_ULPMODE	10
-#define M_FW_OFLD_TX_DATA_WR_ULPMODE	0xf
-#define V_FW_OFLD_TX_DATA_WR_ULPMODE(x)	((x) << S_FW_OFLD_TX_DATA_WR_ULPMODE)
-#define G_FW_OFLD_TX_DATA_WR_ULPMODE(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_ULPMODE) & M_FW_OFLD_TX_DATA_WR_ULPMODE)
+#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO	9
+#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO	0x1
+#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(x)	\
+    ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO)
+#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(x)	\
+    (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO) & \
+     M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO)
+#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO	\
+    V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_ISO(1U)
 
-#define S_FW_OFLD_TX_DATA_WR_ULPSUBMODE		6
-#define M_FW_OFLD_TX_DATA_WR_ULPSUBMODE		0xf
-#define V_FW_OFLD_TX_DATA_WR_ULPSUBMODE(x)	\
-    ((x) << S_FW_OFLD_TX_DATA_WR_ULPSUBMODE)
-#define G_FW_OFLD_TX_DATA_WR_ULPSUBMODE(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_ULPSUBMODE) & \
-     M_FW_OFLD_TX_DATA_WR_ULPSUBMODE)
+#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI	8
+#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI	0x1
+#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(x)	\
+    ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI)
+#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(x)	\
+    (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI) & \
+     M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI)
+#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI	\
+    V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_PI(1U)
 
-#define S_FW_OFLD_TX_DATA_WR_PROXY	5
-#define M_FW_OFLD_TX_DATA_WR_PROXY	0x1
-#define V_FW_OFLD_TX_DATA_WR_PROXY(x)	((x) << S_FW_OFLD_TX_DATA_WR_PROXY)
-#define G_FW_OFLD_TX_DATA_WR_PROXY(x)	\
-    (((x) >> S_FW_OFLD_TX_DATA_WR_PROXY) & M_FW_OFLD_TX_DATA_WR_PROXY)
-#define F_FW_OFLD_TX_DATA_WR_PROXY	V_FW_OFLD_TX_DATA_WR_PROXY(1U)
+#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC		7
+#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC		0x1
+#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(x)	\
+    ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(x)	\
+    (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC) & \
+     M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC	\
+    V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_DCRC(1U)
 
+#define S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC		6
+#define M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC		0x1
+#define V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(x)	\
+    ((x) << S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define G_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(x)	\
+    (((x) >> S_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC) & \
+     M_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define F_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC	\
+    V_FW_ISCSI_TX_DATA_WR_ULPSUBMODE_HCRC(1U)
+
+#define S_FW_ISCSI_TX_DATA_WR_FLAGS_LO		0
+#define M_FW_ISCSI_TX_DATA_WR_FLAGS_LO		0x3f
+#define V_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x)	\
+    ((x) << S_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
+#define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x)	\
+    (((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
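
The former single-bit TUNNEL/SAVE/FLUSH/URGENT/MORE/SHOVE fields are folded
into one 28-bit FLAGS field, with LSODISABLE and ALIGNPLD split out at the
top of the word. A hedged sketch of building lsodisable_to_flags:

static inline uint32_t
example_ofld_tx_word(int lso_disable, int alignpld, uint32_t flags)
{
	uint32_t w = V_FW_OFLD_TX_DATA_WR_FLAGS(flags);

	if (lso_disable)
		w |= F_FW_OFLD_TX_DATA_WR_LSODISABLE;
	if (alignpld)
		w |= F_FW_OFLD_TX_DATA_WR_ALIGNPLD;
	return (w);
}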
+
 struct fw_cmd_wr {
 	__be32 op_dma;
 	__be32 len16_pkd;
@@ -872,7 +1182,8 @@
 	FW_RI_FAST_REGISTER		= 0xd,
 	FW_RI_LOCAL_INV			= 0xe,
 #endif
-	FW_RI_SGE_EC_CR_RETURN		= 0xf
+	FW_RI_SGE_EC_CR_RETURN		= 0xf,
+	FW_RI_WRITE_IMMEDIATE	= FW_RI_RDMA_INIT,
 };
 
 enum fw_ri_wr_flags {
@@ -881,7 +1192,8 @@
 	FW_RI_SOLICITED_EVENT_FLAG	= 0x04,
 	FW_RI_READ_FENCE_FLAG		= 0x08,
 	FW_RI_LOCAL_FENCE_FLAG		= 0x10,
-	FW_RI_RDMA_READ_INVALIDATE	= 0x20
+	FW_RI_RDMA_READ_INVALIDATE	= 0x20,
+	FW_RI_RDMA_WRITE_WITH_IMMEDIATE	= 0x40
 };
 
 enum fw_ri_mpa_attrs {
@@ -1124,7 +1436,7 @@
 		struct fw_ri_scqe {
 		__be32	qpid_n_stat_rxtx_type;
 		__be32	plen;
-		__be32	reserved;
+		__be32	stag;
 		__be32	wrid;
 		} scqe;
 		struct fw_ri_rcqe {
@@ -1133,6 +1445,13 @@
 		__be32	stag;
 		__be32	msn;
 		} rcqe;
+		struct fw_ri_rcqe_imm {
+		__be32	qpid_n_stat_rxtx_type;
+		__be32	plen;
+		__be32	mo;
+		__be32	msn;
+		__u64	imm_data;
+		} imm_data_rcqe;
 	} u;
 };
 
@@ -1171,6 +1490,7 @@
 	FW_RI_RES_TYPE_SQ,
 	FW_RI_RES_TYPE_RQ,
 	FW_RI_RES_TYPE_CQ,
+	FW_RI_RES_TYPE_SRQ,
 };
 
 enum fw_ri_res_op {
@@ -1204,6 +1524,20 @@
 			__be32 r6_lo;
 			__be64 r7;
 		} cq;
+		struct fw_ri_res_srq {
+			__u8   restype;
+			__u8   op;
+			__be16 r3;
+			__be32 eqid;
+			__be32 r4[2];
+			__be32 fetchszm_to_iqid;
+			__be32 dcaen_to_eqsize;
+			__be64 eqaddr;
+			__be32 srqid;
+			__be32 pdid;
+			__be32 hwsrqsize;
+			__be32 hwsrqaddr;
+		} srq;
 	} u;
 };
 
@@ -1216,6 +1550,12 @@
 #endif
 };
 
+#define S_FW_RI_RES_WR_VFN		8
+#define M_FW_RI_RES_WR_VFN		0xff
+#define V_FW_RI_RES_WR_VFN(x)		((x) << S_FW_RI_RES_WR_VFN)
+#define G_FW_RI_RES_WR_VFN(x)		\
+    (((x) >> S_FW_RI_RES_WR_VFN) & M_FW_RI_RES_WR_VFN)
+
 #define S_FW_RI_RES_WR_NRES	0
 #define M_FW_RI_RES_WR_NRES	0xff
 #define V_FW_RI_RES_WR_NRES(x)	((x) << S_FW_RI_RES_WR_NRES)
@@ -1439,7 +1779,7 @@
 	__u16  wrid;
 	__u8   r1[3];
 	__u8   len16;
-	__be64 r2;
+	__u64  immd_data;
 	__be32 plen;
 	__be32 stag_sink;
 	__be64 to_sink;
@@ -1577,6 +1917,18 @@
 #define G_FW_RI_FR_NSMR_WR_DCACPU(x)	\
     (((x) >> S_FW_RI_FR_NSMR_WR_DCACPU) & M_FW_RI_FR_NSMR_WR_DCACPU)
 
+struct fw_ri_fr_nsmr_tpte_wr {
+	__u8   opcode;
+	__u8   flags;
+	__u16  wrid;
+	__u8   r1[3];
+	__u8   len16;
+	__be32 r2;
+	__be32 stag;
+	struct fw_ri_tpte tpte;
+	__be64 pbl[2];
+};
+
 struct fw_ri_inv_lstag_wr {
 	__u8   opcode;
 	__u8   flags;
@@ -1669,6 +2021,10 @@
 	FW_RI_INIT_P2PTYPE_DISABLED		= 0xf,
 };
 
+enum fw_ri_init_rqeqid_srq {
+	FW_RI_INIT_RQEQID_SRQ			= 1 << 31,
+};
+
 struct fw_ri_wr {
 	__be32 op_compl;
 	__be32 flowid_len16;
@@ -1740,15 +2096,17 @@
 
 #define	FW_FOISCSI_NAME_MAX_LEN		224
 #define	FW_FOISCSI_ALIAS_MAX_LEN	224
+#define	FW_FOISCSI_KEY_MAX_LEN		64
+#define	FW_FOISCSI_VAL_MAX_LEN		256
 #define FW_FOISCSI_CHAP_SEC_MAX_LEN	128
 #define	FW_FOISCSI_INIT_NODE_MAX	8
 
 enum fw_chnet_ifconf_wr_subop {
 	FW_CHNET_IFCONF_WR_SUBOP_NONE = 0,
-	
+
 	FW_CHNET_IFCONF_WR_SUBOP_IPV4_SET,
 	FW_CHNET_IFCONF_WR_SUBOP_IPV4_GET,
-	
+
 	FW_CHNET_IFCONF_WR_SUBOP_VLAN_IPV4_SET,
 	FW_CHNET_IFCONF_WR_SUBOP_VLAN_IPV4_GET,
 
@@ -1764,6 +2122,16 @@
 	FW_CHNET_IFCONF_WR_SUBOP_DHCP_SET,
 	FW_CHNET_IFCONF_WR_SUBOP_DHCP_GET,
 
+	FW_CHNET_IFCONF_WR_SUBOP_DHCPV6_SET,
+	FW_CHNET_IFCONF_WR_SUBOP_DHCPV6_GET,
+
+	FW_CHNET_IFCONF_WR_SUBOP_LINKLOCAL_ADDR_SET,
+	FW_CHNET_IFCONF_WR_SUBOP_RA_BASED_ADDR_SET,
+	FW_CHNET_IFCONF_WR_SUBOP_ADDR_EXPIRED,
+
+	FW_CHNET_IFCONF_WR_SUBOP_ICMP_PING4,
+	FW_CHNET_IFCONF_WR_SUBOP_ICMP_PING6,
+
 	FW_CHNET_IFCONF_WR_SUBOP_MAX,
 };
 
@@ -1770,13 +2138,27 @@
 struct fw_chnet_ifconf_wr {
 	__be32 op_compl;
 	__be32 flowid_len16;
-	__be64 cookie;
+	__u64  cookie;
 	__be32 if_flowid;
 	__u8   idx;
 	__u8   subop;
 	__u8   retval;
 	__u8   r2;
-	__be64 r3;
+	union {
+		__be64 r3;
+		struct fw_chnet_ifconf_ping {
+			__be16 ping_time;
+			__u8   ping_rsptype;
+			__u8   ping_param_rspcode_to_fin_bit;
+			__u8   ping_pktsize;
+			__u8   ping_ttl;
+			__be16 ping_seq;
+		} ping;
+		struct fw_chnet_ifconf_mac {
+			__u8   peer_mac[6];
+			__u8   smac_idx;
+		} mac;
+	} u;
 	struct fw_chnet_ifconf_params {
 		__be32 r0;
 		__be16 vlanid;
@@ -1790,22 +2172,36 @@
 				__be64 r1;
 			} ipv4;
 			struct fw_chnet_ifconf_ipv6 {
-				__be64 linklocal_lo;
-				__be64 linklocal_hi;
+				__u8   prefix_len;
+				__u8   r0;
+				__be16 r1;
+				__be32 r2;
+				__be64 addr_hi;
+				__be64 addr_lo;
 				__be64 router_hi;
 				__be64 router_lo;
-				__be64 aconf_hi;
-				__be64 aconf_lo;
-				__be64 linklocal_aconf_hi;
-				__be64 linklocal_aconf_lo;
-				__be64 router_aconf_hi;
-				__be64 router_aconf_lo;
-				__be64 r0;
 			} ipv6;
 		} in_attr;
 	} param;
 };
 
+#define S_FW_CHNET_IFCONF_WR_PING_MACBIT	1
+#define M_FW_CHNET_IFCONF_WR_PING_MACBIT	0x1
+#define V_FW_CHNET_IFCONF_WR_PING_MACBIT(x)	\
+    ((x) << S_FW_CHNET_IFCONF_WR_PING_MACBIT)
+#define G_FW_CHNET_IFCONF_WR_PING_MACBIT(x)	\
+    (((x) >> S_FW_CHNET_IFCONF_WR_PING_MACBIT) & \
+     M_FW_CHNET_IFCONF_WR_PING_MACBIT)
+#define F_FW_CHNET_IFCONF_WR_PING_MACBIT	\
+    V_FW_CHNET_IFCONF_WR_PING_MACBIT(1U)
+
+#define S_FW_CHNET_IFCONF_WR_FIN_BIT	0
+#define M_FW_CHNET_IFCONF_WR_FIN_BIT	0x1
+#define V_FW_CHNET_IFCONF_WR_FIN_BIT(x)	((x) << S_FW_CHNET_IFCONF_WR_FIN_BIT)
+#define G_FW_CHNET_IFCONF_WR_FIN_BIT(x)	\
+    (((x) >> S_FW_CHNET_IFCONF_WR_FIN_BIT) & M_FW_CHNET_IFCONF_WR_FIN_BIT)
+#define F_FW_CHNET_IFCONF_WR_FIN_BIT	V_FW_CHNET_IFCONF_WR_FIN_BIT(1U)
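
Checking a ping reply for completion would use the FIN bit accessor, as in
this sketch (the ping sub-struct holds plain bytes, so no byte swapping):

static inline int
example_ifconf_ping_done(const struct fw_chnet_ifconf_wr *wr)
{
	return (G_FW_CHNET_IFCONF_WR_FIN_BIT(
	    wr->u.ping.ping_param_rspcode_to_fin_bit) != 0);
}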
+
 enum fw_foiscsi_node_type {
 	FW_FOISCSI_NODE_TYPE_INITIATOR = 0,
 	FW_FOISCSI_NODE_TYPE_TARGET,
@@ -1841,6 +2237,13 @@
 	FW_FOISCSI_WR_SUBOP_MOD = 4,
 };
 
+enum fw_coiscsi_stats_wr_subop {
+	FW_COISCSI_WR_SUBOP_TOT = 1,
+	FW_COISCSI_WR_SUBOP_MAX = 2,
+	FW_COISCSI_WR_SUBOP_CUR = 3,
+	FW_COISCSI_WR_SUBOP_CLR = 4,
+};
+
 enum fw_foiscsi_ctrl_state {
 	FW_FOISCSI_CTRL_STATE_FREE = 0,
 	FW_FOISCSI_CTRL_STATE_ONLINE = 1,
@@ -2130,7 +2533,7 @@
 
 struct fw_foiscsi_node_wr {
 	__be32 op_to_immdlen;
-	__be32 flowid_len16;
+	__be32 no_sess_recv_to_len16;
 	__u64  cookie;
 	__u8   subop;
 	__u8   status;
@@ -2143,6 +2546,7 @@
 	__be16 r3;
 	__u8   iqn[224];
 	__u8   alias[224];
+	__be32 isid_tval_to_isid_cval;
 };
 
 #define S_FW_FOISCSI_NODE_WR_IMMDLEN	0
@@ -2151,8 +2555,46 @@
 #define G_FW_FOISCSI_NODE_WR_IMMDLEN(x)	\
     (((x) >> S_FW_FOISCSI_NODE_WR_IMMDLEN) & M_FW_FOISCSI_NODE_WR_IMMDLEN)
 
+#define S_FW_FOISCSI_NODE_WR_NO_SESS_RECV	28
+#define M_FW_FOISCSI_NODE_WR_NO_SESS_RECV	0x1
+#define V_FW_FOISCSI_NODE_WR_NO_SESS_RECV(x)	\
+    ((x) << S_FW_FOISCSI_NODE_WR_NO_SESS_RECV)
+#define G_FW_FOISCSI_NODE_WR_NO_SESS_RECV(x)	\
+    (((x) >> S_FW_FOISCSI_NODE_WR_NO_SESS_RECV) & \
+     M_FW_FOISCSI_NODE_WR_NO_SESS_RECV)
+#define F_FW_FOISCSI_NODE_WR_NO_SESS_RECV	\
+    V_FW_FOISCSI_NODE_WR_NO_SESS_RECV(1U)
+
+#define S_FW_FOISCSI_NODE_WR_ISID_TVAL		30
+#define M_FW_FOISCSI_NODE_WR_ISID_TVAL		0x3
+#define V_FW_FOISCSI_NODE_WR_ISID_TVAL(x)	\
+    ((x) << S_FW_FOISCSI_NODE_WR_ISID_TVAL)
+#define G_FW_FOISCSI_NODE_WR_ISID_TVAL(x)	\
+    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_TVAL) & M_FW_FOISCSI_NODE_WR_ISID_TVAL)
+
+#define S_FW_FOISCSI_NODE_WR_ISID_AVAL		24
+#define M_FW_FOISCSI_NODE_WR_ISID_AVAL		0x3f
+#define V_FW_FOISCSI_NODE_WR_ISID_AVAL(x)	\
+    ((x) << S_FW_FOISCSI_NODE_WR_ISID_AVAL)
+#define G_FW_FOISCSI_NODE_WR_ISID_AVAL(x)	\
+    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_AVAL) & M_FW_FOISCSI_NODE_WR_ISID_AVAL)
+
+#define S_FW_FOISCSI_NODE_WR_ISID_BVAL		8
+#define M_FW_FOISCSI_NODE_WR_ISID_BVAL		0xffff
+#define V_FW_FOISCSI_NODE_WR_ISID_BVAL(x)	\
+    ((x) << S_FW_FOISCSI_NODE_WR_ISID_BVAL)
+#define G_FW_FOISCSI_NODE_WR_ISID_BVAL(x)	\
+    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_BVAL) & M_FW_FOISCSI_NODE_WR_ISID_BVAL)
+
+#define S_FW_FOISCSI_NODE_WR_ISID_CVAL		0
+#define M_FW_FOISCSI_NODE_WR_ISID_CVAL		0xff
+#define V_FW_FOISCSI_NODE_WR_ISID_CVAL(x)	\
+    ((x) << S_FW_FOISCSI_NODE_WR_ISID_CVAL)
+#define G_FW_FOISCSI_NODE_WR_ISID_CVAL(x)	\
+    (((x) >> S_FW_FOISCSI_NODE_WR_ISID_CVAL) & M_FW_FOISCSI_NODE_WR_ISID_CVAL)
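
The four ISID accessors map onto the T/A/B/C components of an iSCSI ISID
(2, 6, 16, and 8 bits respectively, matching the RFC 3720 layout). A
hypothetical packing helper:

static inline uint32_t
example_isid_word(u_int t, u_int a, u_int b, u_int c)
{
	return (V_FW_FOISCSI_NODE_WR_ISID_TVAL(t) |
	    V_FW_FOISCSI_NODE_WR_ISID_AVAL(a) |
	    V_FW_FOISCSI_NODE_WR_ISID_BVAL(b) |
	    V_FW_FOISCSI_NODE_WR_ISID_CVAL(c));
}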
+
 struct fw_foiscsi_ctrl_wr {
-	__be32 op_compl;
+	__be32 op_to_no_fin;
 	__be32 flowid_len16;
 	__u64  cookie;
 	__u8   subop;
@@ -2173,7 +2615,7 @@
 		__be32 r1;
 	} sess_attr;
 	struct fw_foiscsi_conn_attr {
-		__be32 hdigest_to_ddp_pgsz;
+		__be32 hdigest_to_tcp_ws_en;
 		__be32 max_rcv_dsl;
 		__be32 ping_tmo;
 		__be16 dst_port;
@@ -2194,6 +2636,13 @@
 	__u8   tgt_name[FW_FOISCSI_NAME_MAX_LEN];
 };
 
+#define S_FW_FOISCSI_CTRL_WR_NO_FIN	0
+#define M_FW_FOISCSI_CTRL_WR_NO_FIN	0x1
+#define V_FW_FOISCSI_CTRL_WR_NO_FIN(x)	((x) << S_FW_FOISCSI_CTRL_WR_NO_FIN)
+#define G_FW_FOISCSI_CTRL_WR_NO_FIN(x)	\
+    (((x) >> S_FW_FOISCSI_CTRL_WR_NO_FIN) & M_FW_FOISCSI_CTRL_WR_NO_FIN)
+#define F_FW_FOISCSI_CTRL_WR_NO_FIN	V_FW_FOISCSI_CTRL_WR_NO_FIN(1U)
+
 #define S_FW_FOISCSI_CTRL_WR_SESS_TYPE		30
 #define M_FW_FOISCSI_CTRL_WR_SESS_TYPE		0x3
 #define V_FW_FOISCSI_CTRL_WR_SESS_TYPE(x)	\
@@ -2282,21 +2731,342 @@
 #define G_FW_FOISCSI_CTRL_WR_DDP_PGSZ(x)	\
     (((x) >> S_FW_FOISCSI_CTRL_WR_DDP_PGSZ) & M_FW_FOISCSI_CTRL_WR_DDP_PGSZ)
 
+#define S_FW_FOISCSI_CTRL_WR_IPV6	20
+#define M_FW_FOISCSI_CTRL_WR_IPV6	0x1
+#define V_FW_FOISCSI_CTRL_WR_IPV6(x)	((x) << S_FW_FOISCSI_CTRL_WR_IPV6)
+#define G_FW_FOISCSI_CTRL_WR_IPV6(x)	\
+    (((x) >> S_FW_FOISCSI_CTRL_WR_IPV6) & M_FW_FOISCSI_CTRL_WR_IPV6)
+#define F_FW_FOISCSI_CTRL_WR_IPV6	V_FW_FOISCSI_CTRL_WR_IPV6(1U)
+
+#define S_FW_FOISCSI_CTRL_WR_DDP_PGIDX		16
+#define M_FW_FOISCSI_CTRL_WR_DDP_PGIDX		0xf
+#define V_FW_FOISCSI_CTRL_WR_DDP_PGIDX(x)	\
+    ((x) << S_FW_FOISCSI_CTRL_WR_DDP_PGIDX)
+#define G_FW_FOISCSI_CTRL_WR_DDP_PGIDX(x)	\
+    (((x) >> S_FW_FOISCSI_CTRL_WR_DDP_PGIDX) & M_FW_FOISCSI_CTRL_WR_DDP_PGIDX)
+
+#define S_FW_FOISCSI_CTRL_WR_TCP_WS	12
+#define M_FW_FOISCSI_CTRL_WR_TCP_WS	0xf
+#define V_FW_FOISCSI_CTRL_WR_TCP_WS(x)	((x) << S_FW_FOISCSI_CTRL_WR_TCP_WS)
+#define G_FW_FOISCSI_CTRL_WR_TCP_WS(x)	\
+    (((x) >> S_FW_FOISCSI_CTRL_WR_TCP_WS) & M_FW_FOISCSI_CTRL_WR_TCP_WS)
+
+#define S_FW_FOISCSI_CTRL_WR_TCP_WS_EN		11
+#define M_FW_FOISCSI_CTRL_WR_TCP_WS_EN		0x1
+#define V_FW_FOISCSI_CTRL_WR_TCP_WS_EN(x)	\
+    ((x) << S_FW_FOISCSI_CTRL_WR_TCP_WS_EN)
+#define G_FW_FOISCSI_CTRL_WR_TCP_WS_EN(x)	\
+    (((x) >> S_FW_FOISCSI_CTRL_WR_TCP_WS_EN) & M_FW_FOISCSI_CTRL_WR_TCP_WS_EN)
+#define F_FW_FOISCSI_CTRL_WR_TCP_WS_EN	V_FW_FOISCSI_CTRL_WR_TCP_WS_EN(1U)
+
 struct fw_foiscsi_chap_wr {
-	__be32 op_compl;
+	__be32 op_to_kv_flag;
 	__be32 flowid_len16;
 	__u64  cookie;
 	__u8   status;
-	__u8   id_len;
-	__u8   sec_len;
+	union fw_foiscsi_len {
+		struct fw_foiscsi_chap_lens {
+			__u8   id_len;
+			__u8   sec_len;
+		} chapl;
+		struct fw_foiscsi_vend_kv_lens {
+			__u8   key_len;
+			__u8   val_len;
+		} vend_kvl;
+	} lenu;
 	__u8   node_type;
 	__be16 node_id;
 	__u8   r3[2];
-	__u8   chap_id[FW_FOISCSI_NAME_MAX_LEN];
-	__u8   chap_sec[FW_FOISCSI_CHAP_SEC_MAX_LEN];
+	union fw_foiscsi_chap_vend {
+		struct fw_foiscsi_chap {
+			__u8   chap_id[224];
+			__u8   chap_sec[128];
+		} chap;
+		struct fw_foiscsi_vend_kv {
+			__u8   vend_key[64];
+			__u8   vend_val[256];
+		} vend_kv;
+	} u;
 };
 
+#define S_FW_FOISCSI_CHAP_WR_KV_FLAG	20
+#define M_FW_FOISCSI_CHAP_WR_KV_FLAG	0x1
+#define V_FW_FOISCSI_CHAP_WR_KV_FLAG(x)	((x) << S_FW_FOISCSI_CHAP_WR_KV_FLAG)
+#define G_FW_FOISCSI_CHAP_WR_KV_FLAG(x)	\
+    (((x) >> S_FW_FOISCSI_CHAP_WR_KV_FLAG) & M_FW_FOISCSI_CHAP_WR_KV_FLAG)
+#define F_FW_FOISCSI_CHAP_WR_KV_FLAG	V_FW_FOISCSI_CHAP_WR_KV_FLAG(1U)
+
 /******************************************************************************
+ *  C O I S C S I  W O R K R E Q U E S T S
+ ********************************************/
+
+enum fw_chnet_addr_type {
+	FW_CHNET_ADDD_TYPE_NONE = 0,
+	FW_CHNET_ADDR_TYPE_IPV4,
+	FW_CHNET_ADDR_TYPE_IPV6,
+};
+
+enum fw_msg_wr_type {
+	FW_MSG_WR_TYPE_RPL = 0,
+	FW_MSG_WR_TYPE_ERR,
+	FW_MSG_WR_TYPE_PLD,
+};
+
+struct fw_coiscsi_tgt_wr {
+	__be32 op_compl;
+	__be32 flowid_len16;
+	__u64  cookie;
+	__u8   subop;
+	__u8   status;
+	__be16 r4;
+	__be32 flags;
+	struct fw_coiscsi_tgt_conn_attr {
+		__be32 in_tid;
+		__be16 in_port;
+		__u8   in_type;
+		__u8   r6;
+		union fw_coiscsi_tgt_conn_attr_addr {
+			struct fw_coiscsi_tgt_conn_attr_in_addr {
+				__be32 addr;
+				__be32 r7;
+				__be32 r8[2];
+			} in_addr;
+			struct fw_coiscsi_tgt_conn_attr_in_addr6 {
+				__be64 addr[2];
+			} in_addr6;
+		} u;
+	} conn_attr;
+};
+
+struct fw_coiscsi_tgt_conn_wr {
+	__be32 op_compl;
+	__be32 flowid_len16;
+	__u64  cookie;
+	__u8   subop;
+	__u8   status;
+	__be16 iq_id;
+	__be32 in_stid;
+	__be32 io_id;
+	__be32 flags_fin;
+	union {
+		struct fw_coiscsi_tgt_conn_tcp {
+			__be16 in_sport;
+			__be16 in_dport;
+			__u8   wscale_wsen;
+			__u8   r4[3];
+			union fw_coiscsi_tgt_conn_tcp_addr {
+				struct fw_coiscsi_tgt_conn_tcp_in_addr {
+					__be32 saddr;
+					__be32 daddr;
+				} in_addr;
+				struct fw_coiscsi_tgt_conn_tcp_in_addr6 {
+					__be64 saddr[2];
+					__be64 daddr[2];
+				} in_addr6;
+			} u;
+		} conn_tcp;
+		struct fw_coiscsi_tgt_conn_stats {
+			__be32 ddp_reqs;
+			__be32 ddp_cmpls;
+			__be16 ddp_aborts;
+			__be16 ddp_bps;
+		} stats;
+	} u;
+	struct fw_coiscsi_tgt_conn_iscsi {
+		__be32 hdigest_to_ddp_pgsz;
+		__be32 tgt_id;
+		__be16 max_r2t;
+		__be16 r5;
+		__be32 max_burst;
+		__be32 max_rdsl;
+		__be32 max_tdsl;
+		__be32 cur_sn;
+		__be32 r6;
+	} conn_iscsi;
+};
+
+#define S_FW_COISCSI_TGT_CONN_WR_FIN	0
+#define M_FW_COISCSI_TGT_CONN_WR_FIN	0x1
+#define V_FW_COISCSI_TGT_CONN_WR_FIN(x)	((x) << S_FW_COISCSI_TGT_CONN_WR_FIN)
+#define G_FW_COISCSI_TGT_CONN_WR_FIN(x)	\
+    (((x) >> S_FW_COISCSI_TGT_CONN_WR_FIN) & M_FW_COISCSI_TGT_CONN_WR_FIN)
+#define F_FW_COISCSI_TGT_CONN_WR_FIN	V_FW_COISCSI_TGT_CONN_WR_FIN(1U)
+
+#define S_FW_COISCSI_TGT_CONN_WR_WSCALE		1
+#define M_FW_COISCSI_TGT_CONN_WR_WSCALE		0xf
+#define V_FW_COISCSI_TGT_CONN_WR_WSCALE(x)	\
+    ((x) << S_FW_COISCSI_TGT_CONN_WR_WSCALE)
+#define G_FW_COISCSI_TGT_CONN_WR_WSCALE(x)	\
+    (((x) >> S_FW_COISCSI_TGT_CONN_WR_WSCALE) & \
+     M_FW_COISCSI_TGT_CONN_WR_WSCALE)
+
+#define S_FW_COISCSI_TGT_CONN_WR_WSEN		0
+#define M_FW_COISCSI_TGT_CONN_WR_WSEN		0x1
+#define V_FW_COISCSI_TGT_CONN_WR_WSEN(x)	\
+    ((x) << S_FW_COISCSI_TGT_CONN_WR_WSEN)
+#define G_FW_COISCSI_TGT_CONN_WR_WSEN(x)	\
+    (((x) >> S_FW_COISCSI_TGT_CONN_WR_WSEN) & M_FW_COISCSI_TGT_CONN_WR_WSEN)
+#define F_FW_COISCSI_TGT_CONN_WR_WSEN	V_FW_COISCSI_TGT_CONN_WR_WSEN(1U)
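
wscale_wsen is a single byte carrying both the TCP window scale and its
enable bit; an illustrative helper:

static inline uint8_t
example_conn_wscale(u_int wscale, int ws_enabled)
{
	return (V_FW_COISCSI_TGT_CONN_WR_WSCALE(wscale) |
	    V_FW_COISCSI_TGT_CONN_WR_WSEN(ws_enabled != 0));
}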
+
+struct fw_coiscsi_tgt_xmit_wr {
+	__be32 op_to_immdlen;
+	union {
+		struct cmpl_stat {
+			__be32 cmpl_status_pkd;
+		} cs;
+		struct flowid_len {
+			__be32 flowid_len16;
+		} fllen;
+	} u;
+	__u64  cookie;
+	__be16 iq_id;
+	__be16 r3;
+	__be32 pz_off;
+	__be32 t_xfer_len;
+	union {
+		__be32 tag;
+		__be32 datasn;
+		__be32 ddp_status;
+	} cu;
+};
+
+#define S_FW_COISCSI_TGT_XMIT_WR_DDGST		23
+#define M_FW_COISCSI_TGT_XMIT_WR_DDGST		0x1
+#define V_FW_COISCSI_TGT_XMIT_WR_DDGST(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_DDGST)
+#define G_FW_COISCSI_TGT_XMIT_WR_DDGST(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_DDGST) & M_FW_COISCSI_TGT_XMIT_WR_DDGST)
+#define F_FW_COISCSI_TGT_XMIT_WR_DDGST	V_FW_COISCSI_TGT_XMIT_WR_DDGST(1U)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_HDGST		22
+#define M_FW_COISCSI_TGT_XMIT_WR_HDGST		0x1
+#define V_FW_COISCSI_TGT_XMIT_WR_HDGST(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_HDGST)
+#define G_FW_COISCSI_TGT_XMIT_WR_HDGST(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_HDGST) & M_FW_COISCSI_TGT_XMIT_WR_HDGST)
+#define F_FW_COISCSI_TGT_XMIT_WR_HDGST	V_FW_COISCSI_TGT_XMIT_WR_HDGST(1U)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_DDP	20
+#define M_FW_COISCSI_TGT_XMIT_WR_DDP	0x1
+#define V_FW_COISCSI_TGT_XMIT_WR_DDP(x)	((x) << S_FW_COISCSI_TGT_XMIT_WR_DDP)
+#define G_FW_COISCSI_TGT_XMIT_WR_DDP(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_DDP) & M_FW_COISCSI_TGT_XMIT_WR_DDP)
+#define F_FW_COISCSI_TGT_XMIT_WR_DDP	V_FW_COISCSI_TGT_XMIT_WR_DDP(1U)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_ABORT		19
+#define M_FW_COISCSI_TGT_XMIT_WR_ABORT		0x1
+#define V_FW_COISCSI_TGT_XMIT_WR_ABORT(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_ABORT)
+#define G_FW_COISCSI_TGT_XMIT_WR_ABORT(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_ABORT) & M_FW_COISCSI_TGT_XMIT_WR_ABORT)
+#define F_FW_COISCSI_TGT_XMIT_WR_ABORT	V_FW_COISCSI_TGT_XMIT_WR_ABORT(1U)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_FINAL		18
+#define M_FW_COISCSI_TGT_XMIT_WR_FINAL		0x1
+#define V_FW_COISCSI_TGT_XMIT_WR_FINAL(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_FINAL)
+#define G_FW_COISCSI_TGT_XMIT_WR_FINAL(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_FINAL) & M_FW_COISCSI_TGT_XMIT_WR_FINAL)
+#define F_FW_COISCSI_TGT_XMIT_WR_FINAL	V_FW_COISCSI_TGT_XMIT_WR_FINAL(1U)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_PADLEN		16
+#define M_FW_COISCSI_TGT_XMIT_WR_PADLEN		0x3
+#define V_FW_COISCSI_TGT_XMIT_WR_PADLEN(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_PADLEN)
+#define G_FW_COISCSI_TGT_XMIT_WR_PADLEN(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_PADLEN) & \
+     M_FW_COISCSI_TGT_XMIT_WR_PADLEN)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN	15
+#define M_FW_COISCSI_TGT_XMIT_WR_INCSTATSN	0x1
+#define V_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN)
+#define G_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_INCSTATSN) & \
+     M_FW_COISCSI_TGT_XMIT_WR_INCSTATSN)
+#define F_FW_COISCSI_TGT_XMIT_WR_INCSTATSN	\
+    V_FW_COISCSI_TGT_XMIT_WR_INCSTATSN(1U)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN	0
+#define M_FW_COISCSI_TGT_XMIT_WR_IMMDLEN	0xff
+#define V_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN)
+#define G_FW_COISCSI_TGT_XMIT_WR_IMMDLEN(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_IMMDLEN) & \
+     M_FW_COISCSI_TGT_XMIT_WR_IMMDLEN)
+
+#define S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS	8
+#define M_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS	0xff
+#define V_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS(x)	\
+    ((x) << S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS)
+#define G_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS(x)	\
+    (((x) >> S_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS) & \
+     M_FW_COISCSI_TGT_XMIT_WR_CMPL_STATUS)
+
+struct fw_coiscsi_stats_wr {
+	__be32 op_compl;
+	__be32 flowid_len16;
+	__u64  cookie;
+	__u8   subop;
+	__u8   status;
+	union fw_coiscsi_stats {
+		struct fw_coiscsi_resource {
+			__u8   num_ipv4_tgt;
+			__u8   num_ipv6_tgt;
+			__be16 num_l2t_entries;
+			__be16 num_csocks;
+			__be16 num_tasks;
+			__be16 num_ppods_zone[11];
+			__be32 num_bufll64;
+			__u8   r2[12];
+		} rsrc;
+	} u;
+};
+
+struct fw_isns_wr {
+	__be32 op_compl;
+	__be32 flowid_len16;
+	__u64  cookie;
+	__u8   subop;
+	__u8   status;
+	__be16 iq_id;
+	__be16 vlanid;
+	__be16 r4;
+	struct fw_tcp_conn_attr {
+		__be32 in_tid;
+		__be16 in_port;
+		__u8   in_type;
+		__u8   r6;
+		union fw_tcp_conn_attr_addr {
+			struct fw_tcp_conn_attr_in_addr {
+				__be32 addr;
+				__be32 r7;
+				__be32 r8[2];
+			} in_addr;
+			struct fw_tcp_conn_attr_in_addr6 {
+				__be64 addr[2];
+			} in_addr6;
+		} u;
+	} conn_attr;
+};
+
+struct fw_isns_xmit_wr {
+	__be32 op_to_immdlen;
+	__be32 flowid_len16;
+	__u64  cookie;
+	__be16 iq_id;
+	__be16 r4;
+	__be32 xfer_len;
+	__be64 r5;
+};
+
+#define S_FW_ISNS_XMIT_WR_IMMDLEN	0
+#define M_FW_ISNS_XMIT_WR_IMMDLEN	0xff
+#define V_FW_ISNS_XMIT_WR_IMMDLEN(x)	((x) << S_FW_ISNS_XMIT_WR_IMMDLEN)
+#define G_FW_ISNS_XMIT_WR_IMMDLEN(x)	\
+    (((x) >> S_FW_ISNS_XMIT_WR_IMMDLEN) & M_FW_ISNS_XMIT_WR_IMMDLEN)
+
+/******************************************************************************
  *  F O F C O E   W O R K R E Q U E S T s
  *******************************************/
 
@@ -2799,16 +3569,16 @@
 struct fw_pofcoe_tcb_wr {
 	__be32 op_compl;
 	__be32 equiq_to_len16;
-	__be64 cookie;
+	__be32 r4;
+	__be32 xfer_len;
 	__be32 tid_to_port;
 	__be16 x_id;
 	__be16 vlan_id;
+	__be64 cookie;
 	__be32 s_id;
 	__be32 d_id;
 	__be32 tag;
-	__be32 xfer_len;
-	__be32 r4;
-	__be16 r5;
+	__be16 r6;
 	__be16 iqid;
 };
 
@@ -2844,7 +3614,372 @@
 	__u64  cookie;
 };
 
+/*******************************************************************
+ *  T10 DIF related definition
+ *******************************************************************/
+struct fw_tx_pi_header {
+	__be16 op_to_inline;
+	__u8   pi_interval_tag_type;
+	__u8   num_pi;
+	__be32 pi_start4_pi_end4;
+	__u8   tag_gen_enabled_pkd;
+	__u8   num_pi_dsg;
+	__be16 app_tag;
+	__be32 ref_tag;
+};
 
+#define S_FW_TX_PI_HEADER_OP	8
+#define M_FW_TX_PI_HEADER_OP	0xff
+#define V_FW_TX_PI_HEADER_OP(x)	((x) << S_FW_TX_PI_HEADER_OP)
+#define G_FW_TX_PI_HEADER_OP(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_OP) & M_FW_TX_PI_HEADER_OP)
+
+#define S_FW_TX_PI_HEADER_ULPTXMORE	7
+#define M_FW_TX_PI_HEADER_ULPTXMORE	0x1
+#define V_FW_TX_PI_HEADER_ULPTXMORE(x)	((x) << S_FW_TX_PI_HEADER_ULPTXMORE)
+#define G_FW_TX_PI_HEADER_ULPTXMORE(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_ULPTXMORE) & M_FW_TX_PI_HEADER_ULPTXMORE)
+#define F_FW_TX_PI_HEADER_ULPTXMORE	V_FW_TX_PI_HEADER_ULPTXMORE(1U)
+
+#define S_FW_TX_PI_HEADER_PI_CONTROL	4
+#define M_FW_TX_PI_HEADER_PI_CONTROL	0x7
+#define V_FW_TX_PI_HEADER_PI_CONTROL(x)	((x) << S_FW_TX_PI_HEADER_PI_CONTROL)
+#define G_FW_TX_PI_HEADER_PI_CONTROL(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_PI_CONTROL) & M_FW_TX_PI_HEADER_PI_CONTROL)
+
+#define S_FW_TX_PI_HEADER_GUARD_TYPE	2
+#define M_FW_TX_PI_HEADER_GUARD_TYPE	0x1
+#define V_FW_TX_PI_HEADER_GUARD_TYPE(x)	((x) << S_FW_TX_PI_HEADER_GUARD_TYPE)
+#define G_FW_TX_PI_HEADER_GUARD_TYPE(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_GUARD_TYPE) & M_FW_TX_PI_HEADER_GUARD_TYPE)
+#define F_FW_TX_PI_HEADER_GUARD_TYPE	V_FW_TX_PI_HEADER_GUARD_TYPE(1U)
+
+#define S_FW_TX_PI_HEADER_VALIDATE	1
+#define M_FW_TX_PI_HEADER_VALIDATE	0x1
+#define V_FW_TX_PI_HEADER_VALIDATE(x)	((x) << S_FW_TX_PI_HEADER_VALIDATE)
+#define G_FW_TX_PI_HEADER_VALIDATE(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_VALIDATE) & M_FW_TX_PI_HEADER_VALIDATE)
+#define F_FW_TX_PI_HEADER_VALIDATE	V_FW_TX_PI_HEADER_VALIDATE(1U)
+
+#define S_FW_TX_PI_HEADER_INLINE	0
+#define M_FW_TX_PI_HEADER_INLINE	0x1
+#define V_FW_TX_PI_HEADER_INLINE(x)	((x) << S_FW_TX_PI_HEADER_INLINE)
+#define G_FW_TX_PI_HEADER_INLINE(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_INLINE) & M_FW_TX_PI_HEADER_INLINE)
+#define F_FW_TX_PI_HEADER_INLINE	V_FW_TX_PI_HEADER_INLINE(1U)
+
+#define S_FW_TX_PI_HEADER_PI_INTERVAL		7
+#define M_FW_TX_PI_HEADER_PI_INTERVAL		0x1
+#define V_FW_TX_PI_HEADER_PI_INTERVAL(x)	\
+    ((x) << S_FW_TX_PI_HEADER_PI_INTERVAL)
+#define G_FW_TX_PI_HEADER_PI_INTERVAL(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_PI_INTERVAL) & M_FW_TX_PI_HEADER_PI_INTERVAL)
+#define F_FW_TX_PI_HEADER_PI_INTERVAL	V_FW_TX_PI_HEADER_PI_INTERVAL(1U)
+
+#define S_FW_TX_PI_HEADER_TAG_TYPE	5
+#define M_FW_TX_PI_HEADER_TAG_TYPE	0x3
+#define V_FW_TX_PI_HEADER_TAG_TYPE(x)	((x) << S_FW_TX_PI_HEADER_TAG_TYPE)
+#define G_FW_TX_PI_HEADER_TAG_TYPE(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_TAG_TYPE) & M_FW_TX_PI_HEADER_TAG_TYPE)
+
+#define S_FW_TX_PI_HEADER_PI_START4	22
+#define M_FW_TX_PI_HEADER_PI_START4	0x3ff
+#define V_FW_TX_PI_HEADER_PI_START4(x)	((x) << S_FW_TX_PI_HEADER_PI_START4)
+#define G_FW_TX_PI_HEADER_PI_START4(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_PI_START4) & M_FW_TX_PI_HEADER_PI_START4)
+
+#define S_FW_TX_PI_HEADER_PI_END4	0
+#define M_FW_TX_PI_HEADER_PI_END4	0x3fffff
+#define V_FW_TX_PI_HEADER_PI_END4(x)	((x) << S_FW_TX_PI_HEADER_PI_END4)
+#define G_FW_TX_PI_HEADER_PI_END4(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_PI_END4) & M_FW_TX_PI_HEADER_PI_END4)
+
+#define S_FW_TX_PI_HEADER_TAG_GEN_ENABLED	6
+#define M_FW_TX_PI_HEADER_TAG_GEN_ENABLED	0x3
+#define V_FW_TX_PI_HEADER_TAG_GEN_ENABLED(x)	\
+    ((x) << S_FW_TX_PI_HEADER_TAG_GEN_ENABLED)
+#define G_FW_TX_PI_HEADER_TAG_GEN_ENABLED(x)	\
+    (((x) >> S_FW_TX_PI_HEADER_TAG_GEN_ENABLED) & \
+     M_FW_TX_PI_HEADER_TAG_GEN_ENABLED)
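
PI_START4 and PI_END4 bound the region covered by protection information;
the "4" suffix suggests offsets in 4-byte units, though that is an inference
from the names rather than something this patch states. Illustrative packing:

static inline uint32_t
example_pi_region(u_int start4, u_int end4)
{
	return (V_FW_TX_PI_HEADER_PI_START4(start4) |
	    V_FW_TX_PI_HEADER_PI_END4(end4));
}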
+
+enum fw_pi_error_type {
+	FW_PI_ERROR_GUARD_CHECK_FAILED = 0,
+};
+
+struct fw_pi_error {
+	__be32 err_type_pkd;
+	__be32 flowid_len16;
+	__be16 r2;
+	__be16 app_tag;
+	__be32 ref_tag;
+	__be32 pisc[4];
+};
+
+#define S_FW_PI_ERROR_ERR_TYPE		24
+#define M_FW_PI_ERROR_ERR_TYPE		0xff
+#define V_FW_PI_ERROR_ERR_TYPE(x)	((x) << S_FW_PI_ERROR_ERR_TYPE)
+#define G_FW_PI_ERROR_ERR_TYPE(x)	\
+    (((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE)
+
+struct fw_tlstx_data_wr {
+        __be32 op_to_immdlen;
+        __be32 flowid_len16;
+        __be32 plen;
+        __be32 lsodisable_to_flags;
+        __be32 r5;
+        __be32 ctxloc_to_exp;
+        __be16 mfs;
+        __be16 adjustedplen_pkd;
+        __be16 expinplenmax_pkd;
+        __u8   pdusinplenmax_pkd;
+        __u8   r10;
+};
+
+#define S_FW_TLSTX_DATA_WR_OPCODE       24
+#define M_FW_TLSTX_DATA_WR_OPCODE       0xff
+#define V_FW_TLSTX_DATA_WR_OPCODE(x)    ((x) << S_FW_TLSTX_DATA_WR_OPCODE)
+#define G_FW_TLSTX_DATA_WR_OPCODE(x)    \
+    (((x) >> S_FW_TLSTX_DATA_WR_OPCODE) & M_FW_TLSTX_DATA_WR_OPCODE)
+
+#define S_FW_TLSTX_DATA_WR_COMPL        21
+#define M_FW_TLSTX_DATA_WR_COMPL        0x1
+#define V_FW_TLSTX_DATA_WR_COMPL(x)     ((x) << S_FW_TLSTX_DATA_WR_COMPL)
+#define G_FW_TLSTX_DATA_WR_COMPL(x)     \
+    (((x) >> S_FW_TLSTX_DATA_WR_COMPL) & M_FW_TLSTX_DATA_WR_COMPL)
+#define F_FW_TLSTX_DATA_WR_COMPL        V_FW_TLSTX_DATA_WR_COMPL(1U)
+
+#define S_FW_TLSTX_DATA_WR_IMMDLEN      0
+#define M_FW_TLSTX_DATA_WR_IMMDLEN      0xff
+#define V_FW_TLSTX_DATA_WR_IMMDLEN(x)   ((x) << S_FW_TLSTX_DATA_WR_IMMDLEN)
+#define G_FW_TLSTX_DATA_WR_IMMDLEN(x)   \
+    (((x) >> S_FW_TLSTX_DATA_WR_IMMDLEN) & M_FW_TLSTX_DATA_WR_IMMDLEN)
+
+#define S_FW_TLSTX_DATA_WR_FLOWID       8
+#define M_FW_TLSTX_DATA_WR_FLOWID       0xfffff
+#define V_FW_TLSTX_DATA_WR_FLOWID(x)    ((x) << S_FW_TLSTX_DATA_WR_FLOWID)
+#define G_FW_TLSTX_DATA_WR_FLOWID(x)    \
+    (((x) >> S_FW_TLSTX_DATA_WR_FLOWID) & M_FW_TLSTX_DATA_WR_FLOWID)
+
+#define S_FW_TLSTX_DATA_WR_LEN16        0
+#define M_FW_TLSTX_DATA_WR_LEN16        0xff
+#define V_FW_TLSTX_DATA_WR_LEN16(x)     ((x) << S_FW_TLSTX_DATA_WR_LEN16)
+#define G_FW_TLSTX_DATA_WR_LEN16(x)     \
+    (((x) >> S_FW_TLSTX_DATA_WR_LEN16) & M_FW_TLSTX_DATA_WR_LEN16)
+
+#define S_FW_TLSTX_DATA_WR_LSODISABLE   31
+#define M_FW_TLSTX_DATA_WR_LSODISABLE   0x1
+#define V_FW_TLSTX_DATA_WR_LSODISABLE(x) \
+    ((x) << S_FW_TLSTX_DATA_WR_LSODISABLE)
+#define G_FW_TLSTX_DATA_WR_LSODISABLE(x) \
+    (((x) >> S_FW_TLSTX_DATA_WR_LSODISABLE) & M_FW_TLSTX_DATA_WR_LSODISABLE)
+#define F_FW_TLSTX_DATA_WR_LSODISABLE   V_FW_TLSTX_DATA_WR_LSODISABLE(1U)
+
+#define S_FW_TLSTX_DATA_WR_ALIGNPLD     30
+#define M_FW_TLSTX_DATA_WR_ALIGNPLD     0x1
+#define V_FW_TLSTX_DATA_WR_ALIGNPLD(x)  ((x) << S_FW_TLSTX_DATA_WR_ALIGNPLD)
+#define G_FW_TLSTX_DATA_WR_ALIGNPLD(x)  \
+    (((x) >> S_FW_TLSTX_DATA_WR_ALIGNPLD) & M_FW_TLSTX_DATA_WR_ALIGNPLD)
+#define F_FW_TLSTX_DATA_WR_ALIGNPLD     V_FW_TLSTX_DATA_WR_ALIGNPLD(1U)
+
+#define S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE 29
+#define M_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE 0x1
+#define V_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(x) \
+    ((x) << S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE)
+#define G_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(x) \
+    (((x) >> S_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE) & \
+     M_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE)
+#define F_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE V_FW_TLSTX_DATA_WR_ALIGNPLDSHOVE(1U)
+
+#define S_FW_TLSTX_DATA_WR_FLAGS        0
+#define M_FW_TLSTX_DATA_WR_FLAGS        0xfffffff
+#define V_FW_TLSTX_DATA_WR_FLAGS(x)     ((x) << S_FW_TLSTX_DATA_WR_FLAGS)
+#define G_FW_TLSTX_DATA_WR_FLAGS(x)     \
+    (((x) >> S_FW_TLSTX_DATA_WR_FLAGS) & M_FW_TLSTX_DATA_WR_FLAGS)
+
+#define S_FW_TLSTX_DATA_WR_CTXLOC       30
+#define M_FW_TLSTX_DATA_WR_CTXLOC       0x3
+#define V_FW_TLSTX_DATA_WR_CTXLOC(x)    ((x) << S_FW_TLSTX_DATA_WR_CTXLOC)
+#define G_FW_TLSTX_DATA_WR_CTXLOC(x)    \
+    (((x) >> S_FW_TLSTX_DATA_WR_CTXLOC) & M_FW_TLSTX_DATA_WR_CTXLOC)
+
+#define S_FW_TLSTX_DATA_WR_IVDSGL       29
+#define M_FW_TLSTX_DATA_WR_IVDSGL       0x1
+#define V_FW_TLSTX_DATA_WR_IVDSGL(x)    ((x) << S_FW_TLSTX_DATA_WR_IVDSGL)
+#define G_FW_TLSTX_DATA_WR_IVDSGL(x)    \
+    (((x) >> S_FW_TLSTX_DATA_WR_IVDSGL) & M_FW_TLSTX_DATA_WR_IVDSGL)
+#define F_FW_TLSTX_DATA_WR_IVDSGL       V_FW_TLSTX_DATA_WR_IVDSGL(1U)
+
+#define S_FW_TLSTX_DATA_WR_KEYSIZE      24
+#define M_FW_TLSTX_DATA_WR_KEYSIZE      0x1f
+#define V_FW_TLSTX_DATA_WR_KEYSIZE(x)   ((x) << S_FW_TLSTX_DATA_WR_KEYSIZE)
+#define G_FW_TLSTX_DATA_WR_KEYSIZE(x)   \
+    (((x) >> S_FW_TLSTX_DATA_WR_KEYSIZE) & M_FW_TLSTX_DATA_WR_KEYSIZE)
+
+#define S_FW_TLSTX_DATA_WR_NUMIVS       14
+#define M_FW_TLSTX_DATA_WR_NUMIVS       0xff
+#define V_FW_TLSTX_DATA_WR_NUMIVS(x)    ((x) << S_FW_TLSTX_DATA_WR_NUMIVS)
+#define G_FW_TLSTX_DATA_WR_NUMIVS(x)    \
+    (((x) >> S_FW_TLSTX_DATA_WR_NUMIVS) & M_FW_TLSTX_DATA_WR_NUMIVS)
+
+#define S_FW_TLSTX_DATA_WR_EXP          0
+#define M_FW_TLSTX_DATA_WR_EXP          0x3fff
+#define V_FW_TLSTX_DATA_WR_EXP(x)       ((x) << S_FW_TLSTX_DATA_WR_EXP)
+#define G_FW_TLSTX_DATA_WR_EXP(x)       \
+    (((x) >> S_FW_TLSTX_DATA_WR_EXP) & M_FW_TLSTX_DATA_WR_EXP)
+
+#define S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN 1
+#define M_FW_TLSTX_DATA_WR_ADJUSTEDPLEN 0x7fff
+#define V_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(x) \
+    ((x) << S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN)
+#define G_FW_TLSTX_DATA_WR_ADJUSTEDPLEN(x) \
+    (((x) >> S_FW_TLSTX_DATA_WR_ADJUSTEDPLEN) & \
+     M_FW_TLSTX_DATA_WR_ADJUSTEDPLEN)
+
+#define S_FW_TLSTX_DATA_WR_EXPINPLENMAX 4
+#define M_FW_TLSTX_DATA_WR_EXPINPLENMAX 0xfff
+#define V_FW_TLSTX_DATA_WR_EXPINPLENMAX(x) \
+    ((x) << S_FW_TLSTX_DATA_WR_EXPINPLENMAX)
+#define G_FW_TLSTX_DATA_WR_EXPINPLENMAX(x) \
+    (((x) >> S_FW_TLSTX_DATA_WR_EXPINPLENMAX) & \
+     M_FW_TLSTX_DATA_WR_EXPINPLENMAX)
+
+#define S_FW_TLSTX_DATA_WR_PDUSINPLENMAX 2
+#define M_FW_TLSTX_DATA_WR_PDUSINPLENMAX 0x3f
+#define V_FW_TLSTX_DATA_WR_PDUSINPLENMAX(x) \
+    ((x) << S_FW_TLSTX_DATA_WR_PDUSINPLENMAX)
+#define G_FW_TLSTX_DATA_WR_PDUSINPLENMAX(x) \
+    (((x) >> S_FW_TLSTX_DATA_WR_PDUSINPLENMAX) & \
+     M_FW_TLSTX_DATA_WR_PDUSINPLENMAX)
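
Composing the TLS TX context word (ctxloc_to_exp) from the sub-fields above
could look like this sketch (key location, key size, IV count, and record
expansion length; values illustrative):

static inline uint32_t
example_tlstx_ctx_word(u_int ctxloc, u_int keysize, u_int numivs, u_int exp)
{
	return (V_FW_TLSTX_DATA_WR_CTXLOC(ctxloc) |
	    V_FW_TLSTX_DATA_WR_KEYSIZE(keysize) |
	    V_FW_TLSTX_DATA_WR_NUMIVS(numivs) |
	    V_FW_TLSTX_DATA_WR_EXP(exp));
}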
+
+struct fw_crypto_lookaside_wr {
+        __be32 op_to_cctx_size;
+        __be32 len16_pkd;
+        __be32 session_id;
+        __be32 rx_chid_to_rx_q_id;
+        __be32 key_addr;
+        __be32 pld_size_hash_size;
+        __be64 cookie;
+};
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_OPCODE 24
+#define M_FW_CRYPTO_LOOKASIDE_WR_OPCODE 0xff
+#define V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_OPCODE)
+#define G_FW_CRYPTO_LOOKASIDE_WR_OPCODE(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_OPCODE) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_OPCODE)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_COMPL 23
+#define M_FW_CRYPTO_LOOKASIDE_WR_COMPL 0x1
+#define V_FW_CRYPTO_LOOKASIDE_WR_COMPL(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_COMPL)
+#define G_FW_CRYPTO_LOOKASIDE_WR_COMPL(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_COMPL) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_COMPL)
+#define F_FW_CRYPTO_LOOKASIDE_WR_COMPL V_FW_CRYPTO_LOOKASIDE_WR_COMPL(1U)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN 15
+#define M_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN 0xff
+#define V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN)
+#define G_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC 5
+#define M_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC 0x3
+#define V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC)
+#define G_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE 0
+#define M_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE 0x1f
+#define V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE)
+#define G_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_LEN16 0
+#define M_FW_CRYPTO_LOOKASIDE_WR_LEN16 0xff
+#define V_FW_CRYPTO_LOOKASIDE_WR_LEN16(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_LEN16)
+#define G_FW_CRYPTO_LOOKASIDE_WR_LEN16(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_LEN16) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_LEN16)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID 29
+#define M_FW_CRYPTO_LOOKASIDE_WR_RX_CHID 0x3
+#define V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID)
+#define G_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_RX_CHID) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_RX_CHID)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_LCB  27
+#define M_FW_CRYPTO_LOOKASIDE_WR_LCB  0x3
+#define V_FW_CRYPTO_LOOKASIDE_WR_LCB(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_LCB)
+#define G_FW_CRYPTO_LOOKASIDE_WR_LCB(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_LCB) & M_FW_CRYPTO_LOOKASIDE_WR_LCB)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_PHASH 25
+#define M_FW_CRYPTO_LOOKASIDE_WR_PHASH 0x3
+#define V_FW_CRYPTO_LOOKASIDE_WR_PHASH(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_PHASH)
+#define G_FW_CRYPTO_LOOKASIDE_WR_PHASH(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_PHASH) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_PHASH)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_IV   23
+#define M_FW_CRYPTO_LOOKASIDE_WR_IV   0x3
+#define V_FW_CRYPTO_LOOKASIDE_WR_IV(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_IV)
+#define G_FW_CRYPTO_LOOKASIDE_WR_IV(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_IV) & M_FW_CRYPTO_LOOKASIDE_WR_IV)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_FQIDX  15
+#define M_FW_CRYPTO_LOOKASIDE_WR_FQIDX  0xff
+#define V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(x) \
+	((x) << S_FW_CRYPTO_LOOKASIDE_WR_FQIDX)
+#define G_FW_CRYPTO_LOOKASIDE_WR_FQIDX(x) \
+	(((x) >> S_FW_CRYPTO_LOOKASIDE_WR_FQIDX) &\
+	  M_FW_CRYPTO_LOOKASIDE_WR_FQIDX)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_TX_CH 10
+#define M_FW_CRYPTO_LOOKASIDE_WR_TX_CH 0x3
+#define V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_TX_CH)
+#define G_FW_CRYPTO_LOOKASIDE_WR_TX_CH(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_TX_CH) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_TX_CH)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID 0
+#define M_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID 0x3ff
+#define V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID)
+#define G_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE 24
+#define M_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE 0xff
+#define V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE)
+#define G_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE)
+
+#define S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE 17
+#define M_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE 0x7f
+#define V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(x) \
+    ((x) << S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE)
+#define G_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(x) \
+    (((x) >> S_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE) & \
+     M_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE)
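
Tying the crypto-lookaside pieces together, op_to_cctx_size could be built
as below (sketch only; FW_CRYPTO_LOOKASIDE_WR is the 0x6d opcode added to
the work-request enum earlier in this patch):

static inline uint32_t
example_crypto_op_word(int compl, u_int imm_len, u_int cctx_loc,
    u_int cctx_size)
{
	return (V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
	    V_FW_CRYPTO_LOOKASIDE_WR_COMPL(compl != 0) |
	    V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(cctx_loc) |
	    V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size));
}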
+
 /******************************************************************************
  *  C O M M A N D s
  *********************/
@@ -2909,6 +4044,10 @@
 	FW_FCOE_SPARAMS_CMD            = 0x35,
 	FW_FCOE_STATS_CMD              = 0x37,
 	FW_FCOE_FCF_CMD                = 0x38,
+	FW_DCB_IEEE_CMD		       = 0x3a,
+	FW_DIAG_CMD		       = 0x3d,
+	FW_PTP_CMD                     = 0x3e,
+	FW_HMA_CMD                     = 0x3f,
 	FW_LASTC2E_CMD                 = 0x40,
 	FW_ERROR_CMD                   = 0x80,
 	FW_DEBUG_CMD                   = 0x81,
@@ -2996,6 +4135,10 @@
 	FW_LDST_ADDRSPC_FUNC_I2C  = 0x002A, /* legacy */
 	FW_LDST_ADDRSPC_LE	  = 0x0030,
 	FW_LDST_ADDRSPC_I2C       = 0x0038,
+	FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040,
+	FW_LDST_ADDRSPC_PCIE_DBG  = 0x0041,
+	FW_LDST_ADDRSPC_PCIE_PHY  = 0x0042,
+	FW_LDST_ADDRSPC_CIM_Q	  = 0x0048,
 };
 
 /*
@@ -3047,15 +4190,34 @@
 			__be16 vctl;
 			__be16 rval;
 		} mdio;
-		struct fw_ldst_mps {
-			__be16 fid_ctl;
-			__be16 rplcpf_pkd;
-			__be32 rplc127_96;
-			__be32 rplc95_64;
-			__be32 rplc63_32;
-			__be32 rplc31_0;
-			__be32 atrb;
-			__be16 vlan[16];
+		struct fw_ldst_cim_rq {
+			__u8   req_first64[8];
+			__u8   req_second64[8];
+			__u8   resp_first64[8];
+			__u8   resp_second64[8];
+			__be32 r3[2];
+		} cim_rq;
+		union fw_ldst_mps {
+			struct fw_ldst_mps_rplc {
+				__be16 fid_idx;
+				__be16 rplcpf_pkd;
+				__be32 rplc255_224;
+				__be32 rplc223_192;
+				__be32 rplc191_160;
+				__be32 rplc159_128;
+				__be32 rplc127_96;
+				__be32 rplc95_64;
+				__be32 rplc63_32;
+				__be32 rplc31_0;
+			} rplc;
+			struct fw_ldst_mps_atrb {
+				__be16 fid_mpsid;
+				__be16 r2[3];
+				__be32 r3[2];
+				__be32 r4;
+				__be32 atrb;
+				__be16 vlan[16];
+			} atrb;
 		} mps;
 		struct fw_ldst_func {
 			__u8   access_ctl;
@@ -3105,18 +4267,18 @@
 #define G_FW_LDST_CMD_ADDRSPACE(x)	\
     (((x) >> S_FW_LDST_CMD_ADDRSPACE) & M_FW_LDST_CMD_ADDRSPACE)
 
-#define S_FW_LDST_CMD_CYCLES	16
-#define M_FW_LDST_CMD_CYCLES	0xffff
-#define V_FW_LDST_CMD_CYCLES(x)	((x) << S_FW_LDST_CMD_CYCLES)
-#define G_FW_LDST_CMD_CYCLES(x)	\
+#define S_FW_LDST_CMD_CYCLES		16
+#define M_FW_LDST_CMD_CYCLES		0xffff
+#define V_FW_LDST_CMD_CYCLES(x)		((x) << S_FW_LDST_CMD_CYCLES)
+#define G_FW_LDST_CMD_CYCLES(x)		\
     (((x) >> S_FW_LDST_CMD_CYCLES) & M_FW_LDST_CMD_CYCLES)
 
-#define S_FW_LDST_CMD_MSG	31
-#define M_FW_LDST_CMD_MSG	0x1
-#define V_FW_LDST_CMD_MSG(x)	((x) << S_FW_LDST_CMD_MSG)
-#define G_FW_LDST_CMD_MSG(x)	\
+#define S_FW_LDST_CMD_MSG		31
+#define M_FW_LDST_CMD_MSG		0x1
+#define V_FW_LDST_CMD_MSG(x)		((x) << S_FW_LDST_CMD_MSG)
+#define G_FW_LDST_CMD_MSG(x)		\
     (((x) >> S_FW_LDST_CMD_MSG) & M_FW_LDST_CMD_MSG)
-#define F_FW_LDST_CMD_MSG	V_FW_LDST_CMD_MSG(1U)
+#define F_FW_LDST_CMD_MSG		V_FW_LDST_CMD_MSG(1U)
 
 #define S_FW_LDST_CMD_CTXTFLUSH		30
 #define M_FW_LDST_CMD_CTXTFLUSH		0x1
@@ -3123,67 +4285,76 @@
 #define V_FW_LDST_CMD_CTXTFLUSH(x)	((x) << S_FW_LDST_CMD_CTXTFLUSH)
 #define G_FW_LDST_CMD_CTXTFLUSH(x)	\
     (((x) >> S_FW_LDST_CMD_CTXTFLUSH) & M_FW_LDST_CMD_CTXTFLUSH)
-#define F_FW_LDST_CMD_CTXTFLUSH	V_FW_LDST_CMD_CTXTFLUSH(1U)
+#define F_FW_LDST_CMD_CTXTFLUSH		V_FW_LDST_CMD_CTXTFLUSH(1U)
 
-#define S_FW_LDST_CMD_PADDR	8
-#define M_FW_LDST_CMD_PADDR	0x1f
-#define V_FW_LDST_CMD_PADDR(x)	((x) << S_FW_LDST_CMD_PADDR)
-#define G_FW_LDST_CMD_PADDR(x)	\
+#define S_FW_LDST_CMD_PADDR		8
+#define M_FW_LDST_CMD_PADDR		0x1f
+#define V_FW_LDST_CMD_PADDR(x)		((x) << S_FW_LDST_CMD_PADDR)
+#define G_FW_LDST_CMD_PADDR(x)		\
     (((x) >> S_FW_LDST_CMD_PADDR) & M_FW_LDST_CMD_PADDR)
 
-#define S_FW_LDST_CMD_MMD	0
-#define M_FW_LDST_CMD_MMD	0x1f
-#define V_FW_LDST_CMD_MMD(x)	((x) << S_FW_LDST_CMD_MMD)
-#define G_FW_LDST_CMD_MMD(x)	\
+#define S_FW_LDST_CMD_MMD		0
+#define M_FW_LDST_CMD_MMD		0x1f
+#define V_FW_LDST_CMD_MMD(x)		((x) << S_FW_LDST_CMD_MMD)
+#define G_FW_LDST_CMD_MMD(x)		\
     (((x) >> S_FW_LDST_CMD_MMD) & M_FW_LDST_CMD_MMD)
 
-#define S_FW_LDST_CMD_FID	15
-#define M_FW_LDST_CMD_FID	0x1
-#define V_FW_LDST_CMD_FID(x)	((x) << S_FW_LDST_CMD_FID)
-#define G_FW_LDST_CMD_FID(x)	\
+#define S_FW_LDST_CMD_FID		15
+#define M_FW_LDST_CMD_FID		0x1
+#define V_FW_LDST_CMD_FID(x)		((x) << S_FW_LDST_CMD_FID)
+#define G_FW_LDST_CMD_FID(x)		\
     (((x) >> S_FW_LDST_CMD_FID) & M_FW_LDST_CMD_FID)
-#define F_FW_LDST_CMD_FID	V_FW_LDST_CMD_FID(1U)
+#define F_FW_LDST_CMD_FID		V_FW_LDST_CMD_FID(1U)
 
-#define S_FW_LDST_CMD_CTL	0
-#define M_FW_LDST_CMD_CTL	0x7fff
-#define V_FW_LDST_CMD_CTL(x)	((x) << S_FW_LDST_CMD_CTL)
-#define G_FW_LDST_CMD_CTL(x)	\
-    (((x) >> S_FW_LDST_CMD_CTL) & M_FW_LDST_CMD_CTL)
+#define S_FW_LDST_CMD_IDX		0
+#define M_FW_LDST_CMD_IDX		0x7fff
+#define V_FW_LDST_CMD_IDX(x)		((x) << S_FW_LDST_CMD_IDX)
+#define G_FW_LDST_CMD_IDX(x)		\
+    (((x) >> S_FW_LDST_CMD_IDX) & M_FW_LDST_CMD_IDX)
 
-#define S_FW_LDST_CMD_RPLCPF	0
-#define M_FW_LDST_CMD_RPLCPF	0xff
-#define V_FW_LDST_CMD_RPLCPF(x)	((x) << S_FW_LDST_CMD_RPLCPF)
-#define G_FW_LDST_CMD_RPLCPF(x)	\
+#define S_FW_LDST_CMD_RPLCPF		0
+#define M_FW_LDST_CMD_RPLCPF		0xff
+#define V_FW_LDST_CMD_RPLCPF(x)		((x) << S_FW_LDST_CMD_RPLCPF)
+#define G_FW_LDST_CMD_RPLCPF(x)		\
     (((x) >> S_FW_LDST_CMD_RPLCPF) & M_FW_LDST_CMD_RPLCPF)
 
-#define S_FW_LDST_CMD_CTRL	7
-#define M_FW_LDST_CMD_CTRL	0x1
-#define V_FW_LDST_CMD_CTRL(x)	((x) << S_FW_LDST_CMD_CTRL)
-#define G_FW_LDST_CMD_CTRL(x)	\
+#define S_FW_LDST_CMD_MPSID		0
+#define M_FW_LDST_CMD_MPSID		0x7fff
+#define V_FW_LDST_CMD_MPSID(x)		((x) << S_FW_LDST_CMD_MPSID)
+#define G_FW_LDST_CMD_MPSID(x)		\
+    (((x) >> S_FW_LDST_CMD_MPSID) & M_FW_LDST_CMD_MPSID)
+
+#define S_FW_LDST_CMD_CTRL		7
+#define M_FW_LDST_CMD_CTRL		0x1
+#define V_FW_LDST_CMD_CTRL(x)		((x) << S_FW_LDST_CMD_CTRL)
+#define G_FW_LDST_CMD_CTRL(x)		\
     (((x) >> S_FW_LDST_CMD_CTRL) & M_FW_LDST_CMD_CTRL)
-#define F_FW_LDST_CMD_CTRL	V_FW_LDST_CMD_CTRL(1U)
+#define F_FW_LDST_CMD_CTRL		V_FW_LDST_CMD_CTRL(1U)
 
-#define S_FW_LDST_CMD_LC	4
-#define M_FW_LDST_CMD_LC	0x1
-#define V_FW_LDST_CMD_LC(x)	((x) << S_FW_LDST_CMD_LC)
-#define G_FW_LDST_CMD_LC(x)	(((x) >> S_FW_LDST_CMD_LC) & M_FW_LDST_CMD_LC)
-#define F_FW_LDST_CMD_LC	V_FW_LDST_CMD_LC(1U)
+#define S_FW_LDST_CMD_LC		4
+#define M_FW_LDST_CMD_LC		0x1
+#define V_FW_LDST_CMD_LC(x)		((x) << S_FW_LDST_CMD_LC)
+#define G_FW_LDST_CMD_LC(x)		\
+    (((x) >> S_FW_LDST_CMD_LC) & M_FW_LDST_CMD_LC)
+#define F_FW_LDST_CMD_LC		V_FW_LDST_CMD_LC(1U)
 
-#define S_FW_LDST_CMD_AI	3
-#define M_FW_LDST_CMD_AI	0x1
-#define V_FW_LDST_CMD_AI(x)	((x) << S_FW_LDST_CMD_AI)
-#define G_FW_LDST_CMD_AI(x)	(((x) >> S_FW_LDST_CMD_AI) & M_FW_LDST_CMD_AI)
-#define F_FW_LDST_CMD_AI	V_FW_LDST_CMD_AI(1U)
+#define S_FW_LDST_CMD_AI		3
+#define M_FW_LDST_CMD_AI		0x1
+#define V_FW_LDST_CMD_AI(x)		((x) << S_FW_LDST_CMD_AI)
+#define G_FW_LDST_CMD_AI(x)		\
+    (((x) >> S_FW_LDST_CMD_AI) & M_FW_LDST_CMD_AI)
+#define F_FW_LDST_CMD_AI		V_FW_LDST_CMD_AI(1U)
 
-#define S_FW_LDST_CMD_FN	0
-#define M_FW_LDST_CMD_FN	0x7
-#define V_FW_LDST_CMD_FN(x)	((x) << S_FW_LDST_CMD_FN)
-#define G_FW_LDST_CMD_FN(x)	(((x) >> S_FW_LDST_CMD_FN) & M_FW_LDST_CMD_FN)
+#define S_FW_LDST_CMD_FN		0
+#define M_FW_LDST_CMD_FN		0x7
+#define V_FW_LDST_CMD_FN(x)		((x) << S_FW_LDST_CMD_FN)
+#define G_FW_LDST_CMD_FN(x)		\
+    (((x) >> S_FW_LDST_CMD_FN) & M_FW_LDST_CMD_FN)
 
-#define S_FW_LDST_CMD_SELECT	4
-#define M_FW_LDST_CMD_SELECT	0xf
-#define V_FW_LDST_CMD_SELECT(x)	((x) << S_FW_LDST_CMD_SELECT)
-#define G_FW_LDST_CMD_SELECT(x)	\
+#define S_FW_LDST_CMD_SELECT		4
+#define M_FW_LDST_CMD_SELECT		0xf
+#define V_FW_LDST_CMD_SELECT(x)		((x) << S_FW_LDST_CMD_SELECT)
+#define G_FW_LDST_CMD_SELECT(x)		\
     (((x) >> S_FW_LDST_CMD_SELECT) & M_FW_LDST_CMD_SELECT)
 
 #define S_FW_LDST_CMD_NACCESS		0
@@ -3192,16 +4363,16 @@
 #define G_FW_LDST_CMD_NACCESS(x)	\
     (((x) >> S_FW_LDST_CMD_NACCESS) & M_FW_LDST_CMD_NACCESS)
 
-#define S_FW_LDST_CMD_NSET	14
-#define M_FW_LDST_CMD_NSET	0x3
-#define V_FW_LDST_CMD_NSET(x)	((x) << S_FW_LDST_CMD_NSET)
-#define G_FW_LDST_CMD_NSET(x)	\
+#define S_FW_LDST_CMD_NSET		14
+#define M_FW_LDST_CMD_NSET		0x3
+#define V_FW_LDST_CMD_NSET(x)		((x) << S_FW_LDST_CMD_NSET)
+#define G_FW_LDST_CMD_NSET(x)		\
     (((x) >> S_FW_LDST_CMD_NSET) & M_FW_LDST_CMD_NSET)
 
-#define S_FW_LDST_CMD_PID	6
-#define M_FW_LDST_CMD_PID	0x3
-#define V_FW_LDST_CMD_PID(x)	((x) << S_FW_LDST_CMD_PID)
-#define G_FW_LDST_CMD_PID(x)	\
+#define S_FW_LDST_CMD_PID		6
+#define M_FW_LDST_CMD_PID		0x3
+#define V_FW_LDST_CMD_PID(x)		((x) << S_FW_LDST_CMD_PID)
+#define G_FW_LDST_CMD_PID(x)		\
     (((x) >> S_FW_LDST_CMD_PID) & M_FW_LDST_CMD_PID)
 
 struct fw_reset_cmd {
@@ -3211,12 +4382,12 @@
 	__be32 halt_pkd;
 };
 
-#define S_FW_RESET_CMD_HALT	31
-#define M_FW_RESET_CMD_HALT	0x1
-#define V_FW_RESET_CMD_HALT(x)	((x) << S_FW_RESET_CMD_HALT)
-#define G_FW_RESET_CMD_HALT(x)	\
+#define S_FW_RESET_CMD_HALT		31
+#define M_FW_RESET_CMD_HALT		0x1
+#define V_FW_RESET_CMD_HALT(x)		((x) << S_FW_RESET_CMD_HALT)
+#define G_FW_RESET_CMD_HALT(x)		\
     (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT)
-#define F_FW_RESET_CMD_HALT	V_FW_RESET_CMD_HALT(1U)
+#define F_FW_RESET_CMD_HALT		V_FW_RESET_CMD_HALT(1U)
 
 enum {
 	FW_HELLO_CMD_STAGE_OS		= 0,
@@ -3232,19 +4403,19 @@
 	__be32 fwrev;
 };
 
-#define S_FW_HELLO_CMD_ERR	31
-#define M_FW_HELLO_CMD_ERR	0x1
-#define V_FW_HELLO_CMD_ERR(x)	((x) << S_FW_HELLO_CMD_ERR)
-#define G_FW_HELLO_CMD_ERR(x)	\
+#define S_FW_HELLO_CMD_ERR		31
+#define M_FW_HELLO_CMD_ERR		0x1
+#define V_FW_HELLO_CMD_ERR(x)		((x) << S_FW_HELLO_CMD_ERR)
+#define G_FW_HELLO_CMD_ERR(x)		\
     (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR)
-#define F_FW_HELLO_CMD_ERR	V_FW_HELLO_CMD_ERR(1U)
+#define F_FW_HELLO_CMD_ERR		V_FW_HELLO_CMD_ERR(1U)
 
-#define S_FW_HELLO_CMD_INIT	30
-#define M_FW_HELLO_CMD_INIT	0x1
-#define V_FW_HELLO_CMD_INIT(x)	((x) << S_FW_HELLO_CMD_INIT)
-#define G_FW_HELLO_CMD_INIT(x)	\
+#define S_FW_HELLO_CMD_INIT		30
+#define M_FW_HELLO_CMD_INIT		0x1
+#define V_FW_HELLO_CMD_INIT(x)		((x) << S_FW_HELLO_CMD_INIT)
+#define G_FW_HELLO_CMD_INIT(x)		\
     (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT)
-#define F_FW_HELLO_CMD_INIT	V_FW_HELLO_CMD_INIT(1U)
+#define F_FW_HELLO_CMD_INIT		V_FW_HELLO_CMD_INIT(1U)
 
 #define S_FW_HELLO_CMD_MASTERDIS	29
 #define M_FW_HELLO_CMD_MASTERDIS	0x1
@@ -3279,10 +4450,10 @@
 #define G_FW_HELLO_CMD_MBASYNCNOT(x)	\
     (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT)
 
-#define S_FW_HELLO_CMD_STAGE	17
-#define M_FW_HELLO_CMD_STAGE	0x7
-#define V_FW_HELLO_CMD_STAGE(x)	((x) << S_FW_HELLO_CMD_STAGE)
-#define G_FW_HELLO_CMD_STAGE(x)	\
+#define S_FW_HELLO_CMD_STAGE		17
+#define M_FW_HELLO_CMD_STAGE		0x7
+#define V_FW_HELLO_CMD_STAGE(x)		((x) << S_FW_HELLO_CMD_STAGE)
+#define G_FW_HELLO_CMD_STAGE(x)		\
     (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE)
 
 #define S_FW_HELLO_CMD_CLEARINIT	16
@@ -3347,6 +4518,7 @@
 #define FW_T4VF_MPS_BASE_ADDR      0x0100
 #define FW_T4VF_PL_BASE_ADDR       0x0200
 #define FW_T4VF_MBDATA_BASE_ADDR   0x0240
+#define FW_T6VF_MBDATA_BASE_ADDR   0x0280 /* aligned to mbox size 128B */
 #define FW_T4VF_CIM_BASE_ADDR      0x0300
 
 #define FW_T4VF_REGMAP_START       0x0000
@@ -3374,6 +4546,8 @@
 	FW_CAPS_CONFIG_NIC_IDS		= 0x00000004,
 	FW_CAPS_CONFIG_NIC_UM		= 0x00000008,
 	FW_CAPS_CONFIG_NIC_UM_ISGL	= 0x00000010,
+	FW_CAPS_CONFIG_NIC_HASHFILTER	= 0x00000020,
+	FW_CAPS_CONFIG_NIC_ETHOFLD	= 0x00000040,
 };
 
 enum fw_caps_config_toe {
@@ -3392,8 +4566,16 @@
 	FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
 	FW_CAPS_CONFIG_ISCSI_INITIATOR_SSNOFLD = 0x00000010,
 	FW_CAPS_CONFIG_ISCSI_TARGET_SSNOFLD = 0x00000020,
+	FW_CAPS_CONFIG_ISCSI_T10DIF = 0x00000040,
+	FW_CAPS_CONFIG_ISCSI_INITIATOR_CMDOFLD = 0x00000080,
+	FW_CAPS_CONFIG_ISCSI_TARGET_CMDOFLD = 0x00000100,
 };
 
+enum fw_caps_config_crypto {
+	FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
+	FW_CAPS_CONFIG_TLSKEYS = 0x00000002,
+};
+
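The cryptocaps word added below takes over the reserved r4 slot in struct
fw_caps_config_cmd; like the other *caps members it is a big-endian 16-bit
bitmask of these enum values. A hedged sketch of testing it, assuming
be16toh() from <sys/endian.h> and this header in scope (the helper name is
illustrative, not part of the commit):

    /* Does the firmware advertise lookaside crypto support? */
    static int
    fw_has_crypto_lookaside(uint16_t be_cryptocaps)
    {
	return ((be16toh(be_cryptocaps) &
	    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) != 0);
    }
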
 enum fw_caps_config_fcoe {
 	FW_CAPS_CONFIG_FCOE_INITIATOR	= 0x00000001,
 	FW_CAPS_CONFIG_FCOE_TARGET	= 0x00000002,
@@ -3403,11 +4585,12 @@
 };
 
 enum fw_memtype_cf {
-	FW_MEMTYPE_CF_EDC0		= 0x0,
-	FW_MEMTYPE_CF_EDC1		= 0x1,
-	FW_MEMTYPE_CF_EXTMEM		= 0x2,
-	FW_MEMTYPE_CF_FLASH		= 0x4,
-	FW_MEMTYPE_CF_INTERNAL		= 0x5,
+	FW_MEMTYPE_CF_EDC0		= FW_MEMTYPE_EDC0,
+	FW_MEMTYPE_CF_EDC1		= FW_MEMTYPE_EDC1,
+	FW_MEMTYPE_CF_EXTMEM		= FW_MEMTYPE_EXTMEM,
+	FW_MEMTYPE_CF_FLASH		= FW_MEMTYPE_FLASH,
+	FW_MEMTYPE_CF_INTERNAL		= FW_MEMTYPE_INTERNAL,
+	FW_MEMTYPE_CF_EXTMEM1		= FW_MEMTYPE_EXTMEM1,
 };
 
 struct fw_caps_config_cmd {
@@ -3422,7 +4605,7 @@
 	__be16 niccaps;
 	__be16 toecaps;
 	__be16 rdmacaps;
-	__be16 r4;
+	__be16 cryptocaps;
 	__be16 iscsicaps;
 	__be16 fcoecaps;
 	__be32 cfcsum;
@@ -3437,19 +4620,19 @@
     (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID)
 #define F_FW_CAPS_CONFIG_CMD_CFVALID	V_FW_CAPS_CONFIG_CMD_CFVALID(1U)
 
-#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF		24
-#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF		0x7
-#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x)	\
+#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF	24
+#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF	0x7
+#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
     ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
-#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x)	\
+#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
     (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \
      M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
 
-#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF	16
-#define M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF	0xff
-#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x)	\
+#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16
+#define M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff
+#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
     ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
-#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x)	\
+#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
     (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \
      M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
 
@@ -3461,6 +4644,7 @@
 	FW_PARAMS_MNEM_PFVF		= 2,	/* function params */
 	FW_PARAMS_MNEM_REG		= 3,	/* limited register access */
 	FW_PARAMS_MNEM_DMAQ		= 4,	/* dma queue params */
+	FW_PARAMS_MNEM_CHNET		= 5,	/* chnet params */
 	FW_PARAMS_MNEM_LAST
 };
 
@@ -3482,16 +4666,69 @@
 	FW_PARAMS_PARAM_DEV_INTFVER_ISCSIPDU = 0x08,
 	FW_PARAMS_PARAM_DEV_INTFVER_ISCSI = 0x09,
 	FW_PARAMS_PARAM_DEV_INTFVER_FCOE = 0x0A,
-	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
-	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
-	FW_PARAMS_PARAM_DEV_CF = 0x0D,
-	FW_PARAMS_PARAM_DEV_BYPASS = 0x0E,
-	FW_PARAMS_PARAM_DEV_PHYFW = 0x0F,
-	FW_PARAMS_PARAM_DEV_LOAD = 0x10,
-	FW_PARAMS_PARAM_DEV_DIAG = 0x11,
+	FW_PARAMS_PARAM_DEV_FWREV	= 0x0B,
+	FW_PARAMS_PARAM_DEV_TPREV	= 0x0C,
+	FW_PARAMS_PARAM_DEV_CF		= 0x0D,
+	FW_PARAMS_PARAM_DEV_BYPASS	= 0x0E,
+	FW_PARAMS_PARAM_DEV_PHYFW	= 0x0F,
+	FW_PARAMS_PARAM_DEV_LOAD	= 0x10,
+	FW_PARAMS_PARAM_DEV_DIAG	= 0x11,
+	FW_PARAMS_PARAM_DEV_UCLK	= 0x12, /* uP clock in khz */
+	FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
+	FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max adapter IRD */
+	FW_PARAMS_PARAM_DEV_INTFVER_FCOEPDU = 0x15,
+	FW_PARAMS_PARAM_DEV_MCINIT	= 0x16,
+	FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+	FW_PARAMS_PARAM_DEV_FWCACHE	= 0x18,
+	FW_PARAMS_PARAM_DEV_RSSINFO	= 0x19,
+	FW_PARAMS_PARAM_DEV_SCFGREV	= 0x1A,
+	FW_PARAMS_PARAM_DEV_VPDREV	= 0x1B,
+	FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR	= 0x1C,
+	FW_PARAMS_PARAM_DEV_FILTER2_WR	= 0x1D,
+
+	FW_PARAMS_PARAM_DEV_MPSBGMAP	= 0x1E,
+	FW_PARAMS_PARAM_DEV_TPCHMAP	= 0x1F,
+	FW_PARAMS_PARAM_DEV_HMA_SIZE	= 0x20,
+	FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM	= 0x21,
+	FW_PARAMS_PARAM_DEV_RING_BACKBONE	= 0x22,
+	FW_PARAMS_PARAM_DEV_PPOD_EDRAM	= 0x23,
 };
 
 /*
+ * dev bypass parameters; actions and modes
+ */
+enum fw_params_param_dev_bypass {
+
+	/* actions */
+	FW_PARAMS_PARAM_DEV_BYPASS_PFAIL = 0x00,
+	FW_PARAMS_PARAM_DEV_BYPASS_CURRENT = 0x01,
+
+	/* modes */
+	FW_PARAMS_PARAM_DEV_BYPASS_NORMAL = 0x00,
+	FW_PARAMS_PARAM_DEV_BYPASS_DROP	= 0x1,
+	FW_PARAMS_PARAM_DEV_BYPASS_BYPASS = 0x2,
+};
+
+enum fw_params_param_dev_phyfw {
+	FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD = 0x00,
+	FW_PARAMS_PARAM_DEV_PHYFW_VERSION = 0x01,
+};
+
+enum fw_params_param_dev_diag {
+	FW_PARAM_DEV_DIAG_TMP		= 0x00,
+	FW_PARAM_DEV_DIAG_VDD		= 0x01,
+};
+
+enum fw_params_param_dev_fwcache {
+	FW_PARAM_DEV_FWCACHE_FLUSH	= 0x00,
+	FW_PARAM_DEV_FWCACHE_FLUSHINV	= 0x01,
+};
+
+/*
  * physical and virtual function parameters
  */
 enum fw_params_param_pfvf {
@@ -3520,6 +4757,8 @@
 	FW_PARAMS_PARAM_PFVF_SQRQ_END	= 0x16,
 	FW_PARAMS_PARAM_PFVF_CQ_START	= 0x17,
 	FW_PARAMS_PARAM_PFVF_CQ_END	= 0x18,
+	FW_PARAMS_PARAM_PFVF_SRQ_START	= 0x19,
+	FW_PARAMS_PARAM_PFVF_SRQ_END	= 0x1A,
 	FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
 	FW_PARAMS_PARAM_PFVF_VIID	= 0x24,
 	FW_PARAMS_PARAM_PFVF_CPMASK	= 0x25,
@@ -3534,7 +4773,18 @@
 	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E,
 	FW_PARAMS_PARAM_PFVF_ETHOFLD_START = 0x2F,
 	FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30,
-	FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+	FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+	FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
+	FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
+	FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
+	FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+	FW_PARAMS_PARAM_PFVF_RAWF_START	= 0x36,
+	FW_PARAMS_PARAM_PFVF_RAWF_END	= 0x37,
+	FW_PARAMS_PARAM_PFVF_RSSKEYINFO	= 0x38,
+	FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
+	FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
+	FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_START = 0x3B,
+	FW_PARAMS_PARAM_PFVF_PPOD_EDRAM_END = 0x3C,
 };
 
 /*
@@ -3544,38 +4794,29 @@
 	FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
 	FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
 	FW_PARAMS_PARAM_DMAQ_IQ_INTIDX	= 0x02,
+	FW_PARAMS_PARAM_DMAQ_IQ_DCA	= 0x03,
 	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
 	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
 	FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
-	FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13
+	FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
+	FW_PARAMS_PARAM_DMAQ_EQ_DCA	= 0x14,
+	FW_PARAMS_PARAM_DMAQ_CONM_CTXT	= 0x20,
+	FW_PARAMS_PARAM_DMAQ_FLM_DCA	= 0x30
 };
 
 /*
- * dev bypass parameters; actions and modes
+ * chnet parameters
  */
-enum fw_params_param_dev_bypass {
-
-	/* actions
-	 */
-	FW_PARAMS_PARAM_DEV_BYPASS_PFAIL = 0x00,
-	FW_PARAMS_PARAM_DEV_BYPASS_CURRENT = 0x01,
-
-	/* modes
-	 */
-	FW_PARAMS_PARAM_DEV_BYPASS_NORMAL = 0x00,
-	FW_PARAMS_PARAM_DEV_BYPASS_DROP	= 0x1,
-	FW_PARAMS_PARAM_DEV_BYPASS_BYPASS = 0x2,
+enum fw_params_param_chnet {
+	FW_PARAMS_PARAM_CHNET_FLAGS		= 0x00,
 };
 
-enum fw_params_phyfw_actions {
-	FW_PARAMS_PARAM_PHYFW_DOWNLOAD	= 0x00,
-	FW_PARAMS_PARAM_PHYFW_VERSION	= 0x01,
+enum fw_params_param_chnet_flags {
+	FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_IPV6	= 0x1,
+	FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_DAD	= 0x2,
+	FW_PARAMS_PARAM_CHNET_FLAGS_ENABLE_MLDV2 = 0x4,
 };
 
-enum fw_params_param_dev_diag {
-	FW_PARAM_DEV_DIAG_TMP = 0x00,
-};
-
 #define S_FW_PARAMS_MNEM	24
 #define M_FW_PARAMS_MNEM	0xff
 #define V_FW_PARAMS_MNEM(x)	((x) << S_FW_PARAMS_MNEM)
@@ -3612,6 +4853,45 @@
 #define G_FW_PARAMS_PARAM_YZ(x) \
     (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
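
The MNEM and PARAM_X/Y/Z fields compose into the 32-bit parameter id carried
in fw_params_cmd. A sketch of building the id for the diagnostic temperature
query added above; V_FW_PARAMS_PARAM_X and V_FW_PARAMS_PARAM_Y are defined in
hunks elided from this diff:

    /*
     * Mnemonic FW_PARAMS_MNEM_DEV, parameter FW_PARAMS_PARAM_DEV_DIAG,
     * sub-parameter FW_PARAM_DEV_DIAG_TMP (chip temperature sensor).
     */
    uint32_t param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
	V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);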
 
+#define S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN 31
+#define M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN 0x1
+#define V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(x) \
+    ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN)
+#define G_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(x) \
+    (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN) & \
+	M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN)
+
+#define S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT 24
+#define M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT 0x3
+#define V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(x) \
+    ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT)
+#define G_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(x) \
+    (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT) & \
+	M_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT)
+
+#define S_FW_PARAMS_PARAM_DMAQ_DCA_ST	0
+#define M_FW_PARAMS_PARAM_DMAQ_DCA_ST	0x7ff
+#define V_FW_PARAMS_PARAM_DMAQ_DCA_ST(x) \
+    ((x) << S_FW_PARAMS_PARAM_DMAQ_DCA_ST)
+#define G_FW_PARAMS_PARAM_DMAQ_DCA_ST(x) \
+    (((x) >> S_FW_PARAMS_PARAM_DMAQ_DCA_ST) & M_FW_PARAMS_PARAM_DMAQ_DCA_ST)
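+
+These three fields pack the value used with the new FW_PARAMS_PARAM_DMAQ_IQ_DCA
+and FW_PARAMS_PARAM_DMAQ_EQ_DCA parameters. A sketch of composing one; the
+TPH hint and steering-tag values here are purely illustrative:
+
+    /* enable DCA with TPH hint 1 and PCIe steering tag 0x42 */
+    uint32_t dca = V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINTEN(1) |
+	V_FW_PARAMS_PARAM_DMAQ_DCA_TPHINT(1) |
+	V_FW_PARAMS_PARAM_DMAQ_DCA_ST(0x42);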
+
+#define S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE	29
+#define M_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE	0x7
+#define V_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE(x)	\
+    ((x) << S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE)
+#define G_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE(x)	\
+    (((x) >> S_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE) & \
+     M_FW_PARAMS_PARAM_DMAQ_INTIDX_QTYPE)
+
+#define S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX	0
+#define M_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX	0x3ff
+#define V_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX(x)	\
+    ((x) << S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX)
+#define G_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX(x)	\
+    (((x) >> S_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX) & \
+     M_FW_PARAMS_PARAM_DMAQ_INTIDX_INTIDX)
+
 struct fw_params_cmd {
 	__be32 op_to_vfn;
 	__be32 retval_len16;
@@ -3621,16 +4901,16 @@
 	} param[7];
 };
 
-#define S_FW_PARAMS_CMD_PFN	8
-#define M_FW_PARAMS_CMD_PFN	0x7
-#define V_FW_PARAMS_CMD_PFN(x)	((x) << S_FW_PARAMS_CMD_PFN)
-#define G_FW_PARAMS_CMD_PFN(x)	\
+#define S_FW_PARAMS_CMD_PFN		8
+#define M_FW_PARAMS_CMD_PFN		0x7
+#define V_FW_PARAMS_CMD_PFN(x)		((x) << S_FW_PARAMS_CMD_PFN)
+#define G_FW_PARAMS_CMD_PFN(x)		\
     (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN)
 
-#define S_FW_PARAMS_CMD_VFN	0
-#define M_FW_PARAMS_CMD_VFN	0xff
-#define V_FW_PARAMS_CMD_VFN(x)	((x) << S_FW_PARAMS_CMD_VFN)
-#define G_FW_PARAMS_CMD_VFN(x)	\
+#define S_FW_PARAMS_CMD_VFN		0
+#define M_FW_PARAMS_CMD_VFN		0xff
+#define V_FW_PARAMS_CMD_VFN(x)		((x) << S_FW_PARAMS_CMD_VFN)
+#define G_FW_PARAMS_CMD_VFN(x)		\
     (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
 
 struct fw_pfvf_cmd {
@@ -3645,16 +4925,16 @@
 	__be32 r4;
 };
 
-#define S_FW_PFVF_CMD_PFN	8
-#define M_FW_PFVF_CMD_PFN	0x7
-#define V_FW_PFVF_CMD_PFN(x)	((x) << S_FW_PFVF_CMD_PFN)
-#define G_FW_PFVF_CMD_PFN(x)	\
+#define S_FW_PFVF_CMD_PFN		8
+#define M_FW_PFVF_CMD_PFN		0x7
+#define V_FW_PFVF_CMD_PFN(x)		((x) << S_FW_PFVF_CMD_PFN)
+#define G_FW_PFVF_CMD_PFN(x)		\
     (((x) >> S_FW_PFVF_CMD_PFN) & M_FW_PFVF_CMD_PFN)
 
-#define S_FW_PFVF_CMD_VFN	0
-#define M_FW_PFVF_CMD_VFN	0xff
-#define V_FW_PFVF_CMD_VFN(x)	((x) << S_FW_PFVF_CMD_VFN)
-#define G_FW_PFVF_CMD_VFN(x)	\
+#define S_FW_PFVF_CMD_VFN		0
+#define M_FW_PFVF_CMD_VFN		0xff
+#define V_FW_PFVF_CMD_VFN(x)		((x) << S_FW_PFVF_CMD_VFN)
+#define G_FW_PFVF_CMD_VFN(x)		\
     (((x) >> S_FW_PFVF_CMD_VFN) & M_FW_PFVF_CMD_VFN)
 
 #define S_FW_PFVF_CMD_NIQFLINT		20
@@ -3663,46 +4943,47 @@
 #define G_FW_PFVF_CMD_NIQFLINT(x)	\
     (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
 
-#define S_FW_PFVF_CMD_NIQ	0
-#define M_FW_PFVF_CMD_NIQ	0xfffff
-#define V_FW_PFVF_CMD_NIQ(x)	((x) << S_FW_PFVF_CMD_NIQ)
-#define G_FW_PFVF_CMD_NIQ(x)	\
+#define S_FW_PFVF_CMD_NIQ		0
+#define M_FW_PFVF_CMD_NIQ		0xfffff
+#define V_FW_PFVF_CMD_NIQ(x)		((x) << S_FW_PFVF_CMD_NIQ)
+#define G_FW_PFVF_CMD_NIQ(x)		\
     (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
 
-#define S_FW_PFVF_CMD_TYPE	31
-#define M_FW_PFVF_CMD_TYPE	0x1
-#define V_FW_PFVF_CMD_TYPE(x)	((x) << S_FW_PFVF_CMD_TYPE)
-#define G_FW_PFVF_CMD_TYPE(x)	\
+#define S_FW_PFVF_CMD_TYPE		31
+#define M_FW_PFVF_CMD_TYPE		0x1
+#define V_FW_PFVF_CMD_TYPE(x)		((x) << S_FW_PFVF_CMD_TYPE)
+#define G_FW_PFVF_CMD_TYPE(x)		\
     (((x) >> S_FW_PFVF_CMD_TYPE) & M_FW_PFVF_CMD_TYPE)
-#define F_FW_PFVF_CMD_TYPE	V_FW_PFVF_CMD_TYPE(1U)
+#define F_FW_PFVF_CMD_TYPE		V_FW_PFVF_CMD_TYPE(1U)
 
-#define S_FW_PFVF_CMD_CMASK	24
-#define M_FW_PFVF_CMD_CMASK	0xf
-#define V_FW_PFVF_CMD_CMASK(x)	((x) << S_FW_PFVF_CMD_CMASK)
-#define G_FW_PFVF_CMD_CMASK(x)	\
+#define S_FW_PFVF_CMD_CMASK		24
+#define M_FW_PFVF_CMD_CMASK		0xf
+#define V_FW_PFVF_CMD_CMASK(x)		((x) << S_FW_PFVF_CMD_CMASK)
+#define G_FW_PFVF_CMD_CMASK(x)		\
     (((x) >> S_FW_PFVF_CMD_CMASK) & M_FW_PFVF_CMD_CMASK)
 
-#define S_FW_PFVF_CMD_PMASK	20
-#define M_FW_PFVF_CMD_PMASK	0xf
-#define V_FW_PFVF_CMD_PMASK(x)	((x) << S_FW_PFVF_CMD_PMASK)
-#define G_FW_PFVF_CMD_PMASK(x)	\
+#define S_FW_PFVF_CMD_PMASK		20
+#define M_FW_PFVF_CMD_PMASK		0xf
+#define V_FW_PFVF_CMD_PMASK(x)		((x) << S_FW_PFVF_CMD_PMASK)
+#define G_FW_PFVF_CMD_PMASK(x)		\
     (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
 
-#define S_FW_PFVF_CMD_NEQ	0
-#define M_FW_PFVF_CMD_NEQ	0xfffff
-#define V_FW_PFVF_CMD_NEQ(x)	((x) << S_FW_PFVF_CMD_NEQ)
-#define G_FW_PFVF_CMD_NEQ(x)	\
+#define S_FW_PFVF_CMD_NEQ		0
+#define M_FW_PFVF_CMD_NEQ		0xfffff
+#define V_FW_PFVF_CMD_NEQ(x)		((x) << S_FW_PFVF_CMD_NEQ)
+#define G_FW_PFVF_CMD_NEQ(x)		\
     (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
 
-#define S_FW_PFVF_CMD_TC	24
-#define M_FW_PFVF_CMD_TC	0xff
-#define V_FW_PFVF_CMD_TC(x)	((x) << S_FW_PFVF_CMD_TC)
-#define G_FW_PFVF_CMD_TC(x)	(((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+#define S_FW_PFVF_CMD_TC		24
+#define M_FW_PFVF_CMD_TC		0xff
+#define V_FW_PFVF_CMD_TC(x)		((x) << S_FW_PFVF_CMD_TC)
+#define G_FW_PFVF_CMD_TC(x)		\
+    (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
 
-#define S_FW_PFVF_CMD_NVI	16
-#define M_FW_PFVF_CMD_NVI	0xff
-#define V_FW_PFVF_CMD_NVI(x)	((x) << S_FW_PFVF_CMD_NVI)
-#define G_FW_PFVF_CMD_NVI(x)	\
+#define S_FW_PFVF_CMD_NVI		16
+#define M_FW_PFVF_CMD_NVI		0xff
+#define V_FW_PFVF_CMD_NVI(x)		((x) << S_FW_PFVF_CMD_NVI)
+#define G_FW_PFVF_CMD_NVI(x)		\
     (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
 
 #define S_FW_PFVF_CMD_NEXACTF		0
@@ -3711,10 +4992,10 @@
 #define G_FW_PFVF_CMD_NEXACTF(x)	\
     (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
 
-#define S_FW_PFVF_CMD_R_CAPS	24
-#define M_FW_PFVF_CMD_R_CAPS	0xff
-#define V_FW_PFVF_CMD_R_CAPS(x)	((x) << S_FW_PFVF_CMD_R_CAPS)
-#define G_FW_PFVF_CMD_R_CAPS(x)	\
+#define S_FW_PFVF_CMD_R_CAPS		24
+#define M_FW_PFVF_CMD_R_CAPS		0xff
+#define V_FW_PFVF_CMD_R_CAPS(x)		((x) << S_FW_PFVF_CMD_R_CAPS)
+#define G_FW_PFVF_CMD_R_CAPS(x)		\
     (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
 
 #define S_FW_PFVF_CMD_WX_CAPS		16
@@ -3736,7 +5017,8 @@
  */
 enum fw_iq_type {
 	FW_IQ_TYPE_FL_INT_CAP,
-	FW_IQ_TYPE_NO_FL_INT_CAP
+	FW_IQ_TYPE_NO_FL_INT_CAP,
+	FW_IQ_TYPE_VF_CQ
 };
 
 struct fw_iq_cmd {
@@ -3760,85 +5042,90 @@
 	__be64 fl1addr;
 };
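
For context on the long run of FW_IQ_CMD field macros that follows, this is
roughly how a driver fills the command to allocate and start an ingress queue.
A sketch modeled on the pattern in t4_sge.c; F_FW_CMD_* and FW_LEN16 come from
elided parts of this header, and pf stands in for the caller's physical
function number:

    struct fw_iq_cmd c;

    memset(&c, 0, sizeof(c));
    c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
	V_FW_IQ_CMD_VFN(0));
    c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
	FW_LEN16(c));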
 
-#define S_FW_IQ_CMD_PFN		8
-#define M_FW_IQ_CMD_PFN		0x7
-#define V_FW_IQ_CMD_PFN(x)	((x) << S_FW_IQ_CMD_PFN)
-#define G_FW_IQ_CMD_PFN(x)	(((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)
+#define S_FW_IQ_CMD_PFN			8
+#define M_FW_IQ_CMD_PFN			0x7
+#define V_FW_IQ_CMD_PFN(x)		((x) << S_FW_IQ_CMD_PFN)
+#define G_FW_IQ_CMD_PFN(x)		\
+    (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)
 
-#define S_FW_IQ_CMD_VFN		0
-#define M_FW_IQ_CMD_VFN		0xff
-#define V_FW_IQ_CMD_VFN(x)	((x) << S_FW_IQ_CMD_VFN)
-#define G_FW_IQ_CMD_VFN(x)	(((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN)
+#define S_FW_IQ_CMD_VFN			0
+#define M_FW_IQ_CMD_VFN			0xff
+#define V_FW_IQ_CMD_VFN(x)		((x) << S_FW_IQ_CMD_VFN)
+#define G_FW_IQ_CMD_VFN(x)		\
+    (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN)
 
-#define S_FW_IQ_CMD_ALLOC	31
-#define M_FW_IQ_CMD_ALLOC	0x1
-#define V_FW_IQ_CMD_ALLOC(x)	((x) << S_FW_IQ_CMD_ALLOC)
-#define G_FW_IQ_CMD_ALLOC(x)	\
+#define S_FW_IQ_CMD_ALLOC		31
+#define M_FW_IQ_CMD_ALLOC		0x1
+#define V_FW_IQ_CMD_ALLOC(x)		((x) << S_FW_IQ_CMD_ALLOC)
+#define G_FW_IQ_CMD_ALLOC(x)		\
     (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC)
-#define F_FW_IQ_CMD_ALLOC	V_FW_IQ_CMD_ALLOC(1U)
+#define F_FW_IQ_CMD_ALLOC		V_FW_IQ_CMD_ALLOC(1U)
 
-#define S_FW_IQ_CMD_FREE	30
-#define M_FW_IQ_CMD_FREE	0x1
-#define V_FW_IQ_CMD_FREE(x)	((x) << S_FW_IQ_CMD_FREE)
-#define G_FW_IQ_CMD_FREE(x)	(((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE)
-#define F_FW_IQ_CMD_FREE	V_FW_IQ_CMD_FREE(1U)
+#define S_FW_IQ_CMD_FREE		30
+#define M_FW_IQ_CMD_FREE		0x1
+#define V_FW_IQ_CMD_FREE(x)		((x) << S_FW_IQ_CMD_FREE)
+#define G_FW_IQ_CMD_FREE(x)		\
+    (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE)
+#define F_FW_IQ_CMD_FREE		V_FW_IQ_CMD_FREE(1U)
 
-#define S_FW_IQ_CMD_MODIFY	29
-#define M_FW_IQ_CMD_MODIFY	0x1
-#define V_FW_IQ_CMD_MODIFY(x)	((x) << S_FW_IQ_CMD_MODIFY)
-#define G_FW_IQ_CMD_MODIFY(x)	\
+#define S_FW_IQ_CMD_MODIFY		29
+#define M_FW_IQ_CMD_MODIFY		0x1
+#define V_FW_IQ_CMD_MODIFY(x)		((x) << S_FW_IQ_CMD_MODIFY)
+#define G_FW_IQ_CMD_MODIFY(x)		\
     (((x) >> S_FW_IQ_CMD_MODIFY) & M_FW_IQ_CMD_MODIFY)
-#define F_FW_IQ_CMD_MODIFY	V_FW_IQ_CMD_MODIFY(1U)
+#define F_FW_IQ_CMD_MODIFY		V_FW_IQ_CMD_MODIFY(1U)
 
-#define S_FW_IQ_CMD_IQSTART	28
-#define M_FW_IQ_CMD_IQSTART	0x1
-#define V_FW_IQ_CMD_IQSTART(x)	((x) << S_FW_IQ_CMD_IQSTART)
-#define G_FW_IQ_CMD_IQSTART(x)	\
+#define S_FW_IQ_CMD_IQSTART		28
+#define M_FW_IQ_CMD_IQSTART		0x1
+#define V_FW_IQ_CMD_IQSTART(x)		((x) << S_FW_IQ_CMD_IQSTART)
+#define G_FW_IQ_CMD_IQSTART(x)		\
     (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART)
-#define F_FW_IQ_CMD_IQSTART	V_FW_IQ_CMD_IQSTART(1U)
+#define F_FW_IQ_CMD_IQSTART		V_FW_IQ_CMD_IQSTART(1U)
 
-#define S_FW_IQ_CMD_IQSTOP	27
-#define M_FW_IQ_CMD_IQSTOP	0x1
-#define V_FW_IQ_CMD_IQSTOP(x)	((x) << S_FW_IQ_CMD_IQSTOP)
-#define G_FW_IQ_CMD_IQSTOP(x)	\
+#define S_FW_IQ_CMD_IQSTOP		27
+#define M_FW_IQ_CMD_IQSTOP		0x1
+#define V_FW_IQ_CMD_IQSTOP(x)		((x) << S_FW_IQ_CMD_IQSTOP)
+#define G_FW_IQ_CMD_IQSTOP(x)		\
     (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP)
-#define F_FW_IQ_CMD_IQSTOP	V_FW_IQ_CMD_IQSTOP(1U)
+#define F_FW_IQ_CMD_IQSTOP		V_FW_IQ_CMD_IQSTOP(1U)
 
-#define S_FW_IQ_CMD_TYPE	29
-#define M_FW_IQ_CMD_TYPE	0x7
-#define V_FW_IQ_CMD_TYPE(x)	((x) << S_FW_IQ_CMD_TYPE)
-#define G_FW_IQ_CMD_TYPE(x)	(((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE)
+#define S_FW_IQ_CMD_TYPE		29
+#define M_FW_IQ_CMD_TYPE		0x7
+#define V_FW_IQ_CMD_TYPE(x)		((x) << S_FW_IQ_CMD_TYPE)
+#define G_FW_IQ_CMD_TYPE(x)		\
+    (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE)
 
-#define S_FW_IQ_CMD_IQASYNCH	28
-#define M_FW_IQ_CMD_IQASYNCH	0x1
-#define V_FW_IQ_CMD_IQASYNCH(x)	((x) << S_FW_IQ_CMD_IQASYNCH)
-#define G_FW_IQ_CMD_IQASYNCH(x)	\
+#define S_FW_IQ_CMD_IQASYNCH		28
+#define M_FW_IQ_CMD_IQASYNCH		0x1
+#define V_FW_IQ_CMD_IQASYNCH(x)		((x) << S_FW_IQ_CMD_IQASYNCH)
+#define G_FW_IQ_CMD_IQASYNCH(x)		\
     (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH)
-#define F_FW_IQ_CMD_IQASYNCH	V_FW_IQ_CMD_IQASYNCH(1U)
+#define F_FW_IQ_CMD_IQASYNCH		V_FW_IQ_CMD_IQASYNCH(1U)
 
-#define S_FW_IQ_CMD_VIID	16
-#define M_FW_IQ_CMD_VIID	0xfff
-#define V_FW_IQ_CMD_VIID(x)	((x) << S_FW_IQ_CMD_VIID)
-#define G_FW_IQ_CMD_VIID(x)	(((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID)
+#define S_FW_IQ_CMD_VIID		16
+#define M_FW_IQ_CMD_VIID		0xfff
+#define V_FW_IQ_CMD_VIID(x)		((x) << S_FW_IQ_CMD_VIID)
+#define G_FW_IQ_CMD_VIID(x)		\
+    (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID)
 
-#define S_FW_IQ_CMD_IQANDST	15
-#define M_FW_IQ_CMD_IQANDST	0x1
-#define V_FW_IQ_CMD_IQANDST(x)	((x) << S_FW_IQ_CMD_IQANDST)
-#define G_FW_IQ_CMD_IQANDST(x)	\
+#define S_FW_IQ_CMD_IQANDST		15
+#define M_FW_IQ_CMD_IQANDST		0x1
+#define V_FW_IQ_CMD_IQANDST(x)		((x) << S_FW_IQ_CMD_IQANDST)
+#define G_FW_IQ_CMD_IQANDST(x)		\
     (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST)
-#define F_FW_IQ_CMD_IQANDST	V_FW_IQ_CMD_IQANDST(1U)
+#define F_FW_IQ_CMD_IQANDST		V_FW_IQ_CMD_IQANDST(1U)
 
-#define S_FW_IQ_CMD_IQANUS	14
-#define M_FW_IQ_CMD_IQANUS	0x1
-#define V_FW_IQ_CMD_IQANUS(x)	((x) << S_FW_IQ_CMD_IQANUS)
-#define G_FW_IQ_CMD_IQANUS(x)	\
+#define S_FW_IQ_CMD_IQANUS		14
+#define M_FW_IQ_CMD_IQANUS		0x1
+#define V_FW_IQ_CMD_IQANUS(x)		((x) << S_FW_IQ_CMD_IQANUS)
+#define G_FW_IQ_CMD_IQANUS(x)		\
     (((x) >> S_FW_IQ_CMD_IQANUS) & M_FW_IQ_CMD_IQANUS)
-#define F_FW_IQ_CMD_IQANUS	V_FW_IQ_CMD_IQANUS(1U)
+#define F_FW_IQ_CMD_IQANUS		V_FW_IQ_CMD_IQANUS(1U)
 
-#define S_FW_IQ_CMD_IQANUD	12
-#define M_FW_IQ_CMD_IQANUD	0x3
-#define V_FW_IQ_CMD_IQANUD(x)	((x) << S_FW_IQ_CMD_IQANUD)
-#define G_FW_IQ_CMD_IQANUD(x)	\
+#define S_FW_IQ_CMD_IQANUD		12
+#define M_FW_IQ_CMD_IQANUD		0x3
+#define V_FW_IQ_CMD_IQANUD(x)		((x) << S_FW_IQ_CMD_IQANUD)
+#define G_FW_IQ_CMD_IQANUD(x)		\
     (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD)
 
 #define S_FW_IQ_CMD_IQANDSTINDEX	0
@@ -3852,7 +5139,7 @@
 #define V_FW_IQ_CMD_IQDROPRSS(x)	((x) << S_FW_IQ_CMD_IQDROPRSS)
 #define G_FW_IQ_CMD_IQDROPRSS(x)	\
     (((x) >> S_FW_IQ_CMD_IQDROPRSS) & M_FW_IQ_CMD_IQDROPRSS)
-#define F_FW_IQ_CMD_IQDROPRSS	V_FW_IQ_CMD_IQDROPRSS(1U)
+#define F_FW_IQ_CMD_IQDROPRSS		V_FW_IQ_CMD_IQDROPRSS(1U)
 
 #define S_FW_IQ_CMD_IQGTSMODE		14
 #define M_FW_IQ_CMD_IQGTSMODE		0x1
@@ -3859,25 +5146,25 @@
 #define V_FW_IQ_CMD_IQGTSMODE(x)	((x) << S_FW_IQ_CMD_IQGTSMODE)
 #define G_FW_IQ_CMD_IQGTSMODE(x)	\
     (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE)
-#define F_FW_IQ_CMD_IQGTSMODE	V_FW_IQ_CMD_IQGTSMODE(1U)
+#define F_FW_IQ_CMD_IQGTSMODE		V_FW_IQ_CMD_IQGTSMODE(1U)
 
-#define S_FW_IQ_CMD_IQPCIECH	12
-#define M_FW_IQ_CMD_IQPCIECH	0x3
-#define V_FW_IQ_CMD_IQPCIECH(x)	((x) << S_FW_IQ_CMD_IQPCIECH)
-#define G_FW_IQ_CMD_IQPCIECH(x)	\
+#define S_FW_IQ_CMD_IQPCIECH		12
+#define M_FW_IQ_CMD_IQPCIECH		0x3
+#define V_FW_IQ_CMD_IQPCIECH(x)		((x) << S_FW_IQ_CMD_IQPCIECH)
+#define G_FW_IQ_CMD_IQPCIECH(x)		\
     (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH)
 
-#define S_FW_IQ_CMD_IQDCAEN	11
-#define M_FW_IQ_CMD_IQDCAEN	0x1
-#define V_FW_IQ_CMD_IQDCAEN(x)	((x) << S_FW_IQ_CMD_IQDCAEN)
-#define G_FW_IQ_CMD_IQDCAEN(x)	\
+#define S_FW_IQ_CMD_IQDCAEN		11
+#define M_FW_IQ_CMD_IQDCAEN		0x1
+#define V_FW_IQ_CMD_IQDCAEN(x)		((x) << S_FW_IQ_CMD_IQDCAEN)
+#define G_FW_IQ_CMD_IQDCAEN(x)		\
     (((x) >> S_FW_IQ_CMD_IQDCAEN) & M_FW_IQ_CMD_IQDCAEN)
-#define F_FW_IQ_CMD_IQDCAEN	V_FW_IQ_CMD_IQDCAEN(1U)
+#define F_FW_IQ_CMD_IQDCAEN		V_FW_IQ_CMD_IQDCAEN(1U)
 
-#define S_FW_IQ_CMD_IQDCACPU	6
-#define M_FW_IQ_CMD_IQDCACPU	0x1f
-#define V_FW_IQ_CMD_IQDCACPU(x)	((x) << S_FW_IQ_CMD_IQDCACPU)
-#define G_FW_IQ_CMD_IQDCACPU(x)	\
+#define S_FW_IQ_CMD_IQDCACPU		6
+#define M_FW_IQ_CMD_IQDCACPU		0x1f
+#define V_FW_IQ_CMD_IQDCACPU(x)		((x) << S_FW_IQ_CMD_IQDCACPU)
+#define G_FW_IQ_CMD_IQDCACPU(x)		\
     (((x) >> S_FW_IQ_CMD_IQDCACPU) & M_FW_IQ_CMD_IQDCACPU)
 
 #define S_FW_IQ_CMD_IQINTCNTTHRESH	4
@@ -3886,36 +5173,39 @@
 #define G_FW_IQ_CMD_IQINTCNTTHRESH(x)	\
     (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH)
 
-#define S_FW_IQ_CMD_IQO		3
-#define M_FW_IQ_CMD_IQO		0x1
-#define V_FW_IQ_CMD_IQO(x)	((x) << S_FW_IQ_CMD_IQO)
-#define G_FW_IQ_CMD_IQO(x)	(((x) >> S_FW_IQ_CMD_IQO) & M_FW_IQ_CMD_IQO)
-#define F_FW_IQ_CMD_IQO	V_FW_IQ_CMD_IQO(1U)
+#define S_FW_IQ_CMD_IQO			3
+#define M_FW_IQ_CMD_IQO			0x1
+#define V_FW_IQ_CMD_IQO(x)		((x) << S_FW_IQ_CMD_IQO)
+#define G_FW_IQ_CMD_IQO(x)		\
+    (((x) >> S_FW_IQ_CMD_IQO) & M_FW_IQ_CMD_IQO)
+#define F_FW_IQ_CMD_IQO			V_FW_IQ_CMD_IQO(1U)
 
-#define S_FW_IQ_CMD_IQCPRIO	2
-#define M_FW_IQ_CMD_IQCPRIO	0x1
-#define V_FW_IQ_CMD_IQCPRIO(x)	((x) << S_FW_IQ_CMD_IQCPRIO)
-#define G_FW_IQ_CMD_IQCPRIO(x)	\
+#define S_FW_IQ_CMD_IQCPRIO		2
+#define M_FW_IQ_CMD_IQCPRIO		0x1
+#define V_FW_IQ_CMD_IQCPRIO(x)		((x) << S_FW_IQ_CMD_IQCPRIO)
+#define G_FW_IQ_CMD_IQCPRIO(x)		\
     (((x) >> S_FW_IQ_CMD_IQCPRIO) & M_FW_IQ_CMD_IQCPRIO)
-#define F_FW_IQ_CMD_IQCPRIO	V_FW_IQ_CMD_IQCPRIO(1U)
+#define F_FW_IQ_CMD_IQCPRIO		V_FW_IQ_CMD_IQCPRIO(1U)
 
-#define S_FW_IQ_CMD_IQESIZE	0
-#define M_FW_IQ_CMD_IQESIZE	0x3
-#define V_FW_IQ_CMD_IQESIZE(x)	((x) << S_FW_IQ_CMD_IQESIZE)
-#define G_FW_IQ_CMD_IQESIZE(x)	\
+#define S_FW_IQ_CMD_IQESIZE		0
+#define M_FW_IQ_CMD_IQESIZE		0x3
+#define V_FW_IQ_CMD_IQESIZE(x)		((x) << S_FW_IQ_CMD_IQESIZE)
+#define G_FW_IQ_CMD_IQESIZE(x)		\
     (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
 
-#define S_FW_IQ_CMD_IQNS	31
-#define M_FW_IQ_CMD_IQNS	0x1
-#define V_FW_IQ_CMD_IQNS(x)	((x) << S_FW_IQ_CMD_IQNS)
-#define G_FW_IQ_CMD_IQNS(x)	(((x) >> S_FW_IQ_CMD_IQNS) & M_FW_IQ_CMD_IQNS)
-#define F_FW_IQ_CMD_IQNS	V_FW_IQ_CMD_IQNS(1U)
+#define S_FW_IQ_CMD_IQNS		31
+#define M_FW_IQ_CMD_IQNS		0x1
+#define V_FW_IQ_CMD_IQNS(x)		((x) << S_FW_IQ_CMD_IQNS)
+#define G_FW_IQ_CMD_IQNS(x)		\
+    (((x) >> S_FW_IQ_CMD_IQNS) & M_FW_IQ_CMD_IQNS)
+#define F_FW_IQ_CMD_IQNS		V_FW_IQ_CMD_IQNS(1U)
 
-#define S_FW_IQ_CMD_IQRO	30
-#define M_FW_IQ_CMD_IQRO	0x1
-#define V_FW_IQ_CMD_IQRO(x)	((x) << S_FW_IQ_CMD_IQRO)
-#define G_FW_IQ_CMD_IQRO(x)	(((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
-#define F_FW_IQ_CMD_IQRO	V_FW_IQ_CMD_IQRO(1U)
+#define S_FW_IQ_CMD_IQRO		30
+#define M_FW_IQ_CMD_IQRO		0x1
+#define V_FW_IQ_CMD_IQRO(x)		((x) << S_FW_IQ_CMD_IQRO)
+#define G_FW_IQ_CMD_IQRO(x)		\
+    (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
+#define F_FW_IQ_CMD_IQRO		V_FW_IQ_CMD_IQRO(1U)
 
 #define S_FW_IQ_CMD_IQFLINTIQHSEN	28
 #define M_FW_IQ_CMD_IQFLINTIQHSEN	0x3
@@ -3943,6 +5233,13 @@
 #define G_FW_IQ_CMD_FL0CNGCHMAP(x)	\
     (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP)
 
+#define S_FW_IQ_CMD_FL0CONGDROP		16
+#define M_FW_IQ_CMD_FL0CONGDROP		0x1
+#define V_FW_IQ_CMD_FL0CONGDROP(x)	((x) << S_FW_IQ_CMD_FL0CONGDROP)
+#define G_FW_IQ_CMD_FL0CONGDROP(x)	\
+    (((x) >> S_FW_IQ_CMD_FL0CONGDROP) & M_FW_IQ_CMD_FL0CONGDROP)
+#define F_FW_IQ_CMD_FL0CONGDROP		V_FW_IQ_CMD_FL0CONGDROP(1U)
+
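+FL0CONGDROP is new in this revision: a congestion-drop control for free
+list 0. A one-line sketch, assuming c is the fw_iq_cmd being built
+(iqns_to_fl0congen is the word these bits live in, per the full header):
+
+    c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_FL0CONGDROP);
+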
 #define S_FW_IQ_CMD_FL0CACHELOCK	15
 #define M_FW_IQ_CMD_FL0CACHELOCK	0x1
 #define V_FW_IQ_CMD_FL0CACHELOCK(x)	((x) << S_FW_IQ_CMD_FL0CACHELOCK)
@@ -3950,12 +5247,12 @@
     (((x) >> S_FW_IQ_CMD_FL0CACHELOCK) & M_FW_IQ_CMD_FL0CACHELOCK)
 #define F_FW_IQ_CMD_FL0CACHELOCK	V_FW_IQ_CMD_FL0CACHELOCK(1U)
 
-#define S_FW_IQ_CMD_FL0DBP	14
-#define M_FW_IQ_CMD_FL0DBP	0x1
-#define V_FW_IQ_CMD_FL0DBP(x)	((x) << S_FW_IQ_CMD_FL0DBP)
-#define G_FW_IQ_CMD_FL0DBP(x)	\
+#define S_FW_IQ_CMD_FL0DBP		14
+#define M_FW_IQ_CMD_FL0DBP		0x1
+#define V_FW_IQ_CMD_FL0DBP(x)		((x) << S_FW_IQ_CMD_FL0DBP)
+#define G_FW_IQ_CMD_FL0DBP(x)		\
     (((x) >> S_FW_IQ_CMD_FL0DBP) & M_FW_IQ_CMD_FL0DBP)
-#define F_FW_IQ_CMD_FL0DBP	V_FW_IQ_CMD_FL0DBP(1U)
+#define F_FW_IQ_CMD_FL0DBP		V_FW_IQ_CMD_FL0DBP(1U)
 
 #define S_FW_IQ_CMD_FL0DATANS		13
 #define M_FW_IQ_CMD_FL0DATANS		0x1
@@ -3962,7 +5259,7 @@
 #define V_FW_IQ_CMD_FL0DATANS(x)	((x) << S_FW_IQ_CMD_FL0DATANS)
 #define G_FW_IQ_CMD_FL0DATANS(x)	\
     (((x) >> S_FW_IQ_CMD_FL0DATANS) & M_FW_IQ_CMD_FL0DATANS)
-#define F_FW_IQ_CMD_FL0DATANS	V_FW_IQ_CMD_FL0DATANS(1U)
+#define F_FW_IQ_CMD_FL0DATANS		V_FW_IQ_CMD_FL0DATANS(1U)
 
 #define S_FW_IQ_CMD_FL0DATARO		12
 #define M_FW_IQ_CMD_FL0DATARO		0x1
@@ -3969,7 +5266,7 @@
 #define V_FW_IQ_CMD_FL0DATARO(x)	((x) << S_FW_IQ_CMD_FL0DATARO)
 #define G_FW_IQ_CMD_FL0DATARO(x)	\
     (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO)
-#define F_FW_IQ_CMD_FL0DATARO	V_FW_IQ_CMD_FL0DATARO(1U)
+#define F_FW_IQ_CMD_FL0DATARO		V_FW_IQ_CMD_FL0DATARO(1U)
 
 #define S_FW_IQ_CMD_FL0CONGCIF		11
 #define M_FW_IQ_CMD_FL0CONGCIF		0x1
@@ -3976,7 +5273,7 @@
 #define V_FW_IQ_CMD_FL0CONGCIF(x)	((x) << S_FW_IQ_CMD_FL0CONGCIF)
 #define G_FW_IQ_CMD_FL0CONGCIF(x)	\
     (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF)
-#define F_FW_IQ_CMD_FL0CONGCIF	V_FW_IQ_CMD_FL0CONGCIF(1U)
+#define F_FW_IQ_CMD_FL0CONGCIF		V_FW_IQ_CMD_FL0CONGCIF(1U)
 
 #define S_FW_IQ_CMD_FL0ONCHIP		10
 #define M_FW_IQ_CMD_FL0ONCHIP		0x1
@@ -3983,7 +5280,7 @@
 #define V_FW_IQ_CMD_FL0ONCHIP(x)	((x) << S_FW_IQ_CMD_FL0ONCHIP)
 #define G_FW_IQ_CMD_FL0ONCHIP(x)	\
     (((x) >> S_FW_IQ_CMD_FL0ONCHIP) & M_FW_IQ_CMD_FL0ONCHIP)
-#define F_FW_IQ_CMD_FL0ONCHIP	V_FW_IQ_CMD_FL0ONCHIP(1U)
+#define F_FW_IQ_CMD_FL0ONCHIP		V_FW_IQ_CMD_FL0ONCHIP(1U)
 
 #define S_FW_IQ_CMD_FL0STATUSPGNS	9
 #define M_FW_IQ_CMD_FL0STATUSPGNS	0x1
@@ -4004,7 +5301,7 @@
 #define V_FW_IQ_CMD_FL0FETCHNS(x)	((x) << S_FW_IQ_CMD_FL0FETCHNS)
 #define G_FW_IQ_CMD_FL0FETCHNS(x)	\
     (((x) >> S_FW_IQ_CMD_FL0FETCHNS) & M_FW_IQ_CMD_FL0FETCHNS)
-#define F_FW_IQ_CMD_FL0FETCHNS	V_FW_IQ_CMD_FL0FETCHNS(1U)
+#define F_FW_IQ_CMD_FL0FETCHNS		V_FW_IQ_CMD_FL0FETCHNS(1U)
 
 #define S_FW_IQ_CMD_FL0FETCHRO		6
 #define M_FW_IQ_CMD_FL0FETCHRO		0x1
@@ -4011,7 +5308,7 @@
 #define V_FW_IQ_CMD_FL0FETCHRO(x)	((x) << S_FW_IQ_CMD_FL0FETCHRO)
 #define G_FW_IQ_CMD_FL0FETCHRO(x)	\
     (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO)
-#define F_FW_IQ_CMD_FL0FETCHRO	V_FW_IQ_CMD_FL0FETCHRO(1U)
+#define F_FW_IQ_CMD_FL0FETCHRO		V_FW_IQ_CMD_FL0FETCHRO(1U)
 
 #define S_FW_IQ_CMD_FL0HOSTFCMODE	4
 #define M_FW_IQ_CMD_FL0HOSTFCMODE	0x3
@@ -4019,19 +5316,19 @@
 #define G_FW_IQ_CMD_FL0HOSTFCMODE(x)	\
     (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE)
 
-#define S_FW_IQ_CMD_FL0CPRIO	3
-#define M_FW_IQ_CMD_FL0CPRIO	0x1
-#define V_FW_IQ_CMD_FL0CPRIO(x)	((x) << S_FW_IQ_CMD_FL0CPRIO)
-#define G_FW_IQ_CMD_FL0CPRIO(x)	\
+#define S_FW_IQ_CMD_FL0CPRIO		3
+#define M_FW_IQ_CMD_FL0CPRIO		0x1
+#define V_FW_IQ_CMD_FL0CPRIO(x)		((x) << S_FW_IQ_CMD_FL0CPRIO)
+#define G_FW_IQ_CMD_FL0CPRIO(x)		\
     (((x) >> S_FW_IQ_CMD_FL0CPRIO) & M_FW_IQ_CMD_FL0CPRIO)
-#define F_FW_IQ_CMD_FL0CPRIO	V_FW_IQ_CMD_FL0CPRIO(1U)
+#define F_FW_IQ_CMD_FL0CPRIO		V_FW_IQ_CMD_FL0CPRIO(1U)
 
-#define S_FW_IQ_CMD_FL0PADEN	2
-#define M_FW_IQ_CMD_FL0PADEN	0x1
-#define V_FW_IQ_CMD_FL0PADEN(x)	((x) << S_FW_IQ_CMD_FL0PADEN)
-#define G_FW_IQ_CMD_FL0PADEN(x)	\
+#define S_FW_IQ_CMD_FL0PADEN		2
+#define M_FW_IQ_CMD_FL0PADEN		0x1
+#define V_FW_IQ_CMD_FL0PADEN(x)		((x) << S_FW_IQ_CMD_FL0PADEN)
+#define G_FW_IQ_CMD_FL0PADEN(x)		\
     (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN)
-#define F_FW_IQ_CMD_FL0PADEN	V_FW_IQ_CMD_FL0PADEN(1U)
+#define F_FW_IQ_CMD_FL0PADEN		V_FW_IQ_CMD_FL0PADEN(1U)
 
 #define S_FW_IQ_CMD_FL0PACKEN		1
 #define M_FW_IQ_CMD_FL0PACKEN		0x1
@@ -4038,7 +5335,7 @@
 #define V_FW_IQ_CMD_FL0PACKEN(x)	((x) << S_FW_IQ_CMD_FL0PACKEN)
 #define G_FW_IQ_CMD_FL0PACKEN(x)	\
     (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN)
-#define F_FW_IQ_CMD_FL0PACKEN	V_FW_IQ_CMD_FL0PACKEN(1U)
+#define F_FW_IQ_CMD_FL0PACKEN		V_FW_IQ_CMD_FL0PACKEN(1U)
 
 #define S_FW_IQ_CMD_FL0CONGEN		0
 #define M_FW_IQ_CMD_FL0CONGEN		0x1
@@ -4045,14 +5342,14 @@
 #define V_FW_IQ_CMD_FL0CONGEN(x)	((x) << S_FW_IQ_CMD_FL0CONGEN)
 #define G_FW_IQ_CMD_FL0CONGEN(x)	\
     (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN)
-#define F_FW_IQ_CMD_FL0CONGEN	V_FW_IQ_CMD_FL0CONGEN(1U)
+#define F_FW_IQ_CMD_FL0CONGEN		V_FW_IQ_CMD_FL0CONGEN(1U)
 
-#define S_FW_IQ_CMD_FL0DCAEN	15
-#define M_FW_IQ_CMD_FL0DCAEN	0x1
-#define V_FW_IQ_CMD_FL0DCAEN(x)	((x) << S_FW_IQ_CMD_FL0DCAEN)
-#define G_FW_IQ_CMD_FL0DCAEN(x)	\
+#define S_FW_IQ_CMD_FL0DCAEN		15
+#define M_FW_IQ_CMD_FL0DCAEN		0x1
+#define V_FW_IQ_CMD_FL0DCAEN(x)		((x) << S_FW_IQ_CMD_FL0DCAEN)
+#define G_FW_IQ_CMD_FL0DCAEN(x)		\
     (((x) >> S_FW_IQ_CMD_FL0DCAEN) & M_FW_IQ_CMD_FL0DCAEN)
-#define F_FW_IQ_CMD_FL0DCAEN	V_FW_IQ_CMD_FL0DCAEN(1U)
+#define F_FW_IQ_CMD_FL0DCAEN		V_FW_IQ_CMD_FL0DCAEN(1U)
 
 #define S_FW_IQ_CMD_FL0DCACPU		10
 #define M_FW_IQ_CMD_FL0DCACPU		0x1f
@@ -4060,16 +5357,16 @@
 #define G_FW_IQ_CMD_FL0DCACPU(x)	\
     (((x) >> S_FW_IQ_CMD_FL0DCACPU) & M_FW_IQ_CMD_FL0DCACPU)
 
-#define S_FW_IQ_CMD_FL0FBMIN	7
-#define M_FW_IQ_CMD_FL0FBMIN	0x7
-#define V_FW_IQ_CMD_FL0FBMIN(x)	((x) << S_FW_IQ_CMD_FL0FBMIN)
-#define G_FW_IQ_CMD_FL0FBMIN(x)	\
+#define S_FW_IQ_CMD_FL0FBMIN		7
+#define M_FW_IQ_CMD_FL0FBMIN		0x7
+#define V_FW_IQ_CMD_FL0FBMIN(x)		((x) << S_FW_IQ_CMD_FL0FBMIN)
+#define G_FW_IQ_CMD_FL0FBMIN(x)		\
     (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN)
 
-#define S_FW_IQ_CMD_FL0FBMAX	4
-#define M_FW_IQ_CMD_FL0FBMAX	0x7
-#define V_FW_IQ_CMD_FL0FBMAX(x)	((x) << S_FW_IQ_CMD_FL0FBMAX)
-#define G_FW_IQ_CMD_FL0FBMAX(x)	\
+#define S_FW_IQ_CMD_FL0FBMAX		4
+#define M_FW_IQ_CMD_FL0FBMAX		0x7
+#define V_FW_IQ_CMD_FL0FBMAX(x)		((x) << S_FW_IQ_CMD_FL0FBMAX)
+#define G_FW_IQ_CMD_FL0FBMAX(x)		\
     (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX)
 
 #define S_FW_IQ_CMD_FL0CIDXFTHRESHO	3
@@ -4091,6 +5388,13 @@
 #define G_FW_IQ_CMD_FL1CNGCHMAP(x)	\
     (((x) >> S_FW_IQ_CMD_FL1CNGCHMAP) & M_FW_IQ_CMD_FL1CNGCHMAP)
 
+#define S_FW_IQ_CMD_FL1CONGDROP		16
+#define M_FW_IQ_CMD_FL1CONGDROP		0x1
+#define V_FW_IQ_CMD_FL1CONGDROP(x)	((x) << S_FW_IQ_CMD_FL1CONGDROP)
+#define G_FW_IQ_CMD_FL1CONGDROP(x)	\
+    (((x) >> S_FW_IQ_CMD_FL1CONGDROP) & M_FW_IQ_CMD_FL1CONGDROP)
+#define F_FW_IQ_CMD_FL1CONGDROP		V_FW_IQ_CMD_FL1CONGDROP(1U)
+
 #define S_FW_IQ_CMD_FL1CACHELOCK	15
 #define M_FW_IQ_CMD_FL1CACHELOCK	0x1
 #define V_FW_IQ_CMD_FL1CACHELOCK(x)	((x) << S_FW_IQ_CMD_FL1CACHELOCK)
@@ -4098,12 +5402,12 @@
     (((x) >> S_FW_IQ_CMD_FL1CACHELOCK) & M_FW_IQ_CMD_FL1CACHELOCK)
 #define F_FW_IQ_CMD_FL1CACHELOCK	V_FW_IQ_CMD_FL1CACHELOCK(1U)
 
-#define S_FW_IQ_CMD_FL1DBP	14
-#define M_FW_IQ_CMD_FL1DBP	0x1
-#define V_FW_IQ_CMD_FL1DBP(x)	((x) << S_FW_IQ_CMD_FL1DBP)
-#define G_FW_IQ_CMD_FL1DBP(x)	\
+#define S_FW_IQ_CMD_FL1DBP		14
+#define M_FW_IQ_CMD_FL1DBP		0x1
+#define V_FW_IQ_CMD_FL1DBP(x)		((x) << S_FW_IQ_CMD_FL1DBP)
+#define G_FW_IQ_CMD_FL1DBP(x)		\
     (((x) >> S_FW_IQ_CMD_FL1DBP) & M_FW_IQ_CMD_FL1DBP)
-#define F_FW_IQ_CMD_FL1DBP	V_FW_IQ_CMD_FL1DBP(1U)
+#define F_FW_IQ_CMD_FL1DBP		V_FW_IQ_CMD_FL1DBP(1U)
 
 #define S_FW_IQ_CMD_FL1DATANS		13
 #define M_FW_IQ_CMD_FL1DATANS		0x1
@@ -4110,7 +5414,7 @@
 #define V_FW_IQ_CMD_FL1DATANS(x)	((x) << S_FW_IQ_CMD_FL1DATANS)
 #define G_FW_IQ_CMD_FL1DATANS(x)	\
     (((x) >> S_FW_IQ_CMD_FL1DATANS) & M_FW_IQ_CMD_FL1DATANS)
-#define F_FW_IQ_CMD_FL1DATANS	V_FW_IQ_CMD_FL1DATANS(1U)
+#define F_FW_IQ_CMD_FL1DATANS		V_FW_IQ_CMD_FL1DATANS(1U)
 
 #define S_FW_IQ_CMD_FL1DATARO		12
 #define M_FW_IQ_CMD_FL1DATARO		0x1
@@ -4117,7 +5421,7 @@
 #define V_FW_IQ_CMD_FL1DATARO(x)	((x) << S_FW_IQ_CMD_FL1DATARO)
 #define G_FW_IQ_CMD_FL1DATARO(x)	\
     (((x) >> S_FW_IQ_CMD_FL1DATARO) & M_FW_IQ_CMD_FL1DATARO)
-#define F_FW_IQ_CMD_FL1DATARO	V_FW_IQ_CMD_FL1DATARO(1U)
+#define F_FW_IQ_CMD_FL1DATARO		V_FW_IQ_CMD_FL1DATARO(1U)
 
 #define S_FW_IQ_CMD_FL1CONGCIF		11
 #define M_FW_IQ_CMD_FL1CONGCIF		0x1
@@ -4124,7 +5428,7 @@
 #define V_FW_IQ_CMD_FL1CONGCIF(x)	((x) << S_FW_IQ_CMD_FL1CONGCIF)
 #define G_FW_IQ_CMD_FL1CONGCIF(x)	\
     (((x) >> S_FW_IQ_CMD_FL1CONGCIF) & M_FW_IQ_CMD_FL1CONGCIF)
-#define F_FW_IQ_CMD_FL1CONGCIF	V_FW_IQ_CMD_FL1CONGCIF(1U)
+#define F_FW_IQ_CMD_FL1CONGCIF		V_FW_IQ_CMD_FL1CONGCIF(1U)
 
 #define S_FW_IQ_CMD_FL1ONCHIP		10
 #define M_FW_IQ_CMD_FL1ONCHIP		0x1
@@ -4131,7 +5435,7 @@
 #define V_FW_IQ_CMD_FL1ONCHIP(x)	((x) << S_FW_IQ_CMD_FL1ONCHIP)
 #define G_FW_IQ_CMD_FL1ONCHIP(x)	\
     (((x) >> S_FW_IQ_CMD_FL1ONCHIP) & M_FW_IQ_CMD_FL1ONCHIP)
-#define F_FW_IQ_CMD_FL1ONCHIP	V_FW_IQ_CMD_FL1ONCHIP(1U)
+#define F_FW_IQ_CMD_FL1ONCHIP		V_FW_IQ_CMD_FL1ONCHIP(1U)
 
 #define S_FW_IQ_CMD_FL1STATUSPGNS	9
 #define M_FW_IQ_CMD_FL1STATUSPGNS	0x1
@@ -4152,7 +5456,7 @@
 #define V_FW_IQ_CMD_FL1FETCHNS(x)	((x) << S_FW_IQ_CMD_FL1FETCHNS)
 #define G_FW_IQ_CMD_FL1FETCHNS(x)	\
     (((x) >> S_FW_IQ_CMD_FL1FETCHNS) & M_FW_IQ_CMD_FL1FETCHNS)
-#define F_FW_IQ_CMD_FL1FETCHNS	V_FW_IQ_CMD_FL1FETCHNS(1U)
+#define F_FW_IQ_CMD_FL1FETCHNS		V_FW_IQ_CMD_FL1FETCHNS(1U)
 
 #define S_FW_IQ_CMD_FL1FETCHRO		6
 #define M_FW_IQ_CMD_FL1FETCHRO		0x1
@@ -4159,7 +5463,7 @@
 #define V_FW_IQ_CMD_FL1FETCHRO(x)	((x) << S_FW_IQ_CMD_FL1FETCHRO)
 #define G_FW_IQ_CMD_FL1FETCHRO(x)	\
     (((x) >> S_FW_IQ_CMD_FL1FETCHRO) & M_FW_IQ_CMD_FL1FETCHRO)
-#define F_FW_IQ_CMD_FL1FETCHRO	V_FW_IQ_CMD_FL1FETCHRO(1U)
+#define F_FW_IQ_CMD_FL1FETCHRO		V_FW_IQ_CMD_FL1FETCHRO(1U)
 
 #define S_FW_IQ_CMD_FL1HOSTFCMODE	4
 #define M_FW_IQ_CMD_FL1HOSTFCMODE	0x3
@@ -4167,19 +5471,19 @@
 #define G_FW_IQ_CMD_FL1HOSTFCMODE(x)	\
     (((x) >> S_FW_IQ_CMD_FL1HOSTFCMODE) & M_FW_IQ_CMD_FL1HOSTFCMODE)
 
-#define S_FW_IQ_CMD_FL1CPRIO	3
-#define M_FW_IQ_CMD_FL1CPRIO	0x1
-#define V_FW_IQ_CMD_FL1CPRIO(x)	((x) << S_FW_IQ_CMD_FL1CPRIO)
-#define G_FW_IQ_CMD_FL1CPRIO(x)	\
+#define S_FW_IQ_CMD_FL1CPRIO		3
+#define M_FW_IQ_CMD_FL1CPRIO		0x1
+#define V_FW_IQ_CMD_FL1CPRIO(x)		((x) << S_FW_IQ_CMD_FL1CPRIO)
+#define G_FW_IQ_CMD_FL1CPRIO(x)		\
     (((x) >> S_FW_IQ_CMD_FL1CPRIO) & M_FW_IQ_CMD_FL1CPRIO)
-#define F_FW_IQ_CMD_FL1CPRIO	V_FW_IQ_CMD_FL1CPRIO(1U)
+#define F_FW_IQ_CMD_FL1CPRIO		V_FW_IQ_CMD_FL1CPRIO(1U)
 
-#define S_FW_IQ_CMD_FL1PADEN	2
-#define M_FW_IQ_CMD_FL1PADEN	0x1
-#define V_FW_IQ_CMD_FL1PADEN(x)	((x) << S_FW_IQ_CMD_FL1PADEN)
-#define G_FW_IQ_CMD_FL1PADEN(x)	\
+#define S_FW_IQ_CMD_FL1PADEN		2
+#define M_FW_IQ_CMD_FL1PADEN		0x1
+#define V_FW_IQ_CMD_FL1PADEN(x)		((x) << S_FW_IQ_CMD_FL1PADEN)
+#define G_FW_IQ_CMD_FL1PADEN(x)		\
     (((x) >> S_FW_IQ_CMD_FL1PADEN) & M_FW_IQ_CMD_FL1PADEN)
-#define F_FW_IQ_CMD_FL1PADEN	V_FW_IQ_CMD_FL1PADEN(1U)
+#define F_FW_IQ_CMD_FL1PADEN		V_FW_IQ_CMD_FL1PADEN(1U)
 
 #define S_FW_IQ_CMD_FL1PACKEN		1
 #define M_FW_IQ_CMD_FL1PACKEN		0x1
@@ -4186,7 +5490,7 @@
 #define V_FW_IQ_CMD_FL1PACKEN(x)	((x) << S_FW_IQ_CMD_FL1PACKEN)
 #define G_FW_IQ_CMD_FL1PACKEN(x)	\
     (((x) >> S_FW_IQ_CMD_FL1PACKEN) & M_FW_IQ_CMD_FL1PACKEN)
-#define F_FW_IQ_CMD_FL1PACKEN	V_FW_IQ_CMD_FL1PACKEN(1U)
+#define F_FW_IQ_CMD_FL1PACKEN		V_FW_IQ_CMD_FL1PACKEN(1U)
 
 #define S_FW_IQ_CMD_FL1CONGEN		0
 #define M_FW_IQ_CMD_FL1CONGEN		0x1
@@ -4193,14 +5497,14 @@
 #define V_FW_IQ_CMD_FL1CONGEN(x)	((x) << S_FW_IQ_CMD_FL1CONGEN)
 #define G_FW_IQ_CMD_FL1CONGEN(x)	\
     (((x) >> S_FW_IQ_CMD_FL1CONGEN) & M_FW_IQ_CMD_FL1CONGEN)
-#define F_FW_IQ_CMD_FL1CONGEN	V_FW_IQ_CMD_FL1CONGEN(1U)
+#define F_FW_IQ_CMD_FL1CONGEN		V_FW_IQ_CMD_FL1CONGEN(1U)
 
-#define S_FW_IQ_CMD_FL1DCAEN	15
-#define M_FW_IQ_CMD_FL1DCAEN	0x1
-#define V_FW_IQ_CMD_FL1DCAEN(x)	((x) << S_FW_IQ_CMD_FL1DCAEN)
-#define G_FW_IQ_CMD_FL1DCAEN(x)	\
+#define S_FW_IQ_CMD_FL1DCAEN		15
+#define M_FW_IQ_CMD_FL1DCAEN		0x1
+#define V_FW_IQ_CMD_FL1DCAEN(x)		((x) << S_FW_IQ_CMD_FL1DCAEN)
+#define G_FW_IQ_CMD_FL1DCAEN(x)		\
     (((x) >> S_FW_IQ_CMD_FL1DCAEN) & M_FW_IQ_CMD_FL1DCAEN)
-#define F_FW_IQ_CMD_FL1DCAEN	V_FW_IQ_CMD_FL1DCAEN(1U)
+#define F_FW_IQ_CMD_FL1DCAEN		V_FW_IQ_CMD_FL1DCAEN(1U)
 
 #define S_FW_IQ_CMD_FL1DCACPU		10
 #define M_FW_IQ_CMD_FL1DCACPU		0x1f
@@ -4208,16 +5512,16 @@
 #define G_FW_IQ_CMD_FL1DCACPU(x)	\
     (((x) >> S_FW_IQ_CMD_FL1DCACPU) & M_FW_IQ_CMD_FL1DCACPU)
 
-#define S_FW_IQ_CMD_FL1FBMIN	7
-#define M_FW_IQ_CMD_FL1FBMIN	0x7
-#define V_FW_IQ_CMD_FL1FBMIN(x)	((x) << S_FW_IQ_CMD_FL1FBMIN)
-#define G_FW_IQ_CMD_FL1FBMIN(x)	\
+#define S_FW_IQ_CMD_FL1FBMIN		7
+#define M_FW_IQ_CMD_FL1FBMIN		0x7
+#define V_FW_IQ_CMD_FL1FBMIN(x)		((x) << S_FW_IQ_CMD_FL1FBMIN)
+#define G_FW_IQ_CMD_FL1FBMIN(x)		\
     (((x) >> S_FW_IQ_CMD_FL1FBMIN) & M_FW_IQ_CMD_FL1FBMIN)
 
-#define S_FW_IQ_CMD_FL1FBMAX	4
-#define M_FW_IQ_CMD_FL1FBMAX	0x7
-#define V_FW_IQ_CMD_FL1FBMAX(x)	((x) << S_FW_IQ_CMD_FL1FBMAX)
-#define G_FW_IQ_CMD_FL1FBMAX(x)	\
+#define S_FW_IQ_CMD_FL1FBMAX		4
+#define M_FW_IQ_CMD_FL1FBMAX		0x7
+#define V_FW_IQ_CMD_FL1FBMAX(x)		((x) << S_FW_IQ_CMD_FL1FBMAX)
+#define G_FW_IQ_CMD_FL1FBMAX(x)		\
     (((x) >> S_FW_IQ_CMD_FL1FBMAX) & M_FW_IQ_CMD_FL1FBMAX)
 
 #define S_FW_IQ_CMD_FL1CIDXFTHRESHO	3
@@ -4243,16 +5547,16 @@
 	__be64 eqaddr;
 };
 
-#define S_FW_EQ_MNGT_CMD_PFN	8
-#define M_FW_EQ_MNGT_CMD_PFN	0x7
-#define V_FW_EQ_MNGT_CMD_PFN(x)	((x) << S_FW_EQ_MNGT_CMD_PFN)
-#define G_FW_EQ_MNGT_CMD_PFN(x)	\
+#define S_FW_EQ_MNGT_CMD_PFN		8
+#define M_FW_EQ_MNGT_CMD_PFN		0x7
+#define V_FW_EQ_MNGT_CMD_PFN(x)		((x) << S_FW_EQ_MNGT_CMD_PFN)
+#define G_FW_EQ_MNGT_CMD_PFN(x)		\
     (((x) >> S_FW_EQ_MNGT_CMD_PFN) & M_FW_EQ_MNGT_CMD_PFN)
 
-#define S_FW_EQ_MNGT_CMD_VFN	0
-#define M_FW_EQ_MNGT_CMD_VFN	0xff
-#define V_FW_EQ_MNGT_CMD_VFN(x)	((x) << S_FW_EQ_MNGT_CMD_VFN)
-#define G_FW_EQ_MNGT_CMD_VFN(x)	\
+#define S_FW_EQ_MNGT_CMD_VFN		0
+#define M_FW_EQ_MNGT_CMD_VFN		0xff
+#define V_FW_EQ_MNGT_CMD_VFN(x)		((x) << S_FW_EQ_MNGT_CMD_VFN)
+#define G_FW_EQ_MNGT_CMD_VFN(x)		\
     (((x) >> S_FW_EQ_MNGT_CMD_VFN) & M_FW_EQ_MNGT_CMD_VFN)
 
 #define S_FW_EQ_MNGT_CMD_ALLOC		31
@@ -4260,7 +5564,7 @@
 #define V_FW_EQ_MNGT_CMD_ALLOC(x)	((x) << S_FW_EQ_MNGT_CMD_ALLOC)
 #define G_FW_EQ_MNGT_CMD_ALLOC(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_ALLOC) & M_FW_EQ_MNGT_CMD_ALLOC)
-#define F_FW_EQ_MNGT_CMD_ALLOC	V_FW_EQ_MNGT_CMD_ALLOC(1U)
+#define F_FW_EQ_MNGT_CMD_ALLOC		V_FW_EQ_MNGT_CMD_ALLOC(1U)
 
 #define S_FW_EQ_MNGT_CMD_FREE		30
 #define M_FW_EQ_MNGT_CMD_FREE		0x1
@@ -4267,7 +5571,7 @@
 #define V_FW_EQ_MNGT_CMD_FREE(x)	((x) << S_FW_EQ_MNGT_CMD_FREE)
 #define G_FW_EQ_MNGT_CMD_FREE(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_FREE) & M_FW_EQ_MNGT_CMD_FREE)
-#define F_FW_EQ_MNGT_CMD_FREE	V_FW_EQ_MNGT_CMD_FREE(1U)
+#define F_FW_EQ_MNGT_CMD_FREE		V_FW_EQ_MNGT_CMD_FREE(1U)
 
 #define S_FW_EQ_MNGT_CMD_MODIFY		29
 #define M_FW_EQ_MNGT_CMD_MODIFY		0x1
@@ -4274,7 +5578,7 @@
 #define V_FW_EQ_MNGT_CMD_MODIFY(x)	((x) << S_FW_EQ_MNGT_CMD_MODIFY)
 #define G_FW_EQ_MNGT_CMD_MODIFY(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_MODIFY) & M_FW_EQ_MNGT_CMD_MODIFY)
-#define F_FW_EQ_MNGT_CMD_MODIFY	V_FW_EQ_MNGT_CMD_MODIFY(1U)
+#define F_FW_EQ_MNGT_CMD_MODIFY		V_FW_EQ_MNGT_CMD_MODIFY(1U)
 
 #define S_FW_EQ_MNGT_CMD_EQSTART	28
 #define M_FW_EQ_MNGT_CMD_EQSTART	0x1
@@ -4288,7 +5592,7 @@
 #define V_FW_EQ_MNGT_CMD_EQSTOP(x)	((x) << S_FW_EQ_MNGT_CMD_EQSTOP)
 #define G_FW_EQ_MNGT_CMD_EQSTOP(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP)
-#define F_FW_EQ_MNGT_CMD_EQSTOP	V_FW_EQ_MNGT_CMD_EQSTOP(1U)
+#define F_FW_EQ_MNGT_CMD_EQSTOP		V_FW_EQ_MNGT_CMD_EQSTOP(1U)
 
 #define S_FW_EQ_MNGT_CMD_CMPLIQID	20
 #define M_FW_EQ_MNGT_CMD_CMPLIQID	0xfff
@@ -4354,7 +5658,7 @@
 #define V_FW_EQ_MNGT_CMD_CPRIO(x)	((x) << S_FW_EQ_MNGT_CMD_CPRIO)
 #define G_FW_EQ_MNGT_CMD_CPRIO(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_CPRIO) & M_FW_EQ_MNGT_CMD_CPRIO)
-#define F_FW_EQ_MNGT_CMD_CPRIO	V_FW_EQ_MNGT_CMD_CPRIO(1U)
+#define F_FW_EQ_MNGT_CMD_CPRIO		V_FW_EQ_MNGT_CMD_CPRIO(1U)
 
 #define S_FW_EQ_MNGT_CMD_ONCHIP		18
 #define M_FW_EQ_MNGT_CMD_ONCHIP		0x1
@@ -4361,7 +5665,7 @@
 #define V_FW_EQ_MNGT_CMD_ONCHIP(x)	((x) << S_FW_EQ_MNGT_CMD_ONCHIP)
 #define G_FW_EQ_MNGT_CMD_ONCHIP(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_ONCHIP) & M_FW_EQ_MNGT_CMD_ONCHIP)
-#define F_FW_EQ_MNGT_CMD_ONCHIP	V_FW_EQ_MNGT_CMD_ONCHIP(1U)
+#define F_FW_EQ_MNGT_CMD_ONCHIP		V_FW_EQ_MNGT_CMD_ONCHIP(1U)
 
 #define S_FW_EQ_MNGT_CMD_PCIECHN	16
 #define M_FW_EQ_MNGT_CMD_PCIECHN	0x3
@@ -4380,7 +5684,7 @@
 #define V_FW_EQ_MNGT_CMD_DCAEN(x)	((x) << S_FW_EQ_MNGT_CMD_DCAEN)
 #define G_FW_EQ_MNGT_CMD_DCAEN(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_DCAEN) & M_FW_EQ_MNGT_CMD_DCAEN)
-#define F_FW_EQ_MNGT_CMD_DCAEN	V_FW_EQ_MNGT_CMD_DCAEN(1U)
+#define F_FW_EQ_MNGT_CMD_DCAEN		V_FW_EQ_MNGT_CMD_DCAEN(1U)
 
 #define S_FW_EQ_MNGT_CMD_DCACPU		26
 #define M_FW_EQ_MNGT_CMD_DCACPU		0x1f
@@ -4400,11 +5704,11 @@
 #define G_FW_EQ_MNGT_CMD_FBMAX(x)	\
     (((x) >> S_FW_EQ_MNGT_CMD_FBMAX) & M_FW_EQ_MNGT_CMD_FBMAX)
 
-#define S_FW_EQ_MNGT_CMD_CIDXFTHRESHO		19
-#define M_FW_EQ_MNGT_CMD_CIDXFTHRESHO		0x1
-#define V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x)	\
+#define S_FW_EQ_MNGT_CMD_CIDXFTHRESHO	19
+#define M_FW_EQ_MNGT_CMD_CIDXFTHRESHO	0x1
+#define V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \
     ((x) << S_FW_EQ_MNGT_CMD_CIDXFTHRESHO)
-#define G_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x)	\
+#define G_FW_EQ_MNGT_CMD_CIDXFTHRESHO(x) \
     (((x) >> S_FW_EQ_MNGT_CMD_CIDXFTHRESHO) & M_FW_EQ_MNGT_CMD_CIDXFTHRESHO)
 #define F_FW_EQ_MNGT_CMD_CIDXFTHRESHO	V_FW_EQ_MNGT_CMD_CIDXFTHRESHO(1U)
 
@@ -4428,21 +5732,21 @@
 	__be32 fetchszm_to_iqid;
 	__be32 dcaen_to_eqsize;
 	__be64 eqaddr;
-	__be32 viid_pkd;
+	__be32 autoequiqe_to_viid;
 	__be32 r8_lo;
 	__be64 r9;
 };
 
-#define S_FW_EQ_ETH_CMD_PFN	8
-#define M_FW_EQ_ETH_CMD_PFN	0x7
-#define V_FW_EQ_ETH_CMD_PFN(x)	((x) << S_FW_EQ_ETH_CMD_PFN)
-#define G_FW_EQ_ETH_CMD_PFN(x)	\
+#define S_FW_EQ_ETH_CMD_PFN		8
+#define M_FW_EQ_ETH_CMD_PFN		0x7
+#define V_FW_EQ_ETH_CMD_PFN(x)		((x) << S_FW_EQ_ETH_CMD_PFN)
+#define G_FW_EQ_ETH_CMD_PFN(x)		\
     (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN)
 
-#define S_FW_EQ_ETH_CMD_VFN	0
-#define M_FW_EQ_ETH_CMD_VFN	0xff
-#define V_FW_EQ_ETH_CMD_VFN(x)	((x) << S_FW_EQ_ETH_CMD_VFN)
-#define G_FW_EQ_ETH_CMD_VFN(x)	\
+#define S_FW_EQ_ETH_CMD_VFN		0
+#define M_FW_EQ_ETH_CMD_VFN		0xff
+#define V_FW_EQ_ETH_CMD_VFN(x)		((x) << S_FW_EQ_ETH_CMD_VFN)
+#define G_FW_EQ_ETH_CMD_VFN(x)		\
     (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN)
 
 #define S_FW_EQ_ETH_CMD_ALLOC		31
@@ -4450,14 +5754,14 @@
 #define V_FW_EQ_ETH_CMD_ALLOC(x)	((x) << S_FW_EQ_ETH_CMD_ALLOC)
 #define G_FW_EQ_ETH_CMD_ALLOC(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC)
-#define F_FW_EQ_ETH_CMD_ALLOC	V_FW_EQ_ETH_CMD_ALLOC(1U)
+#define F_FW_EQ_ETH_CMD_ALLOC		V_FW_EQ_ETH_CMD_ALLOC(1U)
 
-#define S_FW_EQ_ETH_CMD_FREE	30
-#define M_FW_EQ_ETH_CMD_FREE	0x1
-#define V_FW_EQ_ETH_CMD_FREE(x)	((x) << S_FW_EQ_ETH_CMD_FREE)
-#define G_FW_EQ_ETH_CMD_FREE(x)	\
+#define S_FW_EQ_ETH_CMD_FREE		30
+#define M_FW_EQ_ETH_CMD_FREE		0x1
+#define V_FW_EQ_ETH_CMD_FREE(x)		((x) << S_FW_EQ_ETH_CMD_FREE)
+#define G_FW_EQ_ETH_CMD_FREE(x)		\
     (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE)
-#define F_FW_EQ_ETH_CMD_FREE	V_FW_EQ_ETH_CMD_FREE(1U)
+#define F_FW_EQ_ETH_CMD_FREE		V_FW_EQ_ETH_CMD_FREE(1U)
 
 #define S_FW_EQ_ETH_CMD_MODIFY		29
 #define M_FW_EQ_ETH_CMD_MODIFY		0x1
@@ -4464,7 +5768,7 @@
 #define V_FW_EQ_ETH_CMD_MODIFY(x)	((x) << S_FW_EQ_ETH_CMD_MODIFY)
 #define G_FW_EQ_ETH_CMD_MODIFY(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_MODIFY) & M_FW_EQ_ETH_CMD_MODIFY)
-#define F_FW_EQ_ETH_CMD_MODIFY	V_FW_EQ_ETH_CMD_MODIFY(1U)
+#define F_FW_EQ_ETH_CMD_MODIFY		V_FW_EQ_ETH_CMD_MODIFY(1U)
 
 #define S_FW_EQ_ETH_CMD_EQSTART		28
 #define M_FW_EQ_ETH_CMD_EQSTART		0x1
@@ -4471,7 +5775,7 @@
 #define V_FW_EQ_ETH_CMD_EQSTART(x)	((x) << S_FW_EQ_ETH_CMD_EQSTART)
 #define G_FW_EQ_ETH_CMD_EQSTART(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART)
-#define F_FW_EQ_ETH_CMD_EQSTART	V_FW_EQ_ETH_CMD_EQSTART(1U)
+#define F_FW_EQ_ETH_CMD_EQSTART		V_FW_EQ_ETH_CMD_EQSTART(1U)
 
 #define S_FW_EQ_ETH_CMD_EQSTOP		27
 #define M_FW_EQ_ETH_CMD_EQSTOP		0x1
@@ -4478,12 +5782,12 @@
 #define V_FW_EQ_ETH_CMD_EQSTOP(x)	((x) << S_FW_EQ_ETH_CMD_EQSTOP)
 #define G_FW_EQ_ETH_CMD_EQSTOP(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP)
-#define F_FW_EQ_ETH_CMD_EQSTOP	V_FW_EQ_ETH_CMD_EQSTOP(1U)
+#define F_FW_EQ_ETH_CMD_EQSTOP		V_FW_EQ_ETH_CMD_EQSTOP(1U)
 
-#define S_FW_EQ_ETH_CMD_EQID	0
-#define M_FW_EQ_ETH_CMD_EQID	0xfffff
-#define V_FW_EQ_ETH_CMD_EQID(x)	((x) << S_FW_EQ_ETH_CMD_EQID)
-#define G_FW_EQ_ETH_CMD_EQID(x)	\
+#define S_FW_EQ_ETH_CMD_EQID		0
+#define M_FW_EQ_ETH_CMD_EQID		0xfffff
+#define V_FW_EQ_ETH_CMD_EQID(x)		((x) << S_FW_EQ_ETH_CMD_EQID)
+#define G_FW_EQ_ETH_CMD_EQID(x)		\
     (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
 
 #define S_FW_EQ_ETH_CMD_PHYSEQID	0
@@ -4518,7 +5822,7 @@
 #define V_FW_EQ_ETH_CMD_FETCHNS(x)	((x) << S_FW_EQ_ETH_CMD_FETCHNS)
 #define G_FW_EQ_ETH_CMD_FETCHNS(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_FETCHNS) & M_FW_EQ_ETH_CMD_FETCHNS)
-#define F_FW_EQ_ETH_CMD_FETCHNS	V_FW_EQ_ETH_CMD_FETCHNS(1U)
+#define F_FW_EQ_ETH_CMD_FETCHNS		V_FW_EQ_ETH_CMD_FETCHNS(1U)
 
 #define S_FW_EQ_ETH_CMD_FETCHRO		22
 #define M_FW_EQ_ETH_CMD_FETCHRO		0x1
@@ -4525,7 +5829,7 @@
 #define V_FW_EQ_ETH_CMD_FETCHRO(x)	((x) << S_FW_EQ_ETH_CMD_FETCHRO)
 #define G_FW_EQ_ETH_CMD_FETCHRO(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO)
-#define F_FW_EQ_ETH_CMD_FETCHRO	V_FW_EQ_ETH_CMD_FETCHRO(1U)
+#define F_FW_EQ_ETH_CMD_FETCHRO		V_FW_EQ_ETH_CMD_FETCHRO(1U)
 
 #define S_FW_EQ_ETH_CMD_HOSTFCMODE	20
 #define M_FW_EQ_ETH_CMD_HOSTFCMODE	0x3
@@ -4538,7 +5842,7 @@
 #define V_FW_EQ_ETH_CMD_CPRIO(x)	((x) << S_FW_EQ_ETH_CMD_CPRIO)
 #define G_FW_EQ_ETH_CMD_CPRIO(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_CPRIO) & M_FW_EQ_ETH_CMD_CPRIO)
-#define F_FW_EQ_ETH_CMD_CPRIO	V_FW_EQ_ETH_CMD_CPRIO(1U)
+#define F_FW_EQ_ETH_CMD_CPRIO		V_FW_EQ_ETH_CMD_CPRIO(1U)
 
 #define S_FW_EQ_ETH_CMD_ONCHIP		18
 #define M_FW_EQ_ETH_CMD_ONCHIP		0x1
@@ -4545,7 +5849,7 @@
 #define V_FW_EQ_ETH_CMD_ONCHIP(x)	((x) << S_FW_EQ_ETH_CMD_ONCHIP)
 #define G_FW_EQ_ETH_CMD_ONCHIP(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_ONCHIP) & M_FW_EQ_ETH_CMD_ONCHIP)
-#define F_FW_EQ_ETH_CMD_ONCHIP	V_FW_EQ_ETH_CMD_ONCHIP(1U)
+#define F_FW_EQ_ETH_CMD_ONCHIP		V_FW_EQ_ETH_CMD_ONCHIP(1U)
 
 #define S_FW_EQ_ETH_CMD_PCIECHN		16
 #define M_FW_EQ_ETH_CMD_PCIECHN		0x3
@@ -4553,10 +5857,10 @@
 #define G_FW_EQ_ETH_CMD_PCIECHN(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN)
 
-#define S_FW_EQ_ETH_CMD_IQID	0
-#define M_FW_EQ_ETH_CMD_IQID	0xffff
-#define V_FW_EQ_ETH_CMD_IQID(x)	((x) << S_FW_EQ_ETH_CMD_IQID)
-#define G_FW_EQ_ETH_CMD_IQID(x)	\
+#define S_FW_EQ_ETH_CMD_IQID		0
+#define M_FW_EQ_ETH_CMD_IQID		0xffff
+#define V_FW_EQ_ETH_CMD_IQID(x)		((x) << S_FW_EQ_ETH_CMD_IQID)
+#define G_FW_EQ_ETH_CMD_IQID(x)		\
     (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID)
 
 #define S_FW_EQ_ETH_CMD_DCAEN		31
@@ -4564,7 +5868,7 @@
 #define V_FW_EQ_ETH_CMD_DCAEN(x)	((x) << S_FW_EQ_ETH_CMD_DCAEN)
 #define G_FW_EQ_ETH_CMD_DCAEN(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_DCAEN) & M_FW_EQ_ETH_CMD_DCAEN)
-#define F_FW_EQ_ETH_CMD_DCAEN	V_FW_EQ_ETH_CMD_DCAEN(1U)
+#define F_FW_EQ_ETH_CMD_DCAEN		V_FW_EQ_ETH_CMD_DCAEN(1U)
 
 #define S_FW_EQ_ETH_CMD_DCACPU		26
 #define M_FW_EQ_ETH_CMD_DCACPU		0x1f
@@ -4603,10 +5907,24 @@
 #define G_FW_EQ_ETH_CMD_EQSIZE(x)	\
     (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE)
 
-#define S_FW_EQ_ETH_CMD_VIID	16
-#define M_FW_EQ_ETH_CMD_VIID	0xfff
-#define V_FW_EQ_ETH_CMD_VIID(x)	((x) << S_FW_EQ_ETH_CMD_VIID)
-#define G_FW_EQ_ETH_CMD_VIID(x)	\
+#define S_FW_EQ_ETH_CMD_AUTOEQUIQE	31
+#define M_FW_EQ_ETH_CMD_AUTOEQUIQE	0x1
+#define V_FW_EQ_ETH_CMD_AUTOEQUIQE(x)	((x) << S_FW_EQ_ETH_CMD_AUTOEQUIQE)
+#define G_FW_EQ_ETH_CMD_AUTOEQUIQE(x)	\
+    (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUIQE) & M_FW_EQ_ETH_CMD_AUTOEQUIQE)
+#define F_FW_EQ_ETH_CMD_AUTOEQUIQE	V_FW_EQ_ETH_CMD_AUTOEQUIQE(1U)
+
+#define S_FW_EQ_ETH_CMD_AUTOEQUEQE	30
+#define M_FW_EQ_ETH_CMD_AUTOEQUEQE	0x1
+#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x)	((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x)	\
+    (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define F_FW_EQ_ETH_CMD_AUTOEQUEQE	V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U)
+
+#define S_FW_EQ_ETH_CMD_VIID		16
+#define M_FW_EQ_ETH_CMD_VIID		0xfff
+#define V_FW_EQ_ETH_CMD_VIID(x)		((x) << S_FW_EQ_ETH_CMD_VIID)
+#define G_FW_EQ_ETH_CMD_VIID(x)		\
     (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
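
The old viid_pkd word in struct fw_eq_eth_cmd became autoequiqe_to_viid above;
the two new auto-equeue bits share it with the 12-bit VIID. A sketch of
packing it, with c a struct fw_eq_eth_cmd under construction and viid a
placeholder value:

    /* request automatic egress-queue updates and bind the EQ to a VI */
    c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
	V_FW_EQ_ETH_CMD_VIID(viid));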
 
 struct fw_eq_ctrl_cmd {
@@ -4619,16 +5937,16 @@
 	__be64 eqaddr;
 };
 
-#define S_FW_EQ_CTRL_CMD_PFN	8
-#define M_FW_EQ_CTRL_CMD_PFN	0x7
-#define V_FW_EQ_CTRL_CMD_PFN(x)	((x) << S_FW_EQ_CTRL_CMD_PFN)
-#define G_FW_EQ_CTRL_CMD_PFN(x)	\
+#define S_FW_EQ_CTRL_CMD_PFN		8
+#define M_FW_EQ_CTRL_CMD_PFN		0x7
+#define V_FW_EQ_CTRL_CMD_PFN(x)		((x) << S_FW_EQ_CTRL_CMD_PFN)
+#define G_FW_EQ_CTRL_CMD_PFN(x)		\
     (((x) >> S_FW_EQ_CTRL_CMD_PFN) & M_FW_EQ_CTRL_CMD_PFN)
 
-#define S_FW_EQ_CTRL_CMD_VFN	0
-#define M_FW_EQ_CTRL_CMD_VFN	0xff
-#define V_FW_EQ_CTRL_CMD_VFN(x)	((x) << S_FW_EQ_CTRL_CMD_VFN)
-#define G_FW_EQ_CTRL_CMD_VFN(x)	\
+#define S_FW_EQ_CTRL_CMD_VFN		0
+#define M_FW_EQ_CTRL_CMD_VFN		0xff
+#define V_FW_EQ_CTRL_CMD_VFN(x)		((x) << S_FW_EQ_CTRL_CMD_VFN)
+#define G_FW_EQ_CTRL_CMD_VFN(x)		\
     (((x) >> S_FW_EQ_CTRL_CMD_VFN) & M_FW_EQ_CTRL_CMD_VFN)
 
 #define S_FW_EQ_CTRL_CMD_ALLOC		31
@@ -4636,7 +5954,7 @@
 #define V_FW_EQ_CTRL_CMD_ALLOC(x)	((x) << S_FW_EQ_CTRL_CMD_ALLOC)
 #define G_FW_EQ_CTRL_CMD_ALLOC(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_ALLOC) & M_FW_EQ_CTRL_CMD_ALLOC)
-#define F_FW_EQ_CTRL_CMD_ALLOC	V_FW_EQ_CTRL_CMD_ALLOC(1U)
+#define F_FW_EQ_CTRL_CMD_ALLOC		V_FW_EQ_CTRL_CMD_ALLOC(1U)
 
 #define S_FW_EQ_CTRL_CMD_FREE		30
 #define M_FW_EQ_CTRL_CMD_FREE		0x1
@@ -4643,7 +5961,7 @@
 #define V_FW_EQ_CTRL_CMD_FREE(x)	((x) << S_FW_EQ_CTRL_CMD_FREE)
 #define G_FW_EQ_CTRL_CMD_FREE(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_FREE) & M_FW_EQ_CTRL_CMD_FREE)
-#define F_FW_EQ_CTRL_CMD_FREE	V_FW_EQ_CTRL_CMD_FREE(1U)
+#define F_FW_EQ_CTRL_CMD_FREE		V_FW_EQ_CTRL_CMD_FREE(1U)
 
 #define S_FW_EQ_CTRL_CMD_MODIFY		29
 #define M_FW_EQ_CTRL_CMD_MODIFY		0x1
@@ -4650,7 +5968,7 @@
 #define V_FW_EQ_CTRL_CMD_MODIFY(x)	((x) << S_FW_EQ_CTRL_CMD_MODIFY)
 #define G_FW_EQ_CTRL_CMD_MODIFY(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_MODIFY) & M_FW_EQ_CTRL_CMD_MODIFY)
-#define F_FW_EQ_CTRL_CMD_MODIFY	V_FW_EQ_CTRL_CMD_MODIFY(1U)
+#define F_FW_EQ_CTRL_CMD_MODIFY		V_FW_EQ_CTRL_CMD_MODIFY(1U)
 
 #define S_FW_EQ_CTRL_CMD_EQSTART	28
 #define M_FW_EQ_CTRL_CMD_EQSTART	0x1
@@ -4664,7 +5982,7 @@
 #define V_FW_EQ_CTRL_CMD_EQSTOP(x)	((x) << S_FW_EQ_CTRL_CMD_EQSTOP)
 #define G_FW_EQ_CTRL_CMD_EQSTOP(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
-#define F_FW_EQ_CTRL_CMD_EQSTOP	V_FW_EQ_CTRL_CMD_EQSTOP(1U)
+#define F_FW_EQ_CTRL_CMD_EQSTOP		V_FW_EQ_CTRL_CMD_EQSTOP(1U)
 
 #define S_FW_EQ_CTRL_CMD_CMPLIQID	20
 #define M_FW_EQ_CTRL_CMD_CMPLIQID	0xfff
@@ -4730,7 +6048,7 @@
 #define V_FW_EQ_CTRL_CMD_CPRIO(x)	((x) << S_FW_EQ_CTRL_CMD_CPRIO)
 #define G_FW_EQ_CTRL_CMD_CPRIO(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_CPRIO) & M_FW_EQ_CTRL_CMD_CPRIO)
-#define F_FW_EQ_CTRL_CMD_CPRIO	V_FW_EQ_CTRL_CMD_CPRIO(1U)
+#define F_FW_EQ_CTRL_CMD_CPRIO		V_FW_EQ_CTRL_CMD_CPRIO(1U)
 
 #define S_FW_EQ_CTRL_CMD_ONCHIP		18
 #define M_FW_EQ_CTRL_CMD_ONCHIP		0x1
@@ -4737,7 +6055,7 @@
 #define V_FW_EQ_CTRL_CMD_ONCHIP(x)	((x) << S_FW_EQ_CTRL_CMD_ONCHIP)
 #define G_FW_EQ_CTRL_CMD_ONCHIP(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_ONCHIP) & M_FW_EQ_CTRL_CMD_ONCHIP)
-#define F_FW_EQ_CTRL_CMD_ONCHIP	V_FW_EQ_CTRL_CMD_ONCHIP(1U)
+#define F_FW_EQ_CTRL_CMD_ONCHIP		V_FW_EQ_CTRL_CMD_ONCHIP(1U)
 
 #define S_FW_EQ_CTRL_CMD_PCIECHN	16
 #define M_FW_EQ_CTRL_CMD_PCIECHN	0x3
@@ -4756,7 +6074,7 @@
 #define V_FW_EQ_CTRL_CMD_DCAEN(x)	((x) << S_FW_EQ_CTRL_CMD_DCAEN)
 #define G_FW_EQ_CTRL_CMD_DCAEN(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_DCAEN) & M_FW_EQ_CTRL_CMD_DCAEN)
-#define F_FW_EQ_CTRL_CMD_DCAEN	V_FW_EQ_CTRL_CMD_DCAEN(1U)
+#define F_FW_EQ_CTRL_CMD_DCAEN		V_FW_EQ_CTRL_CMD_DCAEN(1U)
 
 #define S_FW_EQ_CTRL_CMD_DCACPU		26
 #define M_FW_EQ_CTRL_CMD_DCACPU		0x1f
@@ -4776,11 +6094,11 @@
 #define G_FW_EQ_CTRL_CMD_FBMAX(x)	\
     (((x) >> S_FW_EQ_CTRL_CMD_FBMAX) & M_FW_EQ_CTRL_CMD_FBMAX)
 
-#define S_FW_EQ_CTRL_CMD_CIDXFTHRESHO		19
-#define M_FW_EQ_CTRL_CMD_CIDXFTHRESHO		0x1
-#define V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x)	\
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESHO	19
+#define M_FW_EQ_CTRL_CMD_CIDXFTHRESHO	0x1
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \
     ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESHO)
-#define G_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x)	\
+#define G_FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) \
     (((x) >> S_FW_EQ_CTRL_CMD_CIDXFTHRESHO) & M_FW_EQ_CTRL_CMD_CIDXFTHRESHO)
 #define F_FW_EQ_CTRL_CMD_CIDXFTHRESHO	V_FW_EQ_CTRL_CMD_CIDXFTHRESHO(1U)
 
@@ -4806,16 +6124,16 @@
 	__be64 eqaddr;
 };
 
-#define S_FW_EQ_OFLD_CMD_PFN	8
-#define M_FW_EQ_OFLD_CMD_PFN	0x7
-#define V_FW_EQ_OFLD_CMD_PFN(x)	((x) << S_FW_EQ_OFLD_CMD_PFN)
-#define G_FW_EQ_OFLD_CMD_PFN(x)	\
+#define S_FW_EQ_OFLD_CMD_PFN		8
+#define M_FW_EQ_OFLD_CMD_PFN		0x7
+#define V_FW_EQ_OFLD_CMD_PFN(x)		((x) << S_FW_EQ_OFLD_CMD_PFN)
+#define G_FW_EQ_OFLD_CMD_PFN(x)		\
     (((x) >> S_FW_EQ_OFLD_CMD_PFN) & M_FW_EQ_OFLD_CMD_PFN)
 
-#define S_FW_EQ_OFLD_CMD_VFN	0
-#define M_FW_EQ_OFLD_CMD_VFN	0xff
-#define V_FW_EQ_OFLD_CMD_VFN(x)	((x) << S_FW_EQ_OFLD_CMD_VFN)
-#define G_FW_EQ_OFLD_CMD_VFN(x)	\
+#define S_FW_EQ_OFLD_CMD_VFN		0
+#define M_FW_EQ_OFLD_CMD_VFN		0xff
+#define V_FW_EQ_OFLD_CMD_VFN(x)		((x) << S_FW_EQ_OFLD_CMD_VFN)
+#define G_FW_EQ_OFLD_CMD_VFN(x)		\
     (((x) >> S_FW_EQ_OFLD_CMD_VFN) & M_FW_EQ_OFLD_CMD_VFN)
 
 #define S_FW_EQ_OFLD_CMD_ALLOC		31
@@ -4823,7 +6141,7 @@
 #define V_FW_EQ_OFLD_CMD_ALLOC(x)	((x) << S_FW_EQ_OFLD_CMD_ALLOC)
 #define G_FW_EQ_OFLD_CMD_ALLOC(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_ALLOC) & M_FW_EQ_OFLD_CMD_ALLOC)
-#define F_FW_EQ_OFLD_CMD_ALLOC	V_FW_EQ_OFLD_CMD_ALLOC(1U)
+#define F_FW_EQ_OFLD_CMD_ALLOC		V_FW_EQ_OFLD_CMD_ALLOC(1U)
 
 #define S_FW_EQ_OFLD_CMD_FREE		30
 #define M_FW_EQ_OFLD_CMD_FREE		0x1
@@ -4830,7 +6148,7 @@
 #define V_FW_EQ_OFLD_CMD_FREE(x)	((x) << S_FW_EQ_OFLD_CMD_FREE)
 #define G_FW_EQ_OFLD_CMD_FREE(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_FREE) & M_FW_EQ_OFLD_CMD_FREE)
-#define F_FW_EQ_OFLD_CMD_FREE	V_FW_EQ_OFLD_CMD_FREE(1U)
+#define F_FW_EQ_OFLD_CMD_FREE		V_FW_EQ_OFLD_CMD_FREE(1U)
 
 #define S_FW_EQ_OFLD_CMD_MODIFY		29
 #define M_FW_EQ_OFLD_CMD_MODIFY		0x1
@@ -4837,7 +6155,7 @@
 #define V_FW_EQ_OFLD_CMD_MODIFY(x)	((x) << S_FW_EQ_OFLD_CMD_MODIFY)
 #define G_FW_EQ_OFLD_CMD_MODIFY(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_MODIFY) & M_FW_EQ_OFLD_CMD_MODIFY)
-#define F_FW_EQ_OFLD_CMD_MODIFY	V_FW_EQ_OFLD_CMD_MODIFY(1U)
+#define F_FW_EQ_OFLD_CMD_MODIFY		V_FW_EQ_OFLD_CMD_MODIFY(1U)
 
 #define S_FW_EQ_OFLD_CMD_EQSTART	28
 #define M_FW_EQ_OFLD_CMD_EQSTART	0x1
@@ -4851,7 +6169,7 @@
 #define V_FW_EQ_OFLD_CMD_EQSTOP(x)	((x) << S_FW_EQ_OFLD_CMD_EQSTOP)
 #define G_FW_EQ_OFLD_CMD_EQSTOP(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP)
-#define F_FW_EQ_OFLD_CMD_EQSTOP	V_FW_EQ_OFLD_CMD_EQSTOP(1U)
+#define F_FW_EQ_OFLD_CMD_EQSTOP		V_FW_EQ_OFLD_CMD_EQSTOP(1U)
 
 #define S_FW_EQ_OFLD_CMD_EQID		0
 #define M_FW_EQ_OFLD_CMD_EQID		0xfffff
@@ -4911,7 +6229,7 @@
 #define V_FW_EQ_OFLD_CMD_CPRIO(x)	((x) << S_FW_EQ_OFLD_CMD_CPRIO)
 #define G_FW_EQ_OFLD_CMD_CPRIO(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_CPRIO) & M_FW_EQ_OFLD_CMD_CPRIO)
-#define F_FW_EQ_OFLD_CMD_CPRIO	V_FW_EQ_OFLD_CMD_CPRIO(1U)
+#define F_FW_EQ_OFLD_CMD_CPRIO		V_FW_EQ_OFLD_CMD_CPRIO(1U)
 
 #define S_FW_EQ_OFLD_CMD_ONCHIP		18
 #define M_FW_EQ_OFLD_CMD_ONCHIP		0x1
@@ -4918,7 +6236,7 @@
 #define V_FW_EQ_OFLD_CMD_ONCHIP(x)	((x) << S_FW_EQ_OFLD_CMD_ONCHIP)
 #define G_FW_EQ_OFLD_CMD_ONCHIP(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_ONCHIP) & M_FW_EQ_OFLD_CMD_ONCHIP)
-#define F_FW_EQ_OFLD_CMD_ONCHIP	V_FW_EQ_OFLD_CMD_ONCHIP(1U)
+#define F_FW_EQ_OFLD_CMD_ONCHIP		V_FW_EQ_OFLD_CMD_ONCHIP(1U)
 
 #define S_FW_EQ_OFLD_CMD_PCIECHN	16
 #define M_FW_EQ_OFLD_CMD_PCIECHN	0x3
@@ -4937,7 +6255,7 @@
 #define V_FW_EQ_OFLD_CMD_DCAEN(x)	((x) << S_FW_EQ_OFLD_CMD_DCAEN)
 #define G_FW_EQ_OFLD_CMD_DCAEN(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_DCAEN) & M_FW_EQ_OFLD_CMD_DCAEN)
-#define F_FW_EQ_OFLD_CMD_DCAEN	V_FW_EQ_OFLD_CMD_DCAEN(1U)
+#define F_FW_EQ_OFLD_CMD_DCAEN		V_FW_EQ_OFLD_CMD_DCAEN(1U)
 
 #define S_FW_EQ_OFLD_CMD_DCACPU		26
 #define M_FW_EQ_OFLD_CMD_DCACPU		0x1f
@@ -4957,11 +6275,11 @@
 #define G_FW_EQ_OFLD_CMD_FBMAX(x)	\
     (((x) >> S_FW_EQ_OFLD_CMD_FBMAX) & M_FW_EQ_OFLD_CMD_FBMAX)
 
-#define S_FW_EQ_OFLD_CMD_CIDXFTHRESHO		19
-#define M_FW_EQ_OFLD_CMD_CIDXFTHRESHO		0x1
-#define V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x)	\
+#define S_FW_EQ_OFLD_CMD_CIDXFTHRESHO	19
+#define M_FW_EQ_OFLD_CMD_CIDXFTHRESHO	0x1
+#define V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \
     ((x) << S_FW_EQ_OFLD_CMD_CIDXFTHRESHO)
-#define G_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x)	\
+#define G_FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) \
     (((x) >> S_FW_EQ_OFLD_CMD_CIDXFTHRESHO) & M_FW_EQ_OFLD_CMD_CIDXFTHRESHO)
 #define F_FW_EQ_OFLD_CMD_CIDXFTHRESHO	V_FW_EQ_OFLD_CMD_CIDXFTHRESHO(1U)
 
@@ -5023,74 +6341,80 @@
 	__be64 r10;
 };
 
-#define S_FW_VI_CMD_PFN		8
-#define M_FW_VI_CMD_PFN		0x7
-#define V_FW_VI_CMD_PFN(x)	((x) << S_FW_VI_CMD_PFN)
-#define G_FW_VI_CMD_PFN(x)	(((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN)
+#define S_FW_VI_CMD_PFN			8
+#define M_FW_VI_CMD_PFN			0x7
+#define V_FW_VI_CMD_PFN(x)		((x) << S_FW_VI_CMD_PFN)
+#define G_FW_VI_CMD_PFN(x)		\
+    (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN)
 
-#define S_FW_VI_CMD_VFN		0
-#define M_FW_VI_CMD_VFN		0xff
-#define V_FW_VI_CMD_VFN(x)	((x) << S_FW_VI_CMD_VFN)
-#define G_FW_VI_CMD_VFN(x)	(((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN)
+#define S_FW_VI_CMD_VFN			0
+#define M_FW_VI_CMD_VFN			0xff
+#define V_FW_VI_CMD_VFN(x)		((x) << S_FW_VI_CMD_VFN)
+#define G_FW_VI_CMD_VFN(x)		\
+    (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN)
 
-#define S_FW_VI_CMD_ALLOC	31
-#define M_FW_VI_CMD_ALLOC	0x1
-#define V_FW_VI_CMD_ALLOC(x)	((x) << S_FW_VI_CMD_ALLOC)
-#define G_FW_VI_CMD_ALLOC(x)	\
+#define S_FW_VI_CMD_ALLOC		31
+#define M_FW_VI_CMD_ALLOC		0x1
+#define V_FW_VI_CMD_ALLOC(x)		((x) << S_FW_VI_CMD_ALLOC)
+#define G_FW_VI_CMD_ALLOC(x)		\
     (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC)
-#define F_FW_VI_CMD_ALLOC	V_FW_VI_CMD_ALLOC(1U)
+#define F_FW_VI_CMD_ALLOC		V_FW_VI_CMD_ALLOC(1U)
 
-#define S_FW_VI_CMD_FREE	30
-#define M_FW_VI_CMD_FREE	0x1
-#define V_FW_VI_CMD_FREE(x)	((x) << S_FW_VI_CMD_FREE)
-#define G_FW_VI_CMD_FREE(x)	(((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
-#define F_FW_VI_CMD_FREE	V_FW_VI_CMD_FREE(1U)
+#define S_FW_VI_CMD_FREE		30
+#define M_FW_VI_CMD_FREE		0x1
+#define V_FW_VI_CMD_FREE(x)		((x) << S_FW_VI_CMD_FREE)
+#define G_FW_VI_CMD_FREE(x)		\
+    (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
+#define F_FW_VI_CMD_FREE		V_FW_VI_CMD_FREE(1U)
 
-#define S_FW_VI_CMD_TYPE	15
-#define M_FW_VI_CMD_TYPE	0x1
-#define V_FW_VI_CMD_TYPE(x)	((x) << S_FW_VI_CMD_TYPE)
-#define G_FW_VI_CMD_TYPE(x)	(((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE)
-#define F_FW_VI_CMD_TYPE	V_FW_VI_CMD_TYPE(1U)
+#define S_FW_VI_CMD_TYPE		15
+#define M_FW_VI_CMD_TYPE		0x1
+#define V_FW_VI_CMD_TYPE(x)		((x) << S_FW_VI_CMD_TYPE)
+#define G_FW_VI_CMD_TYPE(x)		\
+    (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE)
+#define F_FW_VI_CMD_TYPE		V_FW_VI_CMD_TYPE(1U)
 
-#define S_FW_VI_CMD_FUNC	12
-#define M_FW_VI_CMD_FUNC	0x7
-#define V_FW_VI_CMD_FUNC(x)	((x) << S_FW_VI_CMD_FUNC)
-#define G_FW_VI_CMD_FUNC(x)	(((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC)
+#define S_FW_VI_CMD_FUNC		12
+#define M_FW_VI_CMD_FUNC		0x7
+#define V_FW_VI_CMD_FUNC(x)		((x) << S_FW_VI_CMD_FUNC)
+#define G_FW_VI_CMD_FUNC(x)		\
+    (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC)
 
-#define S_FW_VI_CMD_VIID	0
-#define M_FW_VI_CMD_VIID	0xfff
-#define V_FW_VI_CMD_VIID(x)	((x) << S_FW_VI_CMD_VIID)
-#define G_FW_VI_CMD_VIID(x)	(((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID)
+#define S_FW_VI_CMD_VIID		0
+#define M_FW_VI_CMD_VIID		0xfff
+#define V_FW_VI_CMD_VIID(x)		((x) << S_FW_VI_CMD_VIID)
+#define G_FW_VI_CMD_VIID(x)		\
+    (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID)
 
-#define S_FW_VI_CMD_PORTID	4
-#define M_FW_VI_CMD_PORTID	0xf
-#define V_FW_VI_CMD_PORTID(x)	((x) << S_FW_VI_CMD_PORTID)
-#define G_FW_VI_CMD_PORTID(x)	\
+#define S_FW_VI_CMD_PORTID		4
+#define M_FW_VI_CMD_PORTID		0xf
+#define V_FW_VI_CMD_PORTID(x)		((x) << S_FW_VI_CMD_PORTID)
+#define G_FW_VI_CMD_PORTID(x)		\
     (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID)
 
-#define S_FW_VI_CMD_NORSS	11
-#define M_FW_VI_CMD_NORSS	0x1
-#define V_FW_VI_CMD_NORSS(x)	((x) << S_FW_VI_CMD_NORSS)
-#define G_FW_VI_CMD_NORSS(x)	\
+#define S_FW_VI_CMD_NORSS		11
+#define M_FW_VI_CMD_NORSS		0x1
+#define V_FW_VI_CMD_NORSS(x)		((x) << S_FW_VI_CMD_NORSS)
+#define G_FW_VI_CMD_NORSS(x)		\
     (((x) >> S_FW_VI_CMD_NORSS) & M_FW_VI_CMD_NORSS)
-#define F_FW_VI_CMD_NORSS	V_FW_VI_CMD_NORSS(1U)
+#define F_FW_VI_CMD_NORSS		V_FW_VI_CMD_NORSS(1U)
 
-#define S_FW_VI_CMD_RSSSIZE	0
-#define M_FW_VI_CMD_RSSSIZE	0x7ff
-#define V_FW_VI_CMD_RSSSIZE(x)	((x) << S_FW_VI_CMD_RSSSIZE)
-#define G_FW_VI_CMD_RSSSIZE(x)	\
+#define S_FW_VI_CMD_RSSSIZE		0
+#define M_FW_VI_CMD_RSSSIZE		0x7ff
+#define V_FW_VI_CMD_RSSSIZE(x)		((x) << S_FW_VI_CMD_RSSSIZE)
+#define G_FW_VI_CMD_RSSSIZE(x)		\
     (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE)
 
-#define S_FW_VI_CMD_IDSIIQ	0
-#define M_FW_VI_CMD_IDSIIQ	0x3ff
-#define V_FW_VI_CMD_IDSIIQ(x)	((x) << S_FW_VI_CMD_IDSIIQ)
-#define G_FW_VI_CMD_IDSIIQ(x)	\
+#define S_FW_VI_CMD_IDSIIQ		0
+#define M_FW_VI_CMD_IDSIIQ		0x3ff
+#define V_FW_VI_CMD_IDSIIQ(x)		((x) << S_FW_VI_CMD_IDSIIQ)
+#define G_FW_VI_CMD_IDSIIQ(x)		\
     (((x) >> S_FW_VI_CMD_IDSIIQ) & M_FW_VI_CMD_IDSIIQ)
 
-#define S_FW_VI_CMD_IDSEIQ	0
-#define M_FW_VI_CMD_IDSEIQ	0x3ff
-#define V_FW_VI_CMD_IDSEIQ(x)	((x) << S_FW_VI_CMD_IDSEIQ)
-#define G_FW_VI_CMD_IDSEIQ(x)	\
+#define S_FW_VI_CMD_IDSEIQ		0
+#define M_FW_VI_CMD_IDSEIQ		0x3ff
+#define V_FW_VI_CMD_IDSEIQ(x)		((x) << S_FW_VI_CMD_IDSEIQ)
+#define G_FW_VI_CMD_IDSEIQ(x)		\
     (((x) >> S_FW_VI_CMD_IDSEIQ) & M_FW_VI_CMD_IDSEIQ)
 
 /* Special VI_MAC command index ids */
@@ -5112,6 +6436,13 @@
 	FW_VI_MAC_R_F_ACL_CHECK
 };
 
+enum fw_vi_mac_entry_types {
+	FW_VI_MAC_TYPE_EXACTMAC,
+	FW_VI_MAC_TYPE_HASHVEC,
+	FW_VI_MAC_TYPE_RAW,
+	FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
 struct fw_vi_mac_cmd {
 	__be32 op_to_viid;
 	__be32 freemacs_to_len16;
@@ -5123,13 +6454,28 @@
 		struct fw_vi_mac_hash {
 			__be64 hashvec;
 		} hash;
+		struct fw_vi_mac_raw {
+			__be32 raw_idx_pkd;
+			__be32 data0_pkd;
+			__be32 data1[2];
+			__be64 data0m_pkd;
+			__be32 data1m[2];
+		} raw;
+		struct fw_vi_mac_vni {
+			__be16 valid_to_idx;
+			__u8   macaddr[6];
+			__be16 r7;
+			__u8   macaddr_mask[6];
+			__be32 lookup_type_to_vni;
+			__be32 vni_mask_pkd;
+		} exact_vni[2];
 	} u;
 };
 
-#define S_FW_VI_MAC_CMD_VIID	0
-#define M_FW_VI_MAC_CMD_VIID	0xfff
-#define V_FW_VI_MAC_CMD_VIID(x)	((x) << S_FW_VI_MAC_CMD_VIID)
-#define G_FW_VI_MAC_CMD_VIID(x)	\
+#define S_FW_VI_MAC_CMD_VIID		0
+#define M_FW_VI_MAC_CMD_VIID		0xfff
+#define V_FW_VI_MAC_CMD_VIID(x)		((x) << S_FW_VI_MAC_CMD_VIID)
+#define G_FW_VI_MAC_CMD_VIID(x)		\
     (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID)
 
 #define S_FW_VI_MAC_CMD_FREEMACS	31
@@ -5139,12 +6485,11 @@
     (((x) >> S_FW_VI_MAC_CMD_FREEMACS) & M_FW_VI_MAC_CMD_FREEMACS)
 #define F_FW_VI_MAC_CMD_FREEMACS	V_FW_VI_MAC_CMD_FREEMACS(1U)
 
-#define S_FW_VI_MAC_CMD_HASHVECEN	23
-#define M_FW_VI_MAC_CMD_HASHVECEN	0x1
-#define V_FW_VI_MAC_CMD_HASHVECEN(x)	((x) << S_FW_VI_MAC_CMD_HASHVECEN)
-#define G_FW_VI_MAC_CMD_HASHVECEN(x)	\
-    (((x) >> S_FW_VI_MAC_CMD_HASHVECEN) & M_FW_VI_MAC_CMD_HASHVECEN)
-#define F_FW_VI_MAC_CMD_HASHVECEN	V_FW_VI_MAC_CMD_HASHVECEN(1U)
+#define S_FW_VI_MAC_CMD_ENTRY_TYPE	23
+#define M_FW_VI_MAC_CMD_ENTRY_TYPE	0x7
+#define V_FW_VI_MAC_CMD_ENTRY_TYPE(x)	((x) << S_FW_VI_MAC_CMD_ENTRY_TYPE)
+#define G_FW_VI_MAC_CMD_ENTRY_TYPE(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_ENTRY_TYPE) & M_FW_VI_MAC_CMD_ENTRY_TYPE)
 
 #define S_FW_VI_MAC_CMD_HASHUNIEN	22
 #define M_FW_VI_MAC_CMD_HASHUNIEN	0x1
@@ -5158,12 +6503,12 @@
 #define V_FW_VI_MAC_CMD_VALID(x)	((x) << S_FW_VI_MAC_CMD_VALID)
 #define G_FW_VI_MAC_CMD_VALID(x)	\
     (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID)
-#define F_FW_VI_MAC_CMD_VALID	V_FW_VI_MAC_CMD_VALID(1U)
+#define F_FW_VI_MAC_CMD_VALID		V_FW_VI_MAC_CMD_VALID(1U)
 
-#define S_FW_VI_MAC_CMD_PRIO	12
-#define M_FW_VI_MAC_CMD_PRIO	0x7
-#define V_FW_VI_MAC_CMD_PRIO(x)	((x) << S_FW_VI_MAC_CMD_PRIO)
-#define G_FW_VI_MAC_CMD_PRIO(x)	\
+#define S_FW_VI_MAC_CMD_PRIO		12
+#define M_FW_VI_MAC_CMD_PRIO		0x7
+#define V_FW_VI_MAC_CMD_PRIO(x)		((x) << S_FW_VI_MAC_CMD_PRIO)
+#define G_FW_VI_MAC_CMD_PRIO(x)		\
     (((x) >> S_FW_VI_MAC_CMD_PRIO) & M_FW_VI_MAC_CMD_PRIO)
 
 #define S_FW_VI_MAC_CMD_SMAC_RESULT	10
@@ -5172,12 +6517,50 @@
 #define G_FW_VI_MAC_CMD_SMAC_RESULT(x)	\
     (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT)
 
-#define S_FW_VI_MAC_CMD_IDX	0
-#define M_FW_VI_MAC_CMD_IDX	0x3ff
-#define V_FW_VI_MAC_CMD_IDX(x)	((x) << S_FW_VI_MAC_CMD_IDX)
-#define G_FW_VI_MAC_CMD_IDX(x)	\
+#define S_FW_VI_MAC_CMD_IDX		0
+#define M_FW_VI_MAC_CMD_IDX		0x3ff
+#define V_FW_VI_MAC_CMD_IDX(x)		((x) << S_FW_VI_MAC_CMD_IDX)
+#define G_FW_VI_MAC_CMD_IDX(x)		\
     (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX)
 
+#define S_FW_VI_MAC_CMD_RAW_IDX		16
+#define M_FW_VI_MAC_CMD_RAW_IDX		0xffff
+#define V_FW_VI_MAC_CMD_RAW_IDX(x)	((x) << S_FW_VI_MAC_CMD_RAW_IDX)
+#define G_FW_VI_MAC_CMD_RAW_IDX(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_RAW_IDX) & M_FW_VI_MAC_CMD_RAW_IDX)
+
+#define S_FW_VI_MAC_CMD_DATA0		0
+#define M_FW_VI_MAC_CMD_DATA0		0xffff
+#define V_FW_VI_MAC_CMD_DATA0(x)	((x) << S_FW_VI_MAC_CMD_DATA0)
+#define G_FW_VI_MAC_CMD_DATA0(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_DATA0) & M_FW_VI_MAC_CMD_DATA0)
+
+#define S_FW_VI_MAC_CMD_LOOKUP_TYPE	31
+#define M_FW_VI_MAC_CMD_LOOKUP_TYPE	0x1
+#define V_FW_VI_MAC_CMD_LOOKUP_TYPE(x)	((x) << S_FW_VI_MAC_CMD_LOOKUP_TYPE)
+#define G_FW_VI_MAC_CMD_LOOKUP_TYPE(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_LOOKUP_TYPE) & M_FW_VI_MAC_CMD_LOOKUP_TYPE)
+#define F_FW_VI_MAC_CMD_LOOKUP_TYPE	V_FW_VI_MAC_CMD_LOOKUP_TYPE(1U)
+
+#define S_FW_VI_MAC_CMD_DIP_HIT		30
+#define M_FW_VI_MAC_CMD_DIP_HIT		0x1
+#define V_FW_VI_MAC_CMD_DIP_HIT(x)	((x) << S_FW_VI_MAC_CMD_DIP_HIT)
+#define G_FW_VI_MAC_CMD_DIP_HIT(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_DIP_HIT) & M_FW_VI_MAC_CMD_DIP_HIT)
+#define F_FW_VI_MAC_CMD_DIP_HIT	V_FW_VI_MAC_CMD_DIP_HIT(1U)
+
+#define S_FW_VI_MAC_CMD_VNI	0
+#define M_FW_VI_MAC_CMD_VNI	0xffffff
+#define V_FW_VI_MAC_CMD_VNI(x)	((x) << S_FW_VI_MAC_CMD_VNI)
+#define G_FW_VI_MAC_CMD_VNI(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_VNI) & M_FW_VI_MAC_CMD_VNI)
+
+#define S_FW_VI_MAC_CMD_VNI_MASK	0
+#define M_FW_VI_MAC_CMD_VNI_MASK	0xffffff
+#define V_FW_VI_MAC_CMD_VNI_MASK(x)	((x) << S_FW_VI_MAC_CMD_VNI_MASK)
+#define G_FW_VI_MAC_CMD_VNI_MASK(x)	\
+    (((x) >> S_FW_VI_MAC_CMD_VNI_MASK) & M_FW_VI_MAC_CMD_VNI_MASK)
+
 /* T4 max MTU supported */
 #define T4_MAX_MTU_SUPPORTED	9600
 #define FW_RXMODE_MTU_NO_CHG	65535
@@ -5207,18 +6590,18 @@
 #define G_FW_VI_RXMODE_CMD_PROMISCEN(x)	\
     (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN)
 
-#define S_FW_VI_RXMODE_CMD_ALLMULTIEN		12
-#define M_FW_VI_RXMODE_CMD_ALLMULTIEN		0x3
-#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x)	\
+#define S_FW_VI_RXMODE_CMD_ALLMULTIEN	12
+#define M_FW_VI_RXMODE_CMD_ALLMULTIEN	0x3
+#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
     ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN)
-#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x)	\
+#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
     (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN)
 
-#define S_FW_VI_RXMODE_CMD_BROADCASTEN		10
-#define M_FW_VI_RXMODE_CMD_BROADCASTEN		0x3
-#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x)	\
+#define S_FW_VI_RXMODE_CMD_BROADCASTEN	10
+#define M_FW_VI_RXMODE_CMD_BROADCASTEN	0x3
+#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
     ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN)
-#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x)	\
+#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
     (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & M_FW_VI_RXMODE_CMD_BROADCASTEN)
 
 #define S_FW_VI_RXMODE_CMD_VLANEXEN	8
@@ -5246,7 +6629,7 @@
 #define V_FW_VI_ENABLE_CMD_IEN(x)	((x) << S_FW_VI_ENABLE_CMD_IEN)
 #define G_FW_VI_ENABLE_CMD_IEN(x)	\
     (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN)
-#define F_FW_VI_ENABLE_CMD_IEN	V_FW_VI_ENABLE_CMD_IEN(1U)
+#define F_FW_VI_ENABLE_CMD_IEN		V_FW_VI_ENABLE_CMD_IEN(1U)
 
 #define S_FW_VI_ENABLE_CMD_EEN		30
 #define M_FW_VI_ENABLE_CMD_EEN		0x1
@@ -5253,7 +6636,7 @@
 #define V_FW_VI_ENABLE_CMD_EEN(x)	((x) << S_FW_VI_ENABLE_CMD_EEN)
 #define G_FW_VI_ENABLE_CMD_EEN(x)	\
     (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN)
-#define F_FW_VI_ENABLE_CMD_EEN	V_FW_VI_ENABLE_CMD_EEN(1U)
+#define F_FW_VI_ENABLE_CMD_EEN		V_FW_VI_ENABLE_CMD_EEN(1U)
 
 #define S_FW_VI_ENABLE_CMD_LED		29
 #define M_FW_VI_ENABLE_CMD_LED		0x1
@@ -5260,7 +6643,7 @@
 #define V_FW_VI_ENABLE_CMD_LED(x)	((x) << S_FW_VI_ENABLE_CMD_LED)
 #define G_FW_VI_ENABLE_CMD_LED(x)	\
     (((x) >> S_FW_VI_ENABLE_CMD_LED) & M_FW_VI_ENABLE_CMD_LED)
-#define F_FW_VI_ENABLE_CMD_LED	V_FW_VI_ENABLE_CMD_LED(1U)
+#define F_FW_VI_ENABLE_CMD_LED		V_FW_VI_ENABLE_CMD_LED(1U)
 
 #define S_FW_VI_ENABLE_CMD_DCB_INFO	28
 #define M_FW_VI_ENABLE_CMD_DCB_INFO	0x1
@@ -5379,10 +6762,10 @@
 #define G_FW_VI_STATS_CMD_NSTATS(x)	\
     (((x) >> S_FW_VI_STATS_CMD_NSTATS) & M_FW_VI_STATS_CMD_NSTATS)
 
-#define S_FW_VI_STATS_CMD_IX	0
-#define M_FW_VI_STATS_CMD_IX	0x1f
-#define V_FW_VI_STATS_CMD_IX(x)	((x) << S_FW_VI_STATS_CMD_IX)
-#define G_FW_VI_STATS_CMD_IX(x)	\
+#define S_FW_VI_STATS_CMD_IX		0
+#define M_FW_VI_STATS_CMD_IX		0x1f
+#define V_FW_VI_STATS_CMD_IX(x)		((x) << S_FW_VI_STATS_CMD_IX)
+#define G_FW_VI_STATS_CMD_IX(x)		\
     (((x) >> S_FW_VI_STATS_CMD_IX) & M_FW_VI_STATS_CMD_IX)
 
 struct fw_acl_mac_cmd {
@@ -5400,24 +6783,24 @@
 	__u8   macaddr3[6];
 };
 
-#define S_FW_ACL_MAC_CMD_PFN	8
-#define M_FW_ACL_MAC_CMD_PFN	0x7
-#define V_FW_ACL_MAC_CMD_PFN(x)	((x) << S_FW_ACL_MAC_CMD_PFN)
-#define G_FW_ACL_MAC_CMD_PFN(x)	\
+#define S_FW_ACL_MAC_CMD_PFN		8
+#define M_FW_ACL_MAC_CMD_PFN		0x7
+#define V_FW_ACL_MAC_CMD_PFN(x)		((x) << S_FW_ACL_MAC_CMD_PFN)
+#define G_FW_ACL_MAC_CMD_PFN(x)		\
     (((x) >> S_FW_ACL_MAC_CMD_PFN) & M_FW_ACL_MAC_CMD_PFN)
 
-#define S_FW_ACL_MAC_CMD_VFN	0
-#define M_FW_ACL_MAC_CMD_VFN	0xff
-#define V_FW_ACL_MAC_CMD_VFN(x)	((x) << S_FW_ACL_MAC_CMD_VFN)
-#define G_FW_ACL_MAC_CMD_VFN(x)	\
+#define S_FW_ACL_MAC_CMD_VFN		0
+#define M_FW_ACL_MAC_CMD_VFN		0xff
+#define V_FW_ACL_MAC_CMD_VFN(x)		((x) << S_FW_ACL_MAC_CMD_VFN)
+#define G_FW_ACL_MAC_CMD_VFN(x)		\
     (((x) >> S_FW_ACL_MAC_CMD_VFN) & M_FW_ACL_MAC_CMD_VFN)
 
-#define S_FW_ACL_MAC_CMD_EN	31
-#define M_FW_ACL_MAC_CMD_EN	0x1
-#define V_FW_ACL_MAC_CMD_EN(x)	((x) << S_FW_ACL_MAC_CMD_EN)
-#define G_FW_ACL_MAC_CMD_EN(x)	\
+#define S_FW_ACL_MAC_CMD_EN		31
+#define M_FW_ACL_MAC_CMD_EN		0x1
+#define V_FW_ACL_MAC_CMD_EN(x)		((x) << S_FW_ACL_MAC_CMD_EN)
+#define G_FW_ACL_MAC_CMD_EN(x)		\
     (((x) >> S_FW_ACL_MAC_CMD_EN) & M_FW_ACL_MAC_CMD_EN)
-#define F_FW_ACL_MAC_CMD_EN	V_FW_ACL_MAC_CMD_EN(1U)
+#define F_FW_ACL_MAC_CMD_EN		V_FW_ACL_MAC_CMD_EN(1U)
 
 struct fw_acl_vlan_cmd {
 	__be32 op_to_vfn;
@@ -5440,12 +6823,12 @@
 #define G_FW_ACL_VLAN_CMD_VFN(x)	\
     (((x) >> S_FW_ACL_VLAN_CMD_VFN) & M_FW_ACL_VLAN_CMD_VFN)
 
-#define S_FW_ACL_VLAN_CMD_EN	31
-#define M_FW_ACL_VLAN_CMD_EN	0x1
-#define V_FW_ACL_VLAN_CMD_EN(x)	((x) << S_FW_ACL_VLAN_CMD_EN)
-#define G_FW_ACL_VLAN_CMD_EN(x)	\
+#define S_FW_ACL_VLAN_CMD_EN		31
+#define M_FW_ACL_VLAN_CMD_EN		0x1
+#define V_FW_ACL_VLAN_CMD_EN(x)		((x) << S_FW_ACL_VLAN_CMD_EN)
+#define G_FW_ACL_VLAN_CMD_EN(x)		\
     (((x) >> S_FW_ACL_VLAN_CMD_EN) & M_FW_ACL_VLAN_CMD_EN)
-#define F_FW_ACL_VLAN_CMD_EN	V_FW_ACL_VLAN_CMD_EN(1U)
+#define F_FW_ACL_VLAN_CMD_EN		V_FW_ACL_VLAN_CMD_EN(1U)
 
 #define S_FW_ACL_VLAN_CMD_DROPNOVLAN	7
 #define M_FW_ACL_VLAN_CMD_DROPNOVLAN	0x1
@@ -5454,18 +6837,18 @@
     (((x) >> S_FW_ACL_VLAN_CMD_DROPNOVLAN) & M_FW_ACL_VLAN_CMD_DROPNOVLAN)
 #define F_FW_ACL_VLAN_CMD_DROPNOVLAN	V_FW_ACL_VLAN_CMD_DROPNOVLAN(1U)
 
-#define S_FW_ACL_VLAN_CMD_FM	6
-#define M_FW_ACL_VLAN_CMD_FM	0x1
-#define V_FW_ACL_VLAN_CMD_FM(x)	((x) << S_FW_ACL_VLAN_CMD_FM)
-#define G_FW_ACL_VLAN_CMD_FM(x)	\
+#define S_FW_ACL_VLAN_CMD_FM		6
+#define M_FW_ACL_VLAN_CMD_FM		0x1
+#define V_FW_ACL_VLAN_CMD_FM(x)		((x) << S_FW_ACL_VLAN_CMD_FM)
+#define G_FW_ACL_VLAN_CMD_FM(x)		\
     (((x) >> S_FW_ACL_VLAN_CMD_FM) & M_FW_ACL_VLAN_CMD_FM)
-#define F_FW_ACL_VLAN_CMD_FM	V_FW_ACL_VLAN_CMD_FM(1U)
+#define F_FW_ACL_VLAN_CMD_FM		V_FW_ACL_VLAN_CMD_FM(1U)
 
-/* port capabilities bitmap */
+/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */
 enum fw_port_cap {
 	FW_PORT_CAP_SPEED_100M		= 0x0001,
 	FW_PORT_CAP_SPEED_1G		= 0x0002,
-	FW_PORT_CAP_SPEED_2_5G		= 0x0004,
+	FW_PORT_CAP_SPEED_25G		= 0x0004,
 	FW_PORT_CAP_SPEED_10G		= 0x0008,
 	FW_PORT_CAP_SPEED_40G		= 0x0010,
 	FW_PORT_CAP_SPEED_100G		= 0x0020,
@@ -5474,44 +6857,13 @@
 	FW_PORT_CAP_ANEG		= 0x0100,
 	FW_PORT_CAP_MDIX		= 0x0200,
 	FW_PORT_CAP_MDIAUTO		= 0x0400,
-	FW_PORT_CAP_FEC			= 0x0800,
-	FW_PORT_CAP_TECHKR		= 0x1000,
-	FW_PORT_CAP_TECHKX4		= 0x2000,
+	FW_PORT_CAP_FEC_RS		= 0x0800,
+	FW_PORT_CAP_FEC_BASER_RS	= 0x1000,
+	FW_PORT_CAP_FEC_RESERVED	= 0x2000,
+	FW_PORT_CAP_802_3_PAUSE		= 0x4000,
+	FW_PORT_CAP_802_3_ASM_DIR	= 0x8000,
 };
 
-#define S_FW_PORT_AUXLINFO_MDI		3
-#define M_FW_PORT_AUXLINFO_MDI		0x3
-#define V_FW_PORT_AUXLINFO_MDI(x)	((x) << S_FW_PORT_AUXLINFO_MDI)
-#define G_FW_PORT_AUXLINFO_MDI(x) \
-    (((x) >> S_FW_PORT_AUXLINFO_MDI) & M_FW_PORT_AUXLINFO_MDI)
-
-#define S_FW_PORT_AUXLINFO_KX4		2
-#define M_FW_PORT_AUXLINFO_KX4		0x1
-#define V_FW_PORT_AUXLINFO_KX4(x)	((x) << S_FW_PORT_AUXLINFO_KX4)
-#define G_FW_PORT_AUXLINFO_KX4(x) \
-    (((x) >> S_FW_PORT_AUXLINFO_KX4) & M_FW_PORT_AUXLINFO_KX4)
-#define F_FW_PORT_AUXLINFO_KX4		V_FW_PORT_AUXLINFO_KX4(1U)
-
-#define S_FW_PORT_AUXLINFO_KR		1
-#define M_FW_PORT_AUXLINFO_KR		0x1
-#define V_FW_PORT_AUXLINFO_KR(x)	((x) << S_FW_PORT_AUXLINFO_KR)
-#define G_FW_PORT_AUXLINFO_KR(x) \
-    (((x) >> S_FW_PORT_AUXLINFO_KR) & M_FW_PORT_AUXLINFO_KR)
-#define F_FW_PORT_AUXLINFO_KR		V_FW_PORT_AUXLINFO_KR(1U)
-
-#define S_FW_PORT_AUXLINFO_FEC		0
-#define M_FW_PORT_AUXLINFO_FEC		0x1
-#define V_FW_PORT_AUXLINFO_FEC(x)	((x) << S_FW_PORT_AUXLINFO_FEC)
-#define G_FW_PORT_AUXLINFO_FEC(x) \
-    (((x) >> S_FW_PORT_AUXLINFO_FEC) & M_FW_PORT_AUXLINFO_FEC) 
-#define F_FW_PORT_AUXLINFO_FEC		V_FW_PORT_AUXLINFO_FEC(1U)
-
-#define S_FW_PORT_RCAP_AUX	11
-#define M_FW_PORT_RCAP_AUX	0x7
-#define V_FW_PORT_RCAP_AUX(x)	((x) << S_FW_PORT_RCAP_AUX)
-#define G_FW_PORT_RCAP_AUX(x) \
-    (((x) >> S_FW_PORT_RCAP_AUX) & M_FW_PORT_RCAP_AUX)
-
 #define S_FW_PORT_CAP_SPEED	0
 #define M_FW_PORT_CAP_SPEED	0x3f
 #define V_FW_PORT_CAP_SPEED(x)	((x) << S_FW_PORT_CAP_SPEED)
@@ -5530,6 +6882,18 @@
 #define G_FW_PORT_CAP_ANEG(x) \
     (((x) >> S_FW_PORT_CAP_ANEG) & M_FW_PORT_CAP_ANEG)
 
+#define S_FW_PORT_CAP_FEC	11
+#define M_FW_PORT_CAP_FEC	0x7
+#define V_FW_PORT_CAP_FEC(x)	((x) << S_FW_PORT_CAP_FEC)
+#define G_FW_PORT_CAP_FEC(x) \
+    (((x) >> S_FW_PORT_CAP_FEC) & M_FW_PORT_CAP_FEC)
+
+#define S_FW_PORT_CAP_802_3	14
+#define M_FW_PORT_CAP_802_3	0x3
+#define V_FW_PORT_CAP_802_3(x)	((x) << S_FW_PORT_CAP_802_3)
+#define G_FW_PORT_CAP_802_3(x) \
+    (((x) >> S_FW_PORT_CAP_802_3) & M_FW_PORT_CAP_802_3)
+
 enum fw_port_mdi {
 	FW_PORT_CAP_MDI_UNCHANGED,
 	FW_PORT_CAP_MDI_AUTO,
@@ -5542,6 +6906,84 @@
 #define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
 #define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
 
+/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */
+#define	FW_PORT_CAP32_SPEED_100M	0x00000001UL
+#define	FW_PORT_CAP32_SPEED_1G		0x00000002UL
+#define	FW_PORT_CAP32_SPEED_10G		0x00000004UL
+#define	FW_PORT_CAP32_SPEED_25G		0x00000008UL
+#define	FW_PORT_CAP32_SPEED_40G		0x00000010UL
+#define	FW_PORT_CAP32_SPEED_50G		0x00000020UL
+#define	FW_PORT_CAP32_SPEED_100G	0x00000040UL
+#define	FW_PORT_CAP32_SPEED_200G	0x00000080UL
+#define	FW_PORT_CAP32_SPEED_400G	0x00000100UL
+#define	FW_PORT_CAP32_SPEED_RESERVED1	0x00000200UL
+#define	FW_PORT_CAP32_SPEED_RESERVED2	0x00000400UL
+#define	FW_PORT_CAP32_SPEED_RESERVED3	0x00000800UL
+#define	FW_PORT_CAP32_RESERVED1		0x0000f000UL
+#define	FW_PORT_CAP32_FC_RX		0x00010000UL
+#define	FW_PORT_CAP32_FC_TX		0x00020000UL
+#define	FW_PORT_CAP32_802_3_PAUSE	0x00040000UL
+#define	FW_PORT_CAP32_802_3_ASM_DIR	0x00080000UL
+#define	FW_PORT_CAP32_ANEG		0x00100000UL
+#define	FW_PORT_CAP32_MDIX		0x00200000UL
+#define	FW_PORT_CAP32_MDIAUTO		0x00400000UL
+#define	FW_PORT_CAP32_FEC_RS		0x00800000UL
+#define	FW_PORT_CAP32_FEC_BASER_RS	0x01000000UL
+#define	FW_PORT_CAP32_FEC_RESERVED1	0x02000000UL
+#define	FW_PORT_CAP32_FEC_RESERVED2	0x04000000UL
+#define	FW_PORT_CAP32_FEC_RESERVED3	0x08000000UL
+#define	FW_PORT_CAP32_RESERVED2		0xf0000000UL
+
+#define S_FW_PORT_CAP32_SPEED	0
+#define M_FW_PORT_CAP32_SPEED	0xfff
+#define V_FW_PORT_CAP32_SPEED(x)	((x) << S_FW_PORT_CAP32_SPEED)
+#define G_FW_PORT_CAP32_SPEED(x) \
+    (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED)
+
+#define S_FW_PORT_CAP32_FC	16
+#define M_FW_PORT_CAP32_FC	0x3
+#define V_FW_PORT_CAP32_FC(x)	((x) << S_FW_PORT_CAP32_FC)
+#define G_FW_PORT_CAP32_FC(x) \
+    (((x) >> S_FW_PORT_CAP32_FC) & M_FW_PORT_CAP32_FC)
+
+#define S_FW_PORT_CAP32_802_3	18
+#define M_FW_PORT_CAP32_802_3	0x3
+#define V_FW_PORT_CAP32_802_3(x)	((x) << S_FW_PORT_CAP32_802_3)
+#define G_FW_PORT_CAP32_802_3(x) \
+    (((x) >> S_FW_PORT_CAP32_802_3) & M_FW_PORT_CAP32_802_3)
+
+#define S_FW_PORT_CAP32_ANEG	20
+#define M_FW_PORT_CAP32_ANEG	0x1
+#define V_FW_PORT_CAP32_ANEG(x)	((x) << S_FW_PORT_CAP32_ANEG)
+#define G_FW_PORT_CAP32_ANEG(x) \
+    (((x) >> S_FW_PORT_CAP32_ANEG) & M_FW_PORT_CAP32_ANEG)
+
+enum fw_port_mdi32 {
+	FW_PORT_CAP32_MDI_UNCHANGED,
+	FW_PORT_CAP32_MDI_AUTO,
+	FW_PORT_CAP32_MDI_F_STRAIGHT,
+	FW_PORT_CAP32_MDI_F_CROSSOVER
+};
+
+#define S_FW_PORT_CAP32_MDI 21
+#define M_FW_PORT_CAP32_MDI 3
+#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI)
+#define G_FW_PORT_CAP32_MDI(x) \
+    (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI)
+
+#define S_FW_PORT_CAP32_FEC	23
+#define M_FW_PORT_CAP32_FEC	0x1f
+#define V_FW_PORT_CAP32_FEC(x)	((x) << S_FW_PORT_CAP32_FEC)
+#define G_FW_PORT_CAP32_FEC(x) \
+    (((x) >> S_FW_PORT_CAP32_FEC) & M_FW_PORT_CAP32_FEC)
+
+/* macros to isolate various 32-bit Port Capabilities sub-fields */
+#define CAP32_SPEED(__cap32) \
+	(V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) & __cap32)
+
+#define CAP32_FEC(__cap32) \
+	(V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC) & __cap32)
+
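Because the 32-bit speed sub-field is one-hot, a link-status word
(where exactly one speed bit is set) can be decoded with a plain switch
over the isolated bits.  A small illustrative sketch; the Mbps mapping
is the editor's, not the firmware's:

#include <stdint.h>

/* Map the speed bit of a fw_port_cap32_t link-status word to Mbps;
 * returns 0 for reserved or multi-bit (advertised-caps) values. */
static unsigned int
cap32_speed_to_mbps(uint32_t caps)
{
	switch (CAP32_SPEED(caps)) {
	case FW_PORT_CAP32_SPEED_100M:	return (100);
	case FW_PORT_CAP32_SPEED_1G:	return (1000);
	case FW_PORT_CAP32_SPEED_10G:	return (10000);
	case FW_PORT_CAP32_SPEED_25G:	return (25000);
	case FW_PORT_CAP32_SPEED_40G:	return (40000);
	case FW_PORT_CAP32_SPEED_50G:	return (50000);
	case FW_PORT_CAP32_SPEED_100G:	return (100000);
	default:			return (0);
	}
}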
 enum fw_port_action {
 	FW_PORT_ACTION_L1_CFG		= 0x0001,
 	FW_PORT_ACTION_L2_CFG		= 0x0002,
@@ -5551,16 +6993,18 @@
 	FW_PORT_ACTION_DCB_READ_TRANS	= 0x0006,
 	FW_PORT_ACTION_DCB_READ_RECV	= 0x0007,
 	FW_PORT_ACTION_DCB_READ_DET	= 0x0008,
+	FW_PORT_ACTION_L1_CFG32		= 0x0009,
+	FW_PORT_ACTION_GET_PORT_INFO32	= 0x000a,
 	FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
 	FW_PORT_ACTION_L1_LOW_PWR_EN	= 0x0011,
 	FW_PORT_ACTION_L2_WOL_MODE_EN	= 0x0012,
 	FW_PORT_ACTION_LPBK_TO_NORMAL	= 0x0020,
-	FW_PORT_ACTION_L1_SS_LPBK_ASIC	= 0x0021,
-	FW_PORT_ACTION_MAC_LPBK		= 0x0022,
-	FW_PORT_ACTION_L1_WS_LPBK_ASIC	= 0x0023,
-	FW_PORT_ACTION_L1_EXT_LPBK      = 0x0026,
+	FW_PORT_ACTION_LPBK_SS_ASIC	= 0x0022,
+	FW_PORT_ACTION_LPBK_WS_ASIC	= 0x0023,
+	FW_PORT_ACTION_LPBK_WS_EXT_PHY	= 0x0025,
+	FW_PORT_ACTION_LPBK_SS_EXT	= 0x0026,
 	FW_PORT_ACTION_DIAGNOSTICS	= 0x0027,
-	FW_PORT_ACTION_PCS_LPBK		= 0x0028,
+	FW_PORT_ACTION_LPBK_SS_EXT_PHY	= 0x0028,
 	FW_PORT_ACTION_PHY_RESET	= 0x0040,
 	FW_PORT_ACTION_PMA_RESET	= 0x0041,
 	FW_PORT_ACTION_PCS_RESET	= 0x0042,
@@ -5579,6 +7023,21 @@
 	FW_PORT_L2_CTLBF_MTU	= 0x40
 };
 
+enum fw_dcb_app_tlv_sf {
+	FW_DCB_APP_SF_ETHERTYPE,
+	FW_DCB_APP_SF_SOCKET_TCP,
+	FW_DCB_APP_SF_SOCKET_UDP,
+	FW_DCB_APP_SF_SOCKET_ALL,
+};
+
+enum fw_port_dcb_versions {
+	FW_PORT_DCB_VER_UNKNOWN,
+	FW_PORT_DCB_VER_CEE1D0,
+	FW_PORT_DCB_VER_CEE1D01,
+	FW_PORT_DCB_VER_IEEE,
+	FW_PORT_DCB_VER_AUTO=7
+};
+
 enum fw_port_dcb_cfg {
 	FW_PORT_DCB_CFG_PG	= 0x01,
 	FW_PORT_DCB_CFG_PFC	= 0x02,
@@ -5599,10 +7058,18 @@
 	FW_PORT_DCB_TYPE_CONTROL	= 0x05,
 };
 
+enum fw_port_dcb_feature_state {
+	FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0,
+	FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1,
+	FW_PORT_DCB_FEATURE_STATE_ERROR	= 0x2,
+	FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3,
+};
+
 enum fw_port_diag_ops {
 	FW_PORT_DIAGS_TEMP		= 0x00,
 	FW_PORT_DIAGS_TX_POWER		= 0x01,
 	FW_PORT_DIAGS_RX_POWER		= 0x02,
+	FW_PORT_DIAGS_TX_DIS		= 0x03,
 };
 
 struct fw_port_cmd {
@@ -5635,7 +7102,9 @@
 			__be16 mtu;
 			__u8   cbllen;
 			__u8   auxlinfo;
-			__be32 r8;
+			__u8   dcbxdis_pkd;
+			__u8   r8_lo;
+			__be16 lpacap;
 			__be64 r9;
 		} info;
 		struct fw_port_diags {
@@ -5657,6 +7126,7 @@
 				__u8   r10_lo[5];
 				__u8   num_tcs_supported;
 				__u8   pgrate[8];
+				__u8   tsa[8];
 			} pgrate;
 			struct fw_port_dcb_priorate {
 				__u8   type;
@@ -5667,7 +7137,8 @@
 			struct fw_port_dcb_pfc {
 				__u8   type;
 				__u8   pfcen;
-				__be16 r10[3];
+				__u8   r10[5];
+				__u8   max_pfc_tcs;
 				__be64 r11;
 			} pfc;
 			struct fw_port_app_priority {
@@ -5682,71 +7153,84 @@
 			struct fw_port_dcb_control {
 				__u8   type;
 				__u8   all_syncd_pkd;
-				__be16 r10_lo[3];
-				__be64 r11;
+				__be16 dcb_version_to_app_state;
+				__be32 r11;
+				__be64 r12;
 			} control;
 		} dcb;
+		struct fw_port_l1cfg32 {
+			__be32 rcap32;
+			__be32 r;
+		} l1cfg32;
+		struct fw_port_info32 {
+			__be32 lstatus32_to_cbllen32;
+			__be32 auxlinfo32_mtu32;
+			__be32 linkattr32;
+			__be32 pcaps32;
+			__be32 acaps32;
+			__be32 lpacaps32;
+		} info32;
 	} u;
 };
 
-#define S_FW_PORT_CMD_READ	22
-#define M_FW_PORT_CMD_READ	0x1
-#define V_FW_PORT_CMD_READ(x)	((x) << S_FW_PORT_CMD_READ)
-#define G_FW_PORT_CMD_READ(x)	\
+#define S_FW_PORT_CMD_READ		22
+#define M_FW_PORT_CMD_READ		0x1
+#define V_FW_PORT_CMD_READ(x)		((x) << S_FW_PORT_CMD_READ)
+#define G_FW_PORT_CMD_READ(x)		\
     (((x) >> S_FW_PORT_CMD_READ) & M_FW_PORT_CMD_READ)
-#define F_FW_PORT_CMD_READ	V_FW_PORT_CMD_READ(1U)
+#define F_FW_PORT_CMD_READ		V_FW_PORT_CMD_READ(1U)
 
-#define S_FW_PORT_CMD_PORTID	0
-#define M_FW_PORT_CMD_PORTID	0xf
-#define V_FW_PORT_CMD_PORTID(x)	((x) << S_FW_PORT_CMD_PORTID)
-#define G_FW_PORT_CMD_PORTID(x)	\
+#define S_FW_PORT_CMD_PORTID		0
+#define M_FW_PORT_CMD_PORTID		0xf
+#define V_FW_PORT_CMD_PORTID(x)		((x) << S_FW_PORT_CMD_PORTID)
+#define G_FW_PORT_CMD_PORTID(x)		\
     (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID)
 
-#define S_FW_PORT_CMD_ACTION	16
-#define M_FW_PORT_CMD_ACTION	0xffff
-#define V_FW_PORT_CMD_ACTION(x)	((x) << S_FW_PORT_CMD_ACTION)
-#define G_FW_PORT_CMD_ACTION(x)	\
+#define S_FW_PORT_CMD_ACTION		16
+#define M_FW_PORT_CMD_ACTION		0xffff
+#define V_FW_PORT_CMD_ACTION(x)		((x) << S_FW_PORT_CMD_ACTION)
+#define G_FW_PORT_CMD_ACTION(x)		\
     (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION)
 
-#define S_FW_PORT_CMD_OVLAN3	7
-#define M_FW_PORT_CMD_OVLAN3	0x1
-#define V_FW_PORT_CMD_OVLAN3(x)	((x) << S_FW_PORT_CMD_OVLAN3)
-#define G_FW_PORT_CMD_OVLAN3(x)	\
+#define S_FW_PORT_CMD_OVLAN3		7
+#define M_FW_PORT_CMD_OVLAN3		0x1
+#define V_FW_PORT_CMD_OVLAN3(x)		((x) << S_FW_PORT_CMD_OVLAN3)
+#define G_FW_PORT_CMD_OVLAN3(x)		\
     (((x) >> S_FW_PORT_CMD_OVLAN3) & M_FW_PORT_CMD_OVLAN3)
-#define F_FW_PORT_CMD_OVLAN3	V_FW_PORT_CMD_OVLAN3(1U)
+#define F_FW_PORT_CMD_OVLAN3		V_FW_PORT_CMD_OVLAN3(1U)
 
-#define S_FW_PORT_CMD_OVLAN2	6
-#define M_FW_PORT_CMD_OVLAN2	0x1
-#define V_FW_PORT_CMD_OVLAN2(x)	((x) << S_FW_PORT_CMD_OVLAN2)
-#define G_FW_PORT_CMD_OVLAN2(x)	\
+#define S_FW_PORT_CMD_OVLAN2		6
+#define M_FW_PORT_CMD_OVLAN2		0x1
+#define V_FW_PORT_CMD_OVLAN2(x)		((x) << S_FW_PORT_CMD_OVLAN2)
+#define G_FW_PORT_CMD_OVLAN2(x)		\
     (((x) >> S_FW_PORT_CMD_OVLAN2) & M_FW_PORT_CMD_OVLAN2)
-#define F_FW_PORT_CMD_OVLAN2	V_FW_PORT_CMD_OVLAN2(1U)
+#define F_FW_PORT_CMD_OVLAN2		V_FW_PORT_CMD_OVLAN2(1U)
 
-#define S_FW_PORT_CMD_OVLAN1	5
-#define M_FW_PORT_CMD_OVLAN1	0x1
-#define V_FW_PORT_CMD_OVLAN1(x)	((x) << S_FW_PORT_CMD_OVLAN1)
-#define G_FW_PORT_CMD_OVLAN1(x)	\
+#define S_FW_PORT_CMD_OVLAN1		5
+#define M_FW_PORT_CMD_OVLAN1		0x1
+#define V_FW_PORT_CMD_OVLAN1(x)		((x) << S_FW_PORT_CMD_OVLAN1)
+#define G_FW_PORT_CMD_OVLAN1(x)		\
     (((x) >> S_FW_PORT_CMD_OVLAN1) & M_FW_PORT_CMD_OVLAN1)
-#define F_FW_PORT_CMD_OVLAN1	V_FW_PORT_CMD_OVLAN1(1U)
+#define F_FW_PORT_CMD_OVLAN1		V_FW_PORT_CMD_OVLAN1(1U)
 
-#define S_FW_PORT_CMD_OVLAN0	4
-#define M_FW_PORT_CMD_OVLAN0	0x1
-#define V_FW_PORT_CMD_OVLAN0(x)	((x) << S_FW_PORT_CMD_OVLAN0)
-#define G_FW_PORT_CMD_OVLAN0(x)	\
+#define S_FW_PORT_CMD_OVLAN0		4
+#define M_FW_PORT_CMD_OVLAN0		0x1
+#define V_FW_PORT_CMD_OVLAN0(x)		((x) << S_FW_PORT_CMD_OVLAN0)
+#define G_FW_PORT_CMD_OVLAN0(x)		\
     (((x) >> S_FW_PORT_CMD_OVLAN0) & M_FW_PORT_CMD_OVLAN0)
-#define F_FW_PORT_CMD_OVLAN0	V_FW_PORT_CMD_OVLAN0(1U)
+#define F_FW_PORT_CMD_OVLAN0		V_FW_PORT_CMD_OVLAN0(1U)
 
-#define S_FW_PORT_CMD_IVLAN0	3
-#define M_FW_PORT_CMD_IVLAN0	0x1
-#define V_FW_PORT_CMD_IVLAN0(x)	((x) << S_FW_PORT_CMD_IVLAN0)
-#define G_FW_PORT_CMD_IVLAN0(x)	\
+#define S_FW_PORT_CMD_IVLAN0		3
+#define M_FW_PORT_CMD_IVLAN0		0x1
+#define V_FW_PORT_CMD_IVLAN0(x)		((x) << S_FW_PORT_CMD_IVLAN0)
+#define G_FW_PORT_CMD_IVLAN0(x)		\
     (((x) >> S_FW_PORT_CMD_IVLAN0) & M_FW_PORT_CMD_IVLAN0)
-#define F_FW_PORT_CMD_IVLAN0	V_FW_PORT_CMD_IVLAN0(1U)
+#define F_FW_PORT_CMD_IVLAN0		V_FW_PORT_CMD_IVLAN0(1U)
 
-#define S_FW_PORT_CMD_TXIPG	3
-#define M_FW_PORT_CMD_TXIPG	0x1fff
-#define V_FW_PORT_CMD_TXIPG(x)	((x) << S_FW_PORT_CMD_TXIPG)
-#define G_FW_PORT_CMD_TXIPG(x)	\
+#define S_FW_PORT_CMD_TXIPG		3
+#define M_FW_PORT_CMD_TXIPG		0x1fff
+#define V_FW_PORT_CMD_TXIPG(x)		((x) << S_FW_PORT_CMD_TXIPG)
+#define G_FW_PORT_CMD_TXIPG(x)		\
     (((x) >> S_FW_PORT_CMD_TXIPG) & M_FW_PORT_CMD_TXIPG)
 
 #define S_FW_PORT_CMD_FORCE_PINFO	0
@@ -5761,12 +7245,12 @@
 #define V_FW_PORT_CMD_LSTATUS(x)	((x) << S_FW_PORT_CMD_LSTATUS)
 #define G_FW_PORT_CMD_LSTATUS(x)	\
     (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS)
-#define F_FW_PORT_CMD_LSTATUS	V_FW_PORT_CMD_LSTATUS(1U)
+#define F_FW_PORT_CMD_LSTATUS		V_FW_PORT_CMD_LSTATUS(1U)
 
-#define S_FW_PORT_CMD_LSPEED	24
-#define M_FW_PORT_CMD_LSPEED	0x3f
-#define V_FW_PORT_CMD_LSPEED(x)	((x) << S_FW_PORT_CMD_LSPEED)
-#define G_FW_PORT_CMD_LSPEED(x)	\
+#define S_FW_PORT_CMD_LSPEED		24
+#define M_FW_PORT_CMD_LSPEED		0x3f
+#define V_FW_PORT_CMD_LSPEED(x)		((x) << S_FW_PORT_CMD_LSPEED)
+#define G_FW_PORT_CMD_LSPEED(x)		\
     (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED)
 
 #define S_FW_PORT_CMD_TXPAUSE		23
@@ -5774,7 +7258,7 @@
 #define V_FW_PORT_CMD_TXPAUSE(x)	((x) << S_FW_PORT_CMD_TXPAUSE)
 #define G_FW_PORT_CMD_TXPAUSE(x)	\
     (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE)
-#define F_FW_PORT_CMD_TXPAUSE	V_FW_PORT_CMD_TXPAUSE(1U)
+#define F_FW_PORT_CMD_TXPAUSE		V_FW_PORT_CMD_TXPAUSE(1U)
 
 #define S_FW_PORT_CMD_RXPAUSE		22
 #define M_FW_PORT_CMD_RXPAUSE		0x1
@@ -5781,7 +7265,7 @@
 #define V_FW_PORT_CMD_RXPAUSE(x)	((x) << S_FW_PORT_CMD_RXPAUSE)
 #define G_FW_PORT_CMD_RXPAUSE(x)	\
     (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE)
-#define F_FW_PORT_CMD_RXPAUSE	V_FW_PORT_CMD_RXPAUSE(1U)
+#define F_FW_PORT_CMD_RXPAUSE		V_FW_PORT_CMD_RXPAUSE(1U)
 
 #define S_FW_PORT_CMD_MDIOCAP		21
 #define M_FW_PORT_CMD_MDIOCAP		0x1
@@ -5788,7 +7272,7 @@
 #define V_FW_PORT_CMD_MDIOCAP(x)	((x) << S_FW_PORT_CMD_MDIOCAP)
 #define G_FW_PORT_CMD_MDIOCAP(x)	\
     (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP)
-#define F_FW_PORT_CMD_MDIOCAP	V_FW_PORT_CMD_MDIOCAP(1U)
+#define F_FW_PORT_CMD_MDIOCAP		V_FW_PORT_CMD_MDIOCAP(1U)
 
 #define S_FW_PORT_CMD_MDIOADDR		16
 #define M_FW_PORT_CMD_MDIOADDR		0x1f
@@ -5801,7 +7285,7 @@
 #define V_FW_PORT_CMD_LPTXPAUSE(x)	((x) << S_FW_PORT_CMD_LPTXPAUSE)
 #define G_FW_PORT_CMD_LPTXPAUSE(x)	\
     (((x) >> S_FW_PORT_CMD_LPTXPAUSE) & M_FW_PORT_CMD_LPTXPAUSE)
-#define F_FW_PORT_CMD_LPTXPAUSE	V_FW_PORT_CMD_LPTXPAUSE(1U)
+#define F_FW_PORT_CMD_LPTXPAUSE		V_FW_PORT_CMD_LPTXPAUSE(1U)
 
 #define S_FW_PORT_CMD_LPRXPAUSE		14
 #define M_FW_PORT_CMD_LPRXPAUSE		0x1
@@ -5808,12 +7292,12 @@
 #define V_FW_PORT_CMD_LPRXPAUSE(x)	((x) << S_FW_PORT_CMD_LPRXPAUSE)
 #define G_FW_PORT_CMD_LPRXPAUSE(x)	\
     (((x) >> S_FW_PORT_CMD_LPRXPAUSE) & M_FW_PORT_CMD_LPRXPAUSE)
-#define F_FW_PORT_CMD_LPRXPAUSE	V_FW_PORT_CMD_LPRXPAUSE(1U)
+#define F_FW_PORT_CMD_LPRXPAUSE		V_FW_PORT_CMD_LPRXPAUSE(1U)
 
-#define S_FW_PORT_CMD_PTYPE	8
-#define M_FW_PORT_CMD_PTYPE	0x1f
-#define V_FW_PORT_CMD_PTYPE(x)	((x) << S_FW_PORT_CMD_PTYPE)
-#define G_FW_PORT_CMD_PTYPE(x)	\
+#define S_FW_PORT_CMD_PTYPE		8
+#define M_FW_PORT_CMD_PTYPE		0x1f
+#define V_FW_PORT_CMD_PTYPE(x)		((x) << S_FW_PORT_CMD_PTYPE)
+#define G_FW_PORT_CMD_PTYPE(x)		\
     (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE)
 
 #define S_FW_PORT_CMD_LINKDNRC		5
@@ -5828,12 +7312,35 @@
 #define G_FW_PORT_CMD_MODTYPE(x)	\
     (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
 
-#define S_FW_PORT_CMD_APPLY	7
-#define M_FW_PORT_CMD_APPLY	0x1
-#define V_FW_PORT_CMD_APPLY(x)	((x) << S_FW_PORT_CMD_APPLY)
-#define G_FW_PORT_CMD_APPLY(x)	\
+#define S_FW_PORT_AUXLINFO_KX4	2
+#define M_FW_PORT_AUXLINFO_KX4	0x1
+#define V_FW_PORT_AUXLINFO_KX4(x) \
+    ((x) << S_FW_PORT_AUXLINFO_KX4)
+#define G_FW_PORT_AUXLINFO_KX4(x) \
+    (((x) >> S_FW_PORT_AUXLINFO_KX4) & M_FW_PORT_AUXLINFO_KX4)
+#define F_FW_PORT_AUXLINFO_KX4	V_FW_PORT_AUXLINFO_KX4(1U)
+
+#define S_FW_PORT_AUXLINFO_KR	1
+#define M_FW_PORT_AUXLINFO_KR	0x1
+#define V_FW_PORT_AUXLINFO_KR(x) \
+    ((x) << S_FW_PORT_AUXLINFO_KR)
+#define G_FW_PORT_AUXLINFO_KR(x) \
+    (((x) >> S_FW_PORT_AUXLINFO_KR) & M_FW_PORT_AUXLINFO_KR)
+#define F_FW_PORT_AUXLINFO_KR	V_FW_PORT_AUXLINFO_KR(1U)
+
+#define S_FW_PORT_CMD_DCBXDIS		7
+#define M_FW_PORT_CMD_DCBXDIS		0x1
+#define V_FW_PORT_CMD_DCBXDIS(x)	((x) << S_FW_PORT_CMD_DCBXDIS)
+#define G_FW_PORT_CMD_DCBXDIS(x)	\
+    (((x) >> S_FW_PORT_CMD_DCBXDIS) & M_FW_PORT_CMD_DCBXDIS)
+#define F_FW_PORT_CMD_DCBXDIS		V_FW_PORT_CMD_DCBXDIS(1U)
+
+#define S_FW_PORT_CMD_APPLY		7
+#define M_FW_PORT_CMD_APPLY		0x1
+#define V_FW_PORT_CMD_APPLY(x)		((x) << S_FW_PORT_CMD_APPLY)
+#define G_FW_PORT_CMD_APPLY(x)		\
     (((x) >> S_FW_PORT_CMD_APPLY) & M_FW_PORT_CMD_APPLY)
-#define F_FW_PORT_CMD_APPLY	V_FW_PORT_CMD_APPLY(1U)
+#define F_FW_PORT_CMD_APPLY		V_FW_PORT_CMD_APPLY(1U)
 
 #define S_FW_PORT_CMD_ALL_SYNCD		7
 #define M_FW_PORT_CMD_ALL_SYNCD		0x1
@@ -5840,19 +7347,126 @@
 #define V_FW_PORT_CMD_ALL_SYNCD(x)	((x) << S_FW_PORT_CMD_ALL_SYNCD)
 #define G_FW_PORT_CMD_ALL_SYNCD(x)	\
     (((x) >> S_FW_PORT_CMD_ALL_SYNCD) & M_FW_PORT_CMD_ALL_SYNCD)
-#define F_FW_PORT_CMD_ALL_SYNCD	V_FW_PORT_CMD_ALL_SYNCD(1U)
+#define F_FW_PORT_CMD_ALL_SYNCD		V_FW_PORT_CMD_ALL_SYNCD(1U)
 
+#define S_FW_PORT_CMD_DCB_VERSION	12
+#define M_FW_PORT_CMD_DCB_VERSION	0x7
+#define V_FW_PORT_CMD_DCB_VERSION(x)	((x) << S_FW_PORT_CMD_DCB_VERSION)
+#define G_FW_PORT_CMD_DCB_VERSION(x)	\
+    (((x) >> S_FW_PORT_CMD_DCB_VERSION) & M_FW_PORT_CMD_DCB_VERSION)
+
+#define S_FW_PORT_CMD_PFC_STATE		8
+#define M_FW_PORT_CMD_PFC_STATE		0xf
+#define V_FW_PORT_CMD_PFC_STATE(x)	((x) << S_FW_PORT_CMD_PFC_STATE)
+#define G_FW_PORT_CMD_PFC_STATE(x)	\
+    (((x) >> S_FW_PORT_CMD_PFC_STATE) & M_FW_PORT_CMD_PFC_STATE)
+
+#define S_FW_PORT_CMD_ETS_STATE		4
+#define M_FW_PORT_CMD_ETS_STATE		0xf
+#define V_FW_PORT_CMD_ETS_STATE(x)	((x) << S_FW_PORT_CMD_ETS_STATE)
+#define G_FW_PORT_CMD_ETS_STATE(x)	\
+    (((x) >> S_FW_PORT_CMD_ETS_STATE) & M_FW_PORT_CMD_ETS_STATE)
+
+#define S_FW_PORT_CMD_APP_STATE		0
+#define M_FW_PORT_CMD_APP_STATE		0xf
+#define V_FW_PORT_CMD_APP_STATE(x)	((x) << S_FW_PORT_CMD_APP_STATE)
+#define G_FW_PORT_CMD_APP_STATE(x)	\
+    (((x) >> S_FW_PORT_CMD_APP_STATE) & M_FW_PORT_CMD_APP_STATE)
+
+#define S_FW_PORT_CMD_LSTATUS32		31
+#define M_FW_PORT_CMD_LSTATUS32		0x1
+#define V_FW_PORT_CMD_LSTATUS32(x)	((x) << S_FW_PORT_CMD_LSTATUS32)
+#define G_FW_PORT_CMD_LSTATUS32(x)	\
+    (((x) >> S_FW_PORT_CMD_LSTATUS32) & M_FW_PORT_CMD_LSTATUS32)
+#define F_FW_PORT_CMD_LSTATUS32	V_FW_PORT_CMD_LSTATUS32(1U)
+
+#define S_FW_PORT_CMD_LINKDNRC32	28
+#define M_FW_PORT_CMD_LINKDNRC32	0x7
+#define V_FW_PORT_CMD_LINKDNRC32(x)	((x) << S_FW_PORT_CMD_LINKDNRC32)
+#define G_FW_PORT_CMD_LINKDNRC32(x)	\
+    (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32)
+
+#define S_FW_PORT_CMD_DCBXDIS32		27
+#define M_FW_PORT_CMD_DCBXDIS32		0x1
+#define V_FW_PORT_CMD_DCBXDIS32(x)	((x) << S_FW_PORT_CMD_DCBXDIS32)
+#define G_FW_PORT_CMD_DCBXDIS32(x)	\
+    (((x) >> S_FW_PORT_CMD_DCBXDIS32) & M_FW_PORT_CMD_DCBXDIS32)
+#define F_FW_PORT_CMD_DCBXDIS32	V_FW_PORT_CMD_DCBXDIS32(1U)
+
+#define S_FW_PORT_CMD_MDIOCAP32		26
+#define M_FW_PORT_CMD_MDIOCAP32		0x1
+#define V_FW_PORT_CMD_MDIOCAP32(x)	((x) << S_FW_PORT_CMD_MDIOCAP32)
+#define G_FW_PORT_CMD_MDIOCAP32(x)	\
+    (((x) >> S_FW_PORT_CMD_MDIOCAP32) & M_FW_PORT_CMD_MDIOCAP32)
+#define F_FW_PORT_CMD_MDIOCAP32	V_FW_PORT_CMD_MDIOCAP32(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR32	21
+#define M_FW_PORT_CMD_MDIOADDR32	0x1f
+#define V_FW_PORT_CMD_MDIOADDR32(x)	((x) << S_FW_PORT_CMD_MDIOADDR32)
+#define G_FW_PORT_CMD_MDIOADDR32(x)	\
+    (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32)
+
+#define S_FW_PORT_CMD_PORTTYPE32	13
+#define M_FW_PORT_CMD_PORTTYPE32	0xff
+#define V_FW_PORT_CMD_PORTTYPE32(x)	((x) << S_FW_PORT_CMD_PORTTYPE32)
+#define G_FW_PORT_CMD_PORTTYPE32(x)	\
+    (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)
+
+#define S_FW_PORT_CMD_MODTYPE32		8
+#define M_FW_PORT_CMD_MODTYPE32		0x1f
+#define V_FW_PORT_CMD_MODTYPE32(x)	((x) << S_FW_PORT_CMD_MODTYPE32)
+#define G_FW_PORT_CMD_MODTYPE32(x)	\
+    (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)
+
+#define S_FW_PORT_CMD_CBLLEN32		0
+#define M_FW_PORT_CMD_CBLLEN32		0xff
+#define V_FW_PORT_CMD_CBLLEN32(x)	((x) << S_FW_PORT_CMD_CBLLEN32)
+#define G_FW_PORT_CMD_CBLLEN32(x)	\
+    (((x) >> S_FW_PORT_CMD_CBLLEN32) & M_FW_PORT_CMD_CBLLEN32)
+
+#define S_FW_PORT_CMD_AUXLINFO32	24
+#define M_FW_PORT_CMD_AUXLINFO32	0xff
+#define V_FW_PORT_CMD_AUXLINFO32(x)	((x) << S_FW_PORT_CMD_AUXLINFO32)
+#define G_FW_PORT_CMD_AUXLINFO32(x)	\
+    (((x) >> S_FW_PORT_CMD_AUXLINFO32) & M_FW_PORT_CMD_AUXLINFO32)
+
+#define S_FW_PORT_AUXLINFO32_KX4	2
+#define M_FW_PORT_AUXLINFO32_KX4	0x1
+#define V_FW_PORT_AUXLINFO32_KX4(x) \
+    ((x) << S_FW_PORT_AUXLINFO32_KX4)
+#define G_FW_PORT_AUXLINFO32_KX4(x) \
+    (((x) >> S_FW_PORT_AUXLINFO32_KX4) & M_FW_PORT_AUXLINFO32_KX4)
+#define F_FW_PORT_AUXLINFO32_KX4	V_FW_PORT_AUXLINFO32_KX4(1U)
+
+#define S_FW_PORT_AUXLINFO32_KR	1
+#define M_FW_PORT_AUXLINFO32_KR	0x1
+#define V_FW_PORT_AUXLINFO32_KR(x) \
+    ((x) << S_FW_PORT_AUXLINFO32_KR)
+#define G_FW_PORT_AUXLINFO32_KR(x) \
+    (((x) >> S_FW_PORT_AUXLINFO32_KR) & M_FW_PORT_AUXLINFO32_KR)
+#define F_FW_PORT_AUXLINFO32_KR	V_FW_PORT_AUXLINFO32_KR(1U)
+
+#define S_FW_PORT_CMD_MTU32	0
+#define M_FW_PORT_CMD_MTU32	0xffff
+#define V_FW_PORT_CMD_MTU32(x)	((x) << S_FW_PORT_CMD_MTU32)
+#define G_FW_PORT_CMD_MTU32(x)	\
+    (((x) >> S_FW_PORT_CMD_MTU32) & M_FW_PORT_CMD_MTU32)
+
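A FW_PORT_ACTION_GET_PORT_INFO32 reply lands in u.info32, and the *32
accessors above unpack its packed words.  A hedged sketch of consuming
such a reply; field placement is read off struct fw_port_info32 and the
S_/M_ values above, not off driver code:

#include <stdbool.h>
#include <stdint.h>
#include <sys/endian.h>

static void
port_info32_decode(const struct fw_port_cmd *rpl, bool *link_up,
    unsigned int *mtu, uint32_t *acaps)
{
	uint32_t lstatus = be32toh(rpl->u.info32.lstatus32_to_cbllen32);

	*link_up = G_FW_PORT_CMD_LSTATUS32(lstatus) != 0;
	*mtu = G_FW_PORT_CMD_MTU32(be32toh(rpl->u.info32.auxlinfo32_mtu32));
	*acaps = be32toh(rpl->u.info32.acaps32);	/* advertised caps */
}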
 /*
  *	These are configured into the VPD and hence tools that generate
  *	VPD may use this enumeration.
  *	extPHY	#lanes	T4_I2C	extI2C	BP_Eq	BP_ANEG	Speed
+ *
+ *	REMEMBER:
+ *	    Update the Common Code t4_hw.c:t4_get_port_type_description()
+ *	    with any new Firmware Port Technology Types!
  */
 enum fw_port_type {
 	FW_PORT_TYPE_FIBER_XFI	=  0,	/* Y, 1, N, Y, N, N, 10G */
 	FW_PORT_TYPE_FIBER_XAUI	=  1,	/* Y, 4, N, Y, N, N, 10G */
 	FW_PORT_TYPE_BT_SGMII	=  2,	/* Y, 1, No, No, No, No, 1G/100M */
-	FW_PORT_TYPE_BT_XFI	=  3,	/* Y, 1, No, No, No, No, 10G */
-	FW_PORT_TYPE_BT_XAUI	=  4,	/* Y, 4, No, No, No, No, 10G/1G/100M? */
+	FW_PORT_TYPE_BT_XFI	=  3,	/* Y, 1, No, No, No, No, 10G/1G/100M */
+	FW_PORT_TYPE_BT_XAUI	=  4,	/* Y, 4, No, No, No, No, 10G/1G/100M */
 	FW_PORT_TYPE_KX4	=  5,	/* No, 4, No, No, Yes, Yes, 10G */
 	FW_PORT_TYPE_CX4	=  6,	/* No, 4, No, No, No, No, 10G */
 	FW_PORT_TYPE_KX		=  7,	/* No, 1, No, No, Yes, No, 1G */
@@ -5860,7 +7474,16 @@
 	FW_PORT_TYPE_SFP	=  9,	/* No, 1, Yes, No, No, No, 10G */
 	FW_PORT_TYPE_BP_AP	= 10,	/* No, 1, No, No, Yes, Yes, 10G, BP ANEG */
 	FW_PORT_TYPE_BP4_AP	= 11,	/* No, 4, No, No, Yes, Yes, 10G, BP ANEG */
-
+	FW_PORT_TYPE_QSFP_10G	= 12,	/* No, 1, Yes, No, No, No, 10G */
+	FW_PORT_TYPE_QSA	= 13,	/* No, 1, Yes, No, No, No, 10G */
+	FW_PORT_TYPE_QSFP	= 14,	/* No, 4, Yes, No, No, No, 40G */
+	FW_PORT_TYPE_BP40_BA	= 15,	/* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANEG */
+	FW_PORT_TYPE_KR4_100G	= 16,	/* No, 4, 100G/40G/25G, Backplane */
+	FW_PORT_TYPE_CR4_QSFP	= 17,	/* No, 4, 100G/40G/25G */
+	FW_PORT_TYPE_CR_QSFP	= 18,	/* No, 1, 25G Spider cable */
+	FW_PORT_TYPE_CR2_QSFP	= 19,	/* No, 2, 50G */
+	FW_PORT_TYPE_SFP28	= 20,	/* No, 1, 25G/10G/1G */
+	FW_PORT_TYPE_KR_SFP28	= 21,	/* No, 1, 25G/10G/1G using Backplane */
 	FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
 };
 
@@ -5888,6 +7511,8 @@
 	FW_PORT_MOD_SUB_TYPE_AQ1202=0x3,
 	FW_PORT_MOD_SUB_TYPE_88x3120=0x4,
 	FW_PORT_MOD_SUB_TYPE_BCM84834=0x5,
+	FW_PORT_MOD_SUB_TYPE_BCM5482=0x6,
+	FW_PORT_MOD_SUB_TYPE_BCM84856=0x7,
 	FW_PORT_MOD_SUB_TYPE_BT_VSC8634=0x8,
 
 	/*
@@ -5912,14 +7537,8 @@
 	FW_PORT_LINK_DN_RX_LOS,		/* No RX signal detected */
 	FW_PORT_LINK_DN_RESERVED7
 };
-
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
 enum fw_port_stats_tx_index {
-	FW_STAT_TX_PORT_BYTES_IX,
+	FW_STAT_TX_PORT_BYTES_IX = 0,
 	FW_STAT_TX_PORT_FRAMES_IX,
 	FW_STAT_TX_PORT_BCAST_IX,
 	FW_STAT_TX_PORT_MCAST_IX,
@@ -5941,11 +7560,12 @@
 	FW_STAT_TX_PORT_PPP4_IX,
 	FW_STAT_TX_PORT_PPP5_IX,
 	FW_STAT_TX_PORT_PPP6_IX,
-	FW_STAT_TX_PORT_PPP7_IX
+	FW_STAT_TX_PORT_PPP7_IX,
+	FW_NUM_PORT_TX_STATS
 };
 
 enum fw_port_stat_rx_index {
-	FW_STAT_RX_PORT_BYTES_IX,
+	FW_STAT_RX_PORT_BYTES_IX = 0,
 	FW_STAT_RX_PORT_FRAMES_IX,
 	FW_STAT_RX_PORT_BCAST_IX,
 	FW_STAT_RX_PORT_MCAST_IX,
@@ -5971,9 +7591,15 @@
 	FW_STAT_RX_PORT_PPP5_IX,
 	FW_STAT_RX_PORT_PPP6_IX,
 	FW_STAT_RX_PORT_PPP7_IX,
-	FW_STAT_RX_PORT_LESS_64B_IX
+	FW_STAT_RX_PORT_LESS_64B_IX,
+	FW_STAT_RX_PORT_MAC_ERROR_IX,
+	FW_NUM_PORT_RX_STATS
 };
+/* port stats */
+#define FW_NUM_PORT_STATS \
+    (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
 
+
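The per-direction stat counts are no longer free-standing #defines kept
in sync by hand: each index enum now ends in a FW_NUM_* sentinel, so
the totals follow the lists automatically.  The idiom in miniature
(names invented for illustration):

#include <stdint.h>

enum demo_stat_index {
	DEMO_STAT_BYTES_IX = 0,
	DEMO_STAT_FRAMES_IX,
	DEMO_NUM_STATS		/* trailing sentinel == list length */
};

static uint64_t demo_stats[DEMO_NUM_STATS];	/* always sized to match */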
 struct fw_port_stats_cmd {
 	__be32 op_to_portid;
 	__be32 retval_len16;
@@ -6064,7 +7690,7 @@
 #define V_FW_PORT_STATS_CMD_TX(x)	((x) << S_FW_PORT_STATS_CMD_TX)
 #define G_FW_PORT_STATS_CMD_TX(x)	\
     (((x) >> S_FW_PORT_STATS_CMD_TX) & M_FW_PORT_STATS_CMD_TX)
-#define F_FW_PORT_STATS_CMD_TX	V_FW_PORT_STATS_CMD_TX(1U)
+#define F_FW_PORT_STATS_CMD_TX		V_FW_PORT_STATS_CMD_TX(1U)
 
 #define S_FW_PORT_STATS_CMD_IX		0
 #define M_FW_PORT_STATS_CMD_IX		0x3f
@@ -6127,18 +7753,18 @@
 	} u;
 };
 
-#define S_FW_PORT_LB_STATS_CMD_LBPORT		0
-#define M_FW_PORT_LB_STATS_CMD_LBPORT		0xf
-#define V_FW_PORT_LB_STATS_CMD_LBPORT(x)	\
+#define S_FW_PORT_LB_STATS_CMD_LBPORT	0
+#define M_FW_PORT_LB_STATS_CMD_LBPORT	0xf
+#define V_FW_PORT_LB_STATS_CMD_LBPORT(x) \
     ((x) << S_FW_PORT_LB_STATS_CMD_LBPORT)
-#define G_FW_PORT_LB_STATS_CMD_LBPORT(x)	\
+#define G_FW_PORT_LB_STATS_CMD_LBPORT(x) \
     (((x) >> S_FW_PORT_LB_STATS_CMD_LBPORT) & M_FW_PORT_LB_STATS_CMD_LBPORT)
 
-#define S_FW_PORT_LB_STATS_CMD_NSTATS		4
-#define M_FW_PORT_LB_STATS_CMD_NSTATS		0x7
-#define V_FW_PORT_LB_STATS_CMD_NSTATS(x)	\
+#define S_FW_PORT_LB_STATS_CMD_NSTATS	4
+#define M_FW_PORT_LB_STATS_CMD_NSTATS	0x7
+#define V_FW_PORT_LB_STATS_CMD_NSTATS(x) \
     ((x) << S_FW_PORT_LB_STATS_CMD_NSTATS)
-#define G_FW_PORT_LB_STATS_CMD_NSTATS(x)	\
+#define G_FW_PORT_LB_STATS_CMD_NSTATS(x) \
     (((x) >> S_FW_PORT_LB_STATS_CMD_NSTATS) & M_FW_PORT_LB_STATS_CMD_NSTATS)
 
 #define S_FW_PORT_LB_STATS_CMD_BG_BM	0
@@ -6192,11 +7818,11 @@
     (((x) >> S_FW_PORT_TRACE_CMD_DUPLEN) & M_FW_PORT_TRACE_CMD_DUPLEN)
 #define F_FW_PORT_TRACE_CMD_DUPLEN	V_FW_PORT_TRACE_CMD_DUPLEN(1U)
 
-#define S_FW_PORT_TRACE_CMD_RUNTFLTSIZE		8
-#define M_FW_PORT_TRACE_CMD_RUNTFLTSIZE		0x1f
-#define V_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x)	\
+#define S_FW_PORT_TRACE_CMD_RUNTFLTSIZE	8
+#define M_FW_PORT_TRACE_CMD_RUNTFLTSIZE	0x1f
+#define V_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \
     ((x) << S_FW_PORT_TRACE_CMD_RUNTFLTSIZE)
-#define G_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x)	\
+#define G_FW_PORT_TRACE_CMD_RUNTFLTSIZE(x) \
     (((x) >> S_FW_PORT_TRACE_CMD_RUNTFLTSIZE) & \
      M_FW_PORT_TRACE_CMD_RUNTFLTSIZE)
 
@@ -6214,11 +7840,11 @@
 	__u8   map[224];
 };
 
-#define S_FW_PORT_TRACE_MMAP_CMD_PORTID		0
-#define M_FW_PORT_TRACE_MMAP_CMD_PORTID		0xf
-#define V_FW_PORT_TRACE_MMAP_CMD_PORTID(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_PORTID	0
+#define M_FW_PORT_TRACE_MMAP_CMD_PORTID	0xf
+#define V_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_PORTID)
-#define G_FW_PORT_TRACE_MMAP_CMD_PORTID(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_PORTID(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_PORTID) & \
      M_FW_PORT_TRACE_MMAP_CMD_PORTID)
 
@@ -6228,57 +7854,107 @@
 #define G_FW_PORT_TRACE_MMAP_CMD_FID(x)	\
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_FID) & M_FW_PORT_TRACE_MMAP_CMD_FID)
 
-#define S_FW_PORT_TRACE_MMAP_CMD_MMAPEN		29
-#define M_FW_PORT_TRACE_MMAP_CMD_MMAPEN		0x1
-#define V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_MMAPEN	29
+#define M_FW_PORT_TRACE_MMAP_CMD_MMAPEN	0x1
+#define V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_MMAPEN)
-#define G_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_MMAPEN(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MMAPEN) & \
      M_FW_PORT_TRACE_MMAP_CMD_MMAPEN)
 #define F_FW_PORT_TRACE_MMAP_CMD_MMAPEN	V_FW_PORT_TRACE_MMAP_CMD_MMAPEN(1U)
 
-#define S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN	28
-#define M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN	0x1
-#define V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 28
+#define M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN 0x1
+#define V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN)
-#define G_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_DCMAPEN) & \
      M_FW_PORT_TRACE_MMAP_CMD_DCMAPEN)
-#define F_FW_PORT_TRACE_MMAP_CMD_DCMAPEN	\
-    V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(1U)
+#define F_FW_PORT_TRACE_MMAP_CMD_DCMAPEN V_FW_PORT_TRACE_MMAP_CMD_DCMAPEN(1U)
 
-#define S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH	8
-#define M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH	0x1f
-#define V_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 8
+#define M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH 0x1f
+#define V_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH)
-#define G_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH) & \
      M_FW_PORT_TRACE_MMAP_CMD_SKIPLENGTH)
 
-#define S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET	0
-#define M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET	0x1f
-#define V_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0
+#define M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET 0x1f
+#define V_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET)
-#define G_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET) & \
      M_FW_PORT_TRACE_MMAP_CMD_SKIPOFFSET)
 
-#define S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE	18
-#define M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE	0x3fff
-#define V_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 18
+#define M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE 0x3fff
+#define V_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE)
-#define G_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE) & \
      M_FW_PORT_TRACE_MMAP_CMD_MINPKTSIZE)
 
-#define S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX	0
-#define M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX	0x3fff
-#define V_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x)	\
+#define S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0
+#define M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX 0x3fff
+#define V_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \
     ((x) << S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX)
-#define G_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x)	\
+#define G_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX(x) \
     (((x) >> S_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX) & \
      M_FW_PORT_TRACE_MMAP_CMD_CAPTUREMAX)
 
+enum fw_ptp_subop {
+
+	/* none */
+	FW_PTP_SC_INIT_TIMER		= 0x00,
+	FW_PTP_SC_TX_TYPE		= 0x01,
+
+	/* init */
+	FW_PTP_SC_RXTIME_STAMP		= 0x08,
+	FW_PTP_SC_RDRX_TYPE		= 0x09,
+
+	/* ts */
+	FW_PTP_SC_ADJ_FREQ		= 0x10,
+	FW_PTP_SC_ADJ_TIME		= 0x11,
+	FW_PTP_SC_ADJ_FTIME		= 0x12,
+	FW_PTP_SC_WALL_CLOCK		= 0x13,
+	FW_PTP_SC_GET_TIME		= 0x14,
+	FW_PTP_SC_SET_TIME		= 0x15,
+};
+
+struct fw_ptp_cmd {
+	__be32 op_to_portid;
+	__be32 retval_len16;
+	union fw_ptp {
+		struct fw_ptp_sc {
+			__u8   sc;
+			__u8   r3[7];
+		} scmd;
+		struct fw_ptp_init {
+			__u8   sc;
+			__u8   txchan;
+			__be16 absid;
+			__be16 mode;
+			__be16 r3;
+		} init;
+		struct fw_ptp_ts {
+			__u8   sc;
+			__u8   sign;
+			__be16 r3;
+			__be32 ppb;
+			__be64 tm;
+		} ts;
+	} u;
+	__be64 r3;
+};
+
+#define S_FW_PTP_CMD_PORTID		0
+#define M_FW_PTP_CMD_PORTID		0xf
+#define V_FW_PTP_CMD_PORTID(x)		((x) << S_FW_PTP_CMD_PORTID)
+#define G_FW_PTP_CMD_PORTID(x)		\
+    (((x) >> S_FW_PTP_CMD_PORTID) & M_FW_PTP_CMD_PORTID)
+
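fw_ptp_cmd keys its union member off the sub-opcode ranges above (the
none/init/ts comments).  A hedged sketch of building a frequency-adjust
request; FW_PTP_CMD and the generic FW_CMD_*/FW_LEN16 helpers are
assumed from earlier in this header, and the sign convention is a guess:

#include <stdint.h>
#include <string.h>
#include <sys/endian.h>

static void
ptp_adj_freq_build(struct fw_ptp_cmd *c, int port, int sign, uint32_t ppb)
{
	memset(c, 0, sizeof(*c));
	c->op_to_portid = htobe32(V_FW_CMD_OP(FW_PTP_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE | V_FW_PTP_CMD_PORTID(port));
	c->retval_len16 = htobe32(FW_LEN16(*c));
	c->u.ts.sc = FW_PTP_SC_ADJ_FREQ;
	c->u.ts.sign = sign;		/* 0 add, 1 subtract (assumed) */
	c->u.ts.ppb = htobe32(ppb);
}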
 struct fw_rss_ind_tbl_cmd {
 	__be32 op_to_viid;
 	__be32 retval_len16;
@@ -6508,7 +8184,7 @@
 			__be64 r5;
 		} manual;
 		struct fw_rss_glb_config_basicvirtual {
-			__be32 mode_pkd;
+			__be32 mode_keymode;
 			__be32 synmapen_to_hashtoeplitz;
 			__be64 r8;
 			__be64 r9;
@@ -6526,94 +8202,104 @@
 #define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL	1
 #define FW_RSS_GLB_CONFIG_CMD_MODE_MAX		1
 
-#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN	8
-#define M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN	0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_KEYMODE	26
+#define M_FW_RSS_GLB_CONFIG_CMD_KEYMODE	0x3
+#define V_FW_RSS_GLB_CONFIG_CMD_KEYMODE(x) \
+    ((x) << S_FW_RSS_GLB_CONFIG_CMD_KEYMODE)
+#define G_FW_RSS_GLB_CONFIG_CMD_KEYMODE(x) \
+    (((x) >> S_FW_RSS_GLB_CONFIG_CMD_KEYMODE) & \
+     M_FW_RSS_GLB_CONFIG_CMD_KEYMODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_KEYMODE_GLBKEY	0
+#define FW_RSS_GLB_CONFIG_CMD_KEYMODE_GLBVF_KEY	1
+#define FW_RSS_GLB_CONFIG_CMD_KEYMODE_PFVF_KEY	2
+#define FW_RSS_GLB_CONFIG_CMD_KEYMODE_IDXVF_KEY	3
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
-#define G_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) & \
      M_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
-#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN	\
-    V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6		7
-#define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6		0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
-#define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) & \
      M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
-#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6	\
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
     V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6		6
-#define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6		0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
-#define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) & \
      M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
-#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6	\
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
     V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4		5
-#define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4		0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
-#define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) & \
      M_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
-#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4	\
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
     V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4		4
-#define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4		0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
-#define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) & \
      M_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
-#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4	\
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
     V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN	3
-#define M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN	0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
-#define G_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) & \
      M_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
-#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN	\
-    V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN	2
-#define M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN	0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
-#define G_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) & \
      M_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
-#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN	\
-    V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP	1
-#define M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP	0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
-#define G_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) & \
      M_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
-#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP	\
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
     V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
 
-#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ	0
-#define M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ	0x1
-#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x)	\
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0x1
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
     ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
-#define G_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x)	\
+#define G_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
     (((x) >> S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) & \
      M_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
-#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ	\
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
     V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
 
 struct fw_rss_vi_config_cmd {
@@ -6628,7 +8314,8 @@
 		struct fw_rss_vi_config_basicvirtual {
 			__be32 r6;
 			__be32 defaultq_to_udpen;
-			__be64 r9;
+			__be32 secretkeyidx_pkd;
+			__be32 secretkeyxor;
 			__be64 r10;
 		} basicvirtual;
 	} u;
@@ -6640,52 +8327,52 @@
 #define G_FW_RSS_VI_CONFIG_CMD_VIID(x)	\
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID)
 
-#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ		16
-#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ		0x3ff
-#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x)	\
+#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ	16
+#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ	0x3ff
+#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
     ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
-#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x)	\
+#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \
      M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
 
-#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN	4
-#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN	0x1
-#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x)	\
+#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4
+#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
     ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
-#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x)	\
+#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \
      M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
-#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN	\
+#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \
     V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U)
 
-#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN	3
-#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN	0x1
-#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x)	\
+#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3
+#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
     ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
-#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x)	\
+#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \
      M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
-#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN	\
+#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \
     V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U)
 
-#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN	2
-#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN	0x1
-#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x)	\
+#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2
+#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
     ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
-#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x)	\
+#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \
      M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
-#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN	\
+#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \
     V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U)
 
-#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN	1
-#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN	0x1
-#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x)	\
+#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1
+#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
     ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
-#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x)	\
+#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \
      M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
-#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN	\
+#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \
     V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U)
 
 #define S_FW_RSS_VI_CONFIG_CMD_UDPEN	0
@@ -6695,6 +8382,14 @@
     (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
 #define F_FW_RSS_VI_CONFIG_CMD_UDPEN	V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
 
+#define S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX 0
+#define M_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX 0xf
+#define V_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(x) \
+    ((x) << S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX)
+#define G_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(x) \
+    (((x) >> S_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX) & \
+     M_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX)
+
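/*
 * Illustrative sketch (not part of the patch): the new SECRETKEYIDX
 * field follows the header's usual S_/M_/V_/G_ convention, so it is
 * packed with V_ and recovered with G_ after byte-swapping:
 */
static inline u_int
rss_vi_secretkeyidx_sketch(const struct fw_rss_vi_config_cmd *c)
{
	return (G_FW_RSS_VI_CONFIG_CMD_SECRETKEYIDX(
	    be32toh(c->u.basicvirtual.secretkeyidx_pkd)));
}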
 enum fw_sched_sc {
 	FW_SCHED_SC_CONFIG		= 0,
 	FW_SCHED_SC_PARAMS		= 1,
@@ -6735,6 +8430,8 @@
 			__u8   type;
 			__u8   minmaxen;
 			__u8   r3[5];
+			__u8   nclasses[4];
+			__be32 r4;
 		} config;
 		struct fw_sched_params {
 			__u8   sc;
@@ -6783,6 +8480,7 @@
  */
 enum fw_devlog_facility {
 	FW_DEVLOG_FACILITY_CORE		= 0x00,
+	FW_DEVLOG_FACILITY_CF		= 0x01,
 	FW_DEVLOG_FACILITY_SCHED	= 0x02,
 	FW_DEVLOG_FACILITY_TIMER	= 0x04,
 	FW_DEVLOG_FACILITY_RES		= 0x06,
@@ -6805,7 +8503,9 @@
 	FW_DEVLOG_FACILITY_FCOE		= 0x2E,
 	FW_DEVLOG_FACILITY_FOISCSI	= 0x30,
 	FW_DEVLOG_FACILITY_FOFCOE	= 0x32,
-	FW_DEVLOG_FACILITY_MAX		= 0x32,
+	FW_DEVLOG_FACILITY_CHNET	= 0x34,
+	FW_DEVLOG_FACILITY_COISCSI	= 0x36,
+	FW_DEVLOG_FACILITY_MAX		= 0x38,
 };
 
 /*
@@ -6832,18 +8532,18 @@
 	__be32 r3[2];
 };
 
-#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG		28
-#define M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG		0xf
-#define V_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x)	\
+#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG	28
+#define M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG	0xf
+#define V_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \
     ((x) << S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG)
-#define G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x)	\
+#define G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(x) \
     (((x) >> S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG) & M_FW_DEVLOG_CMD_MEMTYPE_DEVLOG)
 
-#define S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG	0
-#define M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG	0xfffffff
-#define V_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x)	\
+#define S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0
+#define M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG 0xfffffff
+#define V_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \
     ((x) << S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG)
-#define G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x)	\
+#define G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(x) \
     (((x) >> S_FW_DEVLOG_CMD_MEMADDR16_DEVLOG) & \
      M_FW_DEVLOG_CMD_MEMADDR16_DEVLOG)
 
@@ -6852,8 +8552,9 @@
 	FW_WATCHDOG_ACTION_FLR = 1,
 	FW_WATCHDOG_ACTION_BYPASS = 2,
 	FW_WATCHDOG_ACTION_TMPCHK = 3,
+	FW_WATCHDOG_ACTION_PAUSEOFF = 4,
 
-	FW_WATCHDOG_ACTION_MAX = 4,
+	FW_WATCHDOG_ACTION_MAX = 5,
 };
 
 #define FW_WATCHDOG_MAX_TIMEOUT_SECS	60
@@ -6885,19 +8586,19 @@
 	__be32 r4[2];
 };
 
-#define S_FW_CLIP_CMD_ALLOC	31
-#define M_FW_CLIP_CMD_ALLOC	0x1
-#define V_FW_CLIP_CMD_ALLOC(x)	((x) << S_FW_CLIP_CMD_ALLOC)
-#define G_FW_CLIP_CMD_ALLOC(x)	\
+#define S_FW_CLIP_CMD_ALLOC		31
+#define M_FW_CLIP_CMD_ALLOC		0x1
+#define V_FW_CLIP_CMD_ALLOC(x)		((x) << S_FW_CLIP_CMD_ALLOC)
+#define G_FW_CLIP_CMD_ALLOC(x)		\
     (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
-#define F_FW_CLIP_CMD_ALLOC	V_FW_CLIP_CMD_ALLOC(1U)
+#define F_FW_CLIP_CMD_ALLOC		V_FW_CLIP_CMD_ALLOC(1U)
 
-#define S_FW_CLIP_CMD_FREE	30
-#define M_FW_CLIP_CMD_FREE	0x1
-#define V_FW_CLIP_CMD_FREE(x)	((x) << S_FW_CLIP_CMD_FREE)
-#define G_FW_CLIP_CMD_FREE(x)	\
+#define S_FW_CLIP_CMD_FREE		30
+#define M_FW_CLIP_CMD_FREE		0x1
+#define V_FW_CLIP_CMD_FREE(x)		((x) << S_FW_CLIP_CMD_FREE)
+#define G_FW_CLIP_CMD_FREE(x)		\
     (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
-#define F_FW_CLIP_CMD_FREE	V_FW_CLIP_CMD_FREE(1U)
+#define F_FW_CLIP_CMD_FREE		V_FW_CLIP_CMD_FREE(1U)
 
 /******************************************************************************
  *   F O i S C S I   C O M M A N D s
@@ -6907,10 +8608,10 @@
 
 enum fw_chnet_iface_cmd_subop {
 	FW_CHNET_IFACE_CMD_SUBOP_NOOP = 0,
-	
+
 	FW_CHNET_IFACE_CMD_SUBOP_LINK_UP,
 	FW_CHNET_IFACE_CMD_SUBOP_LINK_DOWN,
-	
+
 	FW_CHNET_IFACE_CMD_SUBOP_MTU_SET,
 	FW_CHNET_IFACE_CMD_SUBOP_MTU_GET,
 
@@ -6948,10 +8649,6 @@
 #define G_FW_CHNET_IFACE_CMD_IFSTATE(x)	\
     (((x) >> S_FW_CHNET_IFACE_CMD_IFSTATE) & M_FW_CHNET_IFACE_CMD_IFSTATE)
 
-/******************************************************************************
- *   F O F C O E   C O M M A N D s
- ************************************/
-
 struct fw_fcoe_res_info_cmd {
 	__be32 op_to_read;
 	__be32 retval_len16;
@@ -6992,11 +8689,11 @@
 #define G_FW_FCOE_LINK_CMD_PORTID(x)	\
     (((x) >> S_FW_FCOE_LINK_CMD_PORTID) & M_FW_FCOE_LINK_CMD_PORTID)
 
-#define S_FW_FCOE_LINK_CMD_SUB_OPCODE		24
-#define M_FW_FCOE_LINK_CMD_SUB_OPCODE		0xff
-#define V_FW_FCOE_LINK_CMD_SUB_OPCODE(x)	\
+#define S_FW_FCOE_LINK_CMD_SUB_OPCODE	24
+#define M_FW_FCOE_LINK_CMD_SUB_OPCODE	0xff
+#define V_FW_FCOE_LINK_CMD_SUB_OPCODE(x) \
     ((x) << S_FW_FCOE_LINK_CMD_SUB_OPCODE)
-#define G_FW_FCOE_LINK_CMD_SUB_OPCODE(x)	\
+#define G_FW_FCOE_LINK_CMD_SUB_OPCODE(x) \
     (((x) >> S_FW_FCOE_LINK_CMD_SUB_OPCODE) & M_FW_FCOE_LINK_CMD_SUB_OPCODE)
 
 #define S_FW_FCOE_LINK_CMD_FCFI		0
@@ -7035,7 +8732,7 @@
 #define V_FW_FCOE_VNP_CMD_ALLOC(x)	((x) << S_FW_FCOE_VNP_CMD_ALLOC)
 #define G_FW_FCOE_VNP_CMD_ALLOC(x)	\
     (((x) >> S_FW_FCOE_VNP_CMD_ALLOC) & M_FW_FCOE_VNP_CMD_ALLOC)
-#define F_FW_FCOE_VNP_CMD_ALLOC	V_FW_FCOE_VNP_CMD_ALLOC(1U)
+#define F_FW_FCOE_VNP_CMD_ALLOC		V_FW_FCOE_VNP_CMD_ALLOC(1U)
 
 #define S_FW_FCOE_VNP_CMD_FREE		30
 #define M_FW_FCOE_VNP_CMD_FREE		0x1
@@ -7042,7 +8739,7 @@
 #define V_FW_FCOE_VNP_CMD_FREE(x)	((x) << S_FW_FCOE_VNP_CMD_FREE)
 #define G_FW_FCOE_VNP_CMD_FREE(x)	\
     (((x) >> S_FW_FCOE_VNP_CMD_FREE) & M_FW_FCOE_VNP_CMD_FREE)
-#define F_FW_FCOE_VNP_CMD_FREE	V_FW_FCOE_VNP_CMD_FREE(1U)
+#define F_FW_FCOE_VNP_CMD_FREE		V_FW_FCOE_VNP_CMD_FREE(1U)
 
 #define S_FW_FCOE_VNP_CMD_MODIFY	29
 #define M_FW_FCOE_VNP_CMD_MODIFY	0x1
@@ -7238,11 +8935,11 @@
 #define G_FW_FCOE_STATS_CMD_PORT(x)	\
     (((x) >> S_FW_FCOE_STATS_CMD_PORT) & M_FW_FCOE_STATS_CMD_PORT)
 
-#define S_FW_FCOE_STATS_CMD_PORT_VALID		7
-#define M_FW_FCOE_STATS_CMD_PORT_VALID		0x1
-#define V_FW_FCOE_STATS_CMD_PORT_VALID(x)	\
+#define S_FW_FCOE_STATS_CMD_PORT_VALID	7
+#define M_FW_FCOE_STATS_CMD_PORT_VALID	0x1
+#define V_FW_FCOE_STATS_CMD_PORT_VALID(x) \
     ((x) << S_FW_FCOE_STATS_CMD_PORT_VALID)
-#define G_FW_FCOE_STATS_CMD_PORT_VALID(x)	\
+#define G_FW_FCOE_STATS_CMD_PORT_VALID(x) \
     (((x) >> S_FW_FCOE_STATS_CMD_PORT_VALID) & M_FW_FCOE_STATS_CMD_PORT_VALID)
 #define F_FW_FCOE_STATS_CMD_PORT_VALID	V_FW_FCOE_STATS_CMD_PORT_VALID(1U)
 
@@ -7288,7 +8985,7 @@
 #define V_FW_FCOE_FCF_CMD_FPMA(x)	((x) << S_FW_FCOE_FCF_CMD_FPMA)
 #define G_FW_FCOE_FCF_CMD_FPMA(x)	\
     (((x) >> S_FW_FCOE_FCF_CMD_FPMA) & M_FW_FCOE_FCF_CMD_FPMA)
-#define F_FW_FCOE_FCF_CMD_FPMA	V_FW_FCOE_FCF_CMD_FPMA(1U)
+#define F_FW_FCOE_FCF_CMD_FPMA		V_FW_FCOE_FCF_CMD_FPMA(1U)
 
 #define S_FW_FCOE_FCF_CMD_SPMA		5
 #define M_FW_FCOE_FCF_CMD_SPMA		0x1
@@ -7295,7 +8992,7 @@
 #define V_FW_FCOE_FCF_CMD_SPMA(x)	((x) << S_FW_FCOE_FCF_CMD_SPMA)
 #define G_FW_FCOE_FCF_CMD_SPMA(x)	\
     (((x) >> S_FW_FCOE_FCF_CMD_SPMA) & M_FW_FCOE_FCF_CMD_SPMA)
-#define F_FW_FCOE_FCF_CMD_SPMA	V_FW_FCOE_FCF_CMD_SPMA(1U)
+#define F_FW_FCOE_FCF_CMD_SPMA		V_FW_FCOE_FCF_CMD_SPMA(1U)
 
 #define S_FW_FCOE_FCF_CMD_LOGIN		4
 #define M_FW_FCOE_FCF_CMD_LOGIN		0x1
@@ -7302,7 +8999,7 @@
 #define V_FW_FCOE_FCF_CMD_LOGIN(x)	((x) << S_FW_FCOE_FCF_CMD_LOGIN)
 #define G_FW_FCOE_FCF_CMD_LOGIN(x)	\
     (((x) >> S_FW_FCOE_FCF_CMD_LOGIN) & M_FW_FCOE_FCF_CMD_LOGIN)
-#define F_FW_FCOE_FCF_CMD_LOGIN	V_FW_FCOE_FCF_CMD_LOGIN(1U)
+#define F_FW_FCOE_FCF_CMD_LOGIN		V_FW_FCOE_FCF_CMD_LOGIN(1U)
 
 #define S_FW_FCOE_FCF_CMD_PORTID	0
 #define M_FW_FCOE_FCF_CMD_PORTID	0xf
@@ -7321,6 +9018,181 @@
 	FW_ERROR_TYPE_ACL		= 0x3,
 };
 
+enum fw_dcb_ieee_locations {
+	FW_IEEE_LOC_LOCAL,
+	FW_IEEE_LOC_PEER,
+	FW_IEEE_LOC_OPERATIONAL,
+};
+
+struct fw_dcb_ieee_cmd {
+	__be32 op_to_location;
+	__be32 changed_to_len16;
+	union fw_dcbx_stats {
+		struct fw_dcbx_pfc_stats_ieee {
+			__be32 pfc_mbc_pkd;
+			__be32 pfc_willing_to_pfc_en;
+		} dcbx_pfc_stats;
+		struct fw_dcbx_ets_stats_ieee {
+			__be32 cbs_to_ets_max_tc;
+			__be32 pg_table;
+			__u8   pg_percent[8];
+			__u8   tsa[8];
+		} dcbx_ets_stats;
+		struct fw_dcbx_app_stats_ieee {
+			__be32 num_apps_pkd;
+			__be32 r6;
+			__be32 app[4];
+		} dcbx_app_stats;
+		struct fw_dcbx_control {
+			__be32 multi_peer_invalidated;
+			__u8 version;
+			__u8 r6[3];
+		} dcbx_control;
+	} u;
+};
+
+#define S_FW_DCB_IEEE_CMD_PORT		8
+#define M_FW_DCB_IEEE_CMD_PORT		0x7
+#define V_FW_DCB_IEEE_CMD_PORT(x)	((x) << S_FW_DCB_IEEE_CMD_PORT)
+#define G_FW_DCB_IEEE_CMD_PORT(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_PORT) & M_FW_DCB_IEEE_CMD_PORT)
+
+#define S_FW_DCB_IEEE_CMD_FEATURE	2
+#define M_FW_DCB_IEEE_CMD_FEATURE	0x7
+#define V_FW_DCB_IEEE_CMD_FEATURE(x)	((x) << S_FW_DCB_IEEE_CMD_FEATURE)
+#define G_FW_DCB_IEEE_CMD_FEATURE(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_FEATURE) & M_FW_DCB_IEEE_CMD_FEATURE)
+
+#define S_FW_DCB_IEEE_CMD_LOCATION	0
+#define M_FW_DCB_IEEE_CMD_LOCATION	0x3
+#define V_FW_DCB_IEEE_CMD_LOCATION(x)	((x) << S_FW_DCB_IEEE_CMD_LOCATION)
+#define G_FW_DCB_IEEE_CMD_LOCATION(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_LOCATION) & M_FW_DCB_IEEE_CMD_LOCATION)
+
+#define S_FW_DCB_IEEE_CMD_CHANGED	20
+#define M_FW_DCB_IEEE_CMD_CHANGED	0x1
+#define V_FW_DCB_IEEE_CMD_CHANGED(x)	((x) << S_FW_DCB_IEEE_CMD_CHANGED)
+#define G_FW_DCB_IEEE_CMD_CHANGED(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_CHANGED) & M_FW_DCB_IEEE_CMD_CHANGED)
+#define F_FW_DCB_IEEE_CMD_CHANGED	V_FW_DCB_IEEE_CMD_CHANGED(1U)
+
+#define S_FW_DCB_IEEE_CMD_RECEIVED	19
+#define M_FW_DCB_IEEE_CMD_RECEIVED	0x1
+#define V_FW_DCB_IEEE_CMD_RECEIVED(x)	((x) << S_FW_DCB_IEEE_CMD_RECEIVED)
+#define G_FW_DCB_IEEE_CMD_RECEIVED(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_RECEIVED) & M_FW_DCB_IEEE_CMD_RECEIVED)
+#define F_FW_DCB_IEEE_CMD_RECEIVED	V_FW_DCB_IEEE_CMD_RECEIVED(1U)
+
+#define S_FW_DCB_IEEE_CMD_APPLY		18
+#define M_FW_DCB_IEEE_CMD_APPLY		0x1
+#define V_FW_DCB_IEEE_CMD_APPLY(x)	((x) << S_FW_DCB_IEEE_CMD_APPLY)
+#define G_FW_DCB_IEEE_CMD_APPLY(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_APPLY) & M_FW_DCB_IEEE_CMD_APPLY)
+#define F_FW_DCB_IEEE_CMD_APPLY	V_FW_DCB_IEEE_CMD_APPLY(1U)
+
+#define S_FW_DCB_IEEE_CMD_DISABLED	17
+#define M_FW_DCB_IEEE_CMD_DISABLED	0x1
+#define V_FW_DCB_IEEE_CMD_DISABLED(x)	((x) << S_FW_DCB_IEEE_CMD_DISABLED)
+#define G_FW_DCB_IEEE_CMD_DISABLED(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_DISABLED) & M_FW_DCB_IEEE_CMD_DISABLED)
+#define F_FW_DCB_IEEE_CMD_DISABLED	V_FW_DCB_IEEE_CMD_DISABLED(1U)
+
+#define S_FW_DCB_IEEE_CMD_MORE		16
+#define M_FW_DCB_IEEE_CMD_MORE		0x1
+#define V_FW_DCB_IEEE_CMD_MORE(x)	((x) << S_FW_DCB_IEEE_CMD_MORE)
+#define G_FW_DCB_IEEE_CMD_MORE(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_MORE) & M_FW_DCB_IEEE_CMD_MORE)
+#define F_FW_DCB_IEEE_CMD_MORE	V_FW_DCB_IEEE_CMD_MORE(1U)
+
+#define S_FW_DCB_IEEE_CMD_PFC_MBC	0
+#define M_FW_DCB_IEEE_CMD_PFC_MBC	0x1
+#define V_FW_DCB_IEEE_CMD_PFC_MBC(x)	((x) << S_FW_DCB_IEEE_CMD_PFC_MBC)
+#define G_FW_DCB_IEEE_CMD_PFC_MBC(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_PFC_MBC) & M_FW_DCB_IEEE_CMD_PFC_MBC)
+#define F_FW_DCB_IEEE_CMD_PFC_MBC	V_FW_DCB_IEEE_CMD_PFC_MBC(1U)
+
+#define S_FW_DCB_IEEE_CMD_PFC_WILLING		16
+#define M_FW_DCB_IEEE_CMD_PFC_WILLING		0x1
+#define V_FW_DCB_IEEE_CMD_PFC_WILLING(x)	\
+    ((x) << S_FW_DCB_IEEE_CMD_PFC_WILLING)
+#define G_FW_DCB_IEEE_CMD_PFC_WILLING(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_PFC_WILLING) & M_FW_DCB_IEEE_CMD_PFC_WILLING)
+#define F_FW_DCB_IEEE_CMD_PFC_WILLING	V_FW_DCB_IEEE_CMD_PFC_WILLING(1U)
+
+#define S_FW_DCB_IEEE_CMD_PFC_MAX_TC	8
+#define M_FW_DCB_IEEE_CMD_PFC_MAX_TC	0xff
+#define V_FW_DCB_IEEE_CMD_PFC_MAX_TC(x)	((x) << S_FW_DCB_IEEE_CMD_PFC_MAX_TC)
+#define G_FW_DCB_IEEE_CMD_PFC_MAX_TC(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_PFC_MAX_TC) & M_FW_DCB_IEEE_CMD_PFC_MAX_TC)
+
+#define S_FW_DCB_IEEE_CMD_PFC_EN	0
+#define M_FW_DCB_IEEE_CMD_PFC_EN	0xff
+#define V_FW_DCB_IEEE_CMD_PFC_EN(x)	((x) << S_FW_DCB_IEEE_CMD_PFC_EN)
+#define G_FW_DCB_IEEE_CMD_PFC_EN(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_PFC_EN) & M_FW_DCB_IEEE_CMD_PFC_EN)
+
+#define S_FW_DCB_IEEE_CMD_CBS		16
+#define M_FW_DCB_IEEE_CMD_CBS		0x1
+#define V_FW_DCB_IEEE_CMD_CBS(x)	((x) << S_FW_DCB_IEEE_CMD_CBS)
+#define G_FW_DCB_IEEE_CMD_CBS(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_CBS) & M_FW_DCB_IEEE_CMD_CBS)
+#define F_FW_DCB_IEEE_CMD_CBS	V_FW_DCB_IEEE_CMD_CBS(1U)
+
+#define S_FW_DCB_IEEE_CMD_ETS_WILLING		8
+#define M_FW_DCB_IEEE_CMD_ETS_WILLING		0x1
+#define V_FW_DCB_IEEE_CMD_ETS_WILLING(x)	\
+    ((x) << S_FW_DCB_IEEE_CMD_ETS_WILLING)
+#define G_FW_DCB_IEEE_CMD_ETS_WILLING(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_ETS_WILLING) & M_FW_DCB_IEEE_CMD_ETS_WILLING)
+#define F_FW_DCB_IEEE_CMD_ETS_WILLING	V_FW_DCB_IEEE_CMD_ETS_WILLING(1U)
+
+#define S_FW_DCB_IEEE_CMD_ETS_MAX_TC	0
+#define M_FW_DCB_IEEE_CMD_ETS_MAX_TC	0xff
+#define V_FW_DCB_IEEE_CMD_ETS_MAX_TC(x)	((x) << S_FW_DCB_IEEE_CMD_ETS_MAX_TC)
+#define G_FW_DCB_IEEE_CMD_ETS_MAX_TC(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_ETS_MAX_TC) & M_FW_DCB_IEEE_CMD_ETS_MAX_TC)
+
+#define S_FW_DCB_IEEE_CMD_NUM_APPS	0
+#define M_FW_DCB_IEEE_CMD_NUM_APPS	0x7
+#define V_FW_DCB_IEEE_CMD_NUM_APPS(x)	((x) << S_FW_DCB_IEEE_CMD_NUM_APPS)
+#define G_FW_DCB_IEEE_CMD_NUM_APPS(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_NUM_APPS) & M_FW_DCB_IEEE_CMD_NUM_APPS)
+
+#define S_FW_DCB_IEEE_CMD_MULTI_PEER	31
+#define M_FW_DCB_IEEE_CMD_MULTI_PEER	0x1
+#define V_FW_DCB_IEEE_CMD_MULTI_PEER(x)	((x) << S_FW_DCB_IEEE_CMD_MULTI_PEER)
+#define G_FW_DCB_IEEE_CMD_MULTI_PEER(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_MULTI_PEER) & M_FW_DCB_IEEE_CMD_MULTI_PEER)
+#define F_FW_DCB_IEEE_CMD_MULTI_PEER	V_FW_DCB_IEEE_CMD_MULTI_PEER(1U)
+
+#define S_FW_DCB_IEEE_CMD_INVALIDATED		30
+#define M_FW_DCB_IEEE_CMD_INVALIDATED		0x1
+#define V_FW_DCB_IEEE_CMD_INVALIDATED(x)	\
+    ((x) << S_FW_DCB_IEEE_CMD_INVALIDATED)
+#define G_FW_DCB_IEEE_CMD_INVALIDATED(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_INVALIDATED) & M_FW_DCB_IEEE_CMD_INVALIDATED)
+#define F_FW_DCB_IEEE_CMD_INVALIDATED	V_FW_DCB_IEEE_CMD_INVALIDATED(1U)
+
+/* Hand-written */
+#define S_FW_DCB_IEEE_CMD_APP_PROTOCOL	16
+#define M_FW_DCB_IEEE_CMD_APP_PROTOCOL	0xffff
+#define V_FW_DCB_IEEE_CMD_APP_PROTOCOL(x)	((x) << S_FW_DCB_IEEE_CMD_APP_PROTOCOL)
+#define G_FW_DCB_IEEE_CMD_APP_PROTOCOL(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_APP_PROTOCOL) & M_FW_DCB_IEEE_CMD_APP_PROTOCOL)
+
+#define S_FW_DCB_IEEE_CMD_APP_SELECT	3
+#define M_FW_DCB_IEEE_CMD_APP_SELECT	0x7
+#define V_FW_DCB_IEEE_CMD_APP_SELECT(x)	((x) << S_FW_DCB_IEEE_CMD_APP_SELECT)
+#define G_FW_DCB_IEEE_CMD_APP_SELECT(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_APP_SELECT) & M_FW_DCB_IEEE_CMD_APP_SELECT)
+
+#define S_FW_DCB_IEEE_CMD_APP_PRIORITY	0
+#define M_FW_DCB_IEEE_CMD_APP_PRIORITY	0x7
+#define V_FW_DCB_IEEE_CMD_APP_PRIORITY(x)	((x) << S_FW_DCB_IEEE_CMD_APP_PRIORITY)
+#define G_FW_DCB_IEEE_CMD_APP_PRIORITY(x)	\
+    (((x) >> S_FW_DCB_IEEE_CMD_APP_PRIORITY) & M_FW_DCB_IEEE_CMD_APP_PRIORITY)
+
+
 struct fw_error_cmd {
 	__be32 op_to_type;
 	__be32 len16_pkd;
@@ -7349,49 +9221,49 @@
 	} u;
 };
 
-#define S_FW_ERROR_CMD_FATAL	4
-#define M_FW_ERROR_CMD_FATAL	0x1
-#define V_FW_ERROR_CMD_FATAL(x)	((x) << S_FW_ERROR_CMD_FATAL)
-#define G_FW_ERROR_CMD_FATAL(x)	\
+#define S_FW_ERROR_CMD_FATAL		4
+#define M_FW_ERROR_CMD_FATAL		0x1
+#define V_FW_ERROR_CMD_FATAL(x)		((x) << S_FW_ERROR_CMD_FATAL)
+#define G_FW_ERROR_CMD_FATAL(x)		\
     (((x) >> S_FW_ERROR_CMD_FATAL) & M_FW_ERROR_CMD_FATAL)
-#define F_FW_ERROR_CMD_FATAL	V_FW_ERROR_CMD_FATAL(1U)
+#define F_FW_ERROR_CMD_FATAL		V_FW_ERROR_CMD_FATAL(1U)
 
-#define S_FW_ERROR_CMD_TYPE	0
-#define M_FW_ERROR_CMD_TYPE	0xf
-#define V_FW_ERROR_CMD_TYPE(x)	((x) << S_FW_ERROR_CMD_TYPE)
-#define G_FW_ERROR_CMD_TYPE(x)	\
+#define S_FW_ERROR_CMD_TYPE		0
+#define M_FW_ERROR_CMD_TYPE		0xf
+#define V_FW_ERROR_CMD_TYPE(x)		((x) << S_FW_ERROR_CMD_TYPE)
+#define G_FW_ERROR_CMD_TYPE(x)		\
     (((x) >> S_FW_ERROR_CMD_TYPE) & M_FW_ERROR_CMD_TYPE)
 
-#define S_FW_ERROR_CMD_PFN	8
-#define M_FW_ERROR_CMD_PFN	0x7
-#define V_FW_ERROR_CMD_PFN(x)	((x) << S_FW_ERROR_CMD_PFN)
-#define G_FW_ERROR_CMD_PFN(x)	\
+#define S_FW_ERROR_CMD_PFN		8
+#define M_FW_ERROR_CMD_PFN		0x7
+#define V_FW_ERROR_CMD_PFN(x)		((x) << S_FW_ERROR_CMD_PFN)
+#define G_FW_ERROR_CMD_PFN(x)		\
     (((x) >> S_FW_ERROR_CMD_PFN) & M_FW_ERROR_CMD_PFN)
 
-#define S_FW_ERROR_CMD_VFN	0
-#define M_FW_ERROR_CMD_VFN	0xff
-#define V_FW_ERROR_CMD_VFN(x)	((x) << S_FW_ERROR_CMD_VFN)
-#define G_FW_ERROR_CMD_VFN(x)	\
+#define S_FW_ERROR_CMD_VFN		0
+#define M_FW_ERROR_CMD_VFN		0xff
+#define V_FW_ERROR_CMD_VFN(x)		((x) << S_FW_ERROR_CMD_VFN)
+#define G_FW_ERROR_CMD_VFN(x)		\
     (((x) >> S_FW_ERROR_CMD_VFN) & M_FW_ERROR_CMD_VFN)
 
-#define S_FW_ERROR_CMD_PFN	8
-#define M_FW_ERROR_CMD_PFN	0x7
-#define V_FW_ERROR_CMD_PFN(x)	((x) << S_FW_ERROR_CMD_PFN)
-#define G_FW_ERROR_CMD_PFN(x)	\
+#define S_FW_ERROR_CMD_PFN		8
+#define M_FW_ERROR_CMD_PFN		0x7
+#define V_FW_ERROR_CMD_PFN(x)		((x) << S_FW_ERROR_CMD_PFN)
+#define G_FW_ERROR_CMD_PFN(x)		\
     (((x) >> S_FW_ERROR_CMD_PFN) & M_FW_ERROR_CMD_PFN)
 
-#define S_FW_ERROR_CMD_VFN	0
-#define M_FW_ERROR_CMD_VFN	0xff
-#define V_FW_ERROR_CMD_VFN(x)	((x) << S_FW_ERROR_CMD_VFN)
-#define G_FW_ERROR_CMD_VFN(x)	\
+#define S_FW_ERROR_CMD_VFN		0
+#define M_FW_ERROR_CMD_VFN		0xff
+#define V_FW_ERROR_CMD_VFN(x)		((x) << S_FW_ERROR_CMD_VFN)
+#define G_FW_ERROR_CMD_VFN(x)		\
     (((x) >> S_FW_ERROR_CMD_VFN) & M_FW_ERROR_CMD_VFN)
 
-#define S_FW_ERROR_CMD_MV	15
-#define M_FW_ERROR_CMD_MV	0x1
-#define V_FW_ERROR_CMD_MV(x)	((x) << S_FW_ERROR_CMD_MV)
-#define G_FW_ERROR_CMD_MV(x)	\
+#define S_FW_ERROR_CMD_MV		15
+#define M_FW_ERROR_CMD_MV		0x1
+#define V_FW_ERROR_CMD_MV(x)		((x) << S_FW_ERROR_CMD_MV)
+#define G_FW_ERROR_CMD_MV(x)		\
     (((x) >> S_FW_ERROR_CMD_MV) & M_FW_ERROR_CMD_MV)
-#define F_FW_ERROR_CMD_MV	V_FW_ERROR_CMD_MV(1U)
+#define F_FW_ERROR_CMD_MV		V_FW_ERROR_CMD_MV(1U)
 
 struct fw_debug_cmd {
 	__be32 op_type;
@@ -7417,12 +9289,102 @@
 	} u;
 };
 
-#define S_FW_DEBUG_CMD_TYPE	0
-#define M_FW_DEBUG_CMD_TYPE	0xff
-#define V_FW_DEBUG_CMD_TYPE(x)	((x) << S_FW_DEBUG_CMD_TYPE)
-#define G_FW_DEBUG_CMD_TYPE(x)	\
+#define S_FW_DEBUG_CMD_TYPE		0
+#define M_FW_DEBUG_CMD_TYPE		0xff
+#define V_FW_DEBUG_CMD_TYPE(x)		((x) << S_FW_DEBUG_CMD_TYPE)
+#define G_FW_DEBUG_CMD_TYPE(x)		\
     (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE)
 
+enum fw_diag_cmd_type {
+	FW_DIAG_CMD_TYPE_OFLDIAG = 0,
+};
+
+enum fw_diag_cmd_ofldiag_op {
+	FW_DIAG_CMD_OFLDIAG_TEST_NONE = 0,
+	FW_DIAG_CMD_OFLDIAG_TEST_START,
+	FW_DIAG_CMD_OFLDIAG_TEST_STOP,
+	FW_DIAG_CMD_OFLDIAG_TEST_STATUS,
+};
+
+enum fw_diag_cmd_ofldiag_status {
+	FW_DIAG_CMD_OFLDIAG_STATUS_IDLE = 0,
+	FW_DIAG_CMD_OFLDIAG_STATUS_RUNNING,
+	FW_DIAG_CMD_OFLDIAG_STATUS_FAILED,
+	FW_DIAG_CMD_OFLDIAG_STATUS_PASSED,
+};
+
+struct fw_diag_cmd {
+	__be32 op_type;
+	__be32 len16_pkd;
+	union fw_diag_test {
+		struct fw_diag_test_ofldiag {
+			__u8   test_op;
+			__u8   r3;
+			__be16 test_status;
+			__be32 duration;
+		} ofldiag;
+	} u;
+};
+
+#define S_FW_DIAG_CMD_TYPE		0
+#define M_FW_DIAG_CMD_TYPE		0xff
+#define V_FW_DIAG_CMD_TYPE(x)		((x) << S_FW_DIAG_CMD_TYPE)
+#define G_FW_DIAG_CMD_TYPE(x)		\
+    (((x) >> S_FW_DIAG_CMD_TYPE) & M_FW_DIAG_CMD_TYPE)
+
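/*
 * Illustrative sketch (not part of the patch): kicking off the offload
 * diagnostic with the new fw_diag_cmd.  FW_DIAG_CMD is assumed to be an
 * opcode in the fw_cmd_opcodes enum earlier in this header, and the
 * duration value/units are illustrative only.
 */
static void
ofldiag_start_sketch(struct fw_diag_cmd *c)
{
	memset(c, 0, sizeof(*c));
	c->op_type = htobe32(V_FW_CMD_OP(FW_DIAG_CMD) | F_FW_CMD_REQUEST |
	    F_FW_CMD_WRITE | V_FW_DIAG_CMD_TYPE(FW_DIAG_CMD_TYPE_OFLDIAG));
	c->len16_pkd = htobe32(FW_LEN16(*c));
	c->u.ofldiag.test_op = FW_DIAG_CMD_OFLDIAG_TEST_START;
	c->u.ofldiag.duration = htobe32(10);
}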
+struct fw_hma_cmd {
+	__be32 op_pkd;
+	__be32 retval_len16;
+	__be32 mode_to_pcie_params;
+	__be32 naddr_size;
+	__be32 addr_size_pkd;
+	__be32 r6;
+	__be64 phy_address[5];
+};
+
+#define S_FW_HMA_CMD_MODE	31
+#define M_FW_HMA_CMD_MODE	0x1
+#define V_FW_HMA_CMD_MODE(x)	((x) << S_FW_HMA_CMD_MODE)
+#define G_FW_HMA_CMD_MODE(x)	\
+    (((x) >> S_FW_HMA_CMD_MODE) & M_FW_HMA_CMD_MODE)
+#define F_FW_HMA_CMD_MODE	V_FW_HMA_CMD_MODE(1U)
+
+#define S_FW_HMA_CMD_SOC	30
+#define M_FW_HMA_CMD_SOC	0x1
+#define V_FW_HMA_CMD_SOC(x)	((x) << S_FW_HMA_CMD_SOC)
+#define G_FW_HMA_CMD_SOC(x)	(((x) >> S_FW_HMA_CMD_SOC) & M_FW_HMA_CMD_SOC)
+#define F_FW_HMA_CMD_SOC	V_FW_HMA_CMD_SOC(1U)
+
+#define S_FW_HMA_CMD_EOC	29
+#define M_FW_HMA_CMD_EOC	0x1
+#define V_FW_HMA_CMD_EOC(x)	((x) << S_FW_HMA_CMD_EOC)
+#define G_FW_HMA_CMD_EOC(x)	(((x) >> S_FW_HMA_CMD_EOC) & M_FW_HMA_CMD_EOC)
+#define F_FW_HMA_CMD_EOC	V_FW_HMA_CMD_EOC(1U)
+
+#define S_FW_HMA_CMD_PCIE_PARAMS	0
+#define M_FW_HMA_CMD_PCIE_PARAMS	0x7ffffff
+#define V_FW_HMA_CMD_PCIE_PARAMS(x)	((x) << S_FW_HMA_CMD_PCIE_PARAMS)
+#define G_FW_HMA_CMD_PCIE_PARAMS(x)	\
+    (((x) >> S_FW_HMA_CMD_PCIE_PARAMS) & M_FW_HMA_CMD_PCIE_PARAMS)
+
+#define S_FW_HMA_CMD_NADDR	12
+#define M_FW_HMA_CMD_NADDR	0x3f
+#define V_FW_HMA_CMD_NADDR(x)	((x) << S_FW_HMA_CMD_NADDR)
+#define G_FW_HMA_CMD_NADDR(x)	\
+    (((x) >> S_FW_HMA_CMD_NADDR) & M_FW_HMA_CMD_NADDR)
+
+#define S_FW_HMA_CMD_SIZE	0
+#define M_FW_HMA_CMD_SIZE	0xfff
+#define V_FW_HMA_CMD_SIZE(x)	((x) << S_FW_HMA_CMD_SIZE)
+#define G_FW_HMA_CMD_SIZE(x)	\
+    (((x) >> S_FW_HMA_CMD_SIZE) & M_FW_HMA_CMD_SIZE)
+
+#define S_FW_HMA_CMD_ADDR_SIZE		11
+#define M_FW_HMA_CMD_ADDR_SIZE		0x1fffff
+#define V_FW_HMA_CMD_ADDR_SIZE(x)	((x) << S_FW_HMA_CMD_ADDR_SIZE)
+#define G_FW_HMA_CMD_ADDR_SIZE(x)	\
+    (((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE)
+
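/*
 * Illustrative sketch (not part of the patch): unpacking the combined
 * naddr/size word of the new fw_hma_cmd with the G_ accessors above.
 * The units of "size" are not stated in this header, so none are
 * assumed here.
 */
static inline void
hma_naddr_size_sketch(const struct fw_hma_cmd *c, u_int *naddr, u_int *size)
{
	uint32_t v = be32toh(c->naddr_size);

	*naddr = G_FW_HMA_CMD_NADDR(v);
	*size = G_FW_HMA_CMD_SIZE(v);
}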
 /******************************************************************************
  *   P C I E   F W   R E G I S T E R
  **************************************/
@@ -7526,6 +9488,53 @@
 
 
 /******************************************************************************
+ *   P C I E   F W   P F 0   R E G I S T E R
+ **********************************************/
+
+/*
+ *	This register is available as 32 bits of persistent storage (across
+ *	a PL_RST-based chip reset) for boot drivers (i.e. firmware and the
+ *	driver will not write it).
+ */
+
+
+/******************************************************************************
+ *   P C I E   F W   P F 7   R E G I S T E R
+ **********************************************/
+
+/*
+ * PF7 stores the Firmware Device Log parameters, which allows Host Drivers
+ * to access the "devlog" without needing to contact firmware.  The encoding
+ * is mostly the same as that returned by the DEVLOG command, except that the
+ * size is encoded here as the number of 128-entry multiples minus one rather
+ * than as a memory size as in the DEVLOG command.  Thus, 0 means 128 entries
+ * and 15 means 2048.  This in turn constrains the allowed values for the
+ * devlog size ...
+ */
+#define PCIE_FW_PF_DEVLOG		7
+
+#define S_PCIE_FW_PF_DEVLOG_NENTRIES128	28
+#define M_PCIE_FW_PF_DEVLOG_NENTRIES128	0xf
+#define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
+	((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128)
+#define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
+	(((x) >> S_PCIE_FW_PF_DEVLOG_NENTRIES128) & \
+	 M_PCIE_FW_PF_DEVLOG_NENTRIES128)
+
+#define S_PCIE_FW_PF_DEVLOG_ADDR16	4
+#define M_PCIE_FW_PF_DEVLOG_ADDR16	0xffffff
+#define V_PCIE_FW_PF_DEVLOG_ADDR16(x)	((x) << S_PCIE_FW_PF_DEVLOG_ADDR16)
+#define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \
+	(((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16)
+
+#define S_PCIE_FW_PF_DEVLOG_MEMTYPE	0
+#define M_PCIE_FW_PF_DEVLOG_MEMTYPE	0xf
+#define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x)	((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE)
+#define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \
+	(((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE)
+
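/*
 * Illustrative sketch (not part of the patch): recovering the devlog
 * parameters from the 32-bit value read back from PF7, following the
 * encoding described above (NENTRIES128 is entries/128 - 1, ADDR16 is
 * the address in 16-byte units).
 */
static inline void
devlog_reg_decode_sketch(uint32_t v, u_int *memtype, uint32_t *addr,
    u_int *nentries)
{
	*memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(v);
	*addr = G_PCIE_FW_PF_DEVLOG_ADDR16(v) << 4;
	*nentries = (G_PCIE_FW_PF_DEVLOG_NENTRIES128(v) + 1) * 128;
}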
+
+/******************************************************************************
  *   B I N A R Y   H E A D E R   F O R M A T
  **********************************************/
 
@@ -7548,7 +9557,7 @@
 	__u8	intfver_fcoe;
 	__u32	reserved2;
 	__u32	reserved3;
-	__u32	reserved4;
+	__be32	magic;			/* runtime or bootstrap fw */
 	__be32	flags;
 	__be32	reserved6[23];
 };
@@ -7555,7 +9564,8 @@
 
 enum fw_hdr_chip {
 	FW_HDR_CHIP_T4,
-	FW_HDR_CHIP_T5
+	FW_HDR_CHIP_T5,
+	FW_HDR_CHIP_T6
 };
 
 #define S_FW_HDR_FW_VER_MAJOR	24
@@ -7587,18 +9597,95 @@
     (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD)
 
 enum {
-	FW_HDR_INTFVER_NIC	= 0x00,
-	FW_HDR_INTFVER_VNIC	= 0x00,
-	FW_HDR_INTFVER_OFLD	= 0x00,
-	FW_HDR_INTFVER_RI	= 0x00,
-	FW_HDR_INTFVER_ISCSIPDU	= 0x00,
-	FW_HDR_INTFVER_ISCSI	= 0x00,
-	FW_HDR_INTFVER_FCOEPDU  = 0x00,
-	FW_HDR_INTFVER_FCOE	= 0x00,
+	T4FW_VERSION_MAJOR	= 0x01,
+	T4FW_VERSION_MINOR	= 0x10,
+	T4FW_VERSION_MICRO	= 0x3f,
+	T4FW_VERSION_BUILD	= 0x00,
+
+	T5FW_VERSION_MAJOR	= 0x01,
+	T5FW_VERSION_MINOR	= 0x10,
+	T5FW_VERSION_MICRO	= 0x3f,
+	T5FW_VERSION_BUILD	= 0x00,
+
+	T6FW_VERSION_MAJOR	= 0x01,
+	T6FW_VERSION_MINOR	= 0x10,
+	T6FW_VERSION_MICRO	= 0x3f,
+	T6FW_VERSION_BUILD	= 0x00,
 };
 
+enum {
+	/* T4
+	 */
+	T4FW_HDR_INTFVER_NIC	= 0x00,
+	T4FW_HDR_INTFVER_VNIC	= 0x00,
+	T4FW_HDR_INTFVER_OFLD	= 0x00,
+	T4FW_HDR_INTFVER_RI	= 0x00,
+	T4FW_HDR_INTFVER_ISCSIPDU = 0x00,
+	T4FW_HDR_INTFVER_ISCSI	= 0x00,
+	T4FW_HDR_INTFVER_FCOEPDU  = 0x00,
+	T4FW_HDR_INTFVER_FCOE	= 0x00,
+
+	/* T5
+	 */
+	T5FW_HDR_INTFVER_NIC	= 0x00,
+	T5FW_HDR_INTFVER_VNIC	= 0x00,
+	T5FW_HDR_INTFVER_OFLD	= 0x00,
+	T5FW_HDR_INTFVER_RI	= 0x00,
+	T5FW_HDR_INTFVER_ISCSIPDU = 0x00,
+	T5FW_HDR_INTFVER_ISCSI	= 0x00,
+	T5FW_HDR_INTFVER_FCOEPDU = 0x00,
+	T5FW_HDR_INTFVER_FCOE	= 0x00,
+
+	/* T6
+	 */
+	T6FW_HDR_INTFVER_NIC	= 0x00,
+	T6FW_HDR_INTFVER_VNIC	= 0x00,
+	T6FW_HDR_INTFVER_OFLD	= 0x00,
+	T6FW_HDR_INTFVER_RI	= 0x00,
+	T6FW_HDR_INTFVER_ISCSIPDU = 0x00,
+	T6FW_HDR_INTFVER_ISCSI	= 0x00,
+	T6FW_HDR_INTFVER_FCOEPDU = 0x00,
+	T6FW_HDR_INTFVER_FCOE	= 0x00,
+};
+
+enum {
+	FW_HDR_MAGIC_RUNTIME	= 0x00000000,
+	FW_HDR_MAGIC_BOOTSTRAP	= 0x626f6f74,
+};
+
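/*
 * Illustrative sketch (not part of the patch): telling a bootstrap
 * image apart from a runtime image via the magic field that replaces
 * reserved4 above.  0x626f6f74 is ASCII "boot".
 */
static inline int
fw_is_bootstrap_sketch(const struct fw_hdr *hdr)
{
	return (be32toh(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP);
}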
 enum fw_hdr_flags {
 	FW_HDR_FLAGS_RESET_HALT	= 0x00000001,
 };
 
+/*
+ *	External PHY firmware binary header format
+ */
+struct fw_ephy_hdr {
+	__u8	ver;
+	__u8	reserved;
+	__be16	len512;			/* bin length in units of 512 bytes */
+	__be32	magic;
+
+	__be16	vendor_id;
+	__be16	device_id;
+	__be32	version;
+
+	__be32	reserved1[4];
+};
+
+enum {
+	FW_EPHY_HDR_MAGIC	= 0x65706879,
+};
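/*
 * Illustrative sketch (not part of the patch): basic validation of an
 * external PHY image against fw_ephy_hdr.  len512 stores the image
 * length in 512-byte units and 0x65706879 is ASCII "ephy".
 */
static inline int
ephy_hdr_ok_sketch(const struct fw_ephy_hdr *h, size_t img_len)
{
	return (be32toh(h->magic) == FW_EPHY_HDR_MAGIC &&
	    (size_t)be16toh(h->len512) * 512 <= img_len);
}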
+
+struct fw_ifconf_dhcp_info {
+	__be32		addr;
+	__be32		mask;
+	__be16		vlanid;
+	__be16		mtu;
+	__be32		gw;
+	__u8		op;
+	__u8		len;
+	__u8		data[270];
+};
+
 #endif /* _T4FW_INTERFACE_H_ */

Modified: trunk/sys/dev/cxgbe/firmware/t5fw_cfg.txt
===================================================================
--- trunk/sys/dev/cxgbe/firmware/t5fw_cfg.txt	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/firmware/t5fw_cfg.txt	2018-05-28 00:17:55 UTC (rev 10120)
@@ -5,62 +5,121 @@
 # niqflint = 1023	ingress queues with freelists and/or interrupts
 # nethctrl = 64K	Ethernet or ctrl egress queues
 # neq = 64K		egress queues of all kinds, including freelists
-# nexactf = 336		MPS TCAM entries, can oversubscribe.
+# nexactf = 512		MPS TCAM entries, can oversubscribe.
 #
 
 [global]
 	rss_glb_config_mode = basicvirtual
-	rss_glb_config_options = tnlmapen, hashtoeplitz, tnlalllkp
+	rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
 
 	# PL_TIMEOUT register
-	pl_timeout_value = 200		# the timeout value in units of us
+	pl_timeout_value = 10000	# the timeout value in units of us
 
-	sge_timer_value = 1, 5, 10, 50, 100, 200	# usecs
+	# SGE_THROTTLE_CONTROL
+	bar2throttlecount = 500		# bar2throttlecount in us
 
-	# TP_SHIFT_CNT
-	reg[0x7dc0] = 0x62f8849
+	sge_timer_value = 1, 5, 10, 50, 100, 200	# SGE_TIMER_VALUE* in usecs
 
-	# TP_GLOBAL_CONFIG
-	reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+	reg[0x1124] = 0x00000400/0x00000400 # SGE_CONTROL2, enable VFIFO; if
+					# SGE_VFIFO_SIZE is not set, then
+					# firmware will set it up in function
+					# of number of egress queues used
 
+	reg[0x1130] = 0x00d5ffeb	# SGE_DBP_FETCH_THRESHOLD, fetch
+					# threshold set to queue depth
+					# minus 128-entries for FL and HP
+					# queues, and 0xfff for LP which
+					# prompts the firmware to set it up
+					# in function of egress queues
+					# used
+
+	reg[0x113c] = 0x0002ffc0	# SGE_VFIFO_SIZE, set to 0x2ffc0 which
+					# prompts the firmware to set it up in
+					# function of number of egress queues
+					# used 
+
+	# enable TP_OUT_CONFIG.IPIDSPLITMODE
+	reg[0x7d04] = 0x00010000/0x00010000
+
+	# disable TP_PARA_REG3.RxFragEn
+	reg[0x7d6c] = 0x00000000/0x00007000
+
+	# enable TP_PARA_REG6.EnableCSnd
+	reg[0x7d78] = 0x00000400/0x00000000
+
+	reg[0x7dc0] = 0x0e2f8849	# TP_SHIFT_CNT
+
 	filterMode = fragmentation, mpshittype, protocol, vlan, port, fcoe
 	filterMask = protocol, fcoe
 
-	# TP rx and tx channels (0 = auto).
+	tp_pmrx = 36, 512
+	tp_pmrx_pagesize = 64K
+
+	# TP number of RX channels (0 = auto)
 	tp_nrxch = 0
-	tp_ntxch = 0
 
-	# TP rx and tx payload memory (% of the total EDRAM + DDR3).
-	tp_pmrx = 38
-	tp_pmtx = 60
-	tp_pmrx_pagesize = 64K
+	tp_pmtx = 46, 512
 	tp_pmtx_pagesize = 64K
 
+	# TP number of TX channels (0 = auto)
+	tp_ntxch = 0
+
+	# TP OFLD MTUs
+	tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+	# TP_GLOBAL_CONFIG
+	reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+	# TP_PC_CONFIG
+	reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+	# TP_PARA_REG0
+	reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+	# cluster, lan, or wan.
+	tp_tcptuning = lan
+
+	# MC configuration
+	mc_mode_brc[0] = 1		# mc0 - 1: enable BRC, 0: enable RBC
+	mc_mode_brc[1] = 1		# mc1 - 1: enable BRC, 0: enable RBC
+
+	# ULP_TX_CONFIG
+	reg[0x8dc0] = 0x00000004/0x00000004 # Enable more error msg for ...
+					    # TPT error.
+
 # PFs 0-3.  These get 8 MSI/8 MSI-X vectors each.  VFs are supported by
 # these 4 PFs only.  Not used here at all.
 [function "0"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "0/*"]
 	nvi = 1
+	rssnvi = 0
 
 [function "1"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "1/*"]
 	nvi = 1
+	rssnvi = 0
 
 [function "2"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "2/*"]
 	nvi = 1
+	rssnvi = 0
 
 [function "3"]
 	nvf = 16
 	nvi = 1
+	rssnvi = 0
 [function "3/*"]
 	nvi = 1
+	rssnvi = 0
 
 # PF4 is the resource-rich PF that the bus/nexus driver attaches to.
 # It gets 32 MSI/128 MSI-X vectors.
@@ -68,18 +127,24 @@
 	wx_caps = all
 	r_caps = all
 	nvi = 32
-	niqflint = 256
-	nethctrl = 128
-	neq = 256
+	rssnvi = 8
+	niqflint = 512
+	nethctrl = 1024
+	neq = 2048
+	nqpcq = 8192
 	nexactf = 328
 	cmask = all
 	pmask = all
 
 	# driver will mask off features it won't use
-	protocol = ofld
+	protocol = ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif
 
 	tp_l2t = 4096
 	tp_ddp = 2
+	tp_ddp_iscsi = 2
+	tp_stag = 2
+	tp_pbl = 5
+	tp_rq = 7
 
 	# TCAM has 8K cells; each region must start at a multiple of 128 cell.
 	# Each entry in these categories takes 4 cells each.  nhash will use the
@@ -94,11 +159,13 @@
 # Not used right now.
 [function "5"]
 	nvi = 1
+	rssnvi = 0
 
 # PF6 is the FCoE Controller PF. It gets 32 MSI/40 MSI-X vectors.
 # Not used right now.
 [function "6"]
 	nvi = 1
+	rssnvi = 0
 
 # The following function, 1023, is not an actual PCIE function but is used to
 # configure and reserve firmware internal resources that come from the global
@@ -107,6 +174,7 @@
 	wx_caps = all
 	r_caps = all
 	nvi = 4
+	rssnvi = 0
 	cmask = all
 	pmask = all
 	nexactf = 8
@@ -148,7 +216,7 @@
 
 [fini]
 	version = 0x1
-	checksum = 0x93f11b53
+	checksum = 0x168d5243
 #
-# $FreeBSD: release/9.2.0/sys/dev/cxgbe/firmware/t5fw_cfg.txt 253777 2013-07-29 19:21:54Z np $
+# $FreeBSD: stable/10/sys/dev/cxgbe/firmware/t5fw_cfg.txt 309379 2016-12-02 00:23:10Z jhb $
 #


Property changes on: trunk/sys/dev/cxgbe/firmware/t5fw_cfg.txt
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Deleted: svn:keywords
## -1 +0,0 ##
-MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt
===================================================================
--- trunk/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt	2018-05-28 00:17:55 UTC (rev 10120)
@@ -149,7 +149,7 @@
 
 	# Percentage of dynamic memory (in either the EDRAM or external MEM)
 	# to use for TP RX payload
-	tp_pmrx = 30
+	tp_pmrx = 30, 512
 
 	# TP RX payload page size
 	tp_pmrx_pagesize = 64K
@@ -159,7 +159,7 @@
 
 	# Percentage of dynamic memory (in either the EDRAM or external MEM)
 	# to use for TP TX payload
-	tp_pmtx = 50
+	tp_pmtx = 50, 512
 
 	# TP TX payload page size
 	tp_pmtx_pagesize = 64K
@@ -463,7 +463,7 @@
 
 [fini]
 	version = 0x1425000d
-	checksum = 0xe56cb999
+	checksum = 0x22f1530b
 
 # Total resources used by above allocations:
 #   Virtual Interfaces: 104
@@ -473,5 +473,5 @@
 #   MSI-X Vectors: 736
 #   Virtual Functions: 64
 #
-# $FreeBSD: release/9.2.0/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt 252814 2013-07-05 18:27:38Z np $
+# $FreeBSD: stable/10/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt 274612 2014-11-17 07:20:03Z np $
 #


Property changes on: trunk/sys/dev/cxgbe/firmware/t5fw_cfg_fpga.txt
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Deleted: svn:keywords
## -1 +0,0 ##
-MidnightBSD=%H
\ No newline at end of property
Modified: trunk/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt
===================================================================
--- trunk/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,49 +1,34 @@
 # Chelsio T5 Factory Default configuration file.
 #
-# Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
+# Copyright (C) 2010-2017 Chelsio Communications.  All rights reserved.
 #
-#   DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES.  MODIFICATION OF
-#   THIS FILE WILL RESULT IN A NON-FUNCTIONAL T4 ADAPTER AND MAY RESULT
-#   IN PHYSICAL DAMAGE TO T4 ADAPTERS.
+#   DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES.  MODIFICATION OF THIS FILE
+#   WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+#   TO ADAPTERS.
 
-# This file provides the default, power-on configuration for 4-port T4-based
+
+# This file provides the default, power-on configuration for 4-port T5-based
 # adapters shipped from the factory.  These defaults are designed to address
-# the needs of the vast majority of T4 customers.  The basic idea is to have
-# a default configuration which allows a customer to plug a T4 adapter in and
-# have it work regardless of OS, driver or application except in the most
-# unusual and/or demanding customer applications.
+# the needs of the vast majority of Terminator customers.  The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
 #
-# Many of the T4 resources which are described by this configuration are
-# finite.  This requires balancing the configuration/operation needs of
+# Many of the Terminator resources which are described by this configuration
+# are finite.  This requires balancing the configuration/operation needs of
 # device drivers across OSes and a large number of customer application.
 #
 # Some of the more important resources to allocate and their constraints are:
-#  1. Virtual Interfaces: 128.
-#  2. Ingress Queues with Free Lists: 1024.  PCI-E SR-IOV Virtual Functions
-#     must use a power of 2 Ingress Queues.
-#  3. Egress Queues: 128K.  PCI-E SR-IOV Virtual Functions must use a
-#     power of 2 Egress Queues.
-#  4. MSI-X Vectors: 1088.  A complication here is that the PCI-E SR-IOV
-#     Virtual Functions based off of a Physical Function all get the
-#     same umber of MSI-X Vectors as the base Physical Function.
-#     Additionally, regardless of whether Virtual Functions are enabled or
-#     not, their MSI-X "needs" are counted by the PCI-E implementation.
-#     And finally, all Physical Funcations capable of supporting Virtual
-#     Functions (PF0-3) must have the same number of configured TotalVFs in
-#     their SR-IOV Capabilities.
+#  1. Virtual Interfaces: 256.
+#  2. Ingress Queues with Free Lists: 1024.
+#  3. Egress Queues: 128K.
+#  4. MSI-X Vectors: 1088.
 #  5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
 #     address matching on Ingress Packets.
 #
 # Some of the important OS/Driver resource needs are:
 #  6. Some OS Drivers will manage all resources through a single Physical
-#     Function (currently PF0 but it could be any Physical Function).  Thus,
-#     this "Unified PF"  will need to have enough resources allocated to it
-#     to allow for this.  And because of the MSI-X resource allocation
-#     constraints mentioned above, this probably means we'll either have to
-#     severely limit the TotalVFs if we continue to use PF0 as the Unified PF
-#     or we'll need to move the Unified PF into the PF4-7 range since those
-#     Physical Functions don't have any Virtual Functions associated with
-#     them.
+#     Function (currently PF4 but it could be any Physical Function).
 #  7. Some OS Drivers will manage different ports and functions (NIC,
 #     storage, etc.) on different Physical Functions.  For example, NIC
 #     functions for ports 0-3 on PF0-3, FCoE on PF4, iSCSI on PF5, etc.
@@ -64,12 +49,10 @@
 #     for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
 #     (Plus a few for Firmware Event Queues, etc.)
 #
-#  9. Some customers will want to use T4's PCI-E SR-IOV Capability to allow
-#     Virtual Machines to directly access T4 functionality via SR-IOV
-#     Virtual Functions and "PCI Device Passthrough" -- this is especially
-#     true for the NIC application functionality.  (Note that there is
-#     currently no ability to use the TOE, FCoE, iSCSI, etc. via Virtual
-#     Functions so this is in fact solely limited to NIC.)
+#  9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+#     Machines to directly access T5 functionality via SR-IOV Virtual Functions
+#     and "PCI Device Passthrough" -- this is especially true for the NIC
+#     application functionality.
 #
 
 
@@ -80,7 +63,7 @@
 	rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
 
 	# PL_TIMEOUT register
-	pl_timeout_value = 200		# the timeout value in units of us
+	pl_timeout_value = 10000	# the timeout value in units of us
 
 	# The following Scatter Gather Engine (SGE) settings assume a 4KB Host
 	# Page Size and a 64B L1 Cache Line Size. It programs the
@@ -108,7 +91,8 @@
 	reg[0x105c] = 128		# SGE_FL_BUFFER_SIZE6
 	reg[0x1060] = 8192		# SGE_FL_BUFFER_SIZE7
 	reg[0x1064] = 16384		# SGE_FL_BUFFER_SIZE8
-	reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+	reg[0x10a4] = 0x00280000/0x3ffc0000 # SGE_DBFIFO_STATUS
+	reg[0x1118] = 0x00002800/0x00003c00 # SGE_DBFIFO_STATUS2
 	reg[0x10a8] = 0x402000/0x402000	# SGE_DOORBELL_CONTROL
 
 	# SGE_THROTTLE_CONTROL
@@ -135,13 +119,26 @@
 					# function of number of egress queues
 					# used 
 
-	reg[0x7dc0] = 0x062f8849	# TP_SHIFT_CNT
+	# enable TP_OUT_CONFIG.IPIDSPLITMODE
+	reg[0x7d04] = 0x00010000/0x00010000
 
-	# Selection of tuples for LE filter lookup, fields (and widths which
-	# must sum to <= 36): { IP Fragment (1), MPS Match Type (3),
-	# IP Protocol (8), [Inner] VLAN (17), Port (3), FCoE (1) }
-	#
-	filterMode = srvrsram, fragmentation, mpshittype, protocol, vlan, port, fcoe
+	# disable TP_PARA_REG3.RxFragEn
+	reg[0x7d6c] = 0x00000000/0x00007000
+
+	# enable TP_PARA_REG6.EnableCSnd
+	reg[0x7d78] = 0x00000400/0x00000000
+
+	reg[0x7dc0] = 0x0e2f8849	# TP_SHIFT_CNT
+
+	# TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+	# filter control: compact, fcoemask
+	# server sram   : srvrsram
+	# filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+	#		  protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 5 Data Book
+	filterMode = fcoemask, srvrsram, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+	# filter tuples enforced in LE active region (equal to or subset of filterMode)
 	filterMask = protocol, fcoe
 
 	# Percentage of dynamic memory (in either the EDRAM or external MEM)
@@ -164,12 +161,32 @@
 	# TP number of TX channels
 	tp_ntxch = 0		# 0 (auto) = equal number of ports
 
+	# TP OFLD MTUs
+	tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
 	# TP_GLOBAL_CONFIG
 	reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
 
+	# TP_PC_CONFIG
+	reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+	# TP_PARA_REG0
+	reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+	# ULPRX iSCSI Page Sizes
+	reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
 	# LE_DB_CONFIG
 	reg[0x19c04] = 0x00400000/0x00400000 # LE Server SRAM Enable
 
+	# MC configuration
+	mc_mode_brc[0] = 1		# mc0 - 1: enable BRC, 0: enable RBC
+	mc_mode_brc[1] = 1		# mc1 - 1: enable BRC, 0: enable RBC
+
+	# ULP_TX_CONFIG
+	reg[0x8dc0] = 0x00000004/0x00000004 # Enable more error msg for ...
+					    # TPT error.
+
 # Some "definitions" to make the rest of this a bit more readable.  We support
 # 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
 # per function per port ...
@@ -198,7 +215,7 @@
 # NEQ_NIC = 64			# NIC Egress Queues (FL, ETHCTRL/TX)
 # NMPSTCAM_NIC = 16		# NIC MPS TCAM Entries (NPORTS*4)
 # NMSIX_NIC = 32		# NIC MSI-X Interrupt Vectors (FLIQ)
-# 
+#
 # NVI_OFLD = 0			# Offload uses NIC function to access ports
 # NFLIQ_OFLD = 16		# Offload Ingress Queues with Free Lists
 # NETHCTRL_OFLD = 0		# Offload Ethernet Control/TX Queues
@@ -302,20 +319,21 @@
 # PF2_INT = 8			# NCPUS
 # PF3_INT = 8			# NCPUS
 # PF0_3_INT = 32		# PF0_INT + PF1_INT + PF2_INT + PF3_INT
-# 
+#
 # PF4_INT = 128			# NMSIX_UNIFIED
 # PF5_INT = 32			# NMSIX_STORAGE
 # PF6_INT = 32			# NMSIX_STORAGE
 # PF7_INT = 0			# Nothing Assigned
 # PF4_7_INT = 192		# PF4_INT + PF5_INT + PF6_INT + PF7_INT
-# 
+#
 # PF0_7_INT = 224		# PF0_3_INT + PF4_7_INT
-# 
+#
 # With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
 # but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
 #
 # NVF = 16
 
+
 # For those OSes which manage different ports on different PFs, we need
 # only enough resources to support a single port's NIC application functions
 # on PF0-3.  The below assumes that we're only doing NIC with NCPUS "Queue
@@ -334,6 +352,7 @@
 	cmask = all		# access to all channels
 	pmask = 0x1		# access to only one port
 
+
 [function "1"]
 	nvf = 16		# NVF on this function
 	wx_caps = all		# write/execute permissions for all commands
@@ -346,6 +365,7 @@
 	cmask = all		# access to all channels
 	pmask = 0x2		# access to only one port
 
+
 [function "2"]
 	nvf = 16		# NVF on this function
 	wx_caps = all		# write/execute permissions for all commands
@@ -358,6 +378,7 @@
 	cmask = all		# access to all channels
 	pmask = 0x4		# access to only one port
 
+
 [function "3"]
 	nvf = 16		# NVF on this function
 	wx_caps = all		# write/execute permissions for all commands
@@ -370,6 +391,7 @@
 	cmask = all		# access to all channels
 	pmask = 0x8		# access to only one port
 
+
 # Some OS Drivers manage all application functions for all ports via PF4.
 # Thus we need to provide a large number of resources here.  For Egress
 # Queues we need to account for both TX Queues as well as Free List Queues
@@ -383,6 +405,7 @@
 	niqflint = 170		# NFLIQ_UNIFIED + NLFIQ_WD
 	nethctrl = 100		# NETHCTRL_UNIFIED + NETHCTRL_WD
 	neq = 256		# NEQ_UNIFIED + NEQ_WD
+	nqpcq = 12288 
 	nexactf = 40		# NMPSTCAM_UNIFIED
 	cmask = all		# access to all channels
 	pmask = all		# access to all four ports ...
@@ -392,7 +415,7 @@
 	nfilter = 496		# number of filter region entries
 	nserver = 496		# number of server region entries
 	nhash = 12288		# number of hash region entries
-	protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu
+	protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif
 	tp_l2t = 3072
 	tp_ddp = 2
 	tp_ddp_iscsi = 2
@@ -400,6 +423,7 @@
 	tp_pbl = 5
 	tp_rq = 7
 
+
 # We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
 # need to have Virtual Interfaces on each of the four ports with up to NCPUS
 # "Queue Sets" each.
@@ -411,12 +435,12 @@
 	niqflint = 34		# NPORTS*NCPUS + NMSIX_EXTRA
 	nethctrl = 32		# NPORTS*NCPUS
 	neq = 64		# NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
-	nexactf = 4		# NPORTS
 	nexactf = 16		# (NPORTS * (no. of snmc grp + 1 hw mac) + 1 anmc grp) rounded to 16.
 	cmask = all		# access to all channels
 	pmask = all		# access to all four ports ...
 	nserver = 16
 	nhash = 2048
-	tp_l2t = 1024
+	tp_l2t = 1020
 	protocol = iscsi_initiator_fofld
 	tp_ddp_iscsi = 2
 	iscsi_ntask = 2048
@@ -424,6 +448,7 @@
 	iscsi_nconn_per_session = 1
 	iscsi_ninitiator_instance = 64
 
+
 [function "6"]
 	wx_caps = all		# write/execute permissions for all commands
 	r_caps = all		# read permissions for all commands
@@ -437,6 +462,7 @@
 	cmask = all		# access to all channels
 	pmask = all		# access to all four ports ...
 	nhash = 2048
+	tp_l2t = 4
 	protocol = fcoe_initiator
 	tp_ddp = 2
 	fcoe_nfcf = 16
@@ -443,6 +469,7 @@
 	fcoe_nvnp = 32
 	fcoe_nssn = 1024
 
+
 # The following function, 1023, is not an actual PCIE function but is used to
 # configure and reserve firmware internal resources that come from the global
 # resource pool.
@@ -456,6 +483,7 @@
 	nexactf = 8		# NPORTS + DCBX +
 	nfilter = 16		# number of filter region entries
 
+
 # For Virtual functions, we only allow NIC functionality and we only allow
 # access to one port (1 << PF).  Note that because of limitations in the
 # Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
@@ -466,46 +494,50 @@
 	wx_caps = 0x82		# DMAQ | VF
 	r_caps = 0x86		# DMAQ | VF | PORT
 	nvi = 1			# 1 port
-	niqflint = 4		# 2 "Queue Sets" + NXIQ
-	nethctrl = 2		# 2 "Queue Sets"
-	neq = 4			# 2 "Queue Sets" * 2
+	niqflint = 6		# 2 "Queue Sets" + NXIQ
+	nethctrl = 4		# 2 "Queue Sets"
+	neq = 8			# 2 "Queue Sets" * 2
 	nexactf = 4
 	cmask = all		# access to all channels
 	pmask = 0x1		# access to only one port ...
 
+
 [function "1/*"]		# NVF
 	wx_caps = 0x82		# DMAQ | VF
 	r_caps = 0x86		# DMAQ | VF | PORT
 	nvi = 1			# 1 port
-	niqflint = 4		# 2 "Queue Sets" + NXIQ
-	nethctrl = 2		# 2 "Queue Sets"
-	neq = 4			# 2 "Queue Sets" * 2
+	niqflint = 6		# 2 "Queue Sets" + NXIQ
+	nethctrl = 4		# 2 "Queue Sets"
+	neq = 8			# 2 "Queue Sets" * 2
 	nexactf = 4
 	cmask = all		# access to all channels
 	pmask = 0x2		# access to only one port ...
 
+
 [function "2/*"]		# NVF
 	wx_caps = 0x82		# DMAQ | VF
 	r_caps = 0x86		# DMAQ | VF | PORT
 	nvi = 1			# 1 port
-	niqflint = 4		# 2 "Queue Sets" + NXIQ
-	nethctrl = 2		# 2 "Queue Sets"
-	neq = 4			# 2 "Queue Sets" * 2
+	niqflint = 6		# 2 "Queue Sets" + NXIQ
+	nethctrl = 4		# 2 "Queue Sets"
+	neq = 8			# 2 "Queue Sets" * 2
 	nexactf = 4
 	cmask = all		# access to all channels
 	pmask = 0x4		# access to only one port ...
 
+
 [function "3/*"]		# NVF
 	wx_caps = 0x82		# DMAQ | VF
 	r_caps = 0x86		# DMAQ | VF | PORT
 	nvi = 1			# 1 port
-	niqflint = 4		# 2 "Queue Sets" + NXIQ
-	nethctrl = 2		# 2 "Queue Sets"
-	neq = 4			# 2 "Queue Sets" * 2
+	niqflint = 6		# 2 "Queue Sets" + NXIQ
+	nethctrl = 4		# 2 "Queue Sets"
+	neq = 8			# 2 "Queue Sets" * 2
 	nexactf = 4
 	cmask = all		# access to all channels
 	pmask = 0x8		# access to only one port ...
 
+
 # MPS features a 196608-byte ingress buffer that is used for ingress buffering
 # for packets from the wire as well as the loopback path of the L2 switch. The
 # following params control how the buffer memory is distributed and the L2 flow
@@ -527,7 +559,11 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
+
 [port "1"]
 	dcb = ppp, dcbx
 	bg_mem = 25
@@ -535,7 +571,11 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
+
 [port "2"]
 	dcb = ppp, dcbx
 	bg_mem = 25
@@ -543,7 +583,11 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
+
 [port "3"]
 	dcb = ppp, dcbx
 	bg_mem = 25
@@ -551,10 +595,14 @@
 	hwm = 30
 	lwm = 15
 	dwm = 30
+	dcb_app_tlv[0] = 0x8906, ethertype, 3
+	dcb_app_tlv[1] = 0x8914, ethertype, 3
+	dcb_app_tlv[2] = 3260, socketnum, 5
 
+
 [fini]
-	version = 0x1425000f
-	checksum = 0x23a2d850
+	version = 0x01000028
+	checksum = 0x36228c7d
 
 # Total resources used by above allocations:
 #   Virtual Interfaces: 104
@@ -564,5 +612,5 @@
 #   MSI-X Vectors: 736
 #   Virtual Functions: 64
 #
-# $FreeBSD: release/9.2.0/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt 252814 2013-07-05 18:27:38Z np $
+# $FreeBSD: stable/10/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt 319270 2017-05-31 00:16:43Z np $
 #


Property changes on: trunk/sys/dev/cxgbe/firmware/t5fw_cfg_uwire.txt
___________________________________________________________________
Added: mnbsd:nokeywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Deleted: svn:keywords
## -1 +0,0 ##
-MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/if_cc.c
===================================================================
--- trunk/sys/dev/cxgbe/if_cc.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/if_cc.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/if_cc.c 309560 2016-12-05 20:43:25Z jhb $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+static int
+mod_event(module_t mod, int cmd, void *arg)
+{
+
+	return (0);
+}
+static moduledata_t if_cc_mod = {"if_cc", mod_event};
+DECLARE_MODULE(if_cc, if_cc_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_VERSION(if_cc, 1);
+MODULE_DEPEND(if_cc, cc, 1, 1, 1);


Property changes on: trunk/sys/dev/cxgbe/if_cc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/if_ccv.c
===================================================================
--- trunk/sys/dev/cxgbe/if_ccv.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/if_ccv.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/if_ccv.c 309560 2016-12-05 20:43:25Z jhb $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+static int
+mod_event(module_t mod, int cmd, void *arg)
+{
+
+	return (0);
+}
+static moduledata_t if_ccv_mod = {"if_ccv", mod_event};
+DECLARE_MODULE(if_ccv, if_ccv_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_VERSION(if_ccv, 1);
+MODULE_DEPEND(if_ccv, ccv, 1, 1, 1);


Property changes on: trunk/sys/dev/cxgbe/if_ccv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/if_cxl.c
===================================================================
--- trunk/sys/dev/cxgbe/if_cxl.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/if_cxl.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/if_cxl.c 281263 2015-04-08 04:40:04Z np $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+static int
+mod_event(module_t mod, int cmd, void *arg)
+{
+
+	return (0);
+}
+static moduledata_t if_cxl_mod = {"if_cxl", mod_event};
+DECLARE_MODULE(if_cxl, if_cxl_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_VERSION(if_cxl, 1);
+MODULE_DEPEND(if_cxl, cxl, 1, 1, 1);


Property changes on: trunk/sys/dev/cxgbe/if_cxl.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/if_cxlv.c
===================================================================
--- trunk/sys/dev/cxgbe/if_cxlv.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/if_cxlv.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,45 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/if_cxlv.c 309447 2016-12-02 22:53:33Z jhb $");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+static int
+mod_event(module_t mod, int cmd, void *arg)
+{
+
+	return (0);
+}
+static moduledata_t if_cxlv_mod = {"if_cxlv", mod_event};
+DECLARE_MODULE(if_cxlv, if_cxlv_mod, SI_SUB_EXEC, SI_ORDER_ANY);
+MODULE_VERSION(if_cxlv, 1);
+MODULE_DEPEND(if_cxlv, cxlv, 1, 1, 1);


Property changes on: trunk/sys/dev/cxgbe/if_cxlv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
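
The four if_*.c files added above (if_cc.c, if_ccv.c, if_cxl.c, if_cxlv.c)
are identical no-op stubs: mod_event accepts every module event, and each
module exists only so that its MODULE_DEPEND line pulls in the matching
driver (cc, ccv, cxl, cxlv) when the interface-named module is loaded.  For
contrast, a non-stub handler dispatches on cmd; a minimal sketch
(illustrative only, not part of this commit) looks like:

    static int
    mod_event(module_t mod, int cmd, void *arg)
    {
    	int rc = 0;

    	switch (cmd) {
    	case MOD_LOAD:
    		/* allocate resources, register with the rest of the kernel */
    		break;
    	case MOD_UNLOAD:
    		/* undo whatever MOD_LOAD did, in reverse order */
    		break;
    	default:
    		rc = EOPNOTSUPP;	/* e.g. MOD_QUIESCE, MOD_SHUTDOWN */
    		break;
    	}
    	return (rc);
    }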
Added: trunk/sys/dev/cxgbe/iw_cxgbe/cm.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/cm.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/cm.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,2560 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/cm.c 319272 2017-05-31 00:43:52Z np $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockio.h>
+#include <sys/taskqueue.h>
+#include <netinet/in.h>
+#include <net/route.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip_var.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp.h>
+#include <netinet/tcpip.h>
+
+#include <netinet/toecore.h>
+
+struct sge_iq;
+struct rss_header;
+#include <linux/types.h>
+#include "offload.h"
+#include "tom/t4_tom.h"
+
+#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))
+
+#include "iw_cxgbe.h"
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/if_vlan.h>
+#include <net/netevent.h>
+
+static spinlock_t req_lock;
+static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
+static struct work_struct c4iw_task;
+static struct workqueue_struct *c4iw_taskq;
+static LIST_HEAD(err_cqe_list);
+static spinlock_t err_cqe_lock;
+
+static void process_req(struct work_struct *ctx);
+static void start_ep_timer(struct c4iw_ep *ep);
+static int stop_ep_timer(struct c4iw_ep *ep);
+static int set_tcpinfo(struct c4iw_ep *ep);
+static void process_timeout(struct c4iw_ep *ep);
+static void process_err_cqes(void);
+static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
+static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
+static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
+static void *alloc_ep(int size, gfp_t flags);
+static struct rtentry * find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
+		__be16 peer_port, u8 tos);
+static void close_socket(struct socket *so);
+static int send_mpa_req(struct c4iw_ep *ep);
+static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
+static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
+static void close_complete_upcall(struct c4iw_ep *ep, int status);
+static int send_abort(struct c4iw_ep *ep);
+static void peer_close_upcall(struct c4iw_ep *ep);
+static void peer_abort_upcall(struct c4iw_ep *ep);
+static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+static int connect_request_upcall(struct c4iw_ep *ep);
+static void established_upcall(struct c4iw_ep *ep);
+static int process_mpa_reply(struct c4iw_ep *ep);
+static int process_mpa_request(struct c4iw_ep *ep);
+static void process_peer_close(struct c4iw_ep *ep);
+static void process_conn_error(struct c4iw_ep *ep);
+static void process_close_complete(struct c4iw_ep *ep);
+static void ep_timeout(unsigned long arg);
+static void init_iwarp_socket(struct socket *so, void *arg);
+static void uninit_iwarp_socket(struct socket *so);
+static void process_data(struct c4iw_ep *ep);
+static void process_connected(struct c4iw_ep *ep);
+static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
+static void process_socket_event(struct c4iw_ep *ep);
+static void release_ep_resources(struct c4iw_ep *ep);
+static int process_terminate(struct c4iw_ep *ep);
+static int terminate(struct sge_iq *iq, const struct rss_header *rss,
+    struct mbuf *m);
+static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
+#define START_EP_TIMER(ep) \
+    do { \
+	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
+		__func__, __LINE__, (ep)); \
+	    start_ep_timer(ep); \
+    } while (0)
+
+#define STOP_EP_TIMER(ep) \
+    ({ \
+	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
+		__func__, __LINE__, (ep)); \
+	    stop_ep_timer(ep); \
+    })
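+
+/*
+ * Note: STOP_EP_TIMER is a GCC statement expression rather than a
+ * do/while(0) block so that the macro evaluates to stop_ep_timer()'s
+ * return value and callers can write "if (STOP_EP_TIMER(ep))".
+ * START_EP_TIMER returns nothing and uses the usual do/while(0) wrapper.
+ */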
+
+#ifdef KTR
+static char *states[] = {
+	"idle",
+	"listen",
+	"connecting",
+	"mpa_wait_req",
+	"mpa_req_sent",
+	"mpa_req_rcvd",
+	"mpa_rep_sent",
+	"fpdu_mode",
+	"aborting",
+	"closing",
+	"moribund",
+	"dead",
+	NULL,
+};
+#endif
+
+
+static void deref_cm_id(struct c4iw_ep_common *epc)
+{
+	epc->cm_id->rem_ref(epc->cm_id);
+	epc->cm_id = NULL;
+	set_bit(CM_ID_DEREFED, &epc->history);
+}
+
+static void ref_cm_id(struct c4iw_ep_common *epc)
+{
+	set_bit(CM_ID_REFED, &epc->history);
+	epc->cm_id->add_ref(epc->cm_id);
+}
+
+static void deref_qp(struct c4iw_ep *ep)
+{
+	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+	clear_bit(QP_REFERENCED, &ep->com.flags);
+	set_bit(QP_DEREFED, &ep->com.history);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+	set_bit(QP_REFERENCED, &ep->com.flags);
+	set_bit(QP_REFED, &ep->com.history);
+	c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int abort = 1;
+
+	mutex_lock(&ep->com.mutex);
+	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
+			ep, ep->hwtid, ep->com.state);
+	set_bit(TIMEDOUT, &ep->com.history);
+	switch (ep->com.state) {
+	case MPA_REQ_SENT:
+		connect_reply_upcall(ep, -ETIMEDOUT);
+		break;
+	case MPA_REQ_WAIT:
+	case MPA_REQ_RCVD:
+	case MPA_REP_SENT:
+	case FPDU_MODE:
+		break;
+	case CLOSING:
+	case MORIBUND:
+		if (ep->com.cm_id && ep->com.qp) {
+			attrs.next_state = C4IW_QP_STATE_ERROR;
+			c4iw_modify_qp(ep->com.dev, ep->com.qp,
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		}
+		close_complete_upcall(ep, -ETIMEDOUT);
+		break;
+	case ABORTING:
+	case DEAD:
+		/*
+		 * These states are expected if the ep timed out at the same
+		 * time as another thread was calling stop_ep_timer().
+		 * So we silently do nothing for these states.
+		 */
+		abort = 0;
+		break;
+	default:
+		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u\n"
+				, __func__, ep, ep->hwtid, ep->com.state);
+		abort = 0;
+	}
+	mutex_unlock(&ep->com.mutex);
+	if (abort)
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
+	return;
+}
+
+struct cqe_list_entry {
+	struct list_head entry;
+	struct c4iw_dev *rhp;
+	struct t4_cqe err_cqe;
+};
+
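+/*
+ * Drain the global list of deferred error CQEs.  Entries are unlinked from
+ * err_cqe_list while err_cqe_lock is held, but the lock is dropped around
+ * each c4iw_ev_dispatch() call because event dispatch may sleep.
+ */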
+static void
+process_err_cqes(void)
+{
+	unsigned long flag;
+	struct cqe_list_entry *cle;
+
+	spin_lock_irqsave(&err_cqe_lock, flag);
+	while (!list_empty(&err_cqe_list)) {
+		struct list_head *tmp;
+		tmp = err_cqe_list.next;
+		list_del(tmp);
+		tmp->next = tmp->prev = NULL;
+		spin_unlock_irqrestore(&err_cqe_lock, flag);
+		cle = list_entry(tmp, struct cqe_list_entry, entry);
+		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
+		free(cle, M_CXGBE);
+		spin_lock_irqsave(&err_cqe_lock, flag);
+	}
+	spin_unlock_irqrestore(&err_cqe_lock, flag);
+
+	return;
+}
+
+static void
+process_req(struct work_struct *ctx)
+{
+	struct c4iw_ep_common *epc;
+	unsigned long flag;
+	int ep_events;
+
+	process_err_cqes();
+	spin_lock_irqsave(&req_lock, flag);
+	while (!TAILQ_EMPTY(&req_list)) {
+		epc = TAILQ_FIRST(&req_list);
+		TAILQ_REMOVE(&req_list, epc, entry);
+		epc->entry.tqe_prev = NULL;
+		ep_events = epc->ep_events;
+		epc->ep_events = 0;
+		spin_unlock_irqrestore(&req_lock, flag);
+		CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, events 0x%x", __func__,
+		    epc->so, epc, ep_events);
+		if (ep_events & C4IW_EVENT_TERM)
+			process_terminate((struct c4iw_ep *)epc);
+		if (ep_events & C4IW_EVENT_TIMEOUT)
+			process_timeout((struct c4iw_ep *)epc);
+		if (ep_events & C4IW_EVENT_SOCKET)
+			process_socket_event((struct c4iw_ep *)epc);
+		c4iw_put_ep(epc);
+		process_err_cqes();
+		spin_lock_irqsave(&req_lock, flag);
+	}
+	spin_unlock_irqrestore(&req_lock, flag);
+}
+
+/*
+ * XXX: doesn't belong here in the iWARP driver.
+ * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
+ *      set.  Is this a valid assumption for active open?
+ */
+static int
+set_tcpinfo(struct c4iw_ep *ep)
+{
+	struct socket *so = ep->com.so;
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp;
+	struct toepcb *toep;
+	int rc = 0;
+
+	INP_WLOCK(inp);
+	tp = intotcpcb(inp);
+	if ((tp->t_flags & TF_TOE) == 0) {
+		rc = EINVAL;
+		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
+		    __func__, so, ep);
+		goto done;
+	}
+	toep = TOEPCB(so);
+
+	ep->hwtid = toep->tid;
+	ep->snd_seq = tp->snd_nxt;
+	ep->rcv_seq = tp->rcv_nxt;
+	ep->emss = max(tp->t_maxseg, 128);
+done:
+	INP_WUNLOCK(inp);
+	return (rc);
+}
+
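+/*
+ * Find a route to the peer with the legacy rtalloc() KPI.  rtalloc()
+ * returns the rtentry with a reference held; the caller is expected to
+ * release that reference once it is done with the route.
+ */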
+static struct rtentry *
+find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
+		__be16 peer_port, u8 tos)
+{
+	struct route iproute;
+	struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;
+
+	CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip,
+	    peer_ip, ntohs(local_port), ntohs(peer_port));
+	bzero(&iproute, sizeof iproute);
+	dst->sin_family = AF_INET;
+	dst->sin_len = sizeof *dst;
+	dst->sin_addr.s_addr = peer_ip;
+
+	rtalloc(&iproute);
+	CTR2(KTR_IW_CXGBE, "%s:frtE %p", __func__, (uint64_t)iproute.ro_rt);
+	return iproute.ro_rt;
+}
+
+static void
+close_socket(struct socket *so)
+{
+
+	uninit_iwarp_socket(so);
+	sodisconnect(so);
+}
+
+static void
+process_peer_close(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int disconnect = 1;
+	int release = 0;
+
+	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
+	    ep->com.so, states[ep->com.state]);
+
+	mutex_lock(&ep->com.mutex);
+	switch (ep->com.state) {
+
+		case MPA_REQ_WAIT:
+			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING",
+			    __func__, ep);
+			__state_set(&ep->com, CLOSING);
+			break;
+
+		case MPA_REQ_SENT:
+			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT CLOSING",
+			    __func__, ep);
+			__state_set(&ep->com, DEAD);
+			connect_reply_upcall(ep, -ECONNABORTED);
+
+			disconnect = 0;
+			STOP_EP_TIMER(ep);
+			close_socket(ep->com.so);
+			deref_cm_id(&ep->com);
+			release = 1;
+			break;
+
+		case MPA_REQ_RCVD:
+
+			/*
+			 * We're gonna mark this puppy DEAD, but keep
+			 * the reference on it until the ULP accepts or
+			 * rejects the CR.
+			 */
+			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
+			    __func__, ep);
+			__state_set(&ep->com, CLOSING);
+			c4iw_get_ep(&ep->com);
+			break;
+
+		case MPA_REP_SENT:
+			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
+			    __func__, ep);
+			__state_set(&ep->com, CLOSING);
+			break;
+
+		case FPDU_MODE:
+			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
+			    __func__, ep);
+			START_EP_TIMER(ep);
+			__state_set(&ep->com, CLOSING);
+			attrs.next_state = C4IW_QP_STATE_CLOSING;
+			c4iw_modify_qp(ep->com.dev, ep->com.qp,
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+			peer_close_upcall(ep);
+			break;
+
+		case ABORTING:
+			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
+			    __func__, ep);
+			disconnect = 0;
+			break;
+
+		case CLOSING:
+			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
+			    __func__, ep);
+			__state_set(&ep->com, MORIBUND);
+			disconnect = 0;
+			break;
+
+		case MORIBUND:
+			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
+			    ep);
+			STOP_EP_TIMER(ep);
+			if (ep->com.cm_id && ep->com.qp) {
+				attrs.next_state = C4IW_QP_STATE_IDLE;
+				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+			}
+			close_socket(ep->com.so);
+			close_complete_upcall(ep, 0);
+			__state_set(&ep->com, DEAD);
+			release = 1;
+			disconnect = 0;
+			break;
+
+		case DEAD:
+			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
+			    __func__, ep);
+			disconnect = 0;
+			break;
+
+		default:
+			panic("%s: ep %p state %d", __func__, ep,
+			    ep->com.state);
+			break;
+	}
+
+	mutex_unlock(&ep->com.mutex);
+
+	if (disconnect) {
+
+		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
+		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
+	}
+	if (release) {
+
+		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
+		c4iw_put_ep(&ep->com);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
+	return;
+}
+
+static void
+process_conn_error(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int ret;
+	int state;
+
+	mutex_lock(&ep->com.mutex);
+	state = ep->com.state;
+	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
+	    __func__, ep, ep->com.so, ep->com.so->so_error,
+	    states[ep->com.state]);
+
+	switch (state) {
+
+		case MPA_REQ_WAIT:
+			STOP_EP_TIMER(ep);
+			break;
+
+		case MPA_REQ_SENT:
+			STOP_EP_TIMER(ep);
+			connect_reply_upcall(ep, -ECONNRESET);
+			break;
+
+		case MPA_REP_SENT:
+			ep->com.rpl_err = ECONNRESET;
+			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
+			break;
+
+		case MPA_REQ_RCVD:
+
+			/*
+			 * We're gonna mark this puppy DEAD, but keep
+			 * the reference on it until the ULP accepts or
+			 * rejects the CR.
+			 */
+			c4iw_get_ep(&ep->com);
+			break;
+
+		case MORIBUND:
+		case CLOSING:
+			STOP_EP_TIMER(ep);
+			/*FALLTHROUGH*/
+		case FPDU_MODE:
+
+			if (ep->com.cm_id && ep->com.qp) {
+
+				attrs.next_state = C4IW_QP_STATE_ERROR;
+				ret = c4iw_modify_qp(ep->com.qp->rhp,
+					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+					&attrs, 1);
+				if (ret)
+					log(LOG_ERR,
+							"%s - qp <- error failed!\n",
+							__func__);
+			}
+			peer_abort_upcall(ep);
+			break;
+
+		case ABORTING:
+			break;
+
+		case DEAD:
+			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
+			    __func__, ep->com.so->so_error);
+			mutex_unlock(&ep->com.mutex);
+			return;
+
+		default:
+			panic("%s: ep %p state %d", __func__, ep, state);
+			break;
+	}
+
+	if (state != ABORTING) {
+		close_socket(ep->com.so);
+		__state_set(&ep->com, DEAD);
+		c4iw_put_ep(&ep->com);
+	}
+	mutex_unlock(&ep->com.mutex);
+	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
+	return;
+}
+
+static void
+process_close_complete(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int release = 0;
+
+	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
+	    ep->com.so, states[ep->com.state]);
+
+	/* The cm_id may be null if we failed to connect */
+	mutex_lock(&ep->com.mutex);
+	set_bit(CLOSE_CON_RPL, &ep->com.history);
+
+	switch (ep->com.state) {
+
+		case CLOSING:
+			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
+			    __func__, ep);
+			__state_set(&ep->com, MORIBUND);
+			break;
+
+		case MORIBUND:
+			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
+			    ep);
+			STOP_EP_TIMER(ep);
+
+			if ((ep->com.cm_id) && (ep->com.qp)) {
+
+				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
+				    __func__, ep);
+				attrs.next_state = C4IW_QP_STATE_IDLE;
+				c4iw_modify_qp(ep->com.dev,
+						ep->com.qp,
+						C4IW_QP_ATTR_NEXT_STATE,
+						&attrs, 1);
+			}
+
+			close_socket(ep->com.so);
+			close_complete_upcall(ep, 0);
+			__state_set(&ep->com, DEAD);
+			release = 1;
+			break;
+
+		case ABORTING:
+			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
+			break;
+
+		case DEAD:
+			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
+			break;
+		default:
+			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
+					__func__, ep);
+			panic("%s:pcc6 %p unknown ep state", __func__, ep);
+			break;
+	}
+	mutex_unlock(&ep->com.mutex);
+
+	if (release) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
+		c4iw_put_ep(&ep->com);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
+	return;
+}
+
+static void
+init_iwarp_socket(struct socket *so, void *arg)
+{
+	int rc;
+	struct sockopt sopt;
+	int on = 1;
+
+	/* Note that SOCK_LOCK(so) is same as SOCKBUF_LOCK(&so->so_rcv) */
+	SOCK_LOCK(so);
+	soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
+	so->so_state |= SS_NBIO;
+	SOCK_UNLOCK(so);
+	sopt.sopt_dir = SOPT_SET;
+	sopt.sopt_level = IPPROTO_TCP;
+	sopt.sopt_name = TCP_NODELAY;
+	sopt.sopt_val = (caddr_t)&on;
+	sopt.sopt_valsize = sizeof on;
+	sopt.sopt_td = NULL;
+	rc = sosetopt(so, &sopt);
+	if (rc) {
+		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
+		    __func__, so, rc);
+	}
+}
+
+static void
+uninit_iwarp_socket(struct socket *so)
+{
+
+	SOCKBUF_LOCK(&so->so_rcv);
+	soupcall_clear(so, SO_RCV);
+	SOCKBUF_UNLOCK(&so->so_rcv);
+}
+
+static void
+process_data(struct c4iw_ep *ep)
+{
+	struct sockaddr_in *local, *remote;
+	int disconnect = 0;
+
+	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__,
+	    ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc);
+
+	switch (state_read(&ep->com)) {
+	case MPA_REQ_SENT:
+		disconnect = process_mpa_reply(ep);
+		break;
+	case MPA_REQ_WAIT:
+		in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
+		in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
+		ep->com.local_addr = *local;
+		ep->com.remote_addr = *remote;
+		free(local, M_SONAME);
+		free(remote, M_SONAME);
+		disconnect = process_mpa_request(ep);
+		break;
+	default:
+		if (ep->com.so->so_rcv.sb_cc)
+			log(LOG_ERR, "%s: Unexpected streaming data.  "
+			    "ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n",
+			    __func__, ep, state_read(&ep->com), ep->com.so,
+			    ep->com.so->so_state, ep->com.so->so_rcv.sb_cc);
+		break;
+	}
+	if (disconnect)
+		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+}
+
+static void
+process_connected(struct c4iw_ep *ep)
+{
+	struct socket *so = ep->com.so;
+
+	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
+		if (send_mpa_req(ep))
+			goto err;
+	} else {
+		connect_reply_upcall(ep, -so->so_error);
+		goto err;
+	}
+	return;
+err:
+	close_socket(so);
+	state_set(&ep->com, DEAD);
+	c4iw_put_ep(&ep->com);
+	return;
+}
+
+void
+process_newconn(struct iw_cm_id *parent_cm_id, struct socket *child_so)
+{
+	struct c4iw_ep *child_ep;
+	struct sockaddr_in *local;
+	struct sockaddr_in *remote;
+	struct c4iw_ep *parent_ep = parent_cm_id->provider_data;
+	int ret = 0;
+
+	MPASS(child_so != NULL);
+
+	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+
+	CTR5(KTR_IW_CXGBE,
+	    "%s: parent so %p, parent ep %p, child so %p, child ep %p",
+	     __func__, parent_ep->com.so, parent_ep, child_so, child_ep);
+
+	in_getsockaddr(child_so, (struct sockaddr **)&local);
+	in_getpeeraddr(child_so, (struct sockaddr **)&remote);
+
+	child_ep->com.local_addr = *local;
+	child_ep->com.remote_addr = *remote;
+	child_ep->com.dev = parent_ep->com.dev;
+	child_ep->com.so = child_so;
+	child_ep->com.cm_id = NULL;
+	child_ep->com.thread = parent_ep->com.thread;
+	child_ep->parent_ep = parent_ep;
+
+	free(local, M_SONAME);
+	free(remote, M_SONAME);
+
+	init_iwarp_socket(child_so, &child_ep->com);
+	c4iw_get_ep(&parent_ep->com);
+	init_timer(&child_ep->timer);
+	state_set(&child_ep->com, MPA_REQ_WAIT);
+	START_EP_TIMER(child_ep);
+
+	/* maybe the request has already been queued up on the socket... */
+	ret = process_mpa_request(child_ep);
+	if (ret == 2)
+		/* ABORT */
+		c4iw_ep_disconnect(child_ep, 1, GFP_KERNEL);
+	else if (ret == 1)
+		/* CLOSE */
+		c4iw_ep_disconnect(child_ep, 0, GFP_KERNEL);
+
+	return;
+}
+
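+/*
+ * Record new events on the endpoint and hand it to the taskqueue.  A NULL
+ * tqe_prev is used as the "not already on req_list" sentinel, so the ep
+ * is referenced and enqueued at most once however many events accumulate
+ * before process_req() runs.
+ */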
+static int
+add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&req_lock, flag);
+	if (ep && ep->com.so) {
+		ep->com.ep_events |= new_ep_event;
+		if (!ep->com.entry.tqe_prev) {
+			c4iw_get_ep(&ep->com);
+			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
+			queue_work(c4iw_taskq, &c4iw_task);
+		}
+	}
+	spin_unlock_irqrestore(&req_lock, flag);
+
+	return (0);
+}
+
+static int
+c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
+{
+	struct c4iw_ep *ep = arg;
+
+	CTR6(KTR_IW_CXGBE,
+	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
+	    __func__, so, so->so_state, ep, states[ep->com.state],
+	    ep->com.entry.tqe_prev);
+
+	MPASS(ep->com.so == so);
+	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
+
+	return (SU_OK);
+}
+
+
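+/*
+ * CPL handler for an incoming RDMA TERMINATE: map the hardware tid back
+ * to its toepcb, from there to the socket and its upcall argument (the
+ * ep), and defer the real work to the taskqueue via C4IW_EVENT_TERM.
+ */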
+static int
+terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+	struct adapter *sc = iq->adapter;
+	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
+	unsigned int tid = GET_TID(cpl);
+	struct toepcb *toep = lookup_tid(sc, tid);
+	struct socket *so;
+	struct c4iw_ep *ep;
+
+	INP_WLOCK(toep->inp);
+	so = inp_inpcbtosocket(toep->inp);
+	ep = so->so_rcv.sb_upcallarg;
+	INP_WUNLOCK(toep->inp);
+
+	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
+	add_ep_to_req_list(ep, C4IW_EVENT_TERM);
+
+	return 0;
+}
+
+static void
+process_socket_event(struct c4iw_ep *ep)
+{
+	int state = state_read(&ep->com);
+	struct socket *so = ep->com.so;
+
+	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
+	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
+	    so->so_error, so->so_rcv.sb_state, ep, states[state]);
+
+	if (state == CONNECTING) {
+		process_connected(ep);
+		return;
+	}
+
+	if (state == LISTEN) {
+		/* socket listening events are handled at IWCM */
+		CTR3(KTR_IW_CXGBE, "%s Invalid ep state:%u, ep:%p", __func__,
+			    ep->com.state, ep);
+		BUG();
+		return;
+	}
+
+	/* connection error */
+	if (so->so_error) {
+		process_conn_error(ep);
+		return;
+	}
+
+	/* peer close */
+	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
+		process_peer_close(ep);
+		/*
+		 * check whether socket disconnect event is pending before
+		 * returning. Fallthrough if yes.
+		 */
+		if (!(so->so_state & SS_ISDISCONNECTED))
+			return;
+	}
+
+	/* close complete */
+	if (so->so_state & SS_ISDISCONNECTED) {
+		process_close_complete(ep);
+		return;
+	}
+
+	/* rx data */
+	process_data(ep);
+}
+
+SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
+
+static int dack_mode = 0;
+TUNABLE_INT("hw.iw_cxgbe.dack_mode", &dack_mode);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RW, &dack_mode, 0,
+		"Delayed ack mode (default = 0)");
+
+int c4iw_max_read_depth = 8;
+TUNABLE_INT("hw.iw_cxgbe.c4iw_max_read_depth", &c4iw_max_read_depth);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RW, &c4iw_max_read_depth, 0,
+		"Per-connection max ORD/IRD (default = 8)");
+
+static int enable_tcp_timestamps;
+TUNABLE_INT("hw.iw_cxgbe.enable_tcp_timestamps", &enable_tcp_timestamps);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RW, &enable_tcp_timestamps, 0,
+		"Enable tcp timestamps (default = 0)");
+
+static int enable_tcp_sack;
+TUNABLE_INT("hw.iw_cxgbe.enable_tcp_sack", &enable_tcp_sack);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RW, &enable_tcp_sack, 0,
+		"Enable tcp SACK (default = 0)");
+
+static int enable_tcp_window_scaling = 1;
+TUNABLE_INT("hw.iw_cxgbe.enable_tcp_window_scaling", &enable_tcp_window_scaling);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RW, &enable_tcp_window_scaling, 0,
+		"Enable tcp window scaling (default = 1)");
+
+int c4iw_debug = 1;
+TUNABLE_INT("hw.iw_cxgbe.c4iw_debug", &c4iw_debug);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RW, &c4iw_debug, 0,
+		"Enable debug logging (default = 0)");
+
+static int peer2peer = 1;
+TUNABLE_INT("hw.iw_cxgbe.peer2peer", &peer2peer);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 0,
+		"Support peer2peer ULPs (default = 1)");
+
+static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
+TUNABLE_INT("hw.iw_cxgbe.p2p_type", &p2p_type);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RW, &p2p_type, 0,
+		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
+
+static int ep_timeout_secs = 60;
+TUNABLE_INT("hw.iw_cxgbe.ep_timeout_secs", &ep_timeout_secs);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0,
+		"CM Endpoint operation timeout in seconds (default = 60)");
+
+static int mpa_rev = 1;
+TUNABLE_INT("hw.iw_cxgbe.mpa_rev", &mpa_rev);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0,
+		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
+
+static int markers_enabled;
+TUNABLE_INT("hw.iw_cxgbe.markers_enabled", &markers_enabled);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0,
+		"Enable MPA MARKERS (default(0) = disabled)");
+
+static int crc_enabled = 1;
+TUNABLE_INT("hw.iw_cxgbe.crc_enabled", &crc_enabled);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0,
+		"Enable MPA CRC (default(1) = enabled)");
+
+static int rcv_win = 256 * 1024;
+TUNABLE_INT("hw.iw_cxgbe.rcv_win", &rcv_win);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0,
+		"TCP receive window in bytes (default = 256KB)");
+
+static int snd_win = 128 * 1024;
+TUNABLE_INT("hw.iw_cxgbe.snd_win", &snd_win);
+SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0,
+		"TCP send window in bytes (default = 128KB)");
+
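+/*
+ * The endpoint timer holds its own reference on the ep: start_ep_timer()
+ * takes one with c4iw_get_ep() and clears the TIMEOUT flag, and the stop
+ * and timeout paths use test_and_set_bit(TIMEOUT) to agree on which side
+ * drops that reference, so it is released exactly once.
+ */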
+static void
+start_ep_timer(struct c4iw_ep *ep)
+{
+
+	if (timer_pending(&ep->timer)) {
+		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
+		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
+		    ep);
+		return;
+	}
+	clear_bit(TIMEOUT, &ep->com.flags);
+	c4iw_get_ep(&ep->com);
+	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
+	ep->timer.data = (unsigned long)ep;
+	ep->timer.function = ep_timeout;
+	add_timer(&ep->timer);
+}
+
+static int
+stop_ep_timer(struct c4iw_ep *ep)
+{
+
+	del_timer_sync(&ep->timer);
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+		c4iw_put_ep(&ep->com);
+		return 0;
+	}
+	return 1;
+}
+
+static enum c4iw_ep_state
+state_read(struct c4iw_ep_common *epc)
+{
+	enum c4iw_ep_state state;
+
+	mutex_lock(&epc->mutex);
+	state = epc->state;
+	mutex_unlock(&epc->mutex);
+
+	return (state);
+}
+
+static void
+__state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+
+	epc->state = new;
+}
+
+static void
+state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
+{
+
+	mutex_lock(&epc->mutex);
+	__state_set(epc, new);
+	mutex_unlock(&epc->mutex);
+}
+
+static void *
+alloc_ep(int size, gfp_t gfp)
+{
+	struct c4iw_ep_common *epc;
+
+	epc = kzalloc(size, gfp);
+	if (epc == NULL)
+		return (NULL);
+
+	kref_init(&epc->kref);
+	mutex_init(&epc->mutex);
+	c4iw_init_wr_wait(&epc->wr_wait);
+
+	return (epc);
+}
+
+void _c4iw_free_ep(struct kref *kref)
+{
+	struct c4iw_ep *ep;
+	struct c4iw_ep_common *epc;
+
+	ep = container_of(kref, struct c4iw_ep, com.kref);
+	epc = &ep->com;
+	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
+	    __func__, epc));
+	if (test_bit(QP_REFERENCED, &ep->com.flags))
+		deref_qp(ep);
+	kfree(ep);
+}
+
+static void release_ep_resources(struct c4iw_ep *ep)
+{
+	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
+	set_bit(RELEASE_RESOURCES, &ep->com.flags);
+	c4iw_put_ep(&ep->com);
+	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
+}
+
+static int
+send_mpa_req(struct c4iw_ep *ep)
+{
+	int mpalen;
+	struct mpa_message *mpa;
+	struct mpa_v2_conn_params mpa_v2_params;
+	struct mbuf *m;
+	char mpa_rev_to_use = mpa_rev;
+	int err = 0;
+
+	if (ep->retry_with_mpa_v1)
+		mpa_rev_to_use = 1;
+	mpalen = sizeof(*mpa) + ep->plen;
+	if (mpa_rev_to_use == 2)
+		mpalen += sizeof(struct mpa_v2_conn_params);
+
+	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
+	if (mpa == NULL) {
+		err = -ENOMEM;
+		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
+				__func__, ep, err);
+		goto err;
+	}
+
+	memset(mpa, 0, mpalen);
+	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
+	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
+		(markers_enabled ? MPA_MARKERS : 0) |
+		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
+	mpa->private_data_size = htons(ep->plen);
+	mpa->revision = mpa_rev_to_use;
+
+	if (mpa_rev_to_use == 1) {
+		ep->tried_with_mpa_v1 = 1;
+		ep->retry_with_mpa_v1 = 0;
+	}
+
+	if (mpa_rev_to_use == 2) {
+		mpa->private_data_size +=
+			htons(sizeof(struct mpa_v2_conn_params));
+		mpa_v2_params.ird = htons((u16)ep->ird);
+		mpa_v2_params.ord = htons((u16)ep->ord);
+
+		if (peer2peer) {
+			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
+
+			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
+				mpa_v2_params.ord |=
+				    htons(MPA_V2_RDMA_WRITE_RTR);
+			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
+				mpa_v2_params.ord |=
+					htons(MPA_V2_RDMA_READ_RTR);
+			}
+		}
+		memcpy(mpa->private_data, &mpa_v2_params,
+			sizeof(struct mpa_v2_conn_params));
+
+		if (ep->plen) {
+
+			memcpy(mpa->private_data +
+				sizeof(struct mpa_v2_conn_params),
+				ep->mpa_pkt + sizeof(*mpa), ep->plen);
+		}
+	} else {
+
+		if (ep->plen)
+			memcpy(mpa->private_data,
+					ep->mpa_pkt + sizeof(*mpa), ep->plen);
+		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
+	}
+
+	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
+	if (m == NULL) {
+		err = -ENOMEM;
+		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
+				__func__, ep, err);
+		free(mpa, M_CXGBE);
+		goto err;
+	}
+	m_copyback(m, 0, mpalen, (void *)mpa);
+	free(mpa, M_CXGBE);
+
+	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
+			ep->com.thread);
+	if (err) {
+		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
+				__func__, ep, err);
+		goto err;
+	}
+
+	START_EP_TIMER(ep);
+	state_set(&ep->com, MPA_REQ_SENT);
+	ep->mpa_attr.initiator = 1;
+	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
+	return 0;
+err:
+	connect_reply_upcall(ep, err);
+	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
+	return err;
+}
+
+static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+	int mpalen;
+	struct mpa_message *mpa;
+	struct mpa_v2_conn_params mpa_v2_params;
+	struct mbuf *m;
+	int err;
+
+	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
+	    ep->plen);
+
+	mpalen = sizeof(*mpa) + plen;
+
+	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+
+		mpalen += sizeof(struct mpa_v2_conn_params);
+		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
+		    ep->mpa_attr.version, mpalen);
+	}
+
+	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
+	if (mpa == NULL)
+		return (-ENOMEM);
+
+	memset(mpa, 0, mpalen);
+	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+	mpa->flags = MPA_REJECT;
+	mpa->revision = mpa_rev;
+	mpa->private_data_size = htons(plen);
+
+	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+
+		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+		mpa->private_data_size +=
+			htons(sizeof(struct mpa_v2_conn_params));
+		mpa_v2_params.ird = htons(((u16)ep->ird) |
+				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
+				 0));
+		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
+					(p2p_type ==
+					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
+					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
+					 FW_RI_INIT_P2PTYPE_READ_REQ ?
+					 MPA_V2_RDMA_READ_RTR : 0) : 0));
+		memcpy(mpa->private_data, &mpa_v2_params,
+				sizeof(struct mpa_v2_conn_params));
+
+		if (ep->plen)
+			memcpy(mpa->private_data +
+					sizeof(struct mpa_v2_conn_params), pdata, plen);
+		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
+		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
+	} else if (plen)
+		memcpy(mpa->private_data, pdata, plen);
+
+	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
+	if (m == NULL) {
+		free(mpa, M_CXGBE);
+		return (-ENOMEM);
+	}
+	m_copyback(m, 0, mpalen, (void *)mpa);
+	free(mpa, M_CXGBE);
+
+	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
+	if (!err)
+		ep->snd_seq += mpalen;
+	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
+	return err;
+}
+
+static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
+{
+	int mpalen;
+	struct mpa_message *mpa;
+	struct mbuf *m;
+	struct mpa_v2_conn_params mpa_v2_params;
+	int err;
+
+	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
+
+	mpalen = sizeof(*mpa) + plen;
+
+	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+
+		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
+		    ep->mpa_attr.version);
+		mpalen += sizeof(struct mpa_v2_conn_params);
+	}
+
+	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
+	if (mpa == NULL)
+		return (-ENOMEM);
+
+	memset(mpa, 0, sizeof(*mpa));
+	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
+	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
+		(markers_enabled ? MPA_MARKERS : 0);
+	mpa->revision = ep->mpa_attr.version;
+	mpa->private_data_size = htons(plen);
+
+	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+
+		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+		mpa->private_data_size +=
+			htons(sizeof(struct mpa_v2_conn_params));
+		mpa_v2_params.ird = htons((u16)ep->ird);
+		mpa_v2_params.ord = htons((u16)ep->ord);
+		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
+		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
+
+		if (peer2peer && (ep->mpa_attr.p2p_type !=
+			FW_RI_INIT_P2PTYPE_DISABLED)) {
+
+			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
+
+			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
+
+				mpa_v2_params.ord |=
+					htons(MPA_V2_RDMA_WRITE_RTR);
+				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
+				    __func__, ep, p2p_type, mpa_v2_params.ird,
+				    mpa_v2_params.ord);
+			}
+			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
+
+				mpa_v2_params.ord |=
+					htons(MPA_V2_RDMA_READ_RTR);
+				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
+				    __func__, ep, p2p_type, mpa_v2_params.ird,
+				    mpa_v2_params.ord);
+			}
+		}
+
+		memcpy(mpa->private_data, &mpa_v2_params,
+			sizeof(struct mpa_v2_conn_params));
+
+		if (ep->plen)
+			memcpy(mpa->private_data +
+				sizeof(struct mpa_v2_conn_params), pdata, plen);
+	} else if (plen)
+		memcpy(mpa->private_data, pdata, plen);
+
+	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
+	if (m == NULL) {
+		free(mpa, M_CXGBE);
+		return (-ENOMEM);
+	}
+	m_copyback(m, 0, mpalen, (void *)mpa);
+	free(mpa, M_CXGBE);
+
+
+	state_set(&ep->com, MPA_REP_SENT);
+	ep->snd_seq += mpalen;
+	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
+			ep->com.thread);
+	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
+	return err;
+}
+
+
+
+static void close_complete_upcall(struct c4iw_ep *ep, int status)
+{
+	struct iw_cm_event event;
+
+	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_CLOSE;
+	event.status = status;
+
+	if (ep->com.cm_id) {
+
+		CTR2(KTR_IW_CXGBE, "%s:ccu1 %1", __func__, ep);
+		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		deref_cm_id(&ep->com);
+		set_bit(CLOSE_UPCALL, &ep->com.history);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
+}
+
+static int
+send_abort(struct c4iw_ep *ep)
+{
+	struct socket *so = ep->com.so;
+	struct sockopt sopt;
+	int rc;
+	struct linger l;
+
+	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
+	    states[ep->com.state], ep->hwtid);
+
+	l.l_onoff = 1;
+	l.l_linger = 0;
+
+	/* linger_time of 0 forces RST to be sent */
+	sopt.sopt_dir = SOPT_SET;
+	sopt.sopt_level = SOL_SOCKET;
+	sopt.sopt_name = SO_LINGER;
+	sopt.sopt_val = (caddr_t)&l;
+	sopt.sopt_valsize = sizeof l;
+	sopt.sopt_td = NULL;
+	rc = sosetopt(so, &sopt);
+	if (rc != 0) {
+		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
+		    __func__, so, rc);
+	}
+
+	uninit_iwarp_socket(so);
+	sodisconnect(so);
+	set_bit(ABORT_CONN, &ep->com.history);
+
+	/*
+	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every
+	 * ABORT request it has sent, but the current TOE driver does not
+	 * propagate that event (via do_abort_rpl) to iw_cxgbe.  As a
+	 * workaround, de-reference 'ep' (which was referenced before sending
+	 * the ABORT request) here instead of doing it in the abort_rpl()
+	 * handler of the iw_cxgbe driver.
+	 */
+	c4iw_put_ep(&ep->com);
+
+	return (0);
+}
+
+static void peer_close_upcall(struct c4iw_ep *ep)
+{
+	struct iw_cm_event event;
+
+	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_DISCONNECT;
+
+	if (ep->com.cm_id) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
+		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		set_bit(DISCONN_UPCALL, &ep->com.history);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
+}
+
+static void peer_abort_upcall(struct c4iw_ep *ep)
+{
+	struct iw_cm_event event;
+
+	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_CLOSE;
+	event.status = -ECONNRESET;
+
+	if (ep->com.cm_id) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
+		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		deref_cm_id(&ep->com);
+		set_bit(ABORT_UPCALL, &ep->com.history);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
+}
+
+static void connect_reply_upcall(struct c4iw_ep *ep, int status)
+{
+	struct iw_cm_event event;
+
+	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_CONNECT_REPLY;
+	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
+					-ECONNRESET : status;
+	event.local_addr = ep->com.local_addr;
+	event.remote_addr = ep->com.remote_addr;
+
+	if ((status == 0) || (status == -ECONNREFUSED)) {
+
+		if (!ep->tried_with_mpa_v1) {
+
+			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
+			/* this means MPA_v2 is used */
+			event.private_data_len = ep->plen -
+				sizeof(struct mpa_v2_conn_params);
+			event.private_data = ep->mpa_pkt +
+				sizeof(struct mpa_message) +
+				sizeof(struct mpa_v2_conn_params);
+		} else {
+
+			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
+			/* this means MPA_v1 is used */
+			event.private_data_len = ep->plen;
+			event.private_data = ep->mpa_pkt +
+				sizeof(struct mpa_message);
+		}
+	}
+
+	if (ep->com.cm_id) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
+		set_bit(CONN_RPL_UPCALL, &ep->com.history);
+		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+	}
+
+	if (status == -ECONNABORTED) {
+
+		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
+		return;
+	}
+
+	if (status < 0) {
+
+		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
+		deref_cm_id(&ep->com);
+	}
+
+	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
+}
+
+static int connect_request_upcall(struct c4iw_ep *ep)
+{
+	struct iw_cm_event event;
+	int ret;
+
+	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
+	    ep->tried_with_mpa_v1);
+
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_CONNECT_REQUEST;
+	event.local_addr = ep->com.local_addr;
+	event.remote_addr = ep->com.remote_addr;
+	event.provider_data = ep;
+	event.so = ep->com.so;
+
+	if (!ep->tried_with_mpa_v1) {
+		/* this means MPA_v2 is used */
+		event.ord = ep->ord;
+		event.ird = ep->ird;
+		event.private_data_len = ep->plen -
+			sizeof(struct mpa_v2_conn_params);
+		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
+			sizeof(struct mpa_v2_conn_params);
+	} else {
+
+		/* this means MPA_v1 is used. Send max supported */
+		event.ord = c4iw_max_read_depth;
+		event.ird = c4iw_max_read_depth;
+		event.private_data_len = ep->plen;
+		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
+	}
+
+	c4iw_get_ep(&ep->com);
+	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
+	    &event);
+	if (ret)
+		c4iw_put_ep(&ep->com);
+
+	set_bit(CONNREQ_UPCALL, &ep->com.history);
+	c4iw_put_ep(&ep->parent_ep->com);
+	return ret;
+}
+
+static void established_upcall(struct c4iw_ep *ep)
+{
+	struct iw_cm_event event;
+
+	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
+	memset(&event, 0, sizeof(event));
+	event.event = IW_CM_EVENT_ESTABLISHED;
+	event.ird = ep->ird;
+	event.ord = ep->ord;
+
+	if (ep->com.cm_id) {
+
+		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
+		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		set_bit(ESTAB_UPCALL, &ep->com.history);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
+}
+
+
+/*
+ * process_mpa_reply - process streaming mode MPA reply
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the MPA reply is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
+static int process_mpa_reply(struct c4iw_ep *ep)
+{
+	struct mpa_message *mpa;
+	struct mpa_v2_conn_params *mpa_v2_params;
+	u16 plen;
+	u16 resp_ird, resp_ord;
+	u8 rtr_mismatch = 0, insuff_ird = 0;
+	struct c4iw_qp_attributes attrs;
+	enum c4iw_qp_attr_mask mask;
+	int err;
+	struct mbuf *top, *m;
+	int flags = MSG_DONTWAIT;
+	struct uio uio;
+	int disconnect = 0;
+
+	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
+
+	/*
+	 * Stop mpa timer.  If it expired, then
+	 * we ignore the MPA reply.  process_timeout()
+	 * will abort the connection.
+	 */
+	if (STOP_EP_TIMER(ep))
+		return 0;
+
+	uio.uio_resid = 1000000;
+	uio.uio_td = ep->com.thread;
+	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
+
+	if (err) {
+
+		if (err == EWOULDBLOCK) {
+
+			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
+			START_EP_TIMER(ep);
+			return 0;
+		}
+		err = -err;
+		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
+		goto err;
+	}
+
+	if (ep->com.so->so_rcv.sb_mb) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
+		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
+		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
+	}
+
+	m = top;
+
+	do {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
+		/*
+		 * If we get more than the supported amount of private data
+		 * then we must fail this connection.
+		 */
+		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
+
+			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
+			    ep->mpa_pkt_len + m->m_len);
+			err = (-EINVAL);
+			goto err_stop_timer;
+		}
+
+		/*
+		 * copy the new data into our accumulation buffer.
+		 */
+		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
+		ep->mpa_pkt_len += m->m_len;
+		if (!m->m_next)
+			m = m->m_nextpkt;
+		else
+			m = m->m_next;
+	} while (m);
+
+	m_freem(top);
+	/*
+	 * if we don't even have the mpa message, then bail.
+	 */
+	if (ep->mpa_pkt_len < sizeof(*mpa)) {
+		return 0;
+	}
+	mpa = (struct mpa_message *) ep->mpa_pkt;
+
+	/* Validate MPA header. */
+	if (mpa->revision > mpa_rev) {
+
+		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
+		    mpa->revision, mpa_rev);
+		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
+				" Received = %d\n", __func__, mpa_rev, mpa->revision);
+		err = -EPROTO;
+		goto err_stop_timer;
+	}
+
+	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
+		err = -EPROTO;
+		goto err_stop_timer;
+	}
+
+	plen = ntohs(mpa->private_data_size);
+
+	/*
+	 * Fail if there's too much private data.
+	 */
+	if (plen > MPA_MAX_PRIVATE_DATA) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
+		err = -EPROTO;
+		goto err_stop_timer;
+	}
+
+	/*
+	 * Fail if we received more data than the advertised plen accounts for.
+	 */
+	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
+		STOP_EP_TIMER(ep);
+		err = -EPROTO;
+		goto err_stop_timer;
+	}
+
+	ep->plen = (u8) plen;
+
+	/*
+	 * If we don't have all the pdata yet, then bail.
+	 * We'll continue processing when more data arrives.
+	 */
+	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
+		return 0;
+	}
+
+	if (mpa->flags & MPA_REJECT) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
+		err = -ECONNREFUSED;
+		goto err_stop_timer;
+	}
+
+	/*
+	 * If we get here we have accumulated the entire mpa
+	 * start reply message including private data. And
+	 * the MPA header is valid.
+	 */
+	state_set(&ep->com, FPDU_MODE);
+	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
+	ep->mpa_attr.recv_marker_enabled = markers_enabled;
+	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+	ep->mpa_attr.version = mpa->revision;
+	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
+
+	if (mpa->revision == 2) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
+		ep->mpa_attr.enhanced_rdma_conn =
+			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
+
+		if (ep->mpa_attr.enhanced_rdma_conn) {
+
+			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
+			mpa_v2_params = (struct mpa_v2_conn_params *)
+				(ep->mpa_pkt + sizeof(*mpa));
+			resp_ird = ntohs(mpa_v2_params->ird) &
+				MPA_V2_IRD_ORD_MASK;
+			resp_ord = ntohs(mpa_v2_params->ord) &
+				MPA_V2_IRD_ORD_MASK;
+
+			/*
+			 * This is a double-check. Ideally, below checks are
+			 * not required since ird/ord stuff has been taken
+			 * care of in c4iw_accept_cr
+			 */
+			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+
+				CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep);
+				err = -ENOMEM;
+				ep->ird = resp_ord;
+				ep->ord = resp_ird;
+				insuff_ird = 1;
+			}
+
+			if (ntohs(mpa_v2_params->ird) &
+				MPA_V2_PEER2PEER_MODEL) {
+
+				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
+				if (ntohs(mpa_v2_params->ord) &
+					MPA_V2_RDMA_WRITE_RTR) {
+
+					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
+					ep->mpa_attr.p2p_type =
+						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
+				}
+				else if (ntohs(mpa_v2_params->ord) &
+					MPA_V2_RDMA_READ_RTR) {
+
+					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
+					ep->mpa_attr.p2p_type =
+						FW_RI_INIT_P2PTYPE_READ_REQ;
+				}
+			}
+		}
+	} else {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
+
+		if (mpa->revision == 1) {
+
+			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
+
+			if (peer2peer) {
+
+				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
+				ep->mpa_attr.p2p_type = p2p_type;
+			}
+		}
+	}
+
+	if (set_tcpinfo(ep)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
+		printf("%s set_tcpinfo error\n", __func__);
+		err = -ECONNRESET;
+		goto err;
+	}
+
+	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
+	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
+	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
+	    ep->mpa_attr.p2p_type);
+
+	/*
+	 * If responder's RTR does not match with that of initiator, assign
+	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
+	 * generated when moving QP to RTS state.
+	 * A TERM message will be sent after QP has moved to RTS state
+	 */
+	if ((ep->mpa_attr.version == 2) && peer2peer &&
+		(ep->mpa_attr.p2p_type != p2p_type)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
+		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
+		rtr_mismatch = 1;
+	}
+
+
+	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
+	attrs.mpa_attr = ep->mpa_attr;
+	attrs.max_ird = ep->ird;
+	attrs.max_ord = ep->ord;
+	attrs.llp_stream_handle = ep;
+	attrs.next_state = C4IW_QP_STATE_RTS;
+
+	mask = C4IW_QP_ATTR_NEXT_STATE |
+		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
+		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
+
+	/* bind QP and TID with INIT_WR */
+	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
+
+	if (err) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
+		goto err;
+	}
+
+	/*
+	 * If responder's RTR requirement did not match with what initiator
+	 * supports, generate TERM message
+	 */
+	if (rtr_mismatch) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
+		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
+		attrs.layer_etype = LAYER_MPA | DDP_LLP;
+		attrs.ecode = MPA_NOMATCH_RTR;
+		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+			C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+		err = -ENOMEM;
+		disconnect = 1;
+		goto out;
+	}
+
+	/*
+	 * Generate TERM if initiator IRD is not sufficient for responder
+	 * provided ORD. Currently, we do the same behaviour even when
+	 * responder provided IRD is also not sufficient as regards to
+	 * initiator ORD.
+	 */
+	if (insuff_ird) {
+
+		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
+		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
+				__func__);
+		attrs.layer_etype = LAYER_MPA | DDP_LLP;
+		attrs.ecode = MPA_INSUFF_IRD;
+		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+			C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+		err = -ENOMEM;
+		disconnect = 1;
+		goto out;
+	}
+	goto out;
+err_stop_timer:
+	STOP_EP_TIMER(ep);
+err:
+	disconnect = 2;
+out:
+	connect_reply_upcall(ep, err);
+	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
+	return disconnect;
+}
+
+/*
+ * process_mpa_request - process streaming mode MPA request
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the mpa request is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
+static int
+process_mpa_request(struct c4iw_ep *ep)
+{
+	struct mpa_message *mpa;
+	u16 plen;
+	int flags = MSG_DONTWAIT;
+	int rc;
+	struct iovec iov;
+	struct uio uio;
+	enum c4iw_ep_state state = state_read(&ep->com);
+
+	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
+
+	if (state != MPA_REQ_WAIT)
+		return 0;
+
+	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
+	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
+	uio.uio_iov = &iov;
+	uio.uio_iovcnt = 1;
+	uio.uio_offset = 0;
+	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
+	uio.uio_segflg = UIO_SYSSPACE;
+	uio.uio_rw = UIO_READ;
+	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
+
+	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
+	if (rc == EAGAIN)
+		return 0;
+	else if (rc)
+		goto err_stop_timer;
+
+	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
+	    __func__, ep->com.so));
+	ep->mpa_pkt_len += uio.uio_offset;
+
+	/*
+	 * If we get more than the supported amount of private data then we must
+	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
+	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
+	 * byte is filled by the soreceive above.
+	 */
+
+	/* Don't even have the MPA message.  Wait for more data to arrive. */
+	if (ep->mpa_pkt_len < sizeof(*mpa))
+		return 0;
+	mpa = (struct mpa_message *) ep->mpa_pkt;
+
+	/*
+	 * Validate MPA Header.
+	 */
+	if (mpa->revision > mpa_rev) {
+		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
+		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
+		goto err_stop_timer;
+	}
+
+	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
+		goto err_stop_timer;
+
+	/*
+	 * Fail if there's too much private data.
+	 */
+	plen = ntohs(mpa->private_data_size);
+	if (plen > MPA_MAX_PRIVATE_DATA)
+		goto err_stop_timer;
+
+	/*
+	 * If plen does not account for pkt size
+	 */
+	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
+		goto err_stop_timer;
+
+	ep->plen = (u8) plen;
+
+	/*
+	 * If we don't have all the pdata yet, then bail.
+	 */
+	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
+		return 0;
+
+	/*
+	 * If we get here we have accumulated the entire mpa
+	 * request message including private data.
+	 */
+	ep->mpa_attr.initiator = 0;
+	ep->mpa_attr.crc_enabled =
+	    ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
+	ep->mpa_attr.recv_marker_enabled = markers_enabled;
+	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
+	ep->mpa_attr.version = mpa->revision;
+	if (mpa->revision == 1)
+		ep->tried_with_mpa_v1 = 1;
+	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
+
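+	/*
+	 * MPA v2 packs IRD/ORD into the low bits of the 16-bit ird/ord
+	 * fields of mpa_v2_conn_params; the high-order bits carry the
+	 * peer-to-peer negotiation flags (the peer2peer model bit in ird,
+	 * the RDMA write/read RTR selection in ord), hence the masking
+	 * below.
+	 */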
+	if (mpa->revision == 2) {
+		ep->mpa_attr.enhanced_rdma_conn =
+		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
+		if (ep->mpa_attr.enhanced_rdma_conn) {
+			struct mpa_v2_conn_params *mpa_v2_params;
+			u16 ird, ord;
+
+			mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)];
+			ird = ntohs(mpa_v2_params->ird);
+			ord = ntohs(mpa_v2_params->ord);
+
+			ep->ird = ird & MPA_V2_IRD_ORD_MASK;
+			ep->ord = ord & MPA_V2_IRD_ORD_MASK;
+			if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) {
+				if (ord & MPA_V2_RDMA_WRITE_RTR) {
+					ep->mpa_attr.p2p_type =
+					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
+				} else if (ord & MPA_V2_RDMA_READ_RTR) {
+					ep->mpa_attr.p2p_type =
+					    FW_RI_INIT_P2PTYPE_READ_REQ;
+				}
+			}
+		}
+	} else if (mpa->revision == 1 && peer2peer)
+		ep->mpa_attr.p2p_type = p2p_type;
+
+	if (set_tcpinfo(ep))
+		goto err_stop_timer;
+
+	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
+	    "xmit_marker_enabled = %d, version = %d", __func__,
+	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
+	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
+
+	state_set(&ep->com, MPA_REQ_RCVD);
+	STOP_EP_TIMER(ep);
+
+	/* drive upcall */
+	mutex_lock(&ep->parent_ep->com.mutex);
+	if (ep->parent_ep->com.state != DEAD) {
+		if (connect_request_upcall(ep))
+			goto err_unlock_parent;
+	} else
+		goto err_unlock_parent;
+	mutex_unlock(&ep->parent_ep->com.mutex);
+	return 0;
+
+err_unlock_parent:
+	mutex_unlock(&ep->parent_ep->com.mutex);
+	goto err_out;
+err_stop_timer:
+	STOP_EP_TIMER(ep);
+err_out:
+	return 2;
+}
+
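+/*
+ * Illustrative caller sketch (editorial note, not part of this commit):
+ * the socket upcall path is expected to dispatch on the tri-state return
+ * of process_mpa_request()/process_mpa_reply(), roughly
+ *
+ *	rc = process_mpa_request(ep);
+ *	if (rc != 0)
+ *		c4iw_ep_disconnect(ep, rc == 2, GFP_KERNEL);
+ *
+ * where a non-zero second argument to c4iw_ep_disconnect() selects an
+ * abortive rather than graceful teardown.
+ */
+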
+/*
+ * iwcm->reject.  Reject an incoming connection request that is in
+ * MPA_REQ_RCVD state: send an MPA reject (or abort the connection if MPA
+ * negotiation is disabled, i.e. mpa_rev == 0) and then disconnect.
+ */
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+	int err;
+	struct c4iw_ep *ep = to_ep(cm_id);
+	int abort = 0;
+
+	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
+
+	if (state_read(&ep->com) != MPA_REQ_RCVD) {
+
+		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
+		c4iw_put_ep(&ep->com);
+		return -ECONNRESET;
+	}
+	set_bit(ULP_REJECT, &ep->com.history);
+
+	if (mpa_rev == 0) {
+
+		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
+		abort = 1;
+	}
+	else {
+
+		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
+		abort = send_mpa_reject(ep, pdata, pdata_len);
+	}
+	stop_ep_timer(ep);
+	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
+	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
+	return 0;
+}
+
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+	int err;
+	struct c4iw_qp_attributes attrs;
+	enum c4iw_qp_attr_mask mask;
+	struct c4iw_ep *ep = to_ep(cm_id);
+	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
+	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+	int abort = 0;
+
+	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
+
+	if (state_read(&ep->com) == DEAD) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
+		err = -ECONNRESET;
+		goto err_out;
+	}
+
+	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+	BUG_ON(!qp);
+
+	set_bit(ULP_ACCEPT, &ep->com.history);
+
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+		(conn_param->ird > c4iw_max_read_depth)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
+		err = -EINVAL;
+		goto err_abort;
+	}
+
+	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
+
+		if (conn_param->ord > ep->ird) {
+
+			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
+			ep->ird = conn_param->ird;
+			ep->ord = conn_param->ord;
+			send_mpa_reject(ep, conn_param->private_data,
+					conn_param->private_data_len);
+			err = -ENOMEM;
+			goto err_abort;
+		}
+
+		if (conn_param->ird > ep->ord) {
+
+			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);
+
+			if (!ep->ord) {
+
+				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
+				conn_param->ird = 1;
+			}
+			else {
+				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
+				err = -ENOMEM;
+				goto err_abort;
+			}
+		}
+
+	}
+	ep->ird = conn_param->ird;
+	ep->ord = conn_param->ord;
+
+	if (ep->mpa_attr.version != 2) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);
+
+		if (peer2peer && ep->ird == 0) {
+
+			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
+			ep->ird = 1;
+		}
+	}
+
+
+	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
+	ep->com.qp = qp;
+	ref_qp(ep);
+	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
+
+	/* bind QP to EP and move to RTS */
+	attrs.mpa_attr = ep->mpa_attr;
+	attrs.max_ird = ep->ird;
+	attrs.max_ord = ep->ord;
+	attrs.llp_stream_handle = ep;
+	attrs.next_state = C4IW_QP_STATE_RTS;
+
+	/* bind QP and TID with INIT_WR */
+	mask = C4IW_QP_ATTR_NEXT_STATE |
+		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+		C4IW_QP_ATTR_MPA_ATTR |
+		C4IW_QP_ATTR_MAX_IRD |
+		C4IW_QP_ATTR_MAX_ORD;
+
+	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
+
+	if (err) {
+
+		CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep);
+		goto err_deref_cm_id;
+	}
+	err = send_mpa_reply(ep, conn_param->private_data,
+			conn_param->private_data_len);
+
+	if (err) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cacb %p", __func__, ep);
+		goto err_deref_cm_id;
+	}
+
+	state_set(&ep->com, FPDU_MODE);
+	established_upcall(ep);
+	c4iw_put_ep(&ep->com);
+	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
+	return 0;
+err_deref_cm_id:
+	deref_cm_id(&ep->com);
+err_abort:
+	abort = 1;
+err_out:
+	if (abort)
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
+	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
+	return err;
+}
+
+
+
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+	int err = 0;
+	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+	struct c4iw_ep *ep = NULL;
+	struct rtentry *rt;
+
+	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
+
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+		(conn_param->ird > c4iw_max_read_depth)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
+		err = -EINVAL;
+		goto out;
+	}
+	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+	init_timer(&ep->timer);
+	ep->plen = conn_param->private_data_len;
+
+	if (ep->plen) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
+		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
+				conn_param->private_data, ep->plen);
+	}
+	ep->ird = conn_param->ird;
+	ep->ord = conn_param->ord;
+
+	if (peer2peer && ep->ord == 0) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
+		ep->ord = 1;
+	}
+
+	ep->com.dev = dev;
+	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
+	ep->com.qp = get_qhp(dev, conn_param->qpn);
+
+	if (!ep->com.qp) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
+		err = -EINVAL;
+		goto fail2;
+	}
+	ref_qp(ep);
+	ep->com.thread = curthread;
+	ep->com.so = cm_id->so;
+
+	/* find a route */
+	rt = find_route(
+		cm_id->local_addr.sin_addr.s_addr,
+		cm_id->remote_addr.sin_addr.s_addr,
+		cm_id->local_addr.sin_port,
+		cm_id->remote_addr.sin_port, 0);
+
+	if (!rt) {
+
+		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
+		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
+		err = -EHOSTUNREACH;
+		goto fail2;
+	}
+
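+	/*
+	 * iw_cxgbe rides on an offloaded TCP connection (see the t4_tom
+	 * module dependency), so the outgoing interface must have TOE
+	 * enabled and a TOE device attached.
+	 */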
+	if (!(rt->rt_ifp->if_capenable & IFCAP_TOE) ||
+	    TOEDEV(rt->rt_ifp) == NULL) {
+		err = -ENOPROTOOPT;
+		goto fail3;
+	}
+	RTFREE(rt);
+
+	state_set(&ep->com, CONNECTING);
+	ep->tos = 0;
+	ep->com.local_addr = cm_id->local_addr;
+	ep->com.remote_addr = cm_id->remote_addr;
+	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
+		ep->com.thread);
+
+	if (!err) {
+		init_iwarp_socket(cm_id->so, &ep->com);
+		goto out;
+	} else {
+		goto fail2;
+	}
+
+fail3:
+	RTFREE(rt);
+fail2:
+	deref_cm_id(&ep->com);
+	c4iw_put_ep(&ep->com);
+	ep = NULL;	/* CTR shouldn't display already-freed ep. */
+out:
+	CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep);
+	return err;
+}
+
+/*
+ * iwcm->create_listen_ep.  Returns -errno on failure.
+ */
+int
+c4iw_create_listen_ep(struct iw_cm_id *cm_id, int backlog)
+{
+	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
+	struct c4iw_listen_ep *ep;
+	struct socket *so = cm_id->so;
+
+	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
+	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
+	ep->com.dev = dev;
+	ep->backlog = backlog;
+	ep->com.local_addr = cm_id->local_addr;
+	ep->com.thread = curthread;
+	state_set(&ep->com, LISTEN);
+	ep->com.so = so;
+
+	cm_id->provider_data = ep;
+	return (0);
+}
+
+void
+c4iw_destroy_listen_ep(struct iw_cm_id *cm_id)
+{
+	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
+
+	CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, state %s", __func__, cm_id,
+	    cm_id->so, states[ep->com.state]);
+
+	state_set(&ep->com, DEAD);
+	deref_cm_id(&ep->com);
+	c4iw_put_ep(&ep->com);
+
+	return;
+}
+
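+/*
+ * Drive a graceful (abrupt == 0) or abortive (abrupt != 0) close of the
+ * connection through the ep state machine.  Returns non-zero when the close
+ * could not be initiated cleanly; in that fatal case the ep's resources are
+ * released here and the QP, if any, is moved to ERROR.
+ */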
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
+{
+	int ret = 0;
+	int close = 0;
+	int fatal = 0;
+	struct c4iw_rdev *rdev;
+
+	mutex_lock(&ep->com.mutex);
+
+	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
+
+	rdev = &ep->com.dev->rdev;
+
+	if (c4iw_fatal_error(rdev)) {
+
+		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
+		fatal = 1;
+		close_complete_upcall(ep, -ECONNRESET);
+		ep->com.state = DEAD;
+	}
+	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
+	    states[ep->com.state]);
+
+	switch (ep->com.state) {
+
+		case MPA_REQ_WAIT:
+		case MPA_REQ_SENT:
+		case MPA_REQ_RCVD:
+		case MPA_REP_SENT:
+		case FPDU_MODE:
+			close = 1;
+			if (abrupt)
+				ep->com.state = ABORTING;
+			else {
+				ep->com.state = CLOSING;
+				START_EP_TIMER(ep);
+			}
+			set_bit(CLOSE_SENT, &ep->com.flags);
+			break;
+
+		case CLOSING:
+
+			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+
+				close = 1;
+				if (abrupt) {
+					STOP_EP_TIMER(ep);
+					ep->com.state = ABORTING;
+				} else
+					ep->com.state = MORIBUND;
+			}
+			break;
+
+		case MORIBUND:
+		case ABORTING:
+		case DEAD:
+			CTR3(KTR_IW_CXGBE,
+			    "%s ignoring disconnect ep %p state %u", __func__,
+			    ep, ep->com.state);
+			break;
+
+		default:
+			BUG();
+			break;
+	}
+
+	mutex_unlock(&ep->com.mutex);
+
+	if (close) {
+
+		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
+
+		if (abrupt) {
+
+			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
+			set_bit(EP_DISC_ABORT, &ep->com.history);
+			close_complete_upcall(ep, -ECONNRESET);
+			ret = send_abort(ep);
+			if (ret)
+				fatal = 1;
+		} else {
+
+			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
+			set_bit(EP_DISC_CLOSE, &ep->com.history);
+
+			if (!ep->parent_ep)
+				__state_set(&ep->com, MORIBUND);
+			sodisconnect(ep->com.so);
+		}
+
+	}
+
+	if (fatal) {
+		set_bit(EP_DISC_FAIL, &ep->com.history);
+		if (!abrupt) {
+			STOP_EP_TIMER(ep);
+			close_complete_upcall(ep, -EIO);
+		}
+		if (ep->com.qp) {
+			struct c4iw_qp_attributes attrs;
+
+			attrs.next_state = C4IW_QP_STATE_ERROR;
+			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
+						C4IW_QP_ATTR_NEXT_STATE,
+						&attrs, 1);
+			if (ret) {
+				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
+				printf("%s - qp <- error failed!\n", __func__);
+			}
+		}
+		release_ep_resources(ep);
+		ep->com.state = DEAD;
+		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
+	}
+	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
+	return ret;
+}
+
+#ifdef C4IW_EP_REDIRECT
+int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
+		struct l2t_entry *l2t)
+{
+	struct c4iw_ep *ep = ctx;
+
+	if (ep->dst != old)
+		return 0;
+
+	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
+			l2t);
+	dst_hold(new);
+	cxgb4_l2t_release(ep->l2t);
+	ep->l2t = l2t;
+	dst_release(old);
+	ep->dst = new;
+	return 1;
+}
+#endif
+
+
+
+static void ep_timeout(unsigned long arg)
+{
+	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+
+		/*
+		 * Only insert if it is not already on the list.
+		 */
+		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
+			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
+			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
+		}
+	}
+}
+
+static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
+{
+	uint64_t val = be64toh(*rpl);
+	int ret;
+	struct c4iw_wr_wait *wr_waitp;
+
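+	/*
+	 * FW6_TYPE_WR_RPL layout as consumed here: bits 8..15 of the first
+	 * 64-bit word carry the firmware return status, and the second word
+	 * echoes back the cookie stashed in the work request (see
+	 * res_wr->cookie in cq.c), interpreted as a c4iw_wr_wait pointer to
+	 * wake up.
+	 */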
+	ret = (int)((val >> 8) & 0xff);
+	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
+	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
+	if (wr_waitp)
+		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
+
+	return (0);
+}
+
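+/*
+ * FW6_TYPE_CQE messages arrive in interrupt context, so the error CQE is
+ * copied and queued for the c4iw_taskq worker (process_req) instead of
+ * being processed inline.
+ */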
+static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
+{
+	struct cqe_list_entry *cle;
+	unsigned long flag;
+
+	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
+	if (cle == NULL)
+		return (0);	/* XXX: the error CQE is dropped. */
+	cle->rhp = sc->iwarp_softc;
+	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
+
+	spin_lock_irqsave(&err_cqe_lock, flag);
+	list_add_tail(&cle->entry, &err_cqe_list);
+	queue_work(c4iw_taskq, &c4iw_task);
+	spin_unlock_irqrestore(&err_cqe_lock, flag);
+
+	return (0);
+}
+
+static int
+process_terminate(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+
+	CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep);
+
+	if (ep && ep->com.qp) {
+
+		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
+				ep->hwtid, ep->com.qp->wq.sq.qid);
+		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
+				1);
+	} else
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
+				ep ? ep->hwtid : 0);
+	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
+
+	return 0;
+}
+
+int __init c4iw_cm_init(void)
+{
+
+	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
+	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
+	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
+	t4_register_an_handler(c4iw_ev_handler);
+
+	TAILQ_INIT(&req_list);
+	spin_lock_init(&req_lock);
+	INIT_LIST_HEAD(&err_cqe_list);
+	spin_lock_init(&err_cqe_lock);
+
+	INIT_WORK(&c4iw_task, process_req);
+
+	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
+	if (!c4iw_taskq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void __exit c4iw_cm_term(void)
+{
+	WARN_ON(!TAILQ_EMPTY(&req_list));
+	WARN_ON(!list_empty(&err_cqe_list));
+	flush_workqueue(c4iw_taskq);
+	destroy_workqueue(c4iw_taskq);
+
+	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
+	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
+	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
+	t4_register_an_handler(NULL);
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/cm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/cq.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/cq.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/cq.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,941 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/cq.c 314776 2017-03-06 15:16:15Z np $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/bus.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/socket.h>
+#include <sys/sbuf.h>
+
+#include "iw_cxgbe.h"
+#include "user.h"
+
+static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+		      struct c4iw_dev_ucontext *uctx)
+{
+	struct adapter *sc = rdev->adap;
+	struct fw_ri_res_wr *res_wr;
+	struct fw_ri_res *res;
+	int wr_len;
+	struct c4iw_wr_wait wr_wait;
+	struct wrqe *wr;
+
+	wr_len = sizeof *res_wr + sizeof *res;
+	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	if (wr == NULL)
+		return (0);
+	res_wr = wrtod(wr);
+	memset(res_wr, 0, wr_len);
+	res_wr->op_nres = cpu_to_be32(
+			V_FW_WR_OP(FW_RI_RES_WR) |
+			V_FW_RI_RES_WR_NRES(1) |
+			F_FW_WR_COMPL);
+	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
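+	/*
+	 * The cookie is echoed back in the FW6_TYPE_WR_RPL message and used
+	 * by fw6_wr_rpl() in cm.c to wake up this wr_wait.
+	 */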
+	res_wr->cookie = (unsigned long) &wr_wait;
+	res = res_wr->res;
+	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+	res->u.cq.op = FW_RI_RES_OP_RESET;
+	res->u.cq.iqid = cpu_to_be32(cq->cqid);
+
+	c4iw_init_wr_wait(&wr_wait);
+
+	t4_wrq_tx(sc, wr);
+
+	c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+
+	kfree(cq->sw_queue);
+	contigfree(cq->queue, cq->memsize, M_DEVBUF);
+	c4iw_put_cqid(rdev, cq->cqid, uctx);
+	return 0;
+}
+
+static int
+create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
+    struct c4iw_dev_ucontext *uctx)
+{
+	struct adapter *sc = rdev->adap;
+	struct fw_ri_res_wr *res_wr;
+	struct fw_ri_res *res;
+	int wr_len;
+	int user = (uctx != &rdev->uctx);
+	struct c4iw_wr_wait wr_wait;
+	int ret;
+	struct wrqe *wr;
+
+	cq->cqid = c4iw_get_cqid(rdev, uctx);
+	if (!cq->cqid) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	if (!user) {
+		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
+		if (!cq->sw_queue) {
+			ret = -ENOMEM;
+			goto err2;
+		}
+	}
+
+	cq->queue = contigmalloc(cq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
+	    PAGE_SIZE, 0);
+	if (cq->queue)
+		cq->dma_addr = vtophys(cq->queue);
+	else {
+		ret = -ENOMEM;
+		goto err3;
+	}
+
+	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	memset(cq->queue, 0, cq->memsize);
+
+	/* build fw_ri_res_wr */
+	wr_len = sizeof *res_wr + sizeof *res;
+
+	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	if (wr == NULL) {
+		ret = -ENOMEM;
+		goto err4;
+	}
+	res_wr = wrtod(wr);
+
+	memset(res_wr, 0, wr_len);
+	res_wr->op_nres = cpu_to_be32(
+			V_FW_WR_OP(FW_RI_RES_WR) |
+			V_FW_RI_RES_WR_NRES(1) |
+			F_FW_WR_COMPL);
+	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+	res_wr->cookie = (unsigned long) &wr_wait;
+	res = res_wr->res;
+	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
+	res->u.cq.op = FW_RI_RES_OP_WRITE;
+	res->u.cq.iqid = cpu_to_be32(cq->cqid);
+	/*
+	 * XXX: Always use the first queue's abs_id for IQANDSTINDEX; Linux
+	 * does the same.
+	 */
+	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
+			V_FW_RI_RES_WR_IQANUS(0) |
+			V_FW_RI_RES_WR_IQANUD(1) |
+			F_FW_RI_RES_WR_IQANDST |
+			V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
+	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
+			F_FW_RI_RES_WR_IQDROPRSS |
+			V_FW_RI_RES_WR_IQPCIECH(2) |
+			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
+			F_FW_RI_RES_WR_IQO |
+			V_FW_RI_RES_WR_IQESIZE(1));
+	res->u.cq.iqsize = cpu_to_be16(cq->size);
+	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
+
+	c4iw_init_wr_wait(&wr_wait);
+
+	t4_wrq_tx(sc, wr);
+
+	CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
+	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+	if (ret)
+		goto err4;
+
+	cq->gen = 1;
+	cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
+	    sc->sge_gts_reg);
+	cq->rdev = rdev;
+
+	if (user) {
+		cq->ugts = (u64)((char*)rman_get_virtual(sc->udbs_res) +
+		    (cq->cqid << rdev->cqshift));
+		cq->ugts &= PAGE_MASK;
+		CTR5(KTR_IW_CXGBE,
+		    "%s: UGTS %p cqid %x cqshift %d page_mask %x", __func__,
+		    cq->ugts, cq->cqid, rdev->cqshift, PAGE_MASK);
+	}
+	return 0;
+err4:
+	contigfree(cq->queue, cq->memsize, M_DEVBUF);
+err3:
+	kfree(cq->sw_queue);
+err2:
+	c4iw_put_cqid(rdev, cq->cqid, uctx);
+err1:
+	return ret;
+}
+
+static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
+{
+	struct t4_cqe cqe;
+
+	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
+	    cq, cq->sw_cidx, cq->sw_pidx);
+	memset(&cqe, 0, sizeof(cqe));
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(FW_RI_SEND) |
+				 V_CQE_TYPE(0) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(wq->sq.qid));
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+}
+
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+	int flushed = 0;
+	int in_use = wq->rq.in_use - count;
+
+	BUG_ON(in_use < 0);
+	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
+	    __func__, wq, cq, wq->rq.in_use, count);
+	while (in_use--) {
+		insert_recv_cqe(wq, cq);
+		flushed++;
+	}
+	return flushed;
+}
+
+static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
+			  struct t4_swsqe *swcqe)
+{
+	struct t4_cqe cqe;
+
+	CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
+	    cq, cq->sw_cidx, cq->sw_pidx);
+	memset(&cqe, 0, sizeof(cqe));
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(swcqe->opcode) |
+				 V_CQE_TYPE(1) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(wq->sq.qid));
+	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+}
+
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
+{
+	int flushed = 0;
+	struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
+	int in_use = wq->sq.in_use - count;
+
+	BUG_ON(in_use < 0);
+	while (in_use--) {
+		swsqe->signaled = 0;
+		insert_sq_cqe(wq, cq, swsqe);
+		swsqe++;
+		if (swsqe == (wq->sq.sw_sq + wq->sq.size))
+			swsqe = wq->sq.sw_sq;
+		flushed++;
+	}
+	return flushed;
+}
+
+/*
+ * Move all CQEs from the HWCQ into the SWCQ.
+ */
+void c4iw_flush_hw_cq(struct t4_cq *cq)
+{
+	struct t4_cqe *cqe = NULL, *swcqe;
+	int ret;
+
+	CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, cq, cq->cqid);
+	ret = t4_next_hw_cqe(cq, &cqe);
+	while (!ret) {
+		CTR3(KTR_IW_CXGBE, "%s flushing hwcq cidx 0x%x swcq pidx 0x%x",
+		    __func__, cq->cidx, cq->sw_pidx);
+		swcqe = &cq->sw_queue[cq->sw_pidx];
+		*swcqe = *cqe;
+		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+		t4_swcq_produce(cq);
+		t4_hwcq_consume(cq);
+		ret = t4_next_hw_cqe(cq, &cqe);
+	}
+}
+
+static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
+{
+	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
+		return 0;
+
+	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
+		return 0;
+
+	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
+		return 0;
+
+	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
+		return 0;
+	return 1;
+}
+
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+	struct t4_cqe *cqe;
+	u32 ptr;
+
+	*count = 0;
+	ptr = cq->sw_cidx;
+	while (ptr != cq->sw_pidx) {
+		cqe = &cq->sw_queue[ptr];
+		if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
+				      wq->sq.oldest_read)) &&
+		    (CQE_QPID(cqe) == wq->sq.qid))
+			(*count)++;
+		if (++ptr == cq->size)
+			ptr = 0;
+	}
+	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
+}
+
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
+{
+	struct t4_cqe *cqe;
+	u32 ptr;
+
+	*count = 0;
+	CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
+	ptr = cq->sw_cidx;
+	while (ptr != cq->sw_pidx) {
+		cqe = &cq->sw_queue[ptr];
+		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
+		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
+			(*count)++;
+		if (++ptr == cq->size)
+			ptr = 0;
+	}
+	CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
+}
+
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+	struct t4_swsqe *swsqe;
+	u16 ptr = wq->sq.cidx;
+	int count = wq->sq.in_use;
+	int unsignaled = 0;
+
+	swsqe = &wq->sq.sw_sq[ptr];
+	while (count--)
+		if (!swsqe->signaled) {
+			if (++ptr == wq->sq.size)
+				ptr = 0;
+			swsqe = &wq->sq.sw_sq[ptr];
+			unsignaled++;
+		} else if (swsqe->complete) {
+
+			/*
+			 * Insert this completed cqe into the swcq.
+			 */
+			CTR3(KTR_IW_CXGBE,
+			    "%s moving cqe into swcq sq idx %u cq idx %u",
+			    __func__, ptr, cq->sw_pidx);
+			swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+			t4_swcq_produce(cq);
+			swsqe->signaled = 0;
+			wq->sq.in_use -= unsignaled;
+			break;
+		} else
+			break;
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+				struct t4_cqe *read_cqe)
+{
+	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+	read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
+	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+				 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+				 V_CQE_OPCODE(FW_RI_READ_REQ) |
+				 V_CQE_TYPE(1));
+	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
+}
+
+/*
+ * Advance wq->sq.oldest_read to the next read wr in the SWSQ, or set it to
+ * NULL if there is none.
+ */
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+	if (rptr == wq->sq.size)
+		rptr = 0;
+	while (rptr != wq->sq.pidx) {
+		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+			return;
+		if (++rptr == wq->sq.size)
+			rptr = 0;
+	}
+	wq->sq.oldest_read = NULL;
+}
+
+/*
+ * poll_cq
+ *
+ * Caller must:
+ *     check the validity of the first CQE,
+ *     supply the wq associated with the qpid.
+ *
+ * credit: cq credit to return to sge.
+ * cqe_flushed: 1 iff the CQE is flushed.
+ * cqe: copy of the polled CQE.
+ *
+ * return value:
+ *    0		    CQE returned ok.
+ *    -EAGAIN       CQE skipped, try again.
+ *    -EOVERFLOW    CQ overflow detected.
+ */
+static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
+{
+	int ret = 0;
+	struct t4_cqe *hw_cqe, read_cqe;
+
+	*cqe_flushed = 0;
+	*credit = 0;
+	ret = t4_next_cqe(cq, &hw_cqe);
+	if (ret)
+		return ret;
+
+	CTR6(KTR_IW_CXGBE,
+	    "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
+	    CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
+	    CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
+	CTR5(KTR_IW_CXGBE,
+	    "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
+	    __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
+	    CQE_WRID_LOW(hw_cqe));
+
+	/*
+	 * skip cqe's not affiliated with a QP.
+	 */
+	if (wq == NULL) {
+		ret = -EAGAIN;
+		goto skip_cqe;
+	}
+
+	/*
+	 * Special cqe for drain WR completions...
+	 */
+	if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+		*cookie = CQE_DRAIN_COOKIE(hw_cqe);
+		*cqe = *hw_cqe;
+		goto skip_cqe;
+	}
+
+	/*
+	 * Gotta tweak READ completions:
+	 *	1) the cqe doesn't contain the sq_wptr from the wr.
+	 *	2) opcode not reflected from the wr.
+	 *	3) read_len not reflected from the wr.
+	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
+	 */
+	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
+
+		/*
+		 * If this is an unsolicited read response, then the read
+		 * was generated by the kernel driver as part of peer-2-peer
+		 * connection setup.  So ignore the completion.
+		 */
+		if (!wq->sq.oldest_read) {
+			if (CQE_STATUS(hw_cqe))
+				t4_set_wq_in_error(wq);
+			ret = -EAGAIN;
+			goto skip_cqe;
+		}
+
+		/*
+		 * Don't write to the HWCQ, so create a new read req CQE
+		 * in local memory.
+		 */
+		create_read_req_cqe(wq, hw_cqe, &read_cqe);
+		hw_cqe = &read_cqe;
+		advance_oldest_read(wq);
+	}
+
+	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
+		*cqe_flushed = t4_wq_in_error(wq);
+		t4_set_wq_in_error(wq);
+		goto proc_cqe;
+	}
+
+	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
+		ret = -EAGAIN;
+		goto skip_cqe;
+	}
+
+	/*
+	 * RECV completion.
+	 */
+	if (RQ_TYPE(hw_cqe)) {
+
+		/*
+		 * HW only validates 4 bits of MSN.  So we must validate that
+		 * the MSN in the SEND is the next expected MSN.  If its not,
+		 * then we complete this with T4_ERR_MSN and mark the wq in
+		 * error.
+		 */
+
+		if (t4_rq_empty(wq)) {
+			t4_set_wq_in_error(wq);
+			ret = -EAGAIN;
+			goto skip_cqe;
+		}
+		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
+			t4_set_wq_in_error(wq);
+			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
+			goto proc_cqe;
+		}
+		goto proc_cqe;
+	}
+
+	/*
+	 * If we get here its a send completion.
+	 *
+	 * Handle out of order completion. These get stuffed
+	 * in the SW SQ. Then the SW SQ is walked to move any
+	 * now in-order completions into the SW CQ.  This handles
+	 * 2 cases:
+	 *	1) reaping unsignaled WRs when the first subsequent
+	 *	   signaled WR is completed.
+	 *	2) out of order read completions.
+	 */
+	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
+		struct t4_swsqe *swsqe;
+
+		CTR2(KTR_IW_CXGBE,
+		    "%s out of order completion going in sw_sq at idx %u",
+		    __func__, CQE_WRID_SQ_IDX(hw_cqe));
+		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+		swsqe->cqe = *hw_cqe;
+		swsqe->complete = 1;
+		ret = -EAGAIN;
+		goto flush_wq;
+	}
+
+proc_cqe:
+	*cqe = *hw_cqe;
+
+	/*
+	 * Reap the associated WR(s) that are freed up with this
+	 * completion.
+	 */
+	if (SQ_TYPE(hw_cqe)) {
+		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+		CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
+		     __func__, wq->sq.cidx);
+		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+		t4_sq_consume(wq);
+	} else {
+		CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
+		     __func__, wq->rq.cidx);
+		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
+		BUG_ON(t4_rq_empty(wq));
+		t4_rq_consume(wq);
+	}
+
+flush_wq:
+	/*
+	 * Flush any completed cqes that are now in-order.
+	 */
+	flush_completed_wrs(wq, cq);
+
+skip_cqe:
+	if (SW_CQE(hw_cqe)) {
+		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
+		     __func__, cq, cq->cqid, cq->sw_cidx);
+		t4_swcq_consume(cq);
+	} else {
+		CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
+		     __func__, cq, cq->cqid, cq->cidx);
+		t4_hwcq_consume(cq);
+	}
+	return ret;
+}
+
+/*
+ * Get one cq entry from c4iw and map it to openib.
+ *
+ * Returns:
+ *	0			cqe returned
+ *	-ENODATA		CQ is empty
+ *	-EAGAIN			caller must try again
+ *	any other -errno	fatal error
+ */
+static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
+{
+	struct c4iw_qp *qhp = NULL;
+	struct t4_cqe cqe = {0, 0}, *rd_cqe;
+	struct t4_wq *wq;
+	u32 credit = 0;
+	u8 cqe_flushed;
+	u64 cookie = 0;
+	int ret;
+
+	ret = t4_next_cqe(&chp->cq, &rd_cqe);
+
+	if (ret)
+		return ret;
+
+	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
+	if (!qhp)
+		wq = NULL;
+	else {
+		spin_lock(&qhp->lock);
+		wq = &(qhp->wq);
+	}
+	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
+	if (ret)
+		goto out;
+
+	wc->wr_id = cookie;
+	wc->qp = &qhp->ibqp;
+	wc->vendor_err = CQE_STATUS(&cqe);
+	wc->wc_flags = 0;
+
+	CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
+	    __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
+	    CQE_STATUS(&cqe));
+	CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
+	    __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
+	    (unsigned long long)cookie);
+
+	if (CQE_TYPE(&cqe) == 0) {
+		if (!CQE_STATUS(&cqe))
+			wc->byte_len = CQE_LEN(&cqe);
+		else
+			wc->byte_len = 0;
+		wc->opcode = IB_WC_RECV;
+		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
+		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
+			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
+			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+		}
+	} else {
+		switch (CQE_OPCODE(&cqe)) {
+		case FW_RI_RDMA_WRITE:
+			wc->opcode = IB_WC_RDMA_WRITE;
+			break;
+		case FW_RI_READ_REQ:
+			wc->opcode = IB_WC_RDMA_READ;
+			wc->byte_len = CQE_LEN(&cqe);
+			break;
+		case FW_RI_SEND_WITH_INV:
+		case FW_RI_SEND_WITH_SE_INV:
+			wc->opcode = IB_WC_SEND;
+			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+			break;
+		case FW_RI_SEND:
+		case FW_RI_SEND_WITH_SE:
+			wc->opcode = IB_WC_SEND;
+			break;
+		case FW_RI_BIND_MW:
+			wc->opcode = IB_WC_BIND_MW;
+			break;
+
+		case FW_RI_LOCAL_INV:
+			wc->opcode = IB_WC_LOCAL_INV;
+			break;
+		case FW_RI_FAST_REGISTER:
+			wc->opcode = IB_WC_FAST_REG_MR;
+			break;
+		case C4IW_DRAIN_OPCODE:
+			wc->opcode = IB_WC_SEND;
+			break;
+		default:
+			printf("Unexpected opcode %d "
+			       "in the CQE received for QPID = 0x%0x\n",
+			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (cqe_flushed)
+		wc->status = IB_WC_WR_FLUSH_ERR;
+	else {
+
+		switch (CQE_STATUS(&cqe)) {
+		case T4_ERR_SUCCESS:
+			wc->status = IB_WC_SUCCESS;
+			break;
+		case T4_ERR_STAG:
+			wc->status = IB_WC_LOC_ACCESS_ERR;
+			break;
+		case T4_ERR_PDID:
+			wc->status = IB_WC_LOC_PROT_ERR;
+			break;
+		case T4_ERR_QPID:
+		case T4_ERR_ACCESS:
+			wc->status = IB_WC_LOC_ACCESS_ERR;
+			break;
+		case T4_ERR_WRAP:
+			wc->status = IB_WC_GENERAL_ERR;
+			break;
+		case T4_ERR_BOUND:
+			wc->status = IB_WC_LOC_LEN_ERR;
+			break;
+		case T4_ERR_INVALIDATE_SHARED_MR:
+		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+			wc->status = IB_WC_MW_BIND_ERR;
+			break;
+		case T4_ERR_CRC:
+		case T4_ERR_MARKER:
+		case T4_ERR_PDU_LEN_ERR:
+		case T4_ERR_OUT_OF_RQE:
+		case T4_ERR_DDP_VERSION:
+		case T4_ERR_RDMA_VERSION:
+		case T4_ERR_DDP_QUEUE_NUM:
+		case T4_ERR_MSN:
+		case T4_ERR_TBIT:
+		case T4_ERR_MO:
+		case T4_ERR_MSN_RANGE:
+		case T4_ERR_IRD_OVERFLOW:
+		case T4_ERR_OPCODE:
+		case T4_ERR_INTERNAL_ERR:
+			wc->status = IB_WC_FATAL_ERR;
+			break;
+		case T4_ERR_SWFLUSH:
+			wc->status = IB_WC_WR_FLUSH_ERR;
+			break;
+		default:
+			printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
+			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
+			wc->status = IB_WC_FATAL_ERR;
+		}
+	}
+out:
+	if (wq)
+		spin_unlock(&qhp->lock);
+	return ret;
+}
+
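+/*
+ * Note: -EAGAIN from c4iw_poll_cq_one() means a CQE was consumed without
+ * producing a work completion (e.g. an unsolicited read response generated
+ * during peer-to-peer setup), so the poll is simply retried; -ENODATA means
+ * the CQ is empty, which is not an error from the verbs consumer's view.
+ */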
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	struct c4iw_cq *chp;
+	unsigned long flags;
+	int npolled;
+	int err = 0;
+
+	chp = to_c4iw_cq(ibcq);
+
+	spin_lock_irqsave(&chp->lock, flags);
+	for (npolled = 0; npolled < num_entries; ++npolled) {
+		do {
+			err = c4iw_poll_cq_one(chp, wc + npolled);
+		} while (err == -EAGAIN);
+		if (err)
+			break;
+	}
+	spin_unlock_irqrestore(&chp->lock, flags);
+	return !err || err == -ENODATA ? npolled : err;
+}
+
+int c4iw_destroy_cq(struct ib_cq *ib_cq)
+{
+	struct c4iw_cq *chp;
+	struct c4iw_ucontext *ucontext;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
+	chp = to_c4iw_cq(ib_cq);
+
+	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
+	atomic_dec(&chp->refcnt);
+	wait_event(chp->wait, !atomic_read(&chp->refcnt));
+
+	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
+				  : NULL;
+	destroy_cq(&chp->rhp->rdev, &chp->cq,
+		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
+	kfree(chp);
+	return 0;
+}
+
+struct ib_cq *
+c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
+    struct ib_ucontext *ib_context, struct ib_udata *udata)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_cq *chp;
+	struct c4iw_create_cq_resp uresp;
+	struct c4iw_ucontext *ucontext = NULL;
+	int ret;
+	size_t memsize, hwentries;
+	struct c4iw_mm_entry *mm, *mm2;
+
+	CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);
+
+	rhp = to_c4iw_dev(ibdev);
+
+	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
+	if (!chp)
+		return ERR_PTR(-ENOMEM);
+
+	if (ib_context)
+		ucontext = to_c4iw_ucontext(ib_context);
+
+	/* account for the status page. */
+	entries++;
+
+	/* IQ needs one extra entry to differentiate full vs empty. */
+	entries++;
+
+	/*
+	 * entries must be multiple of 16 for HW.
+	 */
+	entries = roundup(entries, 16);
+
+	/*
+	 * Make actual HW queue 2x to avoid cidx_inc overflows.
+	 */
+	hwentries = entries * 2;
+
+	/*
+	 * Make HW queue at least 64 entries so GTS updates aren't too
+	 * frequent.
+	 */
+	if (hwentries < 64)
+		hwentries = 64;
+
+	memsize = hwentries * sizeof *chp->cq.queue;
+
+	/*
+	 * memsize must be a multiple of the page size if it's a user cq.
+	 */
+	if (ucontext) {
+		memsize = roundup(memsize, PAGE_SIZE);
+		hwentries = memsize / sizeof *chp->cq.queue;
+		while (hwentries > T4_MAX_IQ_SIZE) {
+			memsize -= PAGE_SIZE;
+			hwentries = memsize / sizeof *chp->cq.queue;
+		}
+	}
+	chp->cq.size = hwentries;
+	chp->cq.memsize = memsize;
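+	/*
+	 * Worked example (illustrative, assuming 32-byte CQEs per the
+	 * IQESIZE encoding above): a request for 100 entries becomes 102
+	 * with the two extra slots, 112 after roundup(, 16), and hwentries =
+	 * 224 after doubling, for memsize = 7168 bytes; a user CQ on a
+	 * 4KB-page system is then rounded up to memsize = 8192 and
+	 * hwentries = 256.
+	 */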
+
+	ret = create_cq(&rhp->rdev, &chp->cq,
+			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+	if (ret)
+		goto err1;
+
+	chp->rhp = rhp;
+	chp->cq.size--;				/* status page */
+	chp->ibcq.cqe = entries - 2;
+	spin_lock_init(&chp->lock);
+	spin_lock_init(&chp->comp_handler_lock);
+	atomic_set(&chp->refcnt, 1);
+	init_waitqueue_head(&chp->wait);
+	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+	if (ret)
+		goto err2;
+
+	if (ucontext) {
+		mm = kmalloc(sizeof *mm, GFP_KERNEL);
+		if (!mm) {
+			ret = -ENOMEM;
+			goto err3;
+		}
+		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+		if (!mm2) {
+			ret = -ENOMEM;
+			goto err4;
+		}
+
+		memset(&uresp, 0, sizeof(uresp));
+		uresp.qid_mask = rhp->rdev.cqmask;
+		uresp.cqid = chp->cq.cqid;
+		uresp.size = chp->cq.size;
+		uresp.memsize = chp->cq.memsize;
+		spin_lock(&ucontext->mmap_lock);
+		uresp.key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		uresp.gts_key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		spin_unlock(&ucontext->mmap_lock);
+		ret = ib_copy_to_udata(udata, &uresp,
+					sizeof(uresp) - sizeof(uresp.reserved));
+		if (ret)
+			goto err5;
+
+		mm->key = uresp.key;
+		mm->addr = vtophys(chp->cq.queue);
+		mm->len = chp->cq.memsize;
+		insert_mmap(ucontext, mm);
+
+		mm2->key = uresp.gts_key;
+		mm2->addr = chp->cq.ugts;
+		mm2->len = PAGE_SIZE;
+		insert_mmap(ucontext, mm2);
+	}
+	CTR6(KTR_IW_CXGBE,
+	    "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
+	    __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
+	    (unsigned long long) chp->cq.dma_addr);
+	return &chp->ibcq;
+err5:
+	kfree(mm2);
+err4:
+	kfree(mm);
+err3:
+	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+err2:
+	destroy_cq(&chp->rhp->rdev, &chp->cq,
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+	kfree(chp);
+	return ERR_PTR(ret);
+}
+
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
+{
+	return -ENOSYS;
+}
+
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+	struct c4iw_cq *chp;
+	int ret;
+	unsigned long flag;
+
+	chp = to_c4iw_cq(ibcq);
+	spin_lock_irqsave(&chp->lock, flag);
+	ret = t4_arm_cq(&chp->cq,
+			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+	spin_unlock_irqrestore(&chp->lock, flag);
+	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
+		ret = 0;
+	return ret;
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/cq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/device.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/device.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/device.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,363 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/device.c 318799 2017-05-24 18:16:20Z np $");
+
+#include "opt_inet.h"
+
+#include <sys/ktr.h>
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <rdma/ib_verbs.h>
+#include <linux/idr.h>
+
+#ifdef TCP_OFFLOAD
+#include "iw_cxgbe.h"
+
+void
+c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+    struct c4iw_dev_ucontext *uctx)
+{
+	struct list_head *pos, *nxt;
+	struct c4iw_qid_list *entry;
+
+	mutex_lock(&uctx->lock);
+	list_for_each_safe(pos, nxt, &uctx->qpids) {
+		entry = list_entry(pos, struct c4iw_qid_list, entry);
+		list_del_init(&entry->entry);
+		if (!(entry->qid & rdev->qpmask)) {
+			c4iw_put_resource(&rdev->resource.qid_table,
+					  entry->qid);
+			mutex_lock(&rdev->stats.lock);
+			rdev->stats.qid.cur -= rdev->qpmask + 1;
+			mutex_unlock(&rdev->stats.lock);
+		}
+		kfree(entry);
+	}
+
+	list_for_each_safe(pos, nxt, &uctx->qpids) {
+		entry = list_entry(pos, struct c4iw_qid_list, entry);
+		list_del_init(&entry->entry);
+		kfree(entry);
+	}
+	mutex_unlock(&uctx->lock);
+}
+
+void
+c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+
+	INIT_LIST_HEAD(&uctx->qpids);
+	INIT_LIST_HEAD(&uctx->cqids);
+	mutex_init(&uctx->lock);
+}
+
+static int
+c4iw_rdev_open(struct c4iw_rdev *rdev)
+{
+	struct adapter *sc = rdev->adap;
+	struct sge_params *sp = &sc->params.sge;
+	int rc;
+
+	c4iw_init_dev_ucontext(rdev, &rdev->uctx);
+
+	/* XXX: we can probably make this work */
+	if (sp->eq_s_qpp > PAGE_SHIFT || sp->iq_s_qpp > PAGE_SHIFT) {
+		device_printf(sc->dev,
+		    "doorbell density too high (eq %d, iq %d, pg %d).\n",
+		    sp->eq_s_qpp, sp->iq_s_qpp, PAGE_SHIFT);
+		rc = -EINVAL;
+		goto err1;
+	}
+
+	rdev->qpshift = PAGE_SHIFT - sp->eq_s_qpp;
+	rdev->qpmask = (1 << sp->eq_s_qpp) - 1;
+	rdev->cqshift = PAGE_SHIFT - sp->iq_s_qpp;
+	rdev->cqmask = (1 << sp->iq_s_qpp) - 1;
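+	/*
+	 * Example (illustrative): with eq_s_qpp = 3 (eight egress queues per
+	 * doorbell page) and 4KB pages, qpshift = 9 and qpmask = 7, i.e.
+	 * eight queues share one doorbell page at 512-byte strides.
+	 */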
+
+	if (c4iw_num_stags(rdev) == 0) {
+		rc = -EINVAL;
+		goto err1;
+	}
+
+	rdev->stats.pd.total = T4_MAX_NUM_PD;
+	rdev->stats.stag.total = sc->vres.stag.size;
+	rdev->stats.pbl.total = sc->vres.pbl.size;
+	rdev->stats.rqt.total = sc->vres.rq.size;
+	rdev->stats.qid.total = sc->vres.qp.size;
+
+	rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+	if (rc) {
+		device_printf(sc->dev, "error %d initializing resources\n", rc);
+		goto err1;
+	}
+	rc = c4iw_pblpool_create(rdev);
+	if (rc) {
+		device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
+		goto err2;
+	}
+	rc = c4iw_rqtpool_create(rdev);
+	if (rc) {
+		device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
+		goto err3;
+	}
+
+	return (0);
+err3:
+	c4iw_pblpool_destroy(rdev);
+err2:
+	c4iw_destroy_resource(&rdev->resource);
+err1:
+	return (rc);
+}
+
+static void c4iw_rdev_close(struct c4iw_rdev *rdev)
+{
+	c4iw_pblpool_destroy(rdev);
+	c4iw_rqtpool_destroy(rdev);
+	c4iw_destroy_resource(&rdev->resource);
+}
+
+static void
+c4iw_dealloc(struct c4iw_dev *iwsc)
+{
+
+	c4iw_rdev_close(&iwsc->rdev);
+	idr_destroy(&iwsc->cqidr);
+	idr_destroy(&iwsc->qpidr);
+	idr_destroy(&iwsc->mmidr);
+	ib_dealloc_device(&iwsc->ibdev);
+}
+
+static struct c4iw_dev *
+c4iw_alloc(struct adapter *sc)
+{
+	struct c4iw_dev *iwsc;
+	int rc;
+
+	iwsc = (struct c4iw_dev *)ib_alloc_device(sizeof(*iwsc));
+	if (iwsc == NULL) {
+		device_printf(sc->dev, "Cannot allocate ib device.\n");
+		return (ERR_PTR(-ENOMEM));
+	}
+	iwsc->rdev.adap = sc;
+
+	rc = c4iw_rdev_open(&iwsc->rdev);
+	if (rc != 0) {
+		device_printf(sc->dev, "Unable to open CXIO rdev (%d)\n", rc);
+		ib_dealloc_device(&iwsc->ibdev);
+		return (ERR_PTR(rc));
+	}
+
+	idr_init(&iwsc->cqidr);
+	idr_init(&iwsc->qpidr);
+	idr_init(&iwsc->mmidr);
+	spin_lock_init(&iwsc->lock);
+	mutex_init(&iwsc->rdev.stats.lock);
+
+	return (iwsc);
+}
+
+static int c4iw_mod_load(void);
+static int c4iw_mod_unload(void);
+static int c4iw_activate(struct adapter *);
+static int c4iw_deactivate(struct adapter *);
+
+static struct uld_info c4iw_uld_info = {
+	.uld_id = ULD_IWARP,
+	.activate = c4iw_activate,
+	.deactivate = c4iw_deactivate,
+};
+
+static int
+c4iw_activate(struct adapter *sc)
+{
+	struct c4iw_dev *iwsc;
+	int rc;
+
+	ASSERT_SYNCHRONIZED_OP(sc);
+
+	if (uld_active(sc, ULD_IWARP)) {
+		KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
+		return (0);
+	}
+
+	if (sc->rdmacaps == 0) {
+		device_printf(sc->dev,
+		    "RDMA not supported or RDMA cap is not enabled.\n");
+		return (ENOSYS);
+	}
+
+	iwsc = c4iw_alloc(sc);
+	if (IS_ERR(iwsc)) {
+		rc = -PTR_ERR(iwsc);
+		device_printf(sc->dev, "initialization failed: %d\n", rc);
+		return (rc);
+	}
+
+	sc->iwarp_softc = iwsc;
+
+	rc = -c4iw_register_device(iwsc);
+	if (rc) {
+		device_printf(sc->dev, "RDMA registration failed: %d\n", rc);
+		c4iw_dealloc(iwsc);
+		sc->iwarp_softc = NULL;
+	}
+
+	return (rc);
+}
+
+static int
+c4iw_deactivate(struct adapter *sc)
+{
+	struct c4iw_dev *iwsc = sc->iwarp_softc;
+
+	ASSERT_SYNCHRONIZED_OP(sc);
+
+	c4iw_unregister_device(iwsc);
+	c4iw_dealloc(iwsc);
+	sc->iwarp_softc = NULL;
+
+	return (0);
+}
+
+static void
+c4iw_activate_all(struct adapter *sc, void *arg __unused)
+{
+
+	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwact") != 0)
+		return;
+
+	/* Activate iWARP if any port on this adapter has IFCAP_TOE enabled. */
+	if (sc->offload_map && !uld_active(sc, ULD_IWARP))
+		(void) t4_activate_uld(sc, ULD_IWARP);
+
+	end_synchronized_op(sc, 0);
+}
+
+static void
+c4iw_deactivate_all(struct adapter *sc, void *arg __unused)
+{
+
+	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4iwdea") != 0)
+		return;
+
+	if (uld_active(sc, ULD_IWARP))
+		(void) t4_deactivate_uld(sc, ULD_IWARP);
+
+	end_synchronized_op(sc, 0);
+}
+
+static int
+c4iw_mod_load(void)
+{
+	int rc;
+
+	rc = -c4iw_cm_init();
+	if (rc != 0)
+		return (rc);
+
+	rc = t4_register_uld(&c4iw_uld_info);
+	if (rc != 0) {
+		c4iw_cm_term();
+		return (rc);
+	}
+
+	t4_iterate(c4iw_activate_all, NULL);
+
+	return (rc);
+}
+
+static int
+c4iw_mod_unload(void)
+{
+
+	t4_iterate(c4iw_deactivate_all, NULL);
+
+	c4iw_cm_term();
+
+	if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
+		return (EBUSY);
+
+	return (0);
+}
+
+#endif
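+/*
+ * linux/module.h (pulled in above) defines a conflicting MODULE_VERSION
+ * macro, so undo it before including the native sys/module.h.
+ */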
+#undef MODULE_VERSION
+#include <sys/module.h>
+
+/*
+ * t4_tom won't load on kernels without TCP_OFFLOAD and this module's dependency
+ * on t4_tom ensures that it won't either.  So we don't directly check for
+ * TCP_OFFLOAD here.
+ */
+static int
+c4iw_modevent(module_t mod, int cmd, void *arg)
+{
+	int rc = 0;
+
+#ifdef TCP_OFFLOAD
+	switch (cmd) {
+	case MOD_LOAD:
+		rc = c4iw_mod_load();
+		if (rc == 0)
+			printf("iw_cxgbe: Chelsio T4/T5/T6 RDMA driver loaded.\n");
+		break;
+
+	case MOD_UNLOAD:
+		rc = c4iw_mod_unload();
+		break;
+
+	default:
+		rc = EINVAL;
+	}
+#else
+	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
+	rc = EOPNOTSUPP;
+#endif
+	return (rc);
+}
+
+static moduledata_t c4iw_mod_data = {
+	"iw_cxgbe",
+	c4iw_modevent,
+	0
+};
+
+MODULE_VERSION(iw_cxgbe, 1);
+MODULE_DEPEND(iw_cxgbe, t4nex, 1, 1, 1);
+MODULE_DEPEND(iw_cxgbe, t4_tom, 1, 1, 1);
+MODULE_DEPEND(iw_cxgbe, ibcore, 1, 1, 1);
+DECLARE_MODULE(iw_cxgbe, c4iw_mod_data, SI_SUB_EXEC, SI_ORDER_ANY);


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/device.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/ev.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/ev.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/ev.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,216 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/ev.c 309378 2016-12-01 23:38:52Z jhb $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <linux/slab.h>
+
+#include "iw_cxgbe.h"
+
+static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
+			  struct c4iw_qp *qhp,
+			  struct t4_cqe *err_cqe,
+			  enum ib_event_type ib_event)
+{
+	struct ib_event event;
+	struct c4iw_qp_attributes attrs;
+	unsigned long flag;
+
+	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
+	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
+		CTR4(KTR_IW_CXGBE, "%s AE received after RTS - "
+		     "qp state %d qpid 0x%x status 0x%x", __func__,
+		     qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe));
+		return;
+	}
+
+	printf("AE qpid 0x%x opcode %d status 0x%x "
+	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
+	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
+		attrs.next_state = C4IW_QP_STATE_TERMINATE;
+		c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
+			       &attrs, 0);
+	}
+
+	event.event = ib_event;
+	event.device = chp->ibcq.device;
+	if (ib_event == IB_EVENT_CQ_ERR)
+		event.element.cq = &chp->ibcq;
+	else
+		event.element.qp = &qhp->ibqp;
+	if (qhp->ibqp.event_handler)
+		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+
+	spin_lock_irqsave(&chp->comp_handler_lock, flag);
+	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+}
+
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+	struct c4iw_cq *chp;
+	struct c4iw_qp *qhp;
+	u32 cqid;
+
+	spin_lock_irq(&dev->lock);
+	qhp = get_qhp(dev, CQE_QPID(err_cqe));
+	if (!qhp) {
+		printf("BAD AE qpid 0x%x opcode %d "
+		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		       CQE_QPID(err_cqe),
+		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+		       CQE_WRID_LOW(err_cqe));
+		spin_unlock_irq(&dev->lock);
+		goto out;
+	}
+
+	if (SQ_TYPE(err_cqe))
+		cqid = qhp->attr.scq;
+	else
+		cqid = qhp->attr.rcq;
+	chp = get_chp(dev, cqid);
+	if (!chp) {
+		printf("BAD AE cqid 0x%x qpid 0x%x opcode %d "
+		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n",
+		       cqid, CQE_QPID(err_cqe),
+		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
+		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
+		       CQE_WRID_LOW(err_cqe));
+		spin_unlock_irq(&dev->lock);
+		goto out;
+	}
+
+	c4iw_qp_add_ref(&qhp->ibqp);
+	atomic_inc(&chp->refcnt);
+	spin_unlock_irq(&dev->lock);
+
+	/* Bad incoming write */
+	if (RQ_TYPE(err_cqe) &&
+	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
+		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
+		goto done;
+	}
+
+	switch (CQE_STATUS(err_cqe)) {
+
+	/* Completion Events */
+	case T4_ERR_SUCCESS:
+		printf(KERN_ERR MOD "AE with status 0!\n");
+		break;
+
+	case T4_ERR_STAG:
+	case T4_ERR_PDID:
+	case T4_ERR_QPID:
+	case T4_ERR_ACCESS:
+	case T4_ERR_WRAP:
+	case T4_ERR_BOUND:
+	case T4_ERR_INVALIDATE_SHARED_MR:
+	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
+		break;
+
+	/* Device Fatal Errors */
+	case T4_ERR_ECC:
+	case T4_ERR_ECC_PSTAG:
+	case T4_ERR_INTERNAL_ERR:
+		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
+		break;
+
+	/* QP Fatal Errors */
+	case T4_ERR_OUT_OF_RQE:
+	case T4_ERR_PBL_ADDR_BOUND:
+	case T4_ERR_CRC:
+	case T4_ERR_MARKER:
+	case T4_ERR_PDU_LEN_ERR:
+	case T4_ERR_DDP_VERSION:
+	case T4_ERR_RDMA_VERSION:
+	case T4_ERR_OPCODE:
+	case T4_ERR_DDP_QUEUE_NUM:
+	case T4_ERR_MSN:
+	case T4_ERR_TBIT:
+	case T4_ERR_MO:
+	case T4_ERR_MSN_GAP:
+	case T4_ERR_MSN_RANGE:
+	case T4_ERR_RQE_ADDR_BOUND:
+	case T4_ERR_IRD_OVERFLOW:
+		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+		break;
+
+	default:
+		printf("Unknown T4 status 0x%x QPID 0x%x\n",
+		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
+		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
+		break;
+	}
+done:
+	if (atomic_dec_and_test(&chp->refcnt))
+		wake_up(&chp->wait);
+	c4iw_qp_rem_ref(&qhp->ibqp);
+out:
+	return;
+}
+
+int c4iw_ev_handler(struct sge_iq *iq, const struct rsp_ctrl *rc)
+{
+	struct c4iw_dev *dev = iq->adapter->iwarp_softc;
+	u32 qid = be32_to_cpu(rc->pldbuflen_qid);
+	struct c4iw_cq *chp;
+	unsigned long flag;
+
+	spin_lock_irqsave(&dev->lock, flag);
+	chp = get_chp(dev, qid);
+	if (chp) {
+		atomic_inc(&chp->refcnt);
+		spin_unlock_irqrestore(&dev->lock, flag);
+
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
+		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+		if (atomic_dec_and_test(&chp->refcnt))
+			wake_up(&chp->wait);
+	} else {
+		CTR2(KTR_IW_CXGBE, "%s unknown cqid 0x%x", __func__, qid);
+		spin_unlock_irqrestore(&dev->lock, flag);
+	}
+
+	return 0;
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/ev.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/id_table.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/id_table.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/id_table.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,119 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2011-2013 Chelsio Communications.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/id_table.c 256694 2013-10-17 18:37:25Z np $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <sys/libkern.h>
+#include "iw_cxgbe.h"
+
+#define RANDOM_SKIP 16
+
+/*
+ * Trivial bitmap-based allocator. If the random flag is set, the
+ * allocator is designed to:
+ * - return ids in a pseudo-random order so they are not trivially predictable.
+ * - avoid reusing recently freed ids (at the expense of predictability)
+ */
+u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
+{
+	unsigned long flags;
+	u32 obj;
+
+	spin_lock_irqsave(&alloc->lock, flags);
+
+	obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last);
+	if (obj >= alloc->max)
+		obj = find_first_zero_bit(alloc->table, alloc->max);
+
+	if (obj < alloc->max) {
+		if (alloc->flags & C4IW_ID_TABLE_F_RANDOM)
+			alloc->last += arc4random() % RANDOM_SKIP;
+		else
+			alloc->last = obj + 1;
+		if (alloc->last >= alloc->max)
+			alloc->last = 0;
+		set_bit(obj, alloc->table);
+		obj += alloc->start;
+	} else
+		obj = -1;
+
+	spin_unlock_irqrestore(&alloc->lock, flags);
+	return obj;
+}
+
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
+{
+	unsigned long flags;
+
+	obj -= alloc->start;
+	BUG_ON((int)obj < 0);
+
+	spin_lock_irqsave(&alloc->lock, flags);
+	clear_bit(obj, alloc->table);
+	spin_unlock_irqrestore(&alloc->lock, flags);
+}
+
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags)
+{
+	int i;
+
+	alloc->start = start;
+	alloc->flags = flags;
+	if (flags & C4IW_ID_TABLE_F_RANDOM)
+		alloc->last = arc4random() % RANDOM_SKIP;
+	else
+		alloc->last = 0;
+	alloc->max  = num;
+	spin_lock_init(&alloc->lock);
+	alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof(long),
+				GFP_KERNEL);
+	if (!alloc->table)
+		return -ENOMEM;
+
+	bitmap_zero(alloc->table, num);
+	if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
+		for (i = 0; i < reserved; ++i)
+			set_bit(i, alloc->table);
+
+	return 0;
+}
+
+void c4iw_id_table_free(struct c4iw_id_table *alloc)
+{
+	kfree(alloc->table);
+}
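+
+/*
+ * Illustrative sketch only (not driver code): the expected lifecycle of an
+ * id table.  The real callers are in resource.c; the numbers here are made
+ * up for the example.
+ *
+ *	struct c4iw_id_table tbl;
+ *	u32 id;
+ *
+ *	// ids 100..355, first 4 reserved, randomized allocation order
+ *	if (c4iw_id_table_alloc(&tbl, 100, 256, 4, C4IW_ID_TABLE_F_RANDOM))
+ *		return (-ENOMEM);
+ *	id = c4iw_id_alloc(&tbl);	// (u32)-1 when the table is full
+ *	...
+ *	c4iw_id_free(&tbl, id);
+ *	c4iw_id_table_free(&tbl);
+ */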
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/id_table.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,1001 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *      - Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h 318799 2017-05-24 18:16:20Z np $
+ */
+#ifndef __IW_CXGB4_H__
+#define __IW_CXGB4_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <sys/vmem.h>
+
+#include <asm/byteorder.h>
+
+#include <netinet/in.h>
+#include <netinet/toecore.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+
+#undef prefetch
+
+#include "common/common.h"
+#include "common/t4_msg.h"
+#include "common/t4_regs.h"
+#include "common/t4_tcb.h"
+#include "t4_l2t.h"
+
+#define DRV_NAME "iw_cxgbe"
+#define MOD DRV_NAME ":"
+#define KTR_IW_CXGBE	KTR_SPARE3
+
+extern int c4iw_debug;
+#define PDBG(fmt, args...) \
+do { \
+	if (c4iw_debug) \
+		printf(MOD fmt, ## args); \
+} while (0)
+
+#include "t4.h"
+
+static inline void *cplhdr(struct mbuf *m)
+{
+	return mtod(m, void*);
+}
+
+#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.pbl.start)
+#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->adap->vres.rq.start)
+
+#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the ids returned */
+#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */
+
+struct c4iw_id_table {
+	u32 flags;
+	u32 start;              /* logical minimal id */
+	u32 last;               /* hint for find */
+	u32 max;
+	spinlock_t lock;
+	unsigned long *table;
+};
+
+struct c4iw_resource {
+	struct c4iw_id_table tpt_table;
+	struct c4iw_id_table qid_table;
+	struct c4iw_id_table pdid_table;
+};
+
+struct c4iw_qid_list {
+	struct list_head entry;
+	u32 qid;
+};
+
+struct c4iw_dev_ucontext {
+	struct list_head qpids;
+	struct list_head cqids;
+	struct mutex lock;
+};
+
+enum c4iw_rdev_flags {
+	T4_FATAL_ERROR = (1<<0),
+};
+
+struct c4iw_stat {
+	u64 total;
+	u64 cur;
+	u64 max;
+	u64 fail;
+};
+
+struct c4iw_stats {
+	struct mutex lock;
+	struct c4iw_stat qid;
+	struct c4iw_stat pd;
+	struct c4iw_stat stag;
+	struct c4iw_stat pbl;
+	struct c4iw_stat rqt;
+};
+
+struct c4iw_rdev {
+	struct adapter *adap;
+	struct c4iw_resource resource;
+	unsigned long qpshift;
+	u32 qpmask;
+	unsigned long cqshift;
+	u32 cqmask;
+	struct c4iw_dev_ucontext uctx;
+	vmem_t          *rqt_arena;
+	vmem_t          *pbl_arena;
+	u32 flags;
+	struct c4iw_stats stats;
+};
+
+static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
+{
+	return rdev->flags & T4_FATAL_ERROR;
+}
+
+static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
+{
+	return (int)(rdev->adap->vres.stag.size >> 5);
+}
+
+#define C4IW_WR_TO (60*HZ)
+
+struct c4iw_wr_wait {
+	int ret;
+	struct completion completion;
+};
+
+static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
+{
+	wr_waitp->ret = 0;
+	init_completion(&wr_waitp->completion);
+}
+
+static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+	wr_waitp->ret = ret;
+	complete(&wr_waitp->completion);
+}
+
+static inline int
+c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
+					u32 hwtid, u32 qpid, const char *func)
+{
+	struct adapter *sc = rdev->adap;
+	unsigned to = C4IW_WR_TO;
+	int ret;
+	int timedout = 0;
+	struct timeval t1, t2;
+
+	if (c4iw_fatal_error(rdev)) {
+		wr_waitp->ret = -EIO;
+		goto out;
+	}
+
+	getmicrotime(&t1);
+	do {
+		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
+		if (!ret) {
+			getmicrotime(&t2);
+			timevalsub(&t2, &t1);
+			printf("%s - Device %s not responding after %ld.%06ld "
+			    "seconds - tid %u qpid %u\n", func,
+			    device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec,
+			    hwtid, qpid);
+			if (c4iw_fatal_error(rdev)) {
+				wr_waitp->ret = -EIO;
+				break;
+			}
+			to = to << 2;
+			timedout = 1;
+		}
+	} while (!ret);
+
+out:
+	if (timedout) {
+		getmicrotime(&t2);
+		timevalsub(&t2, &t1);
+		printf("%s - Device %s reply after %ld.%06ld seconds - "
+		    "tid %u qpid %u\n", func, device_get_nameunit(sc->dev),
+		    t2.tv_sec, t2.tv_usec, hwtid, qpid);
+	}
+	if (wr_waitp->ret)
+		CTR4(KTR_IW_CXGBE, "%p: FW reply %d tid %u qpid %u", sc,
+		    wr_waitp->ret, hwtid, qpid);
+	return (wr_waitp->ret);
+}
+
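+/*
+ * Illustrative sketch only (not driver code): the wr_wait helpers above
+ * implement a one-shot request/reply handshake with the firmware.  A typical
+ * caller (e.g. the adapter-memory writer in mem.c) does roughly this; the
+ * work request itself is elided:
+ *
+ *	struct c4iw_wr_wait wr_wait;
+ *
+ *	c4iw_init_wr_wait(&wr_wait);
+ *	// stash &wr_wait in the WR cookie, then hand the WR to the chip;
+ *	// the reply handler eventually calls c4iw_wake_up(&wr_wait, ret)
+ *	rc = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
+ */
+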
+struct c4iw_dev {
+	struct ib_device ibdev;
+	struct c4iw_rdev rdev;
+	u32 device_cap_flags;
+	struct idr cqidr;
+	struct idr qpidr;
+	struct idr mmidr;
+	spinlock_t lock;
+	struct dentry *debugfs_root;
+};
+
+static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
+{
+	return container_of(ibdev, struct c4iw_dev, ibdev);
+}
+
+static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
+{
+	return container_of(rdev, struct c4iw_dev, rdev);
+}
+
+static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
+{
+	return idr_find(&rhp->cqidr, cqid);
+}
+
+static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
+{
+	return idr_find(&rhp->qpidr, qpid);
+}
+
+static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
+{
+	return idr_find(&rhp->mmidr, mmid);
+}
+
+static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				 void *handle, u32 id, int lock)
+{
+	int ret;
+	int newid;
+
+	do {
+		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
+			return -ENOMEM;
+		if (lock)
+			spin_lock_irq(&rhp->lock);
+		ret = idr_get_new_above(idr, handle, id, &newid);
+		BUG_ON(!ret && newid != id);
+		if (lock)
+			spin_unlock_irq(&rhp->lock);
+	} while (ret == -EAGAIN);
+
+	return ret;
+}
+
+static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
+				void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 1);
+}
+
+static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
+				       void *handle, u32 id)
+{
+	return _insert_handle(rhp, idr, handle, id, 0);
+}
+
+static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
+				   u32 id, int lock)
+{
+	if (lock)
+		spin_lock_irq(&rhp->lock);
+	idr_remove(idr, id);
+	if (lock)
+		spin_unlock_irq(&rhp->lock);
+}
+
+static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
+{
+	_remove_handle(rhp, idr, id, 1);
+}
+
+static inline void remove_handle_nolock(struct c4iw_dev *rhp,
+					 struct idr *idr, u32 id)
+{
+	_remove_handle(rhp, idr, id, 0);
+}
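+
+/*
+ * Illustrative sketch only: a newly created CQ is published for lookup and
+ * later retired roughly as follows (cq.c is the real caller):
+ *
+ *	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+ *	chp = get_chp(rhp, cqid);	// e.g. from the event handler
+ *	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
+ */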
+
+struct c4iw_pd {
+	struct ib_pd ibpd;
+	u32 pdid;
+	struct c4iw_dev *rhp;
+};
+
+static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct c4iw_pd, ibpd);
+}
+
+struct tpt_attributes {
+	u64 len;
+	u64 va_fbo;
+	enum fw_ri_mem_perms perms;
+	u32 stag;
+	u32 pdid;
+	u32 qpid;
+	u32 pbl_addr;
+	u32 pbl_size;
+	u32 state:1;
+	u32 type:2;
+	u32 rsvd:1;
+	u32 remote_invaliate_disable:1;
+	u32 zbva:1;
+	u32 mw_bind_enable:1;
+	u32 page_size:5;
+};
+
+struct c4iw_mr {
+	struct ib_mr ibmr;
+	struct ib_umem *umem;
+	struct c4iw_dev *rhp;
+	u64 kva;
+	struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct c4iw_mr, ibmr);
+}
+
+struct c4iw_mw {
+	struct ib_mw ibmw;
+	struct c4iw_dev *rhp;
+	u64 kva;
+	struct tpt_attributes attr;
+};
+
+static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
+{
+	return container_of(ibmw, struct c4iw_mw, ibmw);
+}
+
+struct c4iw_fr_page_list {
+	struct ib_fast_reg_page_list ibpl;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
+	dma_addr_t dma_addr;
+	struct c4iw_dev *dev;
+	int size;
+};
+
+static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
+					struct ib_fast_reg_page_list *ibpl)
+{
+	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
+}
+
+struct c4iw_cq {
+	struct ib_cq ibcq;
+	struct c4iw_dev *rhp;
+	struct t4_cq cq;
+	spinlock_t lock;
+	spinlock_t comp_handler_lock;
+	atomic_t refcnt;
+	wait_queue_head_t wait;
+};
+
+static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct c4iw_cq, ibcq);
+}
+
+struct c4iw_mpa_attributes {
+	u8 initiator;
+	u8 recv_marker_enabled;
+	u8 xmit_marker_enabled;
+	u8 crc_enabled;
+	u8 enhanced_rdma_conn;
+	u8 version;
+	u8 p2p_type;
+};
+
+struct c4iw_qp_attributes {
+	u32 scq;
+	u32 rcq;
+	u32 sq_num_entries;
+	u32 rq_num_entries;
+	u32 sq_max_sges;
+	u32 sq_max_sges_rdma_write;
+	u32 rq_max_sges;
+	u32 state;
+	u8 enable_rdma_read;
+	u8 enable_rdma_write;
+	u8 enable_bind;
+	u8 enable_mmid0_fastreg;
+	u32 max_ord;
+	u32 max_ird;
+	u32 pd;
+	u32 next_state;
+	char terminate_buffer[52];
+	u32 terminate_msg_len;
+	u8 is_terminate_local;
+	struct c4iw_mpa_attributes mpa_attr;
+	struct c4iw_ep *llp_stream_handle;
+	u8 layer_etype;
+	u8 ecode;
+	u16 sq_db_inc;
+	u16 rq_db_inc;
+};
+
+struct c4iw_qp {
+	struct ib_qp ibqp;
+	struct c4iw_dev *rhp;
+	struct c4iw_ep *ep;
+	struct c4iw_qp_attributes attr;
+	struct t4_wq wq;
+	spinlock_t lock;
+	struct mutex mutex;
+	atomic_t refcnt;
+	wait_queue_head_t wait;
+	struct timer_list timer;
+	int sq_sig_all;
+};
+
+static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
+{
+	return container_of(ibqp, struct c4iw_qp, ibqp);
+}
+
+struct c4iw_ucontext {
+	struct ib_ucontext ibucontext;
+	struct c4iw_dev_ucontext uctx;
+	u32 key;
+	spinlock_t mmap_lock;
+	struct list_head mmaps;
+};
+
+static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
+{
+	return container_of(c, struct c4iw_ucontext, ibucontext);
+}
+
+struct c4iw_mm_entry {
+	struct list_head entry;
+	u64 addr;
+	u32 key;
+	unsigned len;
+};
+
+static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
+						u32 key, unsigned len)
+{
+	struct list_head *pos, *nxt;
+	struct c4iw_mm_entry *mm;
+
+	spin_lock(&ucontext->mmap_lock);
+	list_for_each_safe(pos, nxt, &ucontext->mmaps) {
+
+		mm = list_entry(pos, struct c4iw_mm_entry, entry);
+		if (mm->key == key && mm->len == len) {
+			list_del_init(&mm->entry);
+			spin_unlock(&ucontext->mmap_lock);
+			CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d",
+			     __func__, key, (unsigned long long) mm->addr,
+			     mm->len);
+			return mm;
+		}
+	}
+	spin_unlock(&ucontext->mmap_lock);
+	return NULL;
+}
+
+static inline void insert_mmap(struct c4iw_ucontext *ucontext,
+			       struct c4iw_mm_entry *mm)
+{
+	spin_lock(&ucontext->mmap_lock);
+	CTR4(KTR_IW_CXGBE, "%s key 0x%x addr 0x%llx len %d", __func__, mm->key,
+	    (unsigned long long) mm->addr, mm->len);
+	list_add_tail(&mm->entry, &ucontext->mmaps);
+	spin_unlock(&ucontext->mmap_lock);
+}
+
+enum c4iw_qp_attr_mask {
+	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
+	C4IW_QP_ATTR_SQ_DB = 1<<1,
+	C4IW_QP_ATTR_RQ_DB = 1<<2,
+	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
+	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
+	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
+	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
+	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
+	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
+	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
+	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
+	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
+	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
+				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+				     C4IW_QP_ATTR_MAX_ORD |
+				     C4IW_QP_ATTR_MAX_IRD |
+				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
+				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
+				     C4IW_QP_ATTR_MPA_ATTR |
+				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
+};
+
+int c4iw_modify_qp(struct c4iw_dev *rhp,
+				struct c4iw_qp *qhp,
+				enum c4iw_qp_attr_mask mask,
+				struct c4iw_qp_attributes *attrs,
+				int internal);
+
+enum c4iw_qp_state {
+	C4IW_QP_STATE_IDLE,
+	C4IW_QP_STATE_RTS,
+	C4IW_QP_STATE_ERROR,
+	C4IW_QP_STATE_TERMINATE,
+	C4IW_QP_STATE_CLOSING,
+	C4IW_QP_STATE_TOT
+};
+
+/*
+ * IW_CXGBE event bits.
+ * These bits are used for handling all events for a particular 'ep' serially.
+ */
+#define	C4IW_EVENT_SOCKET	0x0001
+#define	C4IW_EVENT_TIMEOUT	0x0002
+#define	C4IW_EVENT_TERM		0x0004
+
+static inline int c4iw_convert_state(enum ib_qp_state ib_state)
+{
+	switch (ib_state) {
+	case IB_QPS_RESET:
+	case IB_QPS_INIT:
+		return C4IW_QP_STATE_IDLE;
+	case IB_QPS_RTS:
+		return C4IW_QP_STATE_RTS;
+	case IB_QPS_SQD:
+		return C4IW_QP_STATE_CLOSING;
+	case IB_QPS_SQE:
+		return C4IW_QP_STATE_TERMINATE;
+	case IB_QPS_ERR:
+		return C4IW_QP_STATE_ERROR;
+	default:
+		return -1;
+	}
+}
+
+static inline int to_ib_qp_state(int c4iw_qp_state)
+{
+	switch (c4iw_qp_state) {
+	case C4IW_QP_STATE_IDLE:
+		return IB_QPS_INIT;
+	case C4IW_QP_STATE_RTS:
+		return IB_QPS_RTS;
+	case C4IW_QP_STATE_CLOSING:
+		return IB_QPS_SQD;
+	case C4IW_QP_STATE_TERMINATE:
+		return IB_QPS_SQE;
+	case C4IW_QP_STATE_ERROR:
+		return IB_QPS_ERR;
+	}
+	return IB_QPS_ERR;
+}
+
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
+static inline u32 c4iw_ib_to_tpt_access(int a)
+{
+	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
+	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
+	       FW_RI_MEM_ACCESS_LOCAL_READ;
+}
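+
+/*
+ * Example (illustrative): c4iw_ib_to_tpt_access(IB_ACCESS_REMOTE_WRITE |
+ * IB_ACCESS_LOCAL_WRITE) yields FW_RI_MEM_ACCESS_REM_WRITE |
+ * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_LOCAL_READ; local read
+ * permission is always granted.
+ */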
+
+static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
+{
+	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
+}
+
+enum c4iw_mmid_state {
+	C4IW_STAG_STATE_VALID,
+	C4IW_STAG_STATE_INVALID
+};
+
+#define C4IW_NODE_DESC "iw_cxgbe Chelsio Communications"
+
+#define MPA_KEY_REQ "MPA ID Req Frame"
+#define MPA_KEY_REP "MPA ID Rep Frame"
+
+#define MPA_MAX_PRIVATE_DATA	256
+#define MPA_ENHANCED_RDMA_CONN	0x10
+#define MPA_REJECT		0x20
+#define MPA_CRC			0x40
+#define MPA_MARKERS		0x80
+#define MPA_FLAGS_MASK		0xE0
+
+#define MPA_V2_PEER2PEER_MODEL          0x8000
+#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
+#define MPA_V2_RDMA_WRITE_RTR           0x8000
+#define MPA_V2_RDMA_READ_RTR            0x4000
+#define MPA_V2_IRD_ORD_MASK             0x3FFF
+
+#define c4iw_put_ep(ep) { \
+	CTR4(KTR_IW_CXGBE, "put_ep (%s:%u) ep %p, refcnt %d", \
+	     __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \
+	WARN_ON(atomic_read(&(ep)->kref.refcount) < 1); \
+	kref_put(&((ep)->kref), _c4iw_free_ep); \
+}
+
+#define c4iw_get_ep(ep) { \
+	CTR4(KTR_IW_CXGBE, "get_ep (%s:%u) ep %p, refcnt %d", \
+	      __func__, __LINE__, ep, atomic_read(&(ep)->kref.refcount)); \
+	kref_get(&((ep)->kref)); \
+}
+
+void _c4iw_free_ep(struct kref *kref);
+
+struct mpa_message {
+	u8 key[16];
+	u8 flags;
+	u8 revision;
+	__be16 private_data_size;
+	u8 private_data[0];
+};
+
+struct mpa_v2_conn_params {
+	__be16 ird;
+	__be16 ord;
+};
+
+struct terminate_message {
+	u8 layer_etype;
+	u8 ecode;
+	__be16 hdrct_rsvd;
+	u8 len_hdrs[0];
+};
+
+#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)
+
+enum c4iw_layers_types {
+	LAYER_RDMAP		= 0x00,
+	LAYER_DDP		= 0x10,
+	LAYER_MPA		= 0x20,
+	RDMAP_LOCAL_CATA	= 0x00,
+	RDMAP_REMOTE_PROT	= 0x01,
+	RDMAP_REMOTE_OP		= 0x02,
+	DDP_LOCAL_CATA		= 0x00,
+	DDP_TAGGED_ERR		= 0x01,
+	DDP_UNTAGGED_ERR	= 0x02,
+	DDP_LLP			= 0x03
+};
+
+enum c4iw_rdma_ecodes {
+	RDMAP_INV_STAG		= 0x00,
+	RDMAP_BASE_BOUNDS	= 0x01,
+	RDMAP_ACC_VIOL		= 0x02,
+	RDMAP_STAG_NOT_ASSOC	= 0x03,
+	RDMAP_TO_WRAP		= 0x04,
+	RDMAP_INV_VERS		= 0x05,
+	RDMAP_INV_OPCODE	= 0x06,
+	RDMAP_STREAM_CATA	= 0x07,
+	RDMAP_GLOBAL_CATA	= 0x08,
+	RDMAP_CANT_INV_STAG	= 0x09,
+	RDMAP_UNSPECIFIED	= 0xff
+};
+
+enum c4iw_ddp_ecodes {
+	DDPT_INV_STAG		= 0x00,
+	DDPT_BASE_BOUNDS	= 0x01,
+	DDPT_STAG_NOT_ASSOC	= 0x02,
+	DDPT_TO_WRAP		= 0x03,
+	DDPT_INV_VERS		= 0x04,
+	DDPU_INV_QN		= 0x01,
+	DDPU_INV_MSN_NOBUF	= 0x02,
+	DDPU_INV_MSN_RANGE	= 0x03,
+	DDPU_INV_MO		= 0x04,
+	DDPU_MSG_TOOBIG		= 0x05,
+	DDPU_INV_VERS		= 0x06
+};
+
+enum c4iw_mpa_ecodes {
+	MPA_CRC_ERR		= 0x02,
+	MPA_MARKER_ERR		= 0x03,
+	MPA_LOCAL_CATA          = 0x05,
+	MPA_INSUFF_IRD          = 0x06,
+	MPA_NOMATCH_RTR         = 0x07,
+};
+
+enum c4iw_ep_state {
+	IDLE = 0,
+	LISTEN,
+	CONNECTING,
+	MPA_REQ_WAIT,
+	MPA_REQ_SENT,
+	MPA_REQ_RCVD,
+	MPA_REP_SENT,
+	FPDU_MODE,
+	ABORTING,
+	CLOSING,
+	MORIBUND,
+	DEAD,
+};
+
+enum c4iw_ep_flags {
+	PEER_ABORT_IN_PROGRESS	= 0,
+	ABORT_REQ_IN_PROGRESS	= 1,
+	RELEASE_RESOURCES	= 2,
+	CLOSE_SENT		= 3,
+	TIMEOUT                 = 4,
+	QP_REFERENCED		= 5
+};
+
+enum c4iw_ep_history {
+	ACT_OPEN_REQ		= 0,
+	ACT_OFLD_CONN		= 1,
+	ACT_OPEN_RPL		= 2,
+	ACT_ESTAB		= 3,
+	PASS_ACCEPT_REQ		= 4,
+	PASS_ESTAB		= 5,
+	ABORT_UPCALL		= 6,
+	ESTAB_UPCALL		= 7,
+	CLOSE_UPCALL		= 8,
+	ULP_ACCEPT		= 9,
+	ULP_REJECT		= 10,
+	TIMEDOUT		= 11,
+	PEER_ABORT		= 12,
+	PEER_CLOSE		= 13,
+	CONNREQ_UPCALL		= 14,
+	ABORT_CONN		= 15,
+	DISCONN_UPCALL		= 16,
+	EP_DISC_CLOSE		= 17,
+	EP_DISC_ABORT		= 18,
+	CONN_RPL_UPCALL		= 19,
+	ACT_RETRY_NOMEM		= 20,
+	ACT_RETRY_INUSE		= 21,
+	CLOSE_CON_RPL		= 22,
+	EP_DISC_FAIL		= 24,
+	QP_REFED		= 25,
+	QP_DEREFED		= 26,
+	CM_ID_REFED		= 27,
+	CM_ID_DEREFED		= 28
+};
+
+struct c4iw_ep_common {
+	TAILQ_ENTRY(c4iw_ep_common) entry;	/* Work queue attachment */
+	struct iw_cm_id *cm_id;
+	struct c4iw_qp *qp;
+	struct c4iw_dev *dev;
+	enum c4iw_ep_state state;
+	struct kref kref;
+	struct mutex mutex;
+	struct sockaddr_in local_addr;
+	struct sockaddr_in remote_addr;
+	struct c4iw_wr_wait wr_wait;
+	unsigned long flags;
+	unsigned long history;
+	int rpl_err;
+	int rpl_done;
+	struct thread *thread;
+	struct socket *so;
+	int ep_events;
+};
+
+struct c4iw_listen_ep {
+	struct c4iw_ep_common com;
+	unsigned int stid;
+	int backlog;
+};
+
+struct c4iw_ep {
+	struct c4iw_ep_common com;
+	struct c4iw_ep *parent_ep;
+	struct timer_list timer;
+	unsigned int atid;
+	u32 hwtid;
+	u32 snd_seq;
+	u32 rcv_seq;
+	struct l2t_entry *l2t;
+	struct dst_entry *dst;
+	struct c4iw_mpa_attributes mpa_attr;
+	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
+	unsigned int mpa_pkt_len;
+	u32 ird;
+	u32 ord;
+	u32 smac_idx;
+	u32 tx_chan;
+	u32 mtu;
+	u16 mss;
+	u16 emss;
+	u16 plen;
+	u16 rss_qid;
+	u16 txq_idx;
+	u16 ctrlq_idx;
+	u8 tos;
+	u8 retry_with_mpa_v1;
+	u8 tried_with_mpa_v1;
+};
+
+static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
+{
+	return cm_id->provider_data;
+}
+
+static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
+{
+	return cm_id->provider_data;
+}
+
+static inline int compute_wscale(int win)
+{
+	int wscale = 0;
+
+	while (wscale < 14 && (65535<<wscale) < win)
+		wscale++;
+	return wscale;
+}
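+
+/*
+ * Example (illustrative): for a 256KB receive window, compute_wscale(262144)
+ * returns 3: 65535 << 2 = 262140 is still smaller than the window, while
+ * 65535 << 3 = 524280 covers it.
+ */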
+
+u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
+void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
+int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
+			u32 reserved, u32 flags);
+void c4iw_id_table_free(struct c4iw_id_table *alloc);
+
+typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);
+
+int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
+		     struct l2t_entry *l2t);
+u32 c4iw_get_resource(struct c4iw_id_table *id_table);
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_pblpool_create(struct c4iw_rdev *rdev);
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
+void c4iw_destroy_resource(struct c4iw_resource *rscp);
+int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
+int c4iw_register_device(struct c4iw_dev *dev);
+void c4iw_unregister_device(struct c4iw_dev *dev);
+int __init c4iw_cm_init(void);
+void __exit c4iw_cm_term(void);
+void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
+			       struct c4iw_dev_ucontext *uctx);
+void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
+			    struct c4iw_dev_ucontext *uctx);
+int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		      struct ib_send_wr **bad_wr);
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		      struct ib_recv_wr **bad_wr);
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
+		 struct ib_mw_bind *mw_bind);
+int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_create_listen_ep(struct iw_cm_id *cm_id, int backlog);
+void c4iw_destroy_listen_ep(struct iw_cm_id *cm_id);
+int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+void c4iw_qp_add_ref(struct ib_qp *qp);
+void c4iw_qp_rem_ref(struct ib_qp *qp);
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
+					struct ib_device *device,
+					int page_list_len);
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
+int c4iw_dealloc_mw(struct ib_mw *mw);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+    u64 virt, int acc, struct ib_udata *udata, int mr_id);
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+					struct ib_phys_buf *buffer_list,
+					int num_phys_buf,
+					int acc,
+					u64 *iova_start);
+int c4iw_reregister_phys_mem(struct ib_mr *mr,
+				     int mr_rereg_mask,
+				     struct ib_pd *pd,
+				     struct ib_phys_buf *buffer_list,
+				     int num_phys_buf,
+				     int acc, u64 *iova_start);
+int c4iw_dereg_mr(struct ib_mr *ib_mr);
+int c4iw_destroy_cq(struct ib_cq *ib_cq);
+struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
+					int vector,
+					struct ib_ucontext *ib_context,
+					struct ib_udata *udata);
+int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int c4iw_destroy_qp(struct ib_qp *ib_qp);
+struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
+			     struct ib_qp_init_attr *attrs,
+			     struct ib_udata *udata);
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+				 int attr_mask, struct ib_udata *udata);
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr);
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
+int c4iw_ofld_send(struct c4iw_rdev *rdev, struct mbuf *m);
+void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
+int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
+int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_ev_handler(struct sge_iq *, const struct rsp_ctrl *);
+u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
+int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+		struct c4iw_dev_ucontext *uctx);
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+		struct c4iw_dev_ucontext *uctx);
+void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
+void process_newconn(struct iw_cm_id *parent_cm_id,
+		struct socket *child_so);
+
+extern struct cxgb4_client t4c_client;
+extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
+
+#if defined(__i386__) || defined(__amd64__)
+#define L1_CACHE_BYTES 128
+#else
+#define L1_CACHE_BYTES 32
+#endif
+
+static inline
+int idr_for_each(struct idr *idp,
+                 int (*fn)(int id, void *p, void *data), void *data)
+{
+        int n, id, max, error = 0;
+        struct idr_layer *p;
+        struct idr_layer *pa[MAX_LEVEL];
+        struct idr_layer **paa = &pa[0];
+
+        n = idp->layers * IDR_BITS;
+        p = idp->top;
+        max = 1 << n;
+
+        id = 0;
+        while (id < max) {
+                while (n > 0 && p) {
+                        n -= IDR_BITS;
+                        *paa++ = p;
+                        p = p->ary[(id >> n) & IDR_MASK];
+                }
+
+                if (p) {
+                        error = fn(id, (void *)p, data);
+                        if (error)
+                                break;
+                }
+
+                id += 1 << n;
+                while (n < fls(id)) {
+                        n += IDR_BITS;
+                        p = *--paa;
+                }
+        }
+
+        return error;
+}
+
+void your_reg_device(struct c4iw_dev *dev);
+
+#define SGE_CTRLQ_NUM	0
+
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/mem.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/mem.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/mem.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,852 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/mem.c 318799 2017-05-24 18:16:20Z np $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <rdma/ib_umem.h>
+#include <asm/atomic.h>
+
+#include <common/t4_msg.h>
+#include "iw_cxgbe.h"
+
+#define T4_ULPTX_MIN_IO 32
+#define C4IW_MAX_INLINE_SIZE 96
+
+static int
+mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
+{
+
+	return ((is_t4(dev->rdev.adap) ||
+		is_t5(dev->rdev.adap)) &&
+		length >= 8*1024*1024*1024ULL);
+}
+
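+/*
+ * Write `len' bytes at `addr' (a 32-byte-unit offset) in adapter memory by
+ * splitting the payload into ULP_TX work requests that each carry at most
+ * C4IW_MAX_INLINE_SIZE (96) bytes of immediate data.  The final WR requests
+ * a completion and carries the wr_wait cookie so that the routine can sleep
+ * in c4iw_wait_for_reply() until the firmware acknowledges the last write.
+ * A NULL `data' zero-fills the range.
+ */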
+static int
+write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+{
+	struct adapter *sc = rdev->adap;
+	struct ulp_mem_io *ulpmc;
+	struct ulptx_idata *ulpsc;
+	u8 wr_len, *to_dp, *from_dp;
+	int copy_len, num_wqe, i, ret = 0;
+	struct c4iw_wr_wait wr_wait;
+	struct wrqe *wr;
+	u32 cmd;
+
+	cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
+	if (is_t4(sc))
+		cmd |= cpu_to_be32(F_ULP_MEMIO_ORDER);
+	else
+		cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);
+
+	addr &= 0x7FFFFFF;
+	CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
+	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
+	c4iw_init_wr_wait(&wr_wait);
+	for (i = 0; i < num_wqe; i++) {
+
+		copy_len = min(len, C4IW_MAX_INLINE_SIZE);
+		wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
+				 roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+
+		wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+		if (wr == NULL)
+			return (-ENOMEM);
+		ulpmc = wrtod(wr);
+
+		memset(ulpmc, 0, wr_len);
+		INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
+
+		if (i == (num_wqe-1)) {
+			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
+						    F_FW_WR_COMPL);
+			ulpmc->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+		} else
+			ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
+		ulpmc->wr.wr_mid = cpu_to_be32(
+				       V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+
+		ulpmc->cmd = cmd;
+		ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
+		    DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
+		ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
+						      16));
+		ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));
+
+		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+		ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+		ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
+
+		to_dp = (u8 *)(ulpsc + 1);
+		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
+		if (data)
+			memcpy(to_dp, from_dp, copy_len);
+		else
+			memset(to_dp, 0, copy_len);
+		if (copy_len % T4_ULPTX_MIN_IO)
+			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
+			       (copy_len % T4_ULPTX_MIN_IO));
+		t4_wrq_tx(sc, wr);
+		len -= C4IW_MAX_INLINE_SIZE;
+	}
+
+	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
+	return ret;
+}
+
+/*
+ * Build and write a TPT entry.
+ * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
+ *     pbl_size and pbl_addr
+ * OUT: stag index
+ */
+static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
+			   u32 *stag, u8 stag_state, u32 pdid,
+			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
+			   int bind_enabled, u32 zbva, u64 to,
+			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr)
+{
+	int err;
+	struct fw_ri_tpte tpt;
+	u32 stag_idx;
+	static atomic_t key;
+
+	if (c4iw_fatal_error(rdev))
+		return -EIO;
+
+	stag_state = stag_state > 0;
+	stag_idx = (*stag) >> 8;
+
+	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
+		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
+		if (!stag_idx) {
+			mutex_lock(&rdev->stats.lock);
+			rdev->stats.stag.fail++;
+			mutex_unlock(&rdev->stats.lock);
+			return -ENOMEM;
+		}
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur += 32;
+		if (rdev->stats.stag.cur > rdev->stats.stag.max)
+			rdev->stats.stag.max = rdev->stats.stag.cur;
+		mutex_unlock(&rdev->stats.lock);
+		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
+	}
+	CTR5(KTR_IW_CXGBE,
+	    "%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x",
+	    __func__, stag_state, type, pdid, stag_idx);
+
+	/* write TPT entry */
+	if (reset_tpt_entry)
+		memset(&tpt, 0, sizeof(tpt));
+	else {
+		tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID |
+			V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) |
+			V_FW_RI_TPTE_STAGSTATE(stag_state) |
+			V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid));
+		tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) |
+			(bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) |
+			V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO :
+						      FW_RI_VA_BASED_TO))|
+			V_FW_RI_TPTE_PS(page_size));
+		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+			V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3));
+		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
+		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
+		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+	}
+	err = write_adapter_mem(rdev, stag_idx +
+				(rdev->adap->vres.stag.start >> 5),
+				sizeof(tpt), &tpt);
+
+	if (reset_tpt_entry) {
+		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur -= 32;
+		mutex_unlock(&rdev->stats.lock);
+	}
+	return err;
+}
+
+static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
+		     u32 pbl_addr, u32 pbl_size)
+{
+	int err;
+
+	CTR4(KTR_IW_CXGBE, "%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d",
+	     __func__, pbl_addr, rdev->adap->vres.pbl.start, pbl_size);
+
+	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl);
+	return err;
+}
+
+static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
+		     u32 pbl_addr)
+{
+	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
+			       pbl_size, pbl_addr);
+}
+
+static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
+{
+	*stag = T4_STAG_UNSET;
+	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
+			       0UL, 0, 0, 0, 0);
+}
+
+static int deallocate_window(struct c4iw_rdev *rdev, u32 stag)
+{
+	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
+			       0);
+}
+
+static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
+			 u32 pbl_size, u32 pbl_addr)
+{
+	*stag = T4_STAG_UNSET;
+	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
+			       0UL, 0, 0, pbl_size, pbl_addr);
+}
+
+static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
+{
+	u32 mmid;
+
+	mhp->attr.state = 1;
+	mhp->attr.stag = stag;
+	mmid = stag >> 8;
+	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
+	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
+}
+
+static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+		      struct c4iw_mr *mhp, int shift)
+{
+	u32 stag = T4_STAG_UNSET;
+	int ret;
+
+	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+			      FW_RI_STAG_NSMR, mhp->attr.len ? mhp->attr.perms : 0,
+			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
+			      mhp->attr.va_fbo, mhp->attr.len ? mhp->attr.len : -1, shift - 12,
+			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
+	if (ret)
+		return ret;
+
+	ret = finish_mem_reg(mhp, stag);
+	if (ret)
+		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+		       mhp->attr.pbl_addr);
+	return ret;
+}
+
+static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
+			  struct c4iw_mr *mhp, int shift, int npages)
+{
+	u32 stag;
+	int ret;
+
+	if (npages > mhp->attr.pbl_size)
+		return -ENOMEM;
+
+	stag = mhp->attr.stag;
+	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
+			      FW_RI_STAG_NSMR, mhp->attr.perms,
+			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
+			      mhp->attr.va_fbo, mhp->attr.len, shift - 12,
+			      mhp->attr.pbl_size, mhp->attr.pbl_addr);
+	if (ret)
+		return ret;
+
+	ret = finish_mem_reg(mhp, stag);
+	if (ret)
+		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+		       mhp->attr.pbl_addr);
+
+	return ret;
+}
+
+static int alloc_pbl(struct c4iw_mr *mhp, int npages)
+{
+	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
+						    npages << 3);
+
+	if (!mhp->attr.pbl_addr)
+		return -ENOMEM;
+
+	mhp->attr.pbl_size = npages;
+
+	return 0;
+}
+
+static int build_phys_page_list(struct ib_phys_buf *buffer_list,
+				int num_phys_buf, u64 *iova_start,
+				u64 *total_size, int *npages,
+				int *shift, __be64 **page_list)
+{
+	u64 mask;
+	int i, j, n;
+
+	mask = 0;
+	*total_size = 0;
+	for (i = 0; i < num_phys_buf; ++i) {
+		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
+			return -EINVAL;
+		if (i != 0 && i != num_phys_buf - 1 &&
+		    (buffer_list[i].size & ~PAGE_MASK))
+			return -EINVAL;
+		*total_size += buffer_list[i].size;
+		if (i > 0)
+			mask |= buffer_list[i].addr;
+		else
+			mask |= buffer_list[i].addr & PAGE_MASK;
+		if (i != num_phys_buf - 1)
+			mask |= buffer_list[i].addr + buffer_list[i].size;
+		else
+			mask |= (buffer_list[i].addr + buffer_list[i].size +
+				PAGE_SIZE - 1) & PAGE_MASK;
+	}
+
+	/* Find largest page shift we can use to cover buffers */
+	for (*shift = PAGE_SHIFT; *shift < PAGE_SHIFT + M_FW_RI_TPTE_PS;
+	    ++(*shift))
+		if ((1ULL << *shift) & mask)
+			break;
+
+	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
+	buffer_list[0].addr &= ~0ull << *shift;
+
+	*npages = 0;
+	for (i = 0; i < num_phys_buf; ++i)
+		*npages += (buffer_list[i].size +
+			(1ULL << *shift) - 1) >> *shift;
+
+	if (!*npages)
+		return -EINVAL;
+
+	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
+	if (!*page_list)
+		return -ENOMEM;
+
+	n = 0;
+	for (i = 0; i < num_phys_buf; ++i)
+		for (j = 0;
+		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
+		     ++j)
+			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
+			    ((u64) j << *shift));
+
+	CTR6(KTR_IW_CXGBE,
+	    "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d", __func__,
+	    (unsigned long long)*iova_start, (unsigned long long)mask, *shift,
+	    (unsigned long long)*total_size, *npages);
+
+	return 0;
+}
+
+int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask,
+			     struct ib_pd *pd, struct ib_phys_buf *buffer_list,
+			     int num_phys_buf, int acc, u64 *iova_start)
+{
+	struct c4iw_mr mh, *mhp;
+	struct c4iw_pd *php;
+	struct c4iw_dev *rhp;
+	__be64 *page_list = NULL;
+	int shift = 0;
+	u64 total_size = 0;
+	int npages = 0;
+	int ret;
+
+	CTR3(KTR_IW_CXGBE, "%s ib_mr %p ib_pd %p", __func__, mr, pd);
+
+	/* There can be no memory windows */
+	if (atomic_read(&mr->usecnt))
+		return -EINVAL;
+
+	mhp = to_c4iw_mr(mr);
+	rhp = mhp->rhp;
+	php = to_c4iw_pd(mr->pd);
+
+	/* make sure we are on the same adapter */
+	if (rhp != php->rhp)
+		return -EINVAL;
+
+	memcpy(&mh, mhp, sizeof *mhp);
+
+	if (mr_rereg_mask & IB_MR_REREG_PD)
+		php = to_c4iw_pd(pd);
+	if (mr_rereg_mask & IB_MR_REREG_ACCESS) {
+		mh.attr.perms = c4iw_ib_to_tpt_access(acc);
+		mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) ==
+					 IB_ACCESS_MW_BIND;
+	}
+	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+		ret = build_phys_page_list(buffer_list, num_phys_buf,
+						iova_start,
+						&total_size, &npages,
+						&shift, &page_list);
+		if (ret)
+			return ret;
+	}
+	if (mr_exceeds_hw_limits(rhp, total_size)) {
+		kfree(page_list);
+		return -EINVAL;
+	}
+	ret = reregister_mem(rhp, php, &mh, shift, npages);
+	kfree(page_list);
+	if (ret)
+		return ret;
+	if (mr_rereg_mask & IB_MR_REREG_PD)
+		mhp->attr.pdid = php->pdid;
+	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
+		mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
+		mhp->attr.zbva = 0;
+		mhp->attr.va_fbo = *iova_start;
+		mhp->attr.page_size = shift - 12;
+		mhp->attr.len = total_size;
+		mhp->attr.pbl_size = npages;
+	}
+
+	return 0;
+}
+
+struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
+				     struct ib_phys_buf *buffer_list,
+				     int num_phys_buf, int acc, u64 *iova_start)
+{
+	__be64 *page_list;
+	int shift;
+	u64 total_size;
+	int npages;
+	struct c4iw_dev *rhp;
+	struct c4iw_pd *php;
+	struct c4iw_mr *mhp;
+	int ret;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
+	php = to_c4iw_pd(pd);
+	rhp = php->rhp;
+
+	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+	if (!mhp)
+		return ERR_PTR(-ENOMEM);
+
+	mhp->rhp = rhp;
+
+	/* First check that we have enough alignment */
+	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (num_phys_buf > 1 &&
+	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
+					&total_size, &npages, &shift,
+					&page_list);
+	if (ret)
+		goto err;
+
+	if (mr_exceeds_hw_limits(rhp, total_size)) {
+		kfree(page_list);
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = alloc_pbl(mhp, npages);
+	if (ret) {
+		kfree(page_list);
+		goto err;
+	}
+
+	ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr,
+			     npages);
+	kfree(page_list);
+	if (ret)
+		goto err_pbl;
+
+	mhp->attr.pdid = php->pdid;
+	mhp->attr.zbva = 0;
+
+	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+	mhp->attr.va_fbo = *iova_start;
+	mhp->attr.page_size = shift - 12;
+
+	mhp->attr.len = total_size;
+	mhp->attr.pbl_size = npages;
+	ret = register_mem(rhp, php, mhp, shift);
+	if (ret)
+		goto err_pbl;
+
+	return &mhp->ibmr;
+
+err_pbl:
+	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+			      mhp->attr.pbl_size << 3);
+
+err:
+	kfree(mhp);
+	return ERR_PTR(ret);
+}
+
+struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_pd *php;
+	struct c4iw_mr *mhp;
+	int ret;
+	u32 stag = T4_STAG_UNSET;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
+	php = to_c4iw_pd(pd);
+	rhp = php->rhp;
+
+	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+	if (!mhp)
+		return ERR_PTR(-ENOMEM);
+
+	mhp->rhp = rhp;
+	mhp->attr.pdid = php->pdid;
+	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+	mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
+	mhp->attr.zbva = 0;
+	mhp->attr.va_fbo = 0;
+	mhp->attr.page_size = 0;
+	mhp->attr.len = ~0UL;
+	mhp->attr.pbl_size = 0;
+
+	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
+			      FW_RI_STAG_NSMR, mhp->attr.perms,
+			      mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+	if (ret)
+		goto err1;
+
+	ret = finish_mem_reg(mhp, stag);
+	if (ret)
+		goto err2;
+	return &mhp->ibmr;
+err2:
+	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+		  mhp->attr.pbl_addr);
+err1:
+	kfree(mhp);
+	return ERR_PTR(ret);
+}
+
+struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+    u64 virt, int acc, struct ib_udata *udata, int mr_id)
+{
+	__be64 *pages;
+	int shift, n, len;
+	int i, j, k;
+	int err = 0;
+	struct ib_umem_chunk *chunk;
+	struct c4iw_dev *rhp;
+	struct c4iw_pd *php;
+	struct c4iw_mr *mhp;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
+
+	if (length == ~0ULL)
+		return ERR_PTR(-EINVAL);
+
+	if ((length + start) < start)
+		return ERR_PTR(-EINVAL);
+
+	php = to_c4iw_pd(pd);
+	rhp = php->rhp;
+
+	if (mr_exceeds_hw_limits(rhp, length))
+		return ERR_PTR(-EINVAL);
+
+	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+	if (!mhp)
+		return ERR_PTR(-ENOMEM);
+
+	mhp->rhp = rhp;
+
+	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+	if (IS_ERR(mhp->umem)) {
+		err = PTR_ERR(mhp->umem);
+		kfree(mhp);
+		return ERR_PTR(err);
+	}
+
+	shift = ffs(mhp->umem->page_size) - 1;
+
+	n = 0;
+	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+		n += chunk->nents;
+
+	err = alloc_pbl(mhp, n);
+	if (err)
+		goto err;
+
+	pages = (__be64 *) __get_free_page(GFP_KERNEL);
+	if (!pages) {
+		err = -ENOMEM;
+		goto err_pbl;
+	}
+
+	i = n = 0;
+
+	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
+		for (j = 0; j < chunk->nmap; ++j) {
+			len = sg_dma_len(&chunk->page_list[j]) >> shift;
+			for (k = 0; k < len; ++k) {
+				pages[i++] = cpu_to_be64(sg_dma_address(
+					&chunk->page_list[j]) +
+					mhp->umem->page_size * k);
+				if (i == PAGE_SIZE / sizeof *pages) {
+					err = write_pbl(&mhp->rhp->rdev,
+					      pages,
+					      mhp->attr.pbl_addr + (n << 3), i);
+					if (err)
+						goto pbl_done;
+					n += i;
+					i = 0;
+				}
+			}
+		}
+
+	if (i)
+		err = write_pbl(&mhp->rhp->rdev, pages,
+				     mhp->attr.pbl_addr + (n << 3), i);
+
+pbl_done:
+	free_page((unsigned long) pages);
+	if (err)
+		goto err_pbl;
+
+	mhp->attr.pdid = php->pdid;
+	mhp->attr.zbva = 0;
+	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
+	mhp->attr.va_fbo = virt;
+	mhp->attr.page_size = shift - 12;
+	mhp->attr.len = length;
+
+	err = register_mem(rhp, php, mhp, shift);
+	if (err)
+		goto err_pbl;
+
+	return &mhp->ibmr;
+
+err_pbl:
+	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+			      mhp->attr.pbl_size << 3);
+
+err:
+	ib_umem_release(mhp->umem);
+	kfree(mhp);
+	return ERR_PTR(err);
+}
+
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_pd *php;
+	struct c4iw_mw *mhp;
+	u32 mmid;
+	u32 stag = 0;
+	int ret;
+
+	php = to_c4iw_pd(pd);
+	rhp = php->rhp;
+	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+	if (!mhp)
+		return ERR_PTR(-ENOMEM);
+	ret = allocate_window(&rhp->rdev, &stag, php->pdid);
+	if (ret) {
+		kfree(mhp);
+		return ERR_PTR(ret);
+	}
+	mhp->rhp = rhp;
+	mhp->attr.pdid = php->pdid;
+	mhp->attr.type = FW_RI_STAG_MW;
+	mhp->attr.stag = stag;
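+	/* mmid is the stag with its low 8 key bits dropped. */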
+	mmid = (stag) >> 8;
+	mhp->ibmw.rkey = stag;
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+		deallocate_window(&rhp->rdev, mhp->attr.stag);
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
+	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
+	    stag);
+	return &(mhp->ibmw);
+}
+
+int c4iw_dealloc_mw(struct ib_mw *mw)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_mw *mhp;
+	u32 mmid;
+
+	mhp = to_c4iw_mw(mw);
+	rhp = mhp->rhp;
+	mmid = (mw->rkey) >> 8;
+	remove_handle(rhp, &rhp->mmidr, mmid);
+	deallocate_window(&rhp->rdev, mhp->attr.stag);
+	kfree(mhp);
+	CTR4(KTR_IW_CXGBE, "%s ib_mw %p mmid 0x%x ptr %p", __func__, mw, mmid,
+	    mhp);
+	return 0;
+}
+
+struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_pd *php;
+	struct c4iw_mr *mhp;
+	u32 mmid;
+	u32 stag = 0;
+	int ret = 0;
+
+	php = to_c4iw_pd(pd);
+	rhp = php->rhp;
+	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
+	if (!mhp) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mhp->rhp = rhp;
+	ret = alloc_pbl(mhp, pbl_depth);
+	if (ret)
+		goto err1;
+	mhp->attr.pbl_size = pbl_depth;
+	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
+				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
+	if (ret)
+		goto err2;
+	mhp->attr.pdid = php->pdid;
+	mhp->attr.type = FW_RI_STAG_NSMR;
+	mhp->attr.stag = stag;
+	mhp->attr.state = 1;
+	mmid = (stag) >> 8;
+	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+		ret = -ENOMEM;
+		goto err3;
+	}
+
+	CTR4(KTR_IW_CXGBE, "%s mmid 0x%x mhp %p stag 0x%x", __func__, mmid, mhp,
+	    stag);
+	return &(mhp->ibmr);
+err3:
+	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+		       mhp->attr.pbl_addr);
+err2:
+	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+			      mhp->attr.pbl_size << 3);
+err1:
+	kfree(mhp);
+err:
+	return ERR_PTR(ret);
+}
+
+struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
+						     int page_list_len)
+{
+	struct c4iw_fr_page_list *c4pl;
+	struct c4iw_dev *dev = to_c4iw_dev(device);
+	bus_addr_t dma_addr;
+	int size = sizeof *c4pl + page_list_len * sizeof(u64);
+
+	c4pl = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
+	if (c4pl == NULL)
+		return ERR_PTR(-ENOMEM);
+	dma_addr = vtophys(c4pl);
+
+	pci_unmap_addr_set(c4pl, mapping, dma_addr);
+	c4pl->dma_addr = dma_addr;
+	c4pl->dev = dev;
+	c4pl->size = size;
+	c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
+	c4pl->ibpl.max_page_list_len = page_list_len;
+
+	return &c4pl->ibpl;
+}
+
+void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
+{
+	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
+	contigfree(c4pl, c4pl->size, M_DEVBUF);
+}
+
+int c4iw_dereg_mr(struct ib_mr *ib_mr)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_mr *mhp;
+	u32 mmid;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_mr %p", __func__, ib_mr);
+	/* There can be no memory windows */
+	if (atomic_read(&ib_mr->usecnt))
+		return -EINVAL;
+
+	mhp = to_c4iw_mr(ib_mr);
+	rhp = mhp->rhp;
+	mmid = mhp->attr.stag >> 8;
+	remove_handle(rhp, &rhp->mmidr, mmid);
+	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+		       mhp->attr.pbl_addr);
+	if (mhp->attr.pbl_size)
+		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
+				  mhp->attr.pbl_size << 3);
+	if (mhp->kva)
+		kfree((void *) (unsigned long) mhp->kva);
+	if (mhp->umem)
+		ib_umem_release(mhp->umem);
+	CTR3(KTR_IW_CXGBE, "%s mmid 0x%x ptr %p", __func__, mmid, mhp);
+	kfree(mhp);
+	return 0;
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/mem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/provider.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/provider.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/provider.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,527 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/provider.c 325611 2017-11-09 19:00:11Z hselasky $");
+
+#define	LINUXKPI_PARAM_PREFIX iw_cxgbe_
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <asm/pgtable.h>
+#include <linux/page.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "iw_cxgbe.h"
+#include "user.h"
+
+static int fastreg_support = 1;
+module_param(fastreg_support, int, 0644);
+MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default = 1)");
+
+static int c4iw_modify_port(struct ib_device *ibdev,
+			    u8 port, int port_modify_mask,
+			    struct ib_port_modify *props)
+{
+	return -ENOSYS;
+}
+
+static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
+				    struct ib_ah_attr *ah_attr)
+{
+	return ERR_PTR(-ENOSYS);
+}
+
+static int c4iw_ah_destroy(struct ib_ah *ah)
+{
+	return -ENOSYS;
+}
+
+static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	return -ENOSYS;
+}
+
+static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+	return -ENOSYS;
+}
+
+static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
+			    u8 port_num, struct ib_wc *in_wc,
+			    struct ib_grh *in_grh, struct ib_mad *in_mad,
+			    struct ib_mad *out_mad)
+{
+	return -ENOSYS;
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+	struct c4iw_dev *rhp = to_c4iw_dev(context->device);
+	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+	struct c4iw_mm_entry *mm, *tmp;
+
+	CTR2(KTR_IW_CXGBE, "%s context %p", __func__, context);
+	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
+		kfree(mm);
+	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
+	kfree(ucontext);
+	return 0;
+}
+
+static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
+					       struct ib_udata *udata)
+{
+	struct c4iw_ucontext *context;
+	struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
+
+	CTR2(KTR_IW_CXGBE, "%s ibdev %p", __func__, ibdev);
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return ERR_PTR(-ENOMEM);
+	c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
+	INIT_LIST_HEAD(&context->mmaps);
+	spin_lock_init(&context->mmap_lock);
+	return &context->ibucontext;
+}
+
+#ifdef DOT5
+static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
+{
+    return pgprot_writecombine(prot);
+}
+#endif
+
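+/*
+ * mmap dispatch, roughly: the mmap offset is a key naming an entry that an
+ * earlier verbs call recorded in the ucontext's mmap list.  The address
+ * recovered from that entry then selects one of three mappings below: the
+ * adapter register window (MA_SYNC, uncacheable), the user doorbell
+ * region, or plain host queue memory allocated with contigmalloc.
+ */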
+static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+	int len = vma->vm_end - vma->vm_start;
+	u32 key = vma->vm_pgoff << PAGE_SHIFT;
+	struct c4iw_rdev *rdev;
+	int ret = 0;
+	struct c4iw_mm_entry *mm;
+	struct c4iw_ucontext *ucontext;
+	u64 addr, paddr;
+
+	u64 va_regs_res = 0, va_udbs_res = 0;
+	u64 len_regs_res = 0, len_udbs_res = 0;
+
+	CTR3(KTR_IW_CXGBE, "%s:1 ctx %p vma %p", __func__, context, vma);
+
+	CTR4(KTR_IW_CXGBE, "%s:1a pgoff 0x%lx key 0x%x len %d", __func__,
+	    vma->vm_pgoff, key, len);
+
+	if (vma->vm_start & (PAGE_SIZE-1)) {
+		CTR3(KTR_IW_CXGBE, "%s:2 unaligned vm_start %u vma %p",
+		    __func__, vma->vm_start, vma);
+		return -EINVAL;
+	}
+
+	rdev = &(to_c4iw_dev(context->device)->rdev);
+	ucontext = to_c4iw_ucontext(context);
+
+	mm = remove_mmap(ucontext, key, len);
+	if (!mm) {
+		CTR4(KTR_IW_CXGBE, "%s:3 ucontext %p key %u len %u", __func__,
+		    ucontext, key, len);
+		return -EINVAL;
+	}
+	addr = mm->addr;
+	kfree(mm);
+
+	va_regs_res = (u64)rman_get_virtual(rdev->adap->regs_res);
+	len_regs_res = (u64)rman_get_size(rdev->adap->regs_res);
+	va_udbs_res = (u64)rman_get_virtual(rdev->adap->udbs_res);
+	len_udbs_res = (u64)rman_get_size(rdev->adap->udbs_res);
+
+	CTR6(KTR_IW_CXGBE,
+	    "%s:4 addr %p, masync region %p:%p, udb region %p:%p", __func__,
+	    addr, va_regs_res, va_regs_res+len_regs_res, va_udbs_res,
+	    va_udbs_res+len_udbs_res);
+
+	if (addr >= va_regs_res && addr < va_regs_res + len_regs_res) {
+		CTR4(KTR_IW_CXGBE, "%s:5 MA_SYNC addr %p region %p, reglen %u",
+		    __func__, addr, va_regs_res, len_regs_res);
+		/*
+		 * MA_SYNC register...
+		 */
+		paddr = vtophys(addr);
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		ret = io_remap_pfn_range(vma, vma->vm_start,
+				paddr >> PAGE_SHIFT,
+				len, vma->vm_page_prot);
+	} else {
+
+		if (addr >= va_udbs_res && addr < va_udbs_res + len_udbs_res) {
+			/*
+			 * Map user DB or OCQP memory...
+			 */
+			paddr = vtophys(addr);
+			CTR4(KTR_IW_CXGBE,
+			    "%s:6 USER DB-GTS addr %p region %p, reglen %u",
+			    __func__, addr, va_udbs_res, len_udbs_res);
+#ifdef DOT5
+			if (!is_t4(rdev->lldi.adapter_type) && map_udb_as_wc)
+				vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
+			else
+#endif
+				vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+			ret = io_remap_pfn_range(vma, vma->vm_start,
+					paddr >> PAGE_SHIFT,
+					len, vma->vm_page_prot);
+		} else {
+			/*
+			 * Map WQ or CQ contig dma memory...
+			 */
+			CTR4(KTR_IW_CXGBE,
+			    "%s:7 WQ/CQ addr %p vm_start %u vma %p", __func__,
+			    addr, vma->vm_start, vma);
+			ret = io_remap_pfn_range(vma, vma->vm_start,
+				addr >> PAGE_SHIFT,
+				len, vma->vm_page_prot);
+		}
+	}
+	CTR4(KTR_IW_CXGBE, "%s:8 ctx %p vma %p ret %u", __func__, context, vma,
+	    ret);
+	return ret;
+}
+
+static int
+c4iw_deallocate_pd(struct ib_pd *pd)
+{
+	struct c4iw_pd *php = to_c4iw_pd(pd);
+	struct c4iw_dev *rhp = php->rhp;
+
+	CTR3(KTR_IW_CXGBE, "%s: pd %p, pdid 0x%x", __func__, pd, php->pdid);
+
+	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
+	mutex_lock(&rhp->rdev.stats.lock);
+	rhp->rdev.stats.pd.cur--;
+	mutex_unlock(&rhp->rdev.stats.lock);
+	kfree(php);
+
+	return (0);
+}
+
+static struct ib_pd *
+c4iw_allocate_pd(struct ib_device *ibdev, struct ib_ucontext *context,
+    struct ib_udata *udata)
+{
+	struct c4iw_pd *php;
+	u32 pdid;
+	struct c4iw_dev *rhp;
+
+	CTR4(KTR_IW_CXGBE, "%s: ibdev %p, context %p, data %p", __func__, ibdev,
+	    context, udata);
+	rhp = (struct c4iw_dev *) ibdev;
+	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
+	if (!pdid)
+		return ERR_PTR(-EINVAL);
+	php = kzalloc(sizeof(*php), GFP_KERNEL);
+	if (!php) {
+		c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
+		return ERR_PTR(-ENOMEM);
+	}
+	php->pdid = pdid;
+	php->rhp = rhp;
+	if (context) {
+		if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
+			c4iw_deallocate_pd(&php->ibpd);
+			return ERR_PTR(-EFAULT);
+		}
+	}
+	mutex_lock(&rhp->rdev.stats.lock);
+	rhp->rdev.stats.pd.cur++;
+	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
+		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
+	mutex_unlock(&rhp->rdev.stats.lock);
+
+	CTR6(KTR_IW_CXGBE,
+	    "%s: ibdev %p, context %p, data %p, pdid 0x%x, pd %p", __func__,
+	    ibdev, context, udata, pdid, php);
+	return (&php->ibpd);
+}
+
+static int
+c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+{
+
+	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, pkey %p", __func__,
+	    ibdev, port, index, pkey);
+
+	*pkey = 0;
+	return (0);
+}
+
+static int
+c4iw_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid)
+{
+	struct c4iw_dev *dev;
+	struct port_info *pi;
+	struct adapter *sc;
+
+	CTR5(KTR_IW_CXGBE, "%s ibdev %p, port %d, index %d, gid %p", __func__,
+	    ibdev, port, index, gid);
+
+	memset(&gid->raw[0], 0, sizeof(gid->raw));
+	dev = to_c4iw_dev(ibdev);
+	sc = dev->rdev.adap;
+	if (port == 0 || port > sc->params.nports)
+		return (-EINVAL);
+	pi = sc->port[port - 1];
+	memcpy(&gid->raw[0], pi->vi[0].hw_addr, ETHER_ADDR_LEN);
+	return (0);
+}
+
+static int
+c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
+{
+	struct c4iw_dev *dev = to_c4iw_dev(ibdev);
+	struct adapter *sc = dev->rdev.adap;
+	const int spg_ndesc = sc->params.sge.spg_len / EQ_ESIZE;
+
+	CTR3(KTR_IW_CXGBE, "%s ibdev %p, props %p", __func__, ibdev, props);
+
+	memset(props, 0, sizeof *props);
+	memcpy(&props->sys_image_guid, sc->port[0]->vi[0].hw_addr,
+	    ETHER_ADDR_LEN);
+	props->hw_ver = sc->params.chipid;
+	props->fw_ver = sc->params.fw_vers;
+	props->device_cap_flags = dev->device_cap_flags;
+	props->page_size_cap = T4_PAGESIZE_MASK;
+	props->vendor_id = pci_get_vendor(sc->dev);
+	props->vendor_part_id = pci_get_device(sc->dev);
+	props->max_mr_size = T4_MAX_MR_SIZE;
+	props->max_qp = sc->vres.qp.size / 2;
+	props->max_qp_wr = T4_MAX_QP_DEPTH(spg_ndesc);
+	props->max_sge = T4_MAX_RECV_SGE;
+	props->max_sge_rd = 1;
+	props->max_res_rd_atom = sc->params.max_ird_adapter;
+	props->max_qp_rd_atom = min(sc->params.max_ordird_qp,
+	    c4iw_max_read_depth);
+	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+	props->max_cq = sc->vres.qp.size;
+	props->max_cqe = T4_MAX_CQ_DEPTH;
+	props->max_mr = c4iw_num_stags(&dev->rdev);
+	props->max_pd = T4_MAX_NUM_PD;
+	props->local_ca_ack_delay = 0;
+	props->max_fast_reg_page_list_len = T4_MAX_FR_DEPTH;
+
+	return (0);
+}
+
+/*
+ * Returns -errno on failure.
+ */
+static int
+c4iw_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
+{
+	struct c4iw_dev *dev;
+	struct adapter *sc;
+	struct port_info *pi;
+	struct ifnet *ifp;
+
+	CTR4(KTR_IW_CXGBE, "%s ibdev %p, port %d, props %p", __func__, ibdev,
+	    port, props);
+
+	dev = to_c4iw_dev(ibdev);
+	sc = dev->rdev.adap;
+	if (port > sc->params.nports)
+		return (-EINVAL);
+	pi = sc->port[port - 1];
+	ifp = pi->vi[0].ifp;
+
+	memset(props, 0, sizeof(struct ib_port_attr));
+	props->max_mtu = IB_MTU_4096;
+	if (ifp->if_mtu >= 4096)
+		props->active_mtu = IB_MTU_4096;
+	else if (ifp->if_mtu >= 2048)
+		props->active_mtu = IB_MTU_2048;
+	else if (ifp->if_mtu >= 1024)
+		props->active_mtu = IB_MTU_1024;
+	else if (ifp->if_mtu >= 512)
+		props->active_mtu = IB_MTU_512;
+	else
+		props->active_mtu = IB_MTU_256;
+	props->state = pi->link_cfg.link_ok ? IB_PORT_ACTIVE : IB_PORT_DOWN;
+	props->port_cap_flags =
+	    IB_PORT_CM_SUP |
+	    IB_PORT_SNMP_TUNNEL_SUP |
+	    IB_PORT_REINIT_SUP |
+	    IB_PORT_DEVICE_MGMT_SUP |
+	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+	props->gid_tbl_len = 1;
+	props->pkey_tbl_len = 1;
+	props->active_width = 2;
+	props->active_speed = 2;
+	props->max_msg_sz = -1;
+
+	return 0;
+}
+
+static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+			       struct ib_port_immutable *immutable)
+{
+	struct ib_port_attr attr;
+	int err;
+
+	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+	err = ib_query_port(ibdev, port_num, &attr);
+	if (err)
+		return err;
+
+	immutable->pkey_tbl_len = attr.pkey_tbl_len;
+	immutable->gid_tbl_len = attr.gid_tbl_len;
+
+	return 0;
+}
+
+/*
+ * Returns -errno on error.
+ */
+int
+c4iw_register_device(struct c4iw_dev *dev)
+{
+	struct adapter *sc = dev->rdev.adap;
+	struct ib_device *ibdev = &dev->ibdev;
+	struct iw_cm_verbs *iwcm;
+	int ret;
+
+	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev, sc);
+	BUG_ON(!sc->port[0]);
+	strlcpy(ibdev->name, device_get_nameunit(sc->dev), sizeof(ibdev->name));
+	memset(&ibdev->node_guid, 0, sizeof(ibdev->node_guid));
+	memcpy(&ibdev->node_guid, sc->port[0]->vi[0].hw_addr, ETHER_ADDR_LEN);
+	ibdev->owner = THIS_MODULE;
+	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+	if (fastreg_support)
+		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+	ibdev->local_dma_lkey = 0;
+	ibdev->uverbs_cmd_mask =
+	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
+	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
+	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
+	ibdev->node_type = RDMA_NODE_RNIC;
+	strlcpy(ibdev->node_desc, C4IW_NODE_DESC, sizeof(ibdev->node_desc));
+	ibdev->phys_port_cnt = sc->params.nports;
+	ibdev->num_comp_vectors = 1;
+	ibdev->dma_device = NULL;
+	ibdev->query_device = c4iw_query_device;
+	ibdev->query_port = c4iw_query_port;
+	ibdev->modify_port = c4iw_modify_port;
+	ibdev->query_pkey = c4iw_query_pkey;
+	ibdev->query_gid = c4iw_query_gid;
+	ibdev->alloc_ucontext = c4iw_alloc_ucontext;
+	ibdev->dealloc_ucontext = c4iw_dealloc_ucontext;
+	ibdev->mmap = c4iw_mmap;
+	ibdev->alloc_pd = c4iw_allocate_pd;
+	ibdev->dealloc_pd = c4iw_deallocate_pd;
+	ibdev->create_ah = c4iw_ah_create;
+	ibdev->destroy_ah = c4iw_ah_destroy;
+	ibdev->create_qp = c4iw_create_qp;
+	ibdev->modify_qp = c4iw_ib_modify_qp;
+	ibdev->query_qp = c4iw_ib_query_qp;
+	ibdev->destroy_qp = c4iw_destroy_qp;
+	ibdev->create_cq = c4iw_create_cq;
+	ibdev->destroy_cq = c4iw_destroy_cq;
+	ibdev->resize_cq = c4iw_resize_cq;
+	ibdev->poll_cq = c4iw_poll_cq;
+	ibdev->get_dma_mr = c4iw_get_dma_mr;
+	ibdev->reg_phys_mr = c4iw_register_phys_mem;
+	ibdev->rereg_phys_mr = c4iw_reregister_phys_mem;
+	ibdev->reg_user_mr = c4iw_reg_user_mr;
+	ibdev->dereg_mr = c4iw_dereg_mr;
+	ibdev->alloc_mw = c4iw_alloc_mw;
+	ibdev->bind_mw = c4iw_bind_mw;
+	ibdev->dealloc_mw = c4iw_dealloc_mw;
+	ibdev->alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
+	ibdev->alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
+	ibdev->free_fast_reg_page_list = c4iw_free_fastreg_pbl;
+	ibdev->attach_mcast = c4iw_multicast_attach;
+	ibdev->detach_mcast = c4iw_multicast_detach;
+	ibdev->process_mad = c4iw_process_mad;
+	ibdev->req_notify_cq = c4iw_arm_cq;
+	ibdev->post_send = c4iw_post_send;
+	ibdev->post_recv = c4iw_post_receive;
+	ibdev->uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
+	ibdev->get_port_immutable = c4iw_port_immutable;
+
+	iwcm = kmalloc(sizeof(*iwcm), GFP_KERNEL);
+	if (iwcm == NULL)
+		return (-ENOMEM);
+
+	iwcm->connect = c4iw_connect;
+	iwcm->accept = c4iw_accept_cr;
+	iwcm->reject = c4iw_reject_cr;
+	iwcm->create_listen_ep = c4iw_create_listen_ep;
+	iwcm->destroy_listen_ep = c4iw_destroy_listen_ep;
+	iwcm->newconn = process_newconn;
+	iwcm->add_ref = c4iw_qp_add_ref;
+	iwcm->rem_ref = c4iw_qp_rem_ref;
+	iwcm->get_qp = c4iw_get_qp;
+	ibdev->iwcm = iwcm;
+
+	ret = ib_register_device(&dev->ibdev, NULL);
+	if (ret)
+		kfree(iwcm);
+
+	return (ret);
+}
+
+void
+c4iw_unregister_device(struct c4iw_dev *dev)
+{
+
+	CTR3(KTR_IW_CXGBE, "%s c4iw_dev %p, adapter %p", __func__, dev,
+	    dev->rdev.adap);
+	ib_unregister_device(&dev->ibdev);
+	kfree(dev->ibdev.iwcm);
+	return;
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/provider.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/iw_cxgbe/qp.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/qp.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/qp.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,1762 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/qp.c 331719 2018-03-29 01:20:58Z np $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockio.h>
+#include <sys/taskqueue.h>
+#include <netinet/in.h>
+#include <net/route.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip_var.h>
+#include <netinet/tcp_var.h>
+#include <netinet/tcp.h>
+#include <netinet/tcpip.h>
+
+#include <netinet/toecore.h>
+
+struct sge_iq;
+struct rss_header;
+#include <linux/types.h>
+#include "offload.h"
+#include "tom/t4_tom.h"
+
+#include "iw_cxgbe.h"
+#include "user.h"
+
+static int creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize);
+
+static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
+{
+	unsigned long flag;
+	spin_lock_irqsave(&qhp->lock, flag);
+	qhp->attr.state = state;
+	spin_unlock_irqrestore(&qhp->lock, flag);
+}
+
+static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+
+	contigfree(sq->queue, sq->memsize, M_DEVBUF);
+}
+
+static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+
+	dealloc_host_sq(rdev, sq);
+}
+
+static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
+{
+	sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
+	    4096, 0);
+
+	if (sq->queue)
+		sq->dma_addr = vtophys(sq->queue);
+	else
+		return -ENOMEM;
+	sq->phys_addr = vtophys(sq->queue);
+	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+	CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__,
+	    sq->queue, sq->dma_addr, sq->phys_addr);
+	return 0;
+}
+
+static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+		      struct c4iw_dev_ucontext *uctx)
+{
+	/*
+	 * uP clears EQ contexts when the connection exits rdma mode,
+	 * so no need to post a RESET WR for these EQs.
+	 */
+	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
+	dealloc_sq(rdev, &wq->sq);
+	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+	kfree(wq->rq.sw_rq);
+	kfree(wq->sq.sw_sq);
+	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+	return 0;
+}
+
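+/*
+ * create_qp, in outline: allocate SQ/RQ qids and (for kernel QPs) the
+ * software shadow rings, carve an RQT that must be a power of two,
+ * contigmalloc both hardware rings, then hand the adapter a single
+ * FW_RI_RES_WR describing the two EQs and sleep until firmware acks it.
+ */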
+static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
+		     struct t4_cq *rcq, struct t4_cq *scq,
+		     struct c4iw_dev_ucontext *uctx)
+{
+	struct adapter *sc = rdev->adap;
+	int user = (uctx != &rdev->uctx);
+	struct fw_ri_res_wr *res_wr;
+	struct fw_ri_res *res;
+	int wr_len;
+	struct c4iw_wr_wait wr_wait;
+	int ret;
+	int eqsize;
+	struct wrqe *wr;
+	const int spg_ndesc = sc->params.sge.spg_len / EQ_ESIZE;
+
+	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
+	if (!wq->sq.qid)
+		return -ENOMEM;
+
+	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
+	if (!wq->rq.qid)
+		goto err1;
+
+	if (!user) {
+		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
+				 GFP_KERNEL);
+		if (!wq->sq.sw_sq)
+			goto err2;
+
+		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
+				 GFP_KERNEL);
+		if (!wq->rq.sw_rq)
+			goto err3;
+	}
+
+	/* RQT must be a power of 2. */
+	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
+	if (!wq->rq.rqt_hwaddr)
+		goto err4;
+
+	if (alloc_host_sq(rdev, &wq->sq))
+		goto err5;
+
+	memset(wq->sq.queue, 0, wq->sq.memsize);
+	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
+
+	wq->rq.queue = contigmalloc(wq->rq.memsize, M_DEVBUF, M_NOWAIT,
+	    0ul, ~0ul, 4096, 0);
+	if (wq->rq.queue == NULL)
+		goto err6;
+	wq->rq.dma_addr = vtophys(wq->rq.queue);
+	CTR5(KTR_IW_CXGBE,
+	    "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__,
+	    wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue),
+	    wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue));
+	memset(wq->rq.queue, 0, wq->rq.memsize);
+	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
+
+	wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
+	    sc->sge_kdoorbell_reg);
+	wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
+			   + sc->sge_gts_reg);
+	if (user) {
+		wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
+						(wq->sq.qid << rdev->qpshift));
+		wq->sq.udb &= PAGE_MASK;
+		wq->rq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
+						(wq->rq.qid << rdev->qpshift));
+		wq->rq.udb &= PAGE_MASK;
+	}
+	wq->rdev = rdev;
+	wq->rq.msn = 1;
+
+	/* build fw_ri_res_wr */
+	wr_len = sizeof *res_wr + 2 * sizeof *res;
+
+	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
+	if (wr == NULL)
+		goto err7;
+	res_wr = wrtod(wr);
+
+	memset(res_wr, 0, wr_len);
+	res_wr->op_nres = cpu_to_be32(
+			V_FW_WR_OP(FW_RI_RES_WR) |
+			V_FW_RI_RES_WR_NRES(2) |
+			F_FW_WR_COMPL);
+	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
+	res_wr->cookie = (unsigned long) &wr_wait;
+	res = res_wr->res;
+	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
+	res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+	/* eqsize is the number of 64B entries plus the status page size. */
+	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_ndesc;
+
+	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
+		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
+		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
+		V_FW_RI_RES_WR_IQID(scq->cqid));
+	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+		V_FW_RI_RES_WR_DCAEN(0) |
+		V_FW_RI_RES_WR_DCACPU(0) |
+		V_FW_RI_RES_WR_FBMIN(2) |
+		V_FW_RI_RES_WR_FBMAX(2) |
+		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+		V_FW_RI_RES_WR_EQSIZE(eqsize));
+	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
+	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
+	res++;
+	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
+	res->u.sqrq.op = FW_RI_RES_OP_WRITE;
+
+	/* eqsize is the number of 64B entries plus the status page size. */
+	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_ndesc;
+	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
+		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
+		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
+		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
+		V_FW_RI_RES_WR_IQID(rcq->cqid));
+	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
+		V_FW_RI_RES_WR_DCAEN(0) |
+		V_FW_RI_RES_WR_DCACPU(0) |
+		V_FW_RI_RES_WR_FBMIN(2) |
+		V_FW_RI_RES_WR_FBMAX(2) |
+		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
+		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
+		V_FW_RI_RES_WR_EQSIZE(eqsize));
+	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
+	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
+
+	c4iw_init_wr_wait(&wr_wait);
+
+	t4_wrq_tx(sc, wr);
+	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
+	if (ret)
+		goto err7;
+
+	CTR6(KTR_IW_CXGBE,
+	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
+	    __func__, wq->sq.qid, wq->rq.qid, wq->db,
+	    (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
+
+	return 0;
+err7:
+	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
+err6:
+	dealloc_sq(rdev, &wq->sq);
+err5:
+	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
+err4:
+	kfree(wq->rq.sw_rq);
+err3:
+	kfree(wq->sq.sw_sq);
+err2:
+	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
+err1:
+	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
+	return -ENOMEM;
+}
+
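+/*
+ * build_immd copies the caller's scatter list inline into the WQE as
+ * FW_RI_DATA_IMMD, wrapping at the end of the SQ ring and zero-padding
+ * the tail out to a 16-byte boundary.
+ */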
+static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
+		      struct ib_send_wr *wr, int max, u32 *plenp)
+{
+	u8 *dstp, *srcp;
+	u32 plen = 0;
+	int i;
+	int rem, len;
+
+	dstp = (u8 *)immdp->data;
+	for (i = 0; i < wr->num_sge; i++) {
+		if ((plen + wr->sg_list[i].length) > max)
+			return -EMSGSIZE;
+		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
+		plen += wr->sg_list[i].length;
+		rem = wr->sg_list[i].length;
+		while (rem) {
+			if (dstp == (u8 *)&sq->queue[sq->size])
+				dstp = (u8 *)sq->queue;
+			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
+				len = rem;
+			else
+				len = (u8 *)&sq->queue[sq->size] - dstp;
+			memcpy(dstp, srcp, len);
+			dstp += len;
+			srcp += len;
+			rem -= len;
+		}
+	}
+	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
+	if (len)
+		memset(dstp, 0, len);
+	immdp->op = FW_RI_DATA_IMMD;
+	immdp->r1 = 0;
+	immdp->r2 = 0;
+	immdp->immdlen = cpu_to_be32(plen);
+	*plenp = plen;
+	return 0;
+}
+
+static int build_isgl(__be64 *queue_start, __be64 *queue_end,
+		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
+		      int num_sge, u32 *plenp)
+{
+	int i;
+	u32 plen = 0;
+	__be64 *flitp = (__be64 *)isglp->sge;
+
+	for (i = 0; i < num_sge; i++) {
+		if ((plen + sg_list[i].length) < plen)
+			return -EMSGSIZE;
+		plen += sg_list[i].length;
+		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
+				     sg_list[i].length);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+		*flitp = cpu_to_be64(sg_list[i].addr);
+		if (++flitp == queue_end)
+			flitp = queue_start;
+	}
+	*flitp = (__force __be64)0;
+	isglp->op = FW_RI_DATA_ISGL;
+	isglp->r1 = 0;
+	isglp->nsge = cpu_to_be16(num_sge);
+	isglp->r2 = 0;
+	if (plenp)
+		*plenp = plen;
+	return 0;
+}
+
+static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
+			   struct ib_send_wr *wr, u8 *len16)
+{
+	u32 plen;
+	int size;
+	int ret;
+
+	if (wr->num_sge > T4_MAX_SEND_SGE)
+		return -EINVAL;
+	switch (wr->opcode) {
+	case IB_WR_SEND:
+		if (wr->send_flags & IB_SEND_SOLICITED)
+			wqe->send.sendop_pkd = cpu_to_be32(
+				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
+		else
+			wqe->send.sendop_pkd = cpu_to_be32(
+				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
+		wqe->send.stag_inv = 0;
+		break;
+	case IB_WR_SEND_WITH_INV:
+		if (wr->send_flags & IB_SEND_SOLICITED)
+			wqe->send.sendop_pkd = cpu_to_be32(
+				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
+		else
+			wqe->send.sendop_pkd = cpu_to_be32(
+				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
+		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	plen = 0;
+	if (wr->num_sge) {
+		if (wr->send_flags & IB_SEND_INLINE) {
+			ret = build_immd(sq, wqe->send.u.immd_src, wr,
+					 T4_MAX_SEND_INLINE, &plen);
+			if (ret)
+				return ret;
+			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
+			       plen;
+		} else {
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->send.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
+			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
+			       wr->num_sge * sizeof(struct fw_ri_sge);
+		}
+	} else {
+		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
+		wqe->send.u.immd_src[0].r1 = 0;
+		wqe->send.u.immd_src[0].r2 = 0;
+		wqe->send.u.immd_src[0].immdlen = 0;
+		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
+		plen = 0;
+	}
+	*len16 = DIV_ROUND_UP(size, 16);
+	wqe->send.plen = cpu_to_be32(plen);
+	return 0;
+}
+
+static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
+			    struct ib_send_wr *wr, u8 *len16)
+{
+	u32 plen;
+	int size;
+	int ret;
+
+	if (wr->num_sge > T4_MAX_SEND_SGE)
+		return -EINVAL;
+	wqe->write.immd_data = 0;
+	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
+	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
+	if (wr->num_sge) {
+		if (wr->send_flags & IB_SEND_INLINE) {
+			ret = build_immd(sq, wqe->write.u.immd_src, wr,
+					 T4_MAX_WRITE_INLINE, &plen);
+			if (ret)
+				return ret;
+			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
+			       plen;
+		} else {
+			ret = build_isgl((__be64 *)sq->queue,
+					 (__be64 *)&sq->queue[sq->size],
+					 wqe->write.u.isgl_src,
+					 wr->sg_list, wr->num_sge, &plen);
+			if (ret)
+				return ret;
+			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
+			       wr->num_sge * sizeof(struct fw_ri_sge);
+		}
+	} else {
+		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+		wqe->write.u.immd_src[0].r1 = 0;
+		wqe->write.u.immd_src[0].r2 = 0;
+		wqe->write.u.immd_src[0].immdlen = 0;
+		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
+		plen = 0;
+	}
+	*len16 = DIV_ROUND_UP(size, 16);
+	wqe->write.plen = cpu_to_be32(plen);
+	return 0;
+}
+
+static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
+{
+	if (wr->num_sge > 1)
+		return -EINVAL;
+	if (wr->num_sge) {
+		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
+		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
+							>> 32));
+		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
+		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
+		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
+		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
+							 >> 32));
+		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
+	} else {
+		wqe->read.stag_src = cpu_to_be32(2);
+		wqe->read.to_src_hi = 0;
+		wqe->read.to_src_lo = 0;
+		wqe->read.stag_sink = cpu_to_be32(2);
+		wqe->read.plen = 0;
+		wqe->read.to_sink_hi = 0;
+		wqe->read.to_sink_lo = 0;
+	}
+	wqe->read.r2 = 0;
+	wqe->read.r5 = 0;
+	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
+	return 0;
+}
+
+static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
+			   struct ib_recv_wr *wr, u8 *len16)
+{
+	int ret;
+
+	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
+			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
+			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
+	if (ret)
+		return ret;
+	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
+			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
+	return 0;
+}
+
+static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
+			 struct ib_send_wr *wr, u8 *len16)
+{
+	struct fw_ri_immd *imdp;
+	__be64 *p;
+	int i;
+	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
+	int rem;
+
+	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
+		return -EINVAL;
+
+	wqe->fr.qpbinde_to_dcacpu = 0;
+	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
+	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
+	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
+	wqe->fr.len_hi = 0;
+	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
+	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
+	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
+	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
+					0xffffffff);
+	WARN_ON(pbllen > T4_MAX_FR_IMMD);
+	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+	imdp->op = FW_RI_DATA_IMMD;
+	imdp->r1 = 0;
+	imdp->r2 = 0;
+	imdp->immdlen = cpu_to_be32(pbllen);
+	p = (__be64 *)(imdp + 1);
+	rem = pbllen;
+	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
+		rem -= sizeof *p;
+		if (++p == (__be64 *)&sq->queue[sq->size])
+			p = (__be64 *)sq->queue;
+	}
+	BUG_ON(rem < 0);
+	while (rem) {
+		*p = 0;
+		rem -= sizeof *p;
+		if (++p == (__be64 *)&sq->queue[sq->size])
+			p = (__be64 *)sq->queue;
+	}
+	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
+	return 0;
+}
+
+static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
+			  u8 *len16)
+{
+	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
+	wqe->inv.r2 = 0;
+	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
+	return 0;
+}
+
+void c4iw_qp_add_ref(struct ib_qp *qp)
+{
+	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
+	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
+}
+
+void c4iw_qp_rem_ref(struct ib_qp *qp)
+{
+	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, qp);
+	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
+		wake_up(&(to_c4iw_qp(qp)->wait));
+}
+
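+/*
+ * When the WQ is already in error, posted work requests are completed in
+ * software instead of being handed to hardware: a T4_ERR_SWFLUSH CQE
+ * carrying the wr_id is pushed onto the software CQ and the completion
+ * handler is invoked by hand.
+ */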
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *schp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	schp = to_c4iw_cq(qhp->ibqp.send_cq);
+	cq = &schp->cq;
+
+	PDBG("%s drain sq id %u\n", __func__, qhp->wq.sq.qid);
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+				 V_CQE_TYPE(1) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&schp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&schp->lock, flag);
+
+	spin_lock_irqsave(&schp->comp_handler_lock, flag);
+	(*schp->ibcq.comp_handler)(&schp->ibcq,
+				   schp->ibcq.cq_context);
+	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+	struct t4_cqe cqe = {};
+	struct c4iw_cq *rchp;
+	unsigned long flag;
+	struct t4_cq *cq;
+
+	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+	cq = &rchp->cq;
+
+	PDBG("%s drain rq id %u\n", __func__, qhp->wq.sq.qid);
+	cqe.u.drain_cookie = wr->wr_id;
+	cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
+				 V_CQE_OPCODE(C4IW_DRAIN_OPCODE) |
+				 V_CQE_TYPE(0) |
+				 V_CQE_SWCQE(1) |
+				 V_CQE_QPID(qhp->wq.sq.qid));
+
+	spin_lock_irqsave(&rchp->lock, flag);
+	cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
+	cq->sw_queue[cq->sw_pidx] = cqe;
+	t4_swcq_produce(cq);
+	spin_unlock_irqrestore(&rchp->lock, flag);
+
+	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+				   rchp->ibcq.cq_context);
+	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
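+/*
+ * Post path, in outline: under the QP lock, one WQE is built per work
+ * request at the current SQ pidx and sized in 16-byte units (len16);
+ * "idx" accumulates the 64-byte EQ slots consumed so the single doorbell
+ * ring at the end advertises everything posted in this call.
+ */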
+int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+		   struct ib_send_wr **bad_wr)
+{
+	int err = 0;
+	u8 len16 = 0;
+	enum fw_wr_opcodes fw_opcode = 0;
+	enum fw_ri_wr_flags fw_flags;
+	struct c4iw_qp *qhp;
+	union t4_wr *wqe;
+	u32 num_wrs;
+	struct t4_swsqe *swsqe;
+	unsigned long flag;
+	u16 idx = 0;
+
+	qhp = to_c4iw_qp(ibqp);
+	spin_lock_irqsave(&qhp->lock, flag);
+	if (t4_wq_in_error(&qhp->wq)) {
+		spin_unlock_irqrestore(&qhp->lock, flag);
+		complete_sq_drain_wr(qhp, wr);
+		return err;
+	}
+	num_wrs = t4_sq_avail(&qhp->wq);
+	if (num_wrs == 0) {
+		spin_unlock_irqrestore(&qhp->lock, flag);
+		return -ENOMEM;
+	}
+	while (wr) {
+		if (num_wrs == 0) {
+			err = -ENOMEM;
+			*bad_wr = wr;
+			break;
+		}
+		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
+		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
+
+		fw_flags = 0;
+		if (wr->send_flags & IB_SEND_SOLICITED)
+			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
+		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
+			fw_flags |= FW_RI_COMPLETION_FLAG;
+		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
+		switch (wr->opcode) {
+		case IB_WR_SEND_WITH_INV:
+		case IB_WR_SEND:
+			if (wr->send_flags & IB_SEND_FENCE)
+				fw_flags |= FW_RI_READ_FENCE_FLAG;
+			fw_opcode = FW_RI_SEND_WR;
+			if (wr->opcode == IB_WR_SEND)
+				swsqe->opcode = FW_RI_SEND;
+			else
+				swsqe->opcode = FW_RI_SEND_WITH_INV;
+			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
+			break;
+		case IB_WR_RDMA_WRITE:
+			fw_opcode = FW_RI_RDMA_WRITE_WR;
+			swsqe->opcode = FW_RI_RDMA_WRITE;
+			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
+			break;
+		case IB_WR_RDMA_READ:
+		case IB_WR_RDMA_READ_WITH_INV:
+			fw_opcode = FW_RI_RDMA_READ_WR;
+			swsqe->opcode = FW_RI_READ_REQ;
+			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
+				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
+			else
+				fw_flags = 0;
+			err = build_rdma_read(wqe, wr, &len16);
+			if (err)
+				break;
+			swsqe->read_len = wr->sg_list[0].length;
+			if (!qhp->wq.sq.oldest_read)
+				qhp->wq.sq.oldest_read = swsqe;
+			break;
+		case IB_WR_FAST_REG_MR:
+			fw_opcode = FW_RI_FR_NSMR_WR;
+			swsqe->opcode = FW_RI_FAST_REGISTER;
+			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
+			break;
+		case IB_WR_LOCAL_INV:
+			if (wr->send_flags & IB_SEND_FENCE)
+				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
+			fw_opcode = FW_RI_INV_LSTAG_WR;
+			swsqe->opcode = FW_RI_LOCAL_INV;
+			err = build_inv_stag(wqe, wr, &len16);
+			break;
+		default:
+			CTR2(KTR_IW_CXGBE, "%s post of type =%d TBD!", __func__,
+			     wr->opcode);
+			err = -EINVAL;
+		}
+		if (err) {
+			*bad_wr = wr;
+			break;
+		}
+		swsqe->idx = qhp->wq.sq.pidx;
+		swsqe->complete = 0;
+		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+					qhp->sq_sig_all;
+		swsqe->wr_id = wr->wr_id;
+
+		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
+
+		CTR5(KTR_IW_CXGBE,
+		    "%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u",
+		    __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
+		    swsqe->opcode, swsqe->read_len);
+		wr = wr->next;
+		num_wrs--;
+		t4_sq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	}
+
+	t4_ring_sq_db(&qhp->wq, idx);
+	spin_unlock_irqrestore(&qhp->lock, flag);
+	return err;
+}
+
+int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+		      struct ib_recv_wr **bad_wr)
+{
+	int err = 0;
+	struct c4iw_qp *qhp;
+	union t4_recv_wr *wqe;
+	u32 num_wrs;
+	u8 len16 = 0;
+	unsigned long flag;
+	u16 idx = 0;
+
+	qhp = to_c4iw_qp(ibqp);
+	spin_lock_irqsave(&qhp->lock, flag);
+	if (t4_wq_in_error(&qhp->wq)) {
+		spin_unlock_irqrestore(&qhp->lock, flag);
+		complete_rq_drain_wr(qhp, wr);
+		return err;
+	}
+	num_wrs = t4_rq_avail(&qhp->wq);
+	if (num_wrs == 0) {
+		spin_unlock_irqrestore(&qhp->lock, flag);
+		return -ENOMEM;
+	}
+	while (wr) {
+		if (wr->num_sge > T4_MAX_RECV_SGE) {
+			err = -EINVAL;
+			*bad_wr = wr;
+			break;
+		}
+		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
+					   qhp->wq.rq.wq_pidx *
+					   T4_EQ_ENTRY_SIZE);
+		if (num_wrs)
+			err = build_rdma_recv(qhp, wqe, wr, &len16);
+		else
+			err = -ENOMEM;
+		if (err) {
+			*bad_wr = wr;
+			break;
+		}
+
+		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+
+		wqe->recv.opcode = FW_RI_RECV_WR;
+		wqe->recv.r1 = 0;
+		wqe->recv.wrid = qhp->wq.rq.pidx;
+		wqe->recv.r2[0] = 0;
+		wqe->recv.r2[1] = 0;
+		wqe->recv.r2[2] = 0;
+		wqe->recv.len16 = len16;
+		CTR3(KTR_IW_CXGBE, "%s cookie 0x%llx pidx %u", __func__,
+		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
+		t4_rq_produce(&qhp->wq, len16);
+		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+		wr = wr->next;
+		num_wrs--;
+	}
+
+	t4_ring_rq_db(&qhp->wq, idx);
+	spin_unlock_irqrestore(&qhp->lock, flag);
+	return err;
+}
+
+int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
+{
+	return -ENOSYS;
+}
+
+static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
+				    u8 *ecode)
+{
+	int status;
+	int tagged;
+	int opcode;
+	int rqtype;
+	int send_inv;
+
+	if (!err_cqe) {
+		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+		*ecode = 0;
+		return;
+	}
+
+	status = CQE_STATUS(err_cqe);
+	opcode = CQE_OPCODE(err_cqe);
+	rqtype = RQ_TYPE(err_cqe);
+	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
+		   (opcode == FW_RI_SEND_WITH_SE_INV);
+	tagged = (opcode == FW_RI_RDMA_WRITE) ||
+		 (rqtype && (opcode == FW_RI_READ_RESP));
+
+	switch (status) {
+	case T4_ERR_STAG:
+		if (send_inv) {
+			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+			*ecode = RDMAP_CANT_INV_STAG;
+		} else {
+			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+			*ecode = RDMAP_INV_STAG;
+		}
+		break;
+	case T4_ERR_PDID:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+		if ((opcode == FW_RI_SEND_WITH_INV) ||
+		    (opcode == FW_RI_SEND_WITH_SE_INV))
+			*ecode = RDMAP_CANT_INV_STAG;
+		else
+			*ecode = RDMAP_STAG_NOT_ASSOC;
+		break;
+	case T4_ERR_QPID:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+		*ecode = RDMAP_STAG_NOT_ASSOC;
+		break;
+	case T4_ERR_ACCESS:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+		*ecode = RDMAP_ACC_VIOL;
+		break;
+	case T4_ERR_WRAP:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+		*ecode = RDMAP_TO_WRAP;
+		break;
+	case T4_ERR_BOUND:
+		if (tagged) {
+			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+			*ecode = DDPT_BASE_BOUNDS;
+		} else {
+			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
+			*ecode = RDMAP_BASE_BOUNDS;
+		}
+		break;
+	case T4_ERR_INVALIDATE_SHARED_MR:
+	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+		*ecode = RDMAP_CANT_INV_STAG;
+		break;
+	case T4_ERR_ECC:
+	case T4_ERR_ECC_PSTAG:
+	case T4_ERR_INTERNAL_ERR:
+		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
+		*ecode = 0;
+		break;
+	case T4_ERR_OUT_OF_RQE:
+		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+		*ecode = DDPU_INV_MSN_NOBUF;
+		break;
+	case T4_ERR_PBL_ADDR_BOUND:
+		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+		*ecode = DDPT_BASE_BOUNDS;
+		break;
+	case T4_ERR_CRC:
+		*layer_type = LAYER_MPA|DDP_LLP;
+		*ecode = MPA_CRC_ERR;
+		break;
+	case T4_ERR_MARKER:
+		*layer_type = LAYER_MPA|DDP_LLP;
+		*ecode = MPA_MARKER_ERR;
+		break;
+	case T4_ERR_PDU_LEN_ERR:
+		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+		*ecode = DDPU_MSG_TOOBIG;
+		break;
+	case T4_ERR_DDP_VERSION:
+		if (tagged) {
+			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
+			*ecode = DDPT_INV_VERS;
+		} else {
+			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+			*ecode = DDPU_INV_VERS;
+		}
+		break;
+	case T4_ERR_RDMA_VERSION:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+		*ecode = RDMAP_INV_VERS;
+		break;
+	case T4_ERR_OPCODE:
+		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
+		*ecode = RDMAP_INV_OPCODE;
+		break;
+	case T4_ERR_DDP_QUEUE_NUM:
+		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+		*ecode = DDPU_INV_QN;
+		break;
+	case T4_ERR_MSN:
+	case T4_ERR_MSN_GAP:
+	case T4_ERR_MSN_RANGE:
+	case T4_ERR_IRD_OVERFLOW:
+		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+		*ecode = DDPU_INV_MSN_RANGE;
+		break;
+	case T4_ERR_TBIT:
+		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
+		*ecode = 0;
+		break;
+	case T4_ERR_MO:
+		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
+		*ecode = DDPU_INV_MO;
+		break;
+	default:
+		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
+		*ecode = 0;
+		break;
+	}
+}
+
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+			   gfp_t gfp)
+{
+	int ret;
+	struct fw_ri_wr *wqe;
+	struct terminate_message *term;
+	struct wrqe *wr;
+	struct socket *so = qhp->ep->com.so;
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = intotcpcb(inp);
+	struct toepcb *toep = tp->t_toe;
+
+	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
+	    qhp->wq.sq.qid, qhp->ep->hwtid);
+
+	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
+	if (wr == NULL)
+		return;
+	wqe = wrtod(wr);
+
+	memset(wqe, 0, sizeof *wqe);
+	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR));
+	wqe->flowid_len16 = cpu_to_be32(
+		V_FW_WR_FLOWID(qhp->ep->hwtid) |
+		V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
+	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
+	term = (struct terminate_message *)wqe->u.terminate.termmsg;
+	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
+		term->layer_etype = qhp->attr.layer_etype;
+		term->ecode = qhp->attr.ecode;
+	} else
+		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
+	ret = creds(toep, inp, sizeof(*wqe));
+	if (ret) {
+		free_wrqe(wr);
+		return;
+	}
+	t4_wrq_tx(qhp->rhp->rdev.adap, wr);
+}
+
+/* Assumes qhp lock is held. */
+static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
+		       struct c4iw_cq *schp)
+{
+	int count;
+	int flushed;
+	unsigned long flag;
+
+	CTR4(KTR_IW_CXGBE, "%s qhp %p rchp %p schp %p", __func__, qhp, rchp,
+	    schp);
+
+	/* locking hierarchy: cq lock first, then qp lock. */
+	spin_lock_irqsave(&rchp->lock, flag);
+	spin_lock(&qhp->lock);
+	c4iw_flush_hw_cq(&rchp->cq);
+	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
+	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+	spin_unlock(&qhp->lock);
+	spin_unlock_irqrestore(&rchp->lock, flag);
+	if (flushed && rchp->ibcq.comp_handler) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	}
+
+	/* locking hierarchy: cq lock first, then qp lock. */
+	spin_lock_irqsave(&schp->lock, flag);
+	spin_lock(&qhp->lock);
+	c4iw_flush_hw_cq(&schp->cq);
+	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
+	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+	spin_unlock(&qhp->lock);
+	spin_unlock_irqrestore(&schp->lock, flag);
+	if (flushed && schp->ibcq.comp_handler) {
+		spin_lock_irqsave(&schp->comp_handler_lock, flag);
+		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	}
+}
+
+static void flush_qp(struct c4iw_qp *qhp)
+{
+	struct c4iw_cq *rchp, *schp;
+	unsigned long flag;
+
+	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
+	schp = get_chp(qhp->rhp, qhp->attr.scq);
+
+	if (qhp->ibqp.uobject) {
+		t4_set_wq_in_error(&qhp->wq);
+		t4_set_cq_in_error(&rchp->cq);
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		if (schp != rchp) {
+			t4_set_cq_in_error(&schp->cq);
+			spin_lock_irqsave(&schp->comp_handler_lock, flag);
+			(*schp->ibcq.comp_handler)(&schp->ibcq,
+					schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+		}
+		return;
+	}
+	__flush_qp(qhp, rchp, schp);
+}
+
+static int
+rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
+{
+	struct c4iw_rdev *rdev = &rhp->rdev;
+	struct adapter *sc = rdev->adap;
+	struct fw_ri_wr *wqe;
+	int ret;
+	struct wrqe *wr;
+	struct socket *so = ep->com.so;
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = intotcpcb(inp);
+	struct toepcb *toep = tp->t_toe;
+
+	KASSERT(rhp == qhp->rhp && ep == qhp->ep, ("%s: EDOOFUS", __func__));
+
+	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
+	    qhp->wq.sq.qid, ep->hwtid);
+
+	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
+	if (wr == NULL)
+		return (-ENOMEM);
+	wqe = wrtod(wr);
+
+	memset(wqe, 0, sizeof *wqe);
+
+	wqe->op_compl = cpu_to_be32(V_FW_WR_OP(FW_RI_WR) | F_FW_WR_COMPL);
+	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
+	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+	wqe->cookie = (unsigned long) &ep->com.wr_wait;
+	wqe->u.fini.type = FW_RI_TYPE_FINI;
+
+	c4iw_init_wr_wait(&ep->com.wr_wait);
+
+	ret = creds(toep, inp, sizeof(*wqe));
+	if (ret) {
+		free_wrqe(wr);
+		return ret;
+	}
+	t4_wrq_tx(sc, wr);
+
+	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
+	    qhp->wq.sq.qid, __func__);
+	return ret;
+}
+
+static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
+{
+	CTR2(KTR_IW_CXGBE, "%s p2p_type = %d", __func__, p2p_type);
+	memset(&init->u, 0, sizeof init->u);
+	switch (p2p_type) {
+	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
+		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
+		init->u.write.stag_sink = cpu_to_be32(1);
+		init->u.write.to_sink = cpu_to_be64(1);
+		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
+		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
+						   sizeof(struct fw_ri_immd),
+						   16);
+		break;
+	case FW_RI_INIT_P2PTYPE_READ_REQ:
+		init->u.write.opcode = FW_RI_RDMA_READ_WR;
+		init->u.read.stag_src = cpu_to_be32(1);
+		init->u.read.to_src_lo = cpu_to_be32(1);
+		init->u.read.stag_sink = cpu_to_be32(1);
+		init->u.read.to_sink_lo = cpu_to_be32(1);
+		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
+		break;
+	}
+}
+
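+/*
+ * creds() charges one tx descriptor on the offloaded connection for the
+ * work request about to be sent: howmany(wrsize, 16) 16-byte credits are
+ * debited from the toepcb, and the txsd ring entry records them so the
+ * usual TOE credit accounting covers this WR as well.
+ */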
+static int
+creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
+{
+	struct ofld_tx_sdesc *txsd;
+
+	CTR3(KTR_IW_CXGBE, "%s:creB %p %u", __func__, toep, wrsize);
+	INP_WLOCK(inp);
+	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
+		INP_WUNLOCK(inp);
+		return (EINVAL);
+	}
+	txsd = &toep->txsd[toep->txsd_pidx];
+	txsd->tx_credits = howmany(wrsize, 16);
+	txsd->plen = 0;
+	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
+			("%s: not enough credits (%d)", __func__, toep->tx_credits));
+	toep->tx_credits -= txsd->tx_credits;
+	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+		toep->txsd_pidx = 0;
+	toep->txsd_avail--;
+	INP_WUNLOCK(inp);
+	CTR5(KTR_IW_CXGBE, "%s:creE %p %u %u %u", __func__, toep,
+	    txsd->tx_credits, toep->tx_credits, toep->txsd_pidx);
+	return (0);
+}
+
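+/*
+ * rdma_init pushes a FW_RI_WR of type INIT carrying the negotiated MPA
+ * attributes and the QP/CQ ids, waits for the firmware reply, and then
+ * marks the tid's ulp_mode as ULP_MODE_RDMA.
+ */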
+static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+{
+	struct fw_ri_wr *wqe;
+	int ret;
+	struct wrqe *wr;
+	struct c4iw_ep *ep = qhp->ep;
+	struct c4iw_rdev *rdev = &qhp->rhp->rdev;
+	struct adapter *sc = rdev->adap;
+	struct socket *so = ep->com.so;
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = intotcpcb(inp);
+	struct toepcb *toep = tp->t_toe;
+
+	CTR4(KTR_IW_CXGBE, "%s qhp %p qid 0x%x tid %u", __func__, qhp,
+	    qhp->wq.sq.qid, ep->hwtid);
+
+	wr = alloc_wrqe(sizeof(*wqe), toep->ofld_txq);
+	if (wr == NULL)
+		return (-ENOMEM);
+	wqe = wrtod(wr);
+
+	memset(wqe, 0, sizeof *wqe);
+
+	wqe->op_compl = cpu_to_be32(
+		V_FW_WR_OP(FW_RI_WR) |
+		F_FW_WR_COMPL);
+	wqe->flowid_len16 = cpu_to_be32(V_FW_WR_FLOWID(ep->hwtid) |
+	    V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+
+	wqe->cookie = (unsigned long) &ep->com.wr_wait;
+
+	wqe->u.init.type = FW_RI_TYPE_INIT;
+	wqe->u.init.mpareqbit_p2ptype =
+		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
+		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
+	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
+	if (qhp->attr.mpa_attr.recv_marker_enabled)
+		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
+	if (qhp->attr.mpa_attr.xmit_marker_enabled)
+		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
+	if (qhp->attr.mpa_attr.crc_enabled)
+		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
+
+	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
+			    FW_RI_QP_RDMA_WRITE_ENABLE |
+			    FW_RI_QP_BIND_ENABLE;
+	if (!qhp->ibqp.uobject)
+		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
+				     FW_RI_QP_STAG0_ENABLE;
+	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
+	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
+	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
+	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
+	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
+	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
+	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
+	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
+	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
+	wqe->u.init.iss = cpu_to_be32(ep->snd_seq);
+	wqe->u.init.irs = cpu_to_be32(ep->rcv_seq);
+	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
+	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
+	    sc->vres.rq.start);
+	if (qhp->attr.mpa_attr.initiator)
+		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
+
+	c4iw_init_wr_wait(&ep->com.wr_wait);
+
+	ret = creds(toep, inp, sizeof(*wqe));
+	if (ret) {
+		free_wrqe(wr);
+		return ret;
+	}
+	t4_wrq_tx(sc, wr);
+
+	ret = c4iw_wait_for_reply(rdev, &ep->com.wr_wait, ep->hwtid,
+	    qhp->wq.sq.qid, __func__);
+
+	toep->ulp_mode = ULP_MODE_RDMA;
+
+	return ret;
+}
+
+int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+		   enum c4iw_qp_attr_mask mask,
+		   struct c4iw_qp_attributes *attrs,
+		   int internal)
+{
+	int ret = 0;
+	struct c4iw_qp_attributes newattr = qhp->attr;
+	int disconnect = 0;
+	int terminate = 0;
+	int abort = 0;
+	int free = 0;
+	struct c4iw_ep *ep = NULL;
+
+	CTR5(KTR_IW_CXGBE, "%s qhp %p sqid 0x%x rqid 0x%x ep %p", __func__, qhp,
+	    qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep);
+	CTR3(KTR_IW_CXGBE, "%s state %d -> %d", __func__, qhp->attr.state,
+	    (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
+
+	mutex_lock(&qhp->mutex);
+
+	/* Process attr changes if in IDLE */
+	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
+		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
+			ret = -EIO;
+			goto out;
+		}
+		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
+			newattr.enable_rdma_read = attrs->enable_rdma_read;
+		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
+			newattr.enable_rdma_write = attrs->enable_rdma_write;
+		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
+			newattr.enable_bind = attrs->enable_bind;
+		if (mask & C4IW_QP_ATTR_MAX_ORD) {
+			if (attrs->max_ord > c4iw_max_read_depth) {
+				ret = -EINVAL;
+				goto out;
+			}
+			newattr.max_ord = attrs->max_ord;
+		}
+		if (mask & C4IW_QP_ATTR_MAX_IRD) {
+			if (attrs->max_ird > c4iw_max_read_depth) {
+				ret = -EINVAL;
+				goto out;
+			}
+			newattr.max_ird = attrs->max_ird;
+		}
+		qhp->attr = newattr;
+	}
+
+	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
+		goto out;
+	if (qhp->attr.state == attrs->next_state)
+		goto out;
+
+	switch (qhp->attr.state) {
+	case C4IW_QP_STATE_IDLE:
+		switch (attrs->next_state) {
+		case C4IW_QP_STATE_RTS:
+			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
+				ret = -EINVAL;
+				goto out;
+			}
+			qhp->attr.mpa_attr = attrs->mpa_attr;
+			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
+			qhp->ep = qhp->attr.llp_stream_handle;
+			set_state(qhp, C4IW_QP_STATE_RTS);
+
+			/*
+			 * Ref the endpoint here and deref when we
+			 * disassociate the endpoint from the QP.  This
+			 * happens in CLOSING->IDLE transition or *->ERROR
+			 * transition.
+			 */
+			c4iw_get_ep(&qhp->ep->com);
+			ret = rdma_init(rhp, qhp);
+			if (ret)
+				goto err;
+			break;
+		case C4IW_QP_STATE_ERROR:
+			set_state(qhp, C4IW_QP_STATE_ERROR);
+			flush_qp(qhp);
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
+		break;
+	case C4IW_QP_STATE_RTS:
+		switch (attrs->next_state) {
+		case C4IW_QP_STATE_CLOSING:
+			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+			set_state(qhp, C4IW_QP_STATE_CLOSING);
+			ep = qhp->ep;
+			if (!internal) {
+				abort = 0;
+				disconnect = 1;
+				c4iw_get_ep(&qhp->ep->com);
+			}
+			if (qhp->ibqp.uobject)
+				t4_set_wq_in_error(&qhp->wq);
+			ret = rdma_fini(rhp, qhp, ep);
+			if (ret)
+				goto err;
+			break;
+		case C4IW_QP_STATE_TERMINATE:
+			set_state(qhp, C4IW_QP_STATE_TERMINATE);
+			qhp->attr.layer_etype = attrs->layer_etype;
+			qhp->attr.ecode = attrs->ecode;
+			if (qhp->ibqp.uobject)
+				t4_set_wq_in_error(&qhp->wq);
+			ep = qhp->ep;
+			if (!internal)
+				terminate = 1;
+			disconnect = 1;
+			c4iw_get_ep(&qhp->ep->com);
+			break;
+		case C4IW_QP_STATE_ERROR:
+			set_state(qhp, C4IW_QP_STATE_ERROR);
+			if (qhp->ibqp.uobject)
+				t4_set_wq_in_error(&qhp->wq);
+			if (!internal) {
+				abort = 1;
+				disconnect = 1;
+				ep = qhp->ep;
+				c4iw_get_ep(&qhp->ep->com);
+			}
+			goto err;
+			break;
+		default:
+			ret = -EINVAL;
+			goto out;
+		}
+		break;
+	case C4IW_QP_STATE_CLOSING:
+
+		/*
+		 * Allow kernel users to move to ERROR for qp draining.
+		 */
+		if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+				  C4IW_QP_STATE_ERROR)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		switch (attrs->next_state) {
+		case C4IW_QP_STATE_IDLE:
+			flush_qp(qhp);
+			set_state(qhp, C4IW_QP_STATE_IDLE);
+			qhp->attr.llp_stream_handle = NULL;
+			c4iw_put_ep(&qhp->ep->com);
+			qhp->ep = NULL;
+			wake_up(&qhp->wait);
+			break;
+		case C4IW_QP_STATE_ERROR:
+			goto err;
+		default:
+			ret = -EINVAL;
+			goto err;
+		}
+		break;
+	case C4IW_QP_STATE_ERROR:
+		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		set_state(qhp, C4IW_QP_STATE_IDLE);
+		break;
+	case C4IW_QP_STATE_TERMINATE:
+		if (!internal) {
+			ret = -EINVAL;
+			goto out;
+		}
+		goto err;
+		break;
+	default:
+		printf("%s in a bad state %d\n",
+		       __func__, qhp->attr.state);
+		ret = -EINVAL;
+		goto err;
+		break;
+	}
+	goto out;
+err:
+	CTR3(KTR_IW_CXGBE, "%s disassociating ep %p qpid 0x%x", __func__,
+	    qhp->ep, qhp->wq.sq.qid);
+
+	/* disassociate the LLP connection */
+	qhp->attr.llp_stream_handle = NULL;
+	if (!ep)
+		ep = qhp->ep;
+	qhp->ep = NULL;
+	set_state(qhp, C4IW_QP_STATE_ERROR);
+	free = 1;
+	abort = 1;
+	BUG_ON(!ep);
+	flush_qp(qhp);
+	wake_up(&qhp->wait);
+out:
+	mutex_unlock(&qhp->mutex);
+
+	if (terminate)
+		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
+
+	/*
+	 * If disconnect is 1, then we need to initiate a disconnect
+	 * on the EP.  This can be a normal close (RTS->CLOSING) or
+	 * an abnormal close (RTS/CLOSING->ERROR).
+	 */
+	if (disconnect) {
+		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+							 GFP_KERNEL);
+		c4iw_put_ep(&ep->com);
+	}
+
+	/*
+	 * If free is 1, then we've disassociated the EP from the QP
+	 * and we need to dereference the EP.
+	 */
+	if (free)
+		c4iw_put_ep(&ep->com);
+	CTR2(KTR_IW_CXGBE, "%s exit state %d", __func__, qhp->attr.state);
+	return ret;
+}
+
+int c4iw_destroy_qp(struct ib_qp *ib_qp)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_qp *qhp;
+	struct c4iw_qp_attributes attrs;
+	struct c4iw_ucontext *ucontext;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ib_qp);
+	qhp = to_c4iw_qp(ib_qp);
+	rhp = qhp->rhp;
+
+	attrs.next_state = C4IW_QP_STATE_ERROR;
+	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
+		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+	else
+		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+	wait_event(qhp->wait, !qhp->ep);
+
+	spin_lock_irq(&rhp->lock);
+	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+	spin_unlock_irq(&rhp->lock);
+	atomic_dec(&qhp->refcnt);
+	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+
+	ucontext = ib_qp->uobject ?
+		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
+	destroy_qp(&rhp->rdev, &qhp->wq,
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+	CTR3(KTR_IW_CXGBE, "%s ib_qp %p qpid 0x%0x", __func__, ib_qp,
+	    qhp->wq.sq.qid);
+	kfree(qhp);
+	return 0;
+}
+
+struct ib_qp *
+c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
+    struct ib_udata *udata)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_qp *qhp;
+	struct c4iw_pd *php;
+	struct c4iw_cq *schp;
+	struct c4iw_cq *rchp;
+	struct c4iw_create_qp_resp uresp;
+	int sqsize, rqsize;
+	struct c4iw_ucontext *ucontext;
+	int ret, spg_ndesc;
+	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_pd %p", __func__, pd);
+
+	if (attrs->qp_type != IB_QPT_RC)
+		return ERR_PTR(-EINVAL);
+
+	php = to_c4iw_pd(pd);
+	rhp = php->rhp;
+	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
+	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
+	if (!schp || !rchp)
+		return ERR_PTR(-EINVAL);
+
+	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
+		return ERR_PTR(-EINVAL);
+
+	spg_ndesc = rhp->rdev.adap->params.sge.spg_len / EQ_ESIZE;
+	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
+	if (rqsize > T4_MAX_RQ_SIZE(spg_ndesc))
+		return ERR_PTR(-E2BIG);
+
+	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
+	if (sqsize > T4_MAX_SQ_SIZE(spg_ndesc))
+		return ERR_PTR(-E2BIG);
+
+	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
+
+
+	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
+	if (!qhp)
+		return ERR_PTR(-ENOMEM);
+	qhp->wq.sq.size = sqsize;
+	qhp->wq.sq.memsize = (sqsize + spg_ndesc) * sizeof *qhp->wq.sq.queue +
+	    16 * sizeof(__be64);
+	qhp->wq.rq.size = rqsize;
+	qhp->wq.rq.memsize = (rqsize + spg_ndesc) * sizeof *qhp->wq.rq.queue;
+
+	if (ucontext) {
+		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
+		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
+	}
+
+	CTR5(KTR_IW_CXGBE, "%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu",
+	    __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
+
+	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
+			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+	if (ret)
+		goto err1;
+
+	attrs->cap.max_recv_wr = rqsize - 1;
+	attrs->cap.max_send_wr = sqsize - 1;
+	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
+
+	qhp->rhp = rhp;
+	qhp->attr.pd = php->pdid;
+	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
+	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
+	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
+	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
+	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
+	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
+	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
+	qhp->attr.state = C4IW_QP_STATE_IDLE;
+	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
+	qhp->attr.enable_rdma_read = 1;
+	qhp->attr.enable_rdma_write = 1;
+	qhp->attr.enable_bind = 1;
+	qhp->attr.max_ord = 1;
+	qhp->attr.max_ird = 1;
+	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
+	spin_lock_init(&qhp->lock);
+	mutex_init(&qhp->mutex);
+	init_waitqueue_head(&qhp->wait);
+	atomic_set(&qhp->refcnt, 1);
+
+	spin_lock_irq(&rhp->lock);
+	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
+	spin_unlock_irq(&rhp->lock);
+	if (ret)
+		goto err2;
+
+	if (udata) {
+		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
+		if (!mm1) {
+			ret = -ENOMEM;
+			goto err3;
+		}
+		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
+		if (!mm2) {
+			ret = -ENOMEM;
+			goto err4;
+		}
+		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
+		if (!mm3) {
+			ret = -ENOMEM;
+			goto err5;
+		}
+		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
+		if (!mm4) {
+			ret = -ENOMEM;
+			goto err6;
+		}
+		uresp.flags = 0;
+		uresp.qid_mask = rhp->rdev.qpmask;
+		uresp.sqid = qhp->wq.sq.qid;
+		uresp.sq_size = qhp->wq.sq.size;
+		uresp.sq_memsize = qhp->wq.sq.memsize;
+		uresp.rqid = qhp->wq.rq.qid;
+		uresp.rq_size = qhp->wq.rq.size;
+		uresp.rq_memsize = qhp->wq.rq.memsize;
+		spin_lock(&ucontext->mmap_lock);
+		uresp.sq_key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		uresp.rq_key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		uresp.sq_db_gts_key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		uresp.rq_db_gts_key = ucontext->key;
+		ucontext->key += PAGE_SIZE;
+		spin_unlock(&ucontext->mmap_lock);
+		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
+		if (ret)
+			goto err7;
+		mm1->key = uresp.sq_key;
+		mm1->addr = qhp->wq.sq.phys_addr;
+		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+		CTR4(KTR_IW_CXGBE, "%s mm1 %x, %x, %d", __func__, mm1->key,
+		    mm1->addr, mm1->len);
+		insert_mmap(ucontext, mm1);
+		mm2->key = uresp.rq_key;
+		mm2->addr = vtophys(qhp->wq.rq.queue);
+		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+		CTR4(KTR_IW_CXGBE, "%s mm2 %x, %x, %d", __func__, mm2->key,
+		    mm2->addr, mm2->len);
+		insert_mmap(ucontext, mm2);
+		mm3->key = uresp.sq_db_gts_key;
+		mm3->addr = qhp->wq.sq.udb;
+		mm3->len = PAGE_SIZE;
+		CTR4(KTR_IW_CXGBE, "%s mm3 %x, %x, %d", __func__, mm3->key,
+		    mm3->addr, mm3->len);
+		insert_mmap(ucontext, mm3);
+		mm4->key = uresp.rq_db_gts_key;
+		mm4->addr = qhp->wq.rq.udb;
+		mm4->len = PAGE_SIZE;
+		CTR4(KTR_IW_CXGBE, "%s mm4 %x, %x, %d", __func__, mm4->key,
+		    mm4->addr, mm4->len);
+		insert_mmap(ucontext, mm4);
+	}
+	qhp->ibqp.qp_num = qhp->wq.sq.qid;
+	init_timer(&(qhp->timer));
+	CTR5(KTR_IW_CXGBE,
+	    "%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x",
+	    __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
+	    qhp->wq.sq.qid);
+	return &qhp->ibqp;
+err7:
+	kfree(mm4);
+err6:
+	kfree(mm3);
+err5:
+	kfree(mm2);
+err4:
+	kfree(mm1);
+err3:
+	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
+err2:
+	destroy_qp(&rhp->rdev, &qhp->wq,
+		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+err1:
+	kfree(qhp);
+	return ERR_PTR(ret);
+}
+
+int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		      int attr_mask, struct ib_udata *udata)
+{
+	struct c4iw_dev *rhp;
+	struct c4iw_qp *qhp;
+	enum c4iw_qp_attr_mask mask = 0;
+	struct c4iw_qp_attributes attrs;
+
+	CTR2(KTR_IW_CXGBE, "%s ib_qp %p", __func__, ibqp);
+
+	/* iwarp does not support the RTR state */
+	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
+		attr_mask &= ~IB_QP_STATE;
+
+	/* Make sure we still have something left to do */
+	if (!attr_mask)
+		return 0;
+
+	memset(&attrs, 0, sizeof attrs);
+	qhp = to_c4iw_qp(ibqp);
+	rhp = qhp->rhp;
+
+	attrs.next_state = c4iw_convert_state(attr->qp_state);
+	attrs.enable_rdma_read = (attr->qp_access_flags &
+			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
+	attrs.enable_rdma_write = (attr->qp_access_flags &
+				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+
+	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
+	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
+			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
+			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
+			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
+
+	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
+}
+
+struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
+{
+	CTR3(KTR_IW_CXGBE, "%s ib_dev %p qpn 0x%x", __func__, dev, qpn);
+	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
+}
+
+int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+		     int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
+
+	memset(attr, 0, sizeof *attr);
+	memset(init_attr, 0, sizeof *init_attr);
+	attr->qp_state = to_ib_qp_state(qhp->attr.state);
+	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
+	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
+	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
+	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
+	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
+	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
+	return 0;
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/qp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
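
A note on the work-request sizing in rdma_init()/rdma_fini() above: firmware
work requests are measured in 16-byte units, so the
V_FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)) encoding and the
howmany(wrsize, 16) charge in creds() compute the same quantity.  A minimal
standalone sketch of that accounting (the 304-byte WR size is hypothetical,
chosen only for illustration):

    /* Standalone sketch of the 16-byte WR accounting; not driver code. */
    #include <assert.h>
    #include <stdio.h>

    #define HOWMANY(x, y)  (((x) + (y) - 1) / (y))  /* FreeBSD's howmany() */

    int
    main(void)
    {
        size_t wrsize = 304;    /* hypothetical WR size in bytes */
        unsigned len16 = HOWMANY(wrsize, 16);

        assert(len16 == 19);    /* 19 units of 16 bytes cover 304 bytes */
        printf("wrsize %zu -> len16 %u (tx credits charged: %u)\n",
            wrsize, len16, len16);
        return (0);
    }
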
Added: trunk/sys/dev/cxgbe/iw_cxgbe/resource.c
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/resource.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/resource.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,355 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+/* Crude resource management */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/resource.c 309378 2016-12-01 23:38:52Z jhb $");
+
+#include "opt_inet.h"
+
+#ifdef TCP_OFFLOAD
+#include <linux/spinlock.h>
+#include "iw_cxgbe.h"
+
+static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
+{
+	u32 i;
+
+	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
+				rdev->adap->vres.qp.start,
+				rdev->adap->vres.qp.size,
+				rdev->adap->vres.qp.size, 0)) {
+		printf("%s: return ENOMEM\n", __func__);
+		return -ENOMEM;
+	}
+
+	for (i = rdev->adap->vres.qp.start;
+		i < rdev->adap->vres.qp.start + rdev->adap->vres.qp.size; i++)
+		if (!(i & rdev->qpmask))
+			c4iw_id_free(&rdev->resource.qid_table, i);
+	return 0;
+}
+
+/* nr_* must be power of 2 */
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+{
+	int err = 0;
+	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
+					C4IW_ID_TABLE_F_RANDOM);
+	if (err)
+		goto tpt_err;
+	err = c4iw_init_qid_table(rdev);
+	if (err)
+		goto qid_err;
+	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
+					nr_pdid, 1, 0);
+	if (err)
+		goto pdid_err;
+	return 0;
+ pdid_err:
+	c4iw_id_table_free(&rdev->resource.qid_table);
+ qid_err:
+	c4iw_id_table_free(&rdev->resource.tpt_table);
+ tpt_err:
+	return -ENOMEM;
+}
+
+/*
+ * returns 0 if no resource available
+ */
+u32 c4iw_get_resource(struct c4iw_id_table *id_table)
+{
+	u32 entry;
+	entry = c4iw_id_alloc(id_table);
+	if (entry == (u32)(-1)) {
+		return 0;
+	}
+	return entry;
+}
+
+void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
+{
+	CTR2(KTR_IW_CXGBE, "%s entry 0x%x", __func__, entry);
+	c4iw_id_free(id_table, entry);
+}
+
+u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+	struct c4iw_qid_list *entry;
+	u32 qid;
+	int i;
+
+	mutex_lock(&uctx->lock);
+	if (!list_empty(&uctx->cqids)) {
+		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
+				   entry);
+		list_del(&entry->entry);
+		qid = entry->qid;
+		kfree(entry);
+	} else {
+		qid = c4iw_get_resource(&rdev->resource.qid_table);
+		if (!qid)
+			goto out;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.qid.cur += rdev->qpmask + 1;
+		mutex_unlock(&rdev->stats.lock);
+		for (i = qid+1; i & rdev->qpmask; i++) {
+			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			if (!entry)
+				goto out;
+			entry->qid = i;
+			list_add_tail(&entry->entry, &uctx->cqids);
+		}
+
+		/*
+		 * now put the same ids on the qp list since they all
+		 * map to the same db/gts page.
+		 */
+		entry = kmalloc(sizeof *entry, GFP_KERNEL);
+		if (!entry)
+			goto out;
+		entry->qid = qid;
+		list_add_tail(&entry->entry, &uctx->qpids);
+		for (i = qid+1; i & rdev->qpmask; i++) {
+			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			if (!entry)
+				goto out;
+			entry->qid = i;
+			list_add_tail(&entry->entry, &uctx->qpids);
+		}
+	}
+out:
+	mutex_unlock(&uctx->lock);
+	CTR2(KTR_IW_CXGBE, "%s: qid 0x%x", __func__, qid);
+	mutex_lock(&rdev->stats.lock);
+	if (rdev->stats.qid.cur > rdev->stats.qid.max)
+		rdev->stats.qid.max = rdev->stats.qid.cur;
+	mutex_unlock(&rdev->stats.lock);
+	return qid;
+}
+
+void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
+		   struct c4iw_dev_ucontext *uctx)
+{
+	struct c4iw_qid_list *entry;
+
+	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	if (!entry)
+		return;
+	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
+	entry->qid = qid;
+	mutex_lock(&uctx->lock);
+	list_add_tail(&entry->entry, &uctx->cqids);
+	mutex_unlock(&uctx->lock);
+}
+
+u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
+{
+	struct c4iw_qid_list *entry;
+	u32 qid;
+	int i;
+
+	mutex_lock(&uctx->lock);
+	if (!list_empty(&uctx->qpids)) {
+		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
+				   entry);
+		list_del(&entry->entry);
+		qid = entry->qid;
+		kfree(entry);
+	} else {
+		qid = c4iw_get_resource(&rdev->resource.qid_table);
+		if (!qid)
+			goto out;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.qid.cur += rdev->qpmask + 1;
+		mutex_unlock(&rdev->stats.lock);
+		for (i = qid+1; i & rdev->qpmask; i++) {
+			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			if (!entry)
+				goto out;
+			entry->qid = i;
+			list_add_tail(&entry->entry, &uctx->qpids);
+		}
+
+		/*
+		 * now put the same ids on the cq list since they all
+		 * map to the same db/gts page.
+		 */
+		entry = kmalloc(sizeof *entry, GFP_KERNEL);
+		if (!entry)
+			goto out;
+		entry->qid = qid;
+		list_add_tail(&entry->entry, &uctx->cqids);
+		for (i = qid+1; i & rdev->qpmask; i++) {
+			entry = kmalloc(sizeof *entry, GFP_KERNEL);
+			if (!entry)
+				goto out;
+			entry->qid = i;
+			list_add_tail(&entry->entry, &uctx->cqids);
+		}
+	}
+out:
+	mutex_unlock(&uctx->lock);
+	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
+	mutex_lock(&rdev->stats.lock);
+	if (rdev->stats.qid.cur > rdev->stats.qid.max)
+		rdev->stats.qid.max = rdev->stats.qid.cur;
+	mutex_unlock(&rdev->stats.lock);
+	return qid;
+}
+
+void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
+		   struct c4iw_dev_ucontext *uctx)
+{
+	struct c4iw_qid_list *entry;
+
+	entry = kmalloc(sizeof *entry, GFP_KERNEL);
+	if (!entry)
+		return;
+	CTR2(KTR_IW_CXGBE, "%s qid 0x%x", __func__, qid);
+	entry->qid = qid;
+	mutex_lock(&uctx->lock);
+	list_add_tail(&entry->entry, &uctx->qpids);
+	mutex_unlock(&uctx->lock);
+}
+
+void c4iw_destroy_resource(struct c4iw_resource *rscp)
+{
+	c4iw_id_table_free(&rscp->tpt_table);
+	c4iw_id_table_free(&rscp->qid_table);
+	c4iw_id_table_free(&rscp->pdid_table);
+}
+
+/* PBL Memory Manager. */
+
+#define MIN_PBL_SHIFT 5			/* 32B == min PBL size (4 entries) */
+
+u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+	unsigned long addr;
+
+	vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
+			4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+			M_FIRSTFIT | M_NOWAIT, &addr);
+	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
+	mutex_lock(&rdev->stats.lock);
+	if (addr) {
+		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
+		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
+			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+	} else
+		rdev->stats.pbl.fail++;
+	mutex_unlock(&rdev->stats.lock);
+	return (u32)addr;
+}
+
+void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
+	vmem_xfree(rdev->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
+}
+
+int c4iw_pblpool_create(struct c4iw_rdev *rdev)
+{
+	rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
+					rdev->adap->vres.pbl.start,
+					rdev->adap->vres.pbl.size,
+					1, 0, M_FIRSTFIT | M_NOWAIT);
+	if (!rdev->pbl_arena)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
+{
+	vmem_destroy(rdev->pbl_arena);
+}
+
+/* RQT Memory Manager. */
+
+#define MIN_RQT_SHIFT 10	/* 1KB == min RQT size (16 entries) */
+
+u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
+{
+	unsigned long addr;
+
+	vmem_xalloc(rdev->rqt_arena,
+			roundup((size << 6), (1 << MIN_RQT_SHIFT)),
+			4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
+			M_FIRSTFIT | M_NOWAIT, &addr);
+	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr,
+	    size << 6);
+	if (!addr)
+		printf("%s: Out of RQT memory\n",
+		       device_get_nameunit(rdev->adap->dev));
+	mutex_lock(&rdev->stats.lock);
+	if (addr) {
+		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
+		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
+			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+	} else
+		rdev->stats.rqt.fail++;
+	mutex_unlock(&rdev->stats.lock);
+	return (u32)addr;
+}
+
+void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
+{
+	CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size << 6);
+	mutex_lock(&rdev->stats.lock);
+	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
+	mutex_unlock(&rdev->stats.lock);
+	vmem_xfree(rdev->rqt_arena, addr,
+		       roundup((size << 6), (1 << MIN_RQT_SHIFT)));
+}
+
+int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
+{
+	rdev->rqt_arena = vmem_create("RQT_MEM_POOL",
+					rdev->adap->vres.rq.start,
+					rdev->adap->vres.rq.size,
+					1, 0, M_FIRSTFIT | M_NOWAIT);
+	if (!rdev->rqt_arena)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
+{
+	vmem_destroy(rdev->rqt_arena);
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/resource.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
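
The cqid/qpid allocators above hand out one hardware-aligned id and park the
rest of each (qpmask + 1)-sized block on per-ucontext free lists, because
every id in a block maps to the same doorbell/GTS page.  A small sketch of
that grouping, using an assumed qpmask of 3 (real values come from the
adapter's SGE configuration):

    /* Sketch of the (i & qpmask) walk in c4iw_get_qpid()/c4iw_get_cqid(). */
    #include <stdio.h>

    int
    main(void)
    {
        unsigned qpmask = 3;    /* assumed: 4 qids per db/gts page */
        unsigned qid = 1024;    /* aligned base id from the qid table */
        unsigned i;

        for (i = qid + 1; i & qpmask; i++)
            printf("qid %u shares qid %u's db/gts page\n", i, qid);
        return (0);
    }
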
Added: trunk/sys/dev/cxgbe/iw_cxgbe/t4.h
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/t4.h	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/t4.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,580 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/t4.h 318799 2017-05-24 18:16:20Z np $
+ */
+#ifndef __T4_H__
+#define __T4_H__
+
+/*
+ * Fixme: Adding missing defines
+ */
+#define SGE_PF_KDOORBELL 0x0
+#define  QID_MASK    0xffff8000U
+#define  QID_SHIFT   15
+#define  QID(x)      ((x) << QID_SHIFT)
+#define  DBPRIO      0x00004000U
+#define  PIDX_MASK   0x00003fffU
+#define  PIDX_SHIFT  0
+#define  PIDX(x)     ((x) << PIDX_SHIFT)
+
+#define SGE_PF_GTS 0x4
+#define  INGRESSQID_MASK   0xffff0000U
+#define  INGRESSQID_SHIFT  16
+#define  INGRESSQID(x)     ((x) << INGRESSQID_SHIFT)
+#define  TIMERREG_MASK     0x0000e000U
+#define  TIMERREG_SHIFT    13
+#define  TIMERREG(x)       ((x) << TIMERREG_SHIFT)
+#define  SEINTARM_MASK     0x00001000U
+#define  SEINTARM_SHIFT    12
+#define  SEINTARM(x)       ((x) << SEINTARM_SHIFT)
+#define  CIDXINC_MASK      0x00000fffU
+#define  CIDXINC_SHIFT     0
+#define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
+
+#define T4_MAX_NUM_PD 65536
+#define T4_MAX_EQ_SIZE 65520
+#define T4_MAX_IQ_SIZE 65520
+#define T4_MAX_RQ_SIZE(n) (8192 - (n) - 1)
+#define T4_MAX_SQ_SIZE(n) (T4_MAX_EQ_SIZE - (n) - 1)
+#define T4_MAX_QP_DEPTH(n) (T4_MAX_RQ_SIZE(n))
+#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 2)
+#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_PAGESIZE_MASK 0xffffffff000  /* 4KB-8TB */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_FW_MAJ 0
+#define A_PCIE_MA_SYNC 0x30b4
+
+struct t4_status_page {
+	__be32 rsvd1;	/* flit 0 - hw owns */
+	__be16 rsvd2;
+	__be16 qid;
+	__be16 cidx;
+	__be16 pidx;
+	u8 qp_err;	/* flit 1 - sw owns */
+	u8 db_off;
+	u8 pad;
+	u16 host_wq_pidx;
+	u16 host_cidx;
+	u16 host_pidx;
+};
+
+#define T4_EQ_ENTRY_SIZE 64
+
+#define T4_SQ_NUM_SLOTS 5
+#define T4_SQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_SQ_NUM_SLOTS)
+#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
+			sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
+			sizeof(struct fw_ri_rdma_write_wr) - \
+			sizeof(struct fw_ri_immd)))
+#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
+			sizeof(struct fw_ri_rdma_write_wr) - \
+			sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
+#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
+			sizeof(struct fw_ri_immd)) & ~31UL)
+#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64))
+
+#define T4_RQ_NUM_SLOTS 2
+#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
+#define T4_MAX_RECV_SGE 4
+
+union t4_wr {
+	struct fw_ri_res_wr res;
+	struct fw_ri_wr ri;
+	struct fw_ri_rdma_write_wr write;
+	struct fw_ri_send_wr send;
+	struct fw_ri_rdma_read_wr read;
+	struct fw_ri_bind_mw_wr bind;
+	struct fw_ri_fr_nsmr_wr fr;
+	struct fw_ri_inv_lstag_wr inv;
+	struct t4_status_page status;
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
+};
+
+union t4_recv_wr {
+	struct fw_ri_recv_wr recv;
+	struct t4_status_page status;
+	__be64 flits[T4_EQ_ENTRY_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
+};
+
+static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
+			       enum fw_wr_opcodes opcode, u8 flags, u8 len16)
+{
+	wqe->send.opcode = (u8)opcode;
+	wqe->send.flags = flags;
+	wqe->send.wrid = wrid;
+	wqe->send.r1[0] = 0;
+	wqe->send.r1[1] = 0;
+	wqe->send.r1[2] = 0;
+	wqe->send.len16 = len16;
+}
+
+/* CQE/AE status codes */
+#define T4_ERR_SUCCESS                     0x0
+#define T4_ERR_STAG                        0x1	/* STAG invalid: either the */
+						/* STAG is off-limits, is 0, */
+						/* or the STAG_key mismatches */
+#define T4_ERR_PDID                        0x2	/* PDID mismatch */
+#define T4_ERR_QPID                        0x3	/* QPID mismatch */
+#define T4_ERR_ACCESS                      0x4	/* Invalid access right */
+#define T4_ERR_WRAP                        0x5	/* Wrap error */
+#define T4_ERR_BOUND                       0x6	/* base and bounds violation */
+#define T4_ERR_INVALIDATE_SHARED_MR        0x7	/* attempt to invalidate a  */
+						/* shared memory region */
+#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8	/* attempt to invalidate a  */
+						/* MR with an MW bound to it */
+#define T4_ERR_ECC                         0x9	/* ECC error detected */
+#define T4_ERR_ECC_PSTAG                   0xA	/* ECC error detected when  */
+						/* reading PSTAG for a MW  */
+						/* Invalidate */
+#define T4_ERR_PBL_ADDR_BOUND              0xB	/* pbl addr out of bounds:  */
+						/* software error */
+#define T4_ERR_SWFLUSH			   0xC	/* SW FLUSHED */
+#define T4_ERR_CRC                         0x10 /* CRC error */
+#define T4_ERR_MARKER                      0x11 /* Marker error */
+#define T4_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
+#define T4_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
+#define T4_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
+#define T4_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
+#define T4_ERR_OPCODE                      0x16 /* invalid rdma opcode */
+#define T4_ERR_DDP_QUEUE_NUM               0x17 /* invalid ddp queue number */
+#define T4_ERR_MSN                         0x18 /* MSN error */
+#define T4_ERR_TBIT                        0x19 /* tag bit not set correctly */
+#define T4_ERR_MO                          0x1A /* MO not 0 for TERMINATE  */
+						/* or READ_REQ */
+#define T4_ERR_MSN_GAP                     0x1B
+#define T4_ERR_MSN_RANGE                   0x1C
+#define T4_ERR_IRD_OVERFLOW                0x1D
+#define T4_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds:  */
+						/* software error */
+#define T4_ERR_INTERNAL_ERR                0x1F /* internal error (opcode  */
+						/* mismatch) */
+/*
+ * CQE defs
+ */
+struct t4_cqe {
+	__be32 header;
+	__be32 len;
+	union {
+		struct {
+			__be32 stag;
+			__be32 msn;
+		} rcqe;
+		struct {
+			u32 nada1;
+			u16 nada2;
+			u16 cidx;
+		} scqe;
+		struct {
+			__be32 wrid_hi;
+			__be32 wrid_low;
+		} gen;
+		u64 drain_cookie;
+	} u;
+	__be64 reserved;
+	__be64 bits_type_ts;
+};
+
+/* macros for flit 0 of the cqe */
+
+#define S_CQE_QPID        12
+#define M_CQE_QPID        0xFFFFF
+#define G_CQE_QPID(x)     ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
+#define V_CQE_QPID(x)	  ((x)<<S_CQE_QPID)
+
+#define S_CQE_SWCQE       11
+#define M_CQE_SWCQE       0x1
+#define G_CQE_SWCQE(x)    ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
+#define V_CQE_SWCQE(x)	  ((x)<<S_CQE_SWCQE)
+
+#define S_CQE_STATUS      5
+#define M_CQE_STATUS      0x1F
+#define G_CQE_STATUS(x)   ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
+#define V_CQE_STATUS(x)   ((x)<<S_CQE_STATUS)
+
+#define S_CQE_TYPE        4
+#define M_CQE_TYPE        0x1
+#define G_CQE_TYPE(x)     ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
+#define V_CQE_TYPE(x)     ((x)<<S_CQE_TYPE)
+
+#define S_CQE_OPCODE      0
+#define M_CQE_OPCODE      0xF
+#define G_CQE_OPCODE(x)   ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
+#define V_CQE_OPCODE(x)   ((x)<<S_CQE_OPCODE)
+
+#define SW_CQE(x)         (G_CQE_SWCQE(be32_to_cpu((x)->header)))
+#define CQE_QPID(x)       (G_CQE_QPID(be32_to_cpu((x)->header)))
+#define CQE_TYPE(x)       (G_CQE_TYPE(be32_to_cpu((x)->header)))
+#define SQ_TYPE(x)	  (CQE_TYPE((x)))
+#define RQ_TYPE(x)	  (!CQE_TYPE((x)))
+#define CQE_STATUS(x)     (G_CQE_STATUS(be32_to_cpu((x)->header)))
+#define CQE_OPCODE(x)     (G_CQE_OPCODE(be32_to_cpu((x)->header)))
+
+#define CQE_SEND_OPCODE(x)(\
+	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
+	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
+	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
+	(G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))
+
+#define CQE_LEN(x)        (be32_to_cpu((x)->len))
+
+/* used for RQ completion processing */
+#define CQE_WRID_STAG(x)  (be32_to_cpu((x)->u.rcqe.stag))
+#define CQE_WRID_MSN(x)   (be32_to_cpu((x)->u.rcqe.msn))
+
+/* used for SQ completion processing */
+#define CQE_WRID_SQ_IDX(x)	((x)->u.scqe.cidx)
+
+/* generic accessor macros */
+#define CQE_WRID_HI(x)		((x)->u.gen.wrid_hi)
+#define CQE_WRID_LOW(x)		((x)->u.gen.wrid_low)
+#define CQE_DRAIN_COOKIE(x)	((x)->u.drain_cookie)
+
+/* macros for flit 3 of the cqe */
+#define S_CQE_GENBIT	63
+#define M_CQE_GENBIT	0x1
+#define G_CQE_GENBIT(x)	(((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
+#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)
+
+#define S_CQE_OVFBIT	62
+#define M_CQE_OVFBIT	0x1
+#define G_CQE_OVFBIT(x)	((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)
+
+#define S_CQE_IQTYPE	60
+#define M_CQE_IQTYPE	0x3
+#define G_CQE_IQTYPE(x)	((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)
+
+#define M_CQE_TS	0x0fffffffffffffffULL
+#define G_CQE_TS(x)	((x) & M_CQE_TS)
+
+#define CQE_OVFBIT(x)	((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_GENBIT(x)	((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
+#define CQE_TS(x)	(G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))
+
+struct t4_swsqe {
+	u64			wr_id;
+	struct t4_cqe		cqe;
+	int			read_len;
+	int			opcode;
+	int			complete;
+	int			signaled;
+	u16			idx;
+};
+
+struct t4_sq {
+	union t4_wr *queue;
+	bus_addr_t dma_addr;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
+	unsigned long phys_addr;
+	struct t4_swsqe *sw_sq;
+	struct t4_swsqe *oldest_read;
+	u64 udb;
+	size_t memsize;
+	u32 qid;
+	u16 in_use;
+	u16 size;
+	u16 cidx;
+	u16 pidx;
+	u16 wq_pidx;
+	u16 flags;
+};
+
+struct t4_swrqe {
+	u64 wr_id;
+};
+
+struct t4_rq {
+	union  t4_recv_wr *queue;
+	bus_addr_t dma_addr;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
+	struct t4_swrqe *sw_rq;
+	u64 udb;
+	size_t memsize;
+	u32 qid;
+	u32 msn;
+	u32 rqt_hwaddr;
+	u16 rqt_size;
+	u16 in_use;
+	u16 size;
+	u16 cidx;
+	u16 pidx;
+	u16 wq_pidx;
+};
+
+struct t4_wq {
+	struct t4_sq sq;
+	struct t4_rq rq;
+	void __iomem *db;
+	void __iomem *gts;
+	struct c4iw_rdev *rdev;
+};
+
+static inline int t4_rqes_posted(struct t4_wq *wq)
+{
+	return wq->rq.in_use;
+}
+
+static inline int t4_rq_empty(struct t4_wq *wq)
+{
+	return wq->rq.in_use == 0;
+}
+
+static inline int t4_rq_full(struct t4_wq *wq)
+{
+	return wq->rq.in_use == (wq->rq.size - 1);
+}
+
+static inline u32 t4_rq_avail(struct t4_wq *wq)
+{
+	return wq->rq.size - 1 - wq->rq.in_use;
+}
+
+static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
+{
+	wq->rq.in_use++;
+	if (++wq->rq.pidx == wq->rq.size)
+		wq->rq.pidx = 0;
+	wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
+		wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
+}
+
+static inline void t4_rq_consume(struct t4_wq *wq)
+{
+	wq->rq.in_use--;
+	wq->rq.msn++;
+	if (++wq->rq.cidx == wq->rq.size)
+		wq->rq.cidx = 0;
+}
+
+static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
+{
+	return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
+}
+
+static inline u16 t4_rq_wq_size(struct t4_wq *wq)
+{
+	return wq->rq.size * T4_RQ_NUM_SLOTS;
+}
+
+static inline int t4_sq_empty(struct t4_wq *wq)
+{
+	return wq->sq.in_use == 0;
+}
+
+static inline int t4_sq_full(struct t4_wq *wq)
+{
+	return wq->sq.in_use == (wq->sq.size - 1);
+}
+
+static inline u32 t4_sq_avail(struct t4_wq *wq)
+{
+	return wq->sq.size - 1 - wq->sq.in_use;
+}
+
+static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
+{
+	wq->sq.in_use++;
+	if (++wq->sq.pidx == wq->sq.size)
+		wq->sq.pidx = 0;
+	wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
+	if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
+		wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
+}
+
+static inline void t4_sq_consume(struct t4_wq *wq)
+{
+	wq->sq.in_use--;
+	if (++wq->sq.cidx == wq->sq.size)
+		wq->sq.cidx = 0;
+}
+
+static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
+{
+	return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
+}
+
+static inline u16 t4_sq_wq_size(struct t4_wq *wq)
+{
+	return wq->sq.size * T4_SQ_NUM_SLOTS;
+}
+
+static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
+{
+	wmb();
+	writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
+}
+
+static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
+{
+	wmb();
+	writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
+}
+
+static inline int t4_wq_in_error(struct t4_wq *wq)
+{
+	return wq->rq.queue[wq->rq.size].status.qp_err;
+}
+
+static inline void t4_set_wq_in_error(struct t4_wq *wq)
+{
+	wq->rq.queue[wq->rq.size].status.qp_err = 1;
+}
+
+struct t4_cq {
+	struct t4_cqe *queue;
+	bus_addr_t dma_addr;
+	DECLARE_PCI_UNMAP_ADDR(mapping);
+	struct t4_cqe *sw_queue;
+	void __iomem *gts;
+	struct c4iw_rdev *rdev;
+	u64 ugts;
+	size_t memsize;
+	__be64 bits_type_ts;
+	u32 cqid;
+	u16 size; /* including status page */
+	u16 cidx;
+	u16 sw_pidx;
+	u16 sw_cidx;
+	u16 sw_in_use;
+	u16 cidx_inc;
+	u8 gen;
+	u8 error;
+};
+
+static inline int t4_arm_cq(struct t4_cq *cq, int se)
+{
+	u32 val;
+
+	while (cq->cidx_inc > CIDXINC_MASK) {
+		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
+		      INGRESSQID(cq->cqid);
+		writel(val, cq->gts);
+		cq->cidx_inc -= CIDXINC_MASK;
+	}
+	val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
+	      INGRESSQID(cq->cqid);
+	writel(val, cq->gts);
+	cq->cidx_inc = 0;
+	return 0;
+}
+
+static inline void t4_swcq_produce(struct t4_cq *cq)
+{
+	cq->sw_in_use++;
+	if (++cq->sw_pidx == cq->size)
+		cq->sw_pidx = 0;
+}
+
+static inline void t4_swcq_consume(struct t4_cq *cq)
+{
+	cq->sw_in_use--;
+	if (++cq->sw_cidx == cq->size)
+		cq->sw_cidx = 0;
+}
+
+static inline void t4_hwcq_consume(struct t4_cq *cq)
+{
+	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
+	if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == M_CIDXINC) {
+		u32 val;
+
+		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
+		      INGRESSQID(cq->cqid);
+		writel(val, cq->gts);
+		cq->cidx_inc = 0;
+	}
+	if (++cq->cidx == cq->size) {
+		cq->cidx = 0;
+		cq->gen ^= 1;
+	}
+}
+
+static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
+{
+	return (CQE_GENBIT(cqe) == cq->gen);
+}
+
+static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+	int ret;
+	u16 prev_cidx;
+
+	if (cq->cidx == 0)
+		prev_cidx = cq->size - 1;
+	else
+		prev_cidx = cq->cidx - 1;
+
+	if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
+		ret = -EOVERFLOW;
+		cq->error = 1;
+		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+		*cqe = &cq->queue[cq->cidx];
+		ret = 0;
+	} else
+		ret = -ENODATA;
+	return ret;
+}
+
+static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
+{
+	if (cq->sw_in_use)
+		return &cq->sw_queue[cq->sw_cidx];
+	return NULL;
+}
+
+static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
+{
+	int ret = 0;
+
+	if (cq->error)
+		ret = -ENODATA;
+	else if (cq->sw_in_use)
+		*cqe = &cq->sw_queue[cq->sw_cidx];
+	else
+		ret = t4_next_hw_cqe(cq, cqe);
+	return ret;
+}
+
+static inline int t4_cq_in_error(struct t4_cq *cq)
+{
+	return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+}
+
+static inline void t4_set_cq_in_error(struct t4_cq *cq)
+{
+	((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+}
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/t4.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
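
t4.h above follows the driver-wide S_/M_/G_/V_ convention for hardware bit
fields: S_* is the shift, M_* the mask, V_*() places a value at its shift,
and G_*() extracts it again.  A self-contained sketch using two of the CQE
header fields (in the driver these are applied to be32_to_cpu()-converted
words, omitted here for brevity):

    #include <assert.h>
    #include <stdint.h>

    #define S_CQE_STATUS    5
    #define M_CQE_STATUS    0x1F
    #define G_CQE_STATUS(x) (((x) >> S_CQE_STATUS) & M_CQE_STATUS)
    #define V_CQE_STATUS(x) ((x) << S_CQE_STATUS)

    #define S_CQE_OPCODE    0
    #define M_CQE_OPCODE    0xF
    #define G_CQE_OPCODE(x) (((x) >> S_CQE_OPCODE) & M_CQE_OPCODE)
    #define V_CQE_OPCODE(x) ((x) << S_CQE_OPCODE)

    int
    main(void)
    {
        uint32_t header = V_CQE_STATUS(0xC) | V_CQE_OPCODE(0x4);

        assert(G_CQE_STATUS(header) == 0xC);    /* T4_ERR_SWFLUSH */
        assert(G_CQE_OPCODE(header) == 0x4);
        return (0);
    }
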
Added: trunk/sys/dev/cxgbe/iw_cxgbe/user.h
===================================================================
--- trunk/sys/dev/cxgbe/iw_cxgbe/user.h	                        (rev 0)
+++ trunk/sys/dev/cxgbe/iw_cxgbe/user.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,72 @@
+/* $MidnightBSD$ */
+/*
+ * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $FreeBSD: stable/10/sys/dev/cxgbe/iw_cxgbe/user.h 309378 2016-12-01 23:38:52Z jhb $
+ */
+#ifndef __C4IW_USER_H__
+#define __C4IW_USER_H__
+
+#define C4IW_UVERBS_ABI_VERSION	2
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+struct c4iw_create_cq_resp {
+	__u64 key;
+	__u64 gts_key;
+	__u64 memsize;
+	__u32 cqid;
+	__u32 size;
+	__u32 qid_mask;
+	__u32 reserved; /* explicit padding (optional for i386) */
+};
+
+struct c4iw_create_qp_resp {
+	__u64 ma_sync_key;
+	__u64 sq_key;
+	__u64 rq_key;
+	__u64 sq_db_gts_key;
+	__u64 rq_db_gts_key;
+	__u64 sq_memsize;
+	__u64 rq_memsize;
+	__u32 sqid;
+	__u32 rqid;
+	__u32 sq_size;
+	__u32 rq_size;
+	__u32 qid_mask;
+	__u32 flags;
+};
+#endif


Property changes on: trunk/sys/dev/cxgbe/iw_cxgbe/user.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
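
The layout rule spelled out in user.h (fixed-width members only, no
pointers) means these structs carry no padding and have identical sizes on
32- and 64-bit builds.  One way a consumer could pin that down at compile
time is with C11 static assertions; a sketch with stand-in copies of the
structs (expected sizes computed from the member lists above):

    #include <stdint.h>

    struct create_cq_resp {             /* mirrors c4iw_create_cq_resp */
        uint64_t key, gts_key, memsize;
        uint32_t cqid, size, qid_mask, reserved;
    };
    _Static_assert(sizeof(struct create_cq_resp) == 3 * 8 + 4 * 4,
        "c4iw_create_cq_resp layout changed");

    struct create_qp_resp {             /* mirrors c4iw_create_qp_resp */
        uint64_t ma_sync_key, sq_key, rq_key, sq_db_gts_key, rq_db_gts_key;
        uint64_t sq_memsize, rq_memsize;
        uint32_t sqid, rqid, sq_size, rq_size, qid_mask, flags;
    };
    _Static_assert(sizeof(struct create_qp_resp) == 7 * 8 + 6 * 4,
        "c4iw_create_qp_resp layout changed");

    int
    main(void)
    {
        return (0);
    }
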
Modified: trunk/sys/dev/cxgbe/offload.h
===================================================================
--- trunk/sys/dev/cxgbe/offload.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/offload.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2010 Chelsio Communications, Inc.
  * All rights reserved.
@@ -24,7 +25,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/offload.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/offload.h 318797 2017-05-24 17:52:56Z np $
  *
  */
 
@@ -59,8 +60,8 @@
 
 struct stid_region {
 	TAILQ_ENTRY(stid_region) link;
-	int used;	/* # of stids used by this region */
-	int free;	/* # of contiguous stids free right after this region */
+	u_int used;	/* # of stids used by this region */
+	u_int free;	/* # of contiguous stids free right after this region */
 };
 
 /*
@@ -101,6 +102,11 @@
 	u_int nftids;
 	u_int ftid_base;
 	u_int ftids_in_use;
+
+	struct mtx etid_lock __aligned(CACHE_LINE_SIZE);
+	struct etid_entry *etid_tab;
+	u_int netids;
+	u_int etid_base;
 };
 
 struct t4_range {
@@ -116,13 +122,16 @@
 	struct t4_range pbl;
 	struct t4_range qp;
 	struct t4_range cq;
+	struct t4_range srq;
 	struct t4_range ocq;
 	struct t4_range l2t;
 };
 
-#ifdef TCP_OFFLOAD
 enum {
-	ULD_TOM = 1,
+	ULD_TOM = 0,
+	ULD_IWARP,
+	ULD_ISCSI,
+	ULD_MAX = ULD_ISCSI
 };
 
 struct adapter;
@@ -140,12 +149,16 @@
 	int ddp;
 	int indsz;
 	int ddp_thres;
+	int rx_coalesce;
+	int tx_align;
 };
 
+#ifdef TCP_OFFLOAD
 int t4_register_uld(struct uld_info *);
 int t4_unregister_uld(struct uld_info *);
 int t4_activate_uld(struct adapter *, int);
 int t4_deactivate_uld(struct adapter *, int);
+void t4_iscsi_init(struct adapter *, u_int, const u_int *);
+int uld_active(struct adapter *, int);
 #endif
-
 #endif

Modified: trunk/sys/dev/cxgbe/osdep.h
===================================================================
--- trunk/sys/dev/cxgbe/osdep.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/osdep.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2010 Chelsio Communications, Inc.
  * All rights reserved.
@@ -24,7 +25,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/osdep.h 242015 2012-10-24 19:04:17Z gavin $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/osdep.h 308304 2016-11-04 18:45:06Z jhb $
  *
  */
 
@@ -45,6 +46,7 @@
 #define CH_ALERT(adap, fmt, ...) log(LOG_ALERT, fmt, ##__VA_ARGS__)
 #define CH_WARN_RATELIMIT(adap, fmt, ...) log(LOG_WARNING, fmt, ##__VA_ARGS__)
 
+#ifndef LINUX_TYPES_DEFINED
 typedef int8_t  s8;
 typedef int16_t s16;
 typedef int32_t s32;
@@ -64,8 +66,11 @@
 
 #if BYTE_ORDER == BIG_ENDIAN
 #define __BIG_ENDIAN_BITFIELD
+#define htobe32_const(x) (x)
 #elif BYTE_ORDER == LITTLE_ENDIAN
 #define __LITTLE_ENDIAN_BITFIELD
+#define htobe32_const(x) (((x) >> 24) | (((x) >> 8) & 0xff00) |	\
+    ((((x) & 0xffffff) << 8) & 0xff0000) | ((((x) & 0xff) << 24) & 0xff000000))
 #else
 #error "Must set BYTE_ORDER"
 #endif
@@ -76,10 +81,11 @@
 #define true TRUE
 #endif
 
+#define __force
+
 #define mdelay(x) DELAY((x) * 1000)
 #define udelay(x) DELAY(x)
 
-#define __devinit
 #define simple_strtoul strtoul
 #define DIV_ROUND_UP(x, y) howmany(x, y)
 
@@ -86,9 +92,9 @@
 #define ARRAY_SIZE(x) nitems(x)
 #define container_of(p, s, f) ((s *)(((uint8_t *)(p)) - offsetof(s, f)))
 
-#define swab16(x) bswap16(x) 
-#define swab32(x) bswap32(x) 
-#define swab64(x) bswap64(x) 
+#define swab16(x) bswap16(x)
+#define swab32(x) bswap32(x)
+#define swab64(x) bswap64(x)
 #define le16_to_cpu(x) le16toh(x)
 #define le32_to_cpu(x) le32toh(x)
 #define le64_to_cpu(x) le64toh(x)
@@ -102,10 +108,6 @@
 #define cpu_to_be32(x) htobe32(x)
 #define cpu_to_be64(x) htobe64(x)
 
-#define SPEED_10	10
-#define SPEED_100	100
-#define SPEED_1000	1000
-#define SPEED_10000	10000
 #define DUPLEX_HALF	0
 #define DUPLEX_FULL	1
 #define AUTONEG_DISABLE	0
@@ -125,7 +127,7 @@
 #define PCI_EXP_LNKSTA		PCIER_LINK_STA
 #define PCI_EXP_LNKSTA_CLS	PCIEM_LINK_STA_SPEED
 #define PCI_EXP_LNKSTA_NLW	PCIEM_LINK_STA_WIDTH
-#define PCI_EXP_DEVCTL2		0x28
+#define PCI_EXP_DEVCTL2		PCIER_DEVICE_CTL2
 
 static inline int
 ilog2(long x)
@@ -152,5 +154,6 @@
 
 	return (r);
 }
+#endif /* LINUX_TYPES_DEFINED */
 
 #endif

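The new htobe32_const() gives a byte swap that is usable in integer constant
expressions (static initializers, case labels), independent of how htobe32()
is implemented.  A quick check of the little-endian variant added above,
assuming a little-endian host (on big-endian hosts the macro is the
identity):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define htobe32_const(x) (((x) >> 24) | (((x) >> 8) & 0xff00) | \
        ((((x) & 0xffffff) << 8) & 0xff0000) | \
        ((((x) & 0xff) << 24) & 0xff000000))

    /* legal precisely because the macro is a constant expression */
    static const uint32_t cookie = htobe32_const(0x11223344);

    int
    main(void)
    {
        uint8_t buf[4];

        memcpy(buf, &cookie, sizeof(buf));
        /* big-endian byte order in memory on a little-endian host */
        assert(buf[0] == 0x11 && buf[1] == 0x22 &&
            buf[2] == 0x33 && buf[3] == 0x44);
        return (0);
    }
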
Modified: trunk/sys/dev/cxgbe/t4_ioctl.h
===================================================================
--- trunk/sys/dev/cxgbe/t4_ioctl.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/t4_ioctl.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2011 Chelsio Communications, Inc.
  * All rights reserved.
@@ -24,7 +25,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/t4_ioctl.h 241573 2012-10-15 06:41:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/t4_ioctl.h 309569 2016-12-05 23:02:26Z jhb $
  *
  */
 
@@ -51,6 +52,12 @@
 	T4_GET_MEM,			/* read memory */
 	T4_GET_I2C,			/* read from i2c addressable device */
 	T4_CLEAR_STATS,			/* clear a port's MAC statistics */
+	T4_SET_OFLD_POLICY,		/* Set offload policy */
+	T4_SET_SCHED_CLASS,             /* set sched class */
+	T4_SET_SCHED_QUEUE,             /* set queue class */
+	T4_GET_TRACER,			/* get information about a tracer */
+	T4_SET_TRACER,			/* program a tracer */
+	T4_LOAD_CFG,			/* copy a config file to card's flash */
 };
 
 struct t4_reg {
@@ -60,6 +67,7 @@
 };
 
 #define T4_REGDUMP_SIZE  (160 * 1024)
+#define T5_REGDUMP_SIZE  (332 * 1024)
 struct t4_regdump {
 	uint32_t version;
 	uint32_t len; /* bytes */
@@ -99,6 +107,12 @@
 #define T4_FILTER_MPS_HIT_TYPE	0x4000	/* MPS match type */
 #define T4_FILTER_IP_FRAGMENT	0x8000	/* IP fragment */
 
+#define T4_FILTER_IC_VNIC	0x80000000	/* TP Ingress Config's F_VNIC
+						   bit.  It indicates whether
+						   T4_FILTER_VNIC bit means VNIC
+						   id (PF/VF) or outer VLAN.
+						   0 = oVLAN, 1 = VNIC */
+
 /* Filter action */
 enum {
 	FILTER_PASS = 0,	/* default */
@@ -148,7 +162,7 @@
 	 * is used to select the global mode and all filters are limited to the
 	 * set of fields allowed by the global mode.
 	 */
-	uint16_t vnic;		/* VNIC id or outer VLAN tag */
+	uint16_t vnic;		/* VNIC id (PF/VF) or outer VLAN tag */
 	uint16_t vlan;		/* VLAN tag */
 	uint16_t ethtype;	/* Ethernet type */
 	uint8_t  tos;		/* TOS/Traffic Type */
@@ -159,7 +173,8 @@
 	uint32_t frag:1;	/* fragmentation extension header */
 	uint32_t macidx:9;	/* exact match MAC index */
 	uint32_t vlan_vld:1;	/* VLAN valid */
-	uint32_t vnic_vld:1;	/* VNIC id/outer VLAN tag valid */
+	uint32_t ovlan_vld:1;	/* outer VLAN tag valid, value in "vnic" */
+	uint32_t pfvf_vld:1;	/* VNIC id (PF/VF) valid, value in "vnic" */
 };
 
 struct t4_filter_specification {
@@ -202,6 +217,76 @@
 	struct t4_filter_specification fs;
 };
 
+/* Tx Scheduling Class parameters */
+struct t4_sched_class_params {
+	int8_t   level;		/* scheduler hierarchy level */
+	int8_t   mode;		/* per-class or per-flow */
+	int8_t   rateunit;	/* bit or packet rate */
+	int8_t   ratemode;	/* %port relative or kbps absolute */
+	int8_t   channel;	/* scheduler channel [0..N] */
+	int8_t   cl;		/* scheduler class [0..N] */
+	int32_t  minrate;	/* minimum rate */
+	int32_t  maxrate;	/* maximum rate */
+	int16_t  weight;	/* percent weight */
+	int16_t  pktsize;	/* average packet size */
+};
+
+/*
+ * Support for "sched-class" command to allow a TX Scheduling Class to be
+ * programmed with various parameters.
+ */
+struct t4_sched_params {
+	int8_t   subcmd;		/* sub-command */
+	int8_t   type;			/* packet or flow */
+	union {
+		struct {		/* sub-command SCHED_CLASS_CONFIG */
+			int8_t   minmax;	/* minmax enable */
+		} config;
+		struct t4_sched_class_params params;
+		uint8_t     reserved[6 + 8 * 8];
+	} u;
+};
+
+enum {
+	SCHED_CLASS_SUBCMD_CONFIG,	/* config sub-command */
+	SCHED_CLASS_SUBCMD_PARAMS,	/* params sub-command */
+};
+
+enum {
+	SCHED_CLASS_TYPE_PACKET,
+};
+
+enum {
+	SCHED_CLASS_LEVEL_CL_RL,	/* class rate limiter */
+	SCHED_CLASS_LEVEL_CL_WRR,	/* class weighted round robin */
+	SCHED_CLASS_LEVEL_CH_RL,	/* channel rate limiter */
+};
+
+enum {
+	SCHED_CLASS_MODE_CLASS,		/* per-class scheduling */
+	SCHED_CLASS_MODE_FLOW,		/* per-flow scheduling */
+};
+
+enum {
+	SCHED_CLASS_RATEUNIT_BITS,	/* bit rate scheduling */
+	SCHED_CLASS_RATEUNIT_PKTS,	/* packet rate scheduling */
+};
+
+enum {
+	SCHED_CLASS_RATEMODE_REL,	/* percent of port bandwidth */
+	SCHED_CLASS_RATEMODE_ABS,	/* Kb/s */
+};
+
+/*
+ * Support for "sched_queue" command to allow one or more NIC TX Queues to be
+ * bound to a TX Scheduling Class.
+ */
+struct t4_sched_queue {
+	uint8_t  port;
+	int8_t   queue;	/* queue index; -1 => all queues */
+	int8_t   cl;	/* class index; -1 => unbind */
+};
+
 #define T4_SGE_CONTEXT_SIZE 24
 enum {
 	SGE_CONTEXT_EGRESS,
@@ -222,6 +307,25 @@
 	uint32_t *data;
 };
 
+#define T4_TRACE_LEN 112
+struct t4_trace_params {
+	uint32_t data[T4_TRACE_LEN / 4];
+	uint32_t mask[T4_TRACE_LEN / 4];
+	uint16_t snap_len;
+	uint16_t min_len;
+	uint8_t skip_ofst;
+	uint8_t skip_len;
+	uint8_t invert;
+	uint8_t port;
+};
+
+struct t4_tracer {
+	uint8_t idx;
+	uint8_t enabled;
+	uint8_t valid;
+	struct t4_trace_params tp;
+};
+
 #define CHELSIO_T4_GETREG	_IOWR('f', T4_GETREG, struct t4_reg)
 #define CHELSIO_T4_SETREG	_IOW('f', T4_SETREG, struct t4_reg)
 #define CHELSIO_T4_REGDUMP	_IOWR('f', T4_REGDUMP, struct t4_regdump)
@@ -236,4 +340,11 @@
 #define CHELSIO_T4_GET_MEM	_IOW('f', T4_GET_MEM, struct t4_mem_range)
 #define CHELSIO_T4_GET_I2C	_IOWR('f', T4_GET_I2C, struct t4_i2c_data)
 #define CHELSIO_T4_CLEAR_STATS	_IOW('f', T4_CLEAR_STATS, uint32_t)
+#define CHELSIO_T4_SCHED_CLASS  _IOW('f', T4_SET_SCHED_CLASS, \
+    struct t4_sched_params)
+#define CHELSIO_T4_SCHED_QUEUE  _IOW('f', T4_SET_SCHED_QUEUE, \
+    struct t4_sched_queue)
+#define CHELSIO_T4_GET_TRACER	_IOWR('f', T4_GET_TRACER, struct t4_tracer)
+#define CHELSIO_T4_SET_TRACER	_IOW('f', T4_SET_TRACER, struct t4_tracer)
+#define CHELSIO_T4_LOAD_CFG	_IOW('f', T4_LOAD_CFG, struct t4_data)
 #endif
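
Putting the new scheduling interfaces together, a hedged usage sketch:
configure class 2 on channel 0 as an absolute rate limiter, then bind all NIC
tx queues of port 0 to it.  The /dev/t4nex0 path and the parameter values are
illustrative (cxgbetool is the usual front end for these ioctls).

#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include "t4_ioctl.h"

int
main(void)
{
	struct t4_sched_params sp;
	struct t4_sched_queue sq;
	int fd;

	fd = open("/dev/t4nex0", O_RDWR);
	if (fd < 0)
		err(1, "open");

	/* Class 2 = class-level rate limiter, absolute 100 Mb/s. */
	memset(&sp, 0, sizeof(sp));
	sp.subcmd = SCHED_CLASS_SUBCMD_PARAMS;
	sp.type = SCHED_CLASS_TYPE_PACKET;
	sp.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
	sp.u.params.mode = SCHED_CLASS_MODE_CLASS;
	sp.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	sp.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	sp.u.params.channel = 0;
	sp.u.params.cl = 2;
	sp.u.params.maxrate = 100000;	/* kbps */
	sp.u.params.pktsize = 1500;	/* average packet size hint */
	if (ioctl(fd, CHELSIO_T4_SCHED_CLASS, &sp) < 0)
		err(1, "CHELSIO_T4_SCHED_CLASS");

	/* Bind every NIC tx queue on port 0 to class 2. */
	memset(&sq, 0, sizeof(sq));
	sq.port = 0;
	sq.queue = -1;		/* -1 => all tx queues on the port */
	sq.cl = 2;		/* -1 here would unbind instead */
	if (ioctl(fd, CHELSIO_T4_SCHED_QUEUE, &sq) < 0)
		err(1, "CHELSIO_T4_SCHED_QUEUE");
	return (0);
}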

Modified: trunk/sys/dev/cxgbe/t4_l2t.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_l2t.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/t4_l2t.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -24,7 +25,7 @@
  * SUCH DAMAGE.
  */
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/t4_l2t.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_l2t.c 309442 2016-12-02 21:29:52Z jhb $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -110,28 +111,34 @@
  * The write may be synchronous or asynchronous.
  */
 int
-t4_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync)
+t4_write_l2e(struct l2t_entry *e, int sync)
 {
-	struct wrqe *wr;
+	struct sge_wrq *wrq;
+	struct adapter *sc;
+	struct wrq_cookie cookie;
 	struct cpl_l2t_write_req *req;
-	int idx = e->idx + sc->vres.l2t.start;
+	int idx;
 
 	mtx_assert(&e->lock, MA_OWNED);
+	MPASS(e->wrq != NULL);
 
-	wr = alloc_wrqe(sizeof(*req), &sc->sge.mgmtq);
-	if (wr == NULL)
+	wrq = e->wrq;
+	sc = wrq->adapter;
+
+	req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
+	if (req == NULL)
 		return (ENOMEM);
-	req = wrtod(wr);
 
+	idx = e->idx + sc->vres.l2t.start;
 	INIT_TP_WR(req, 0);
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
-	    V_SYNC_WR(sync) | V_TID_QID(sc->sge.fwq.abs_id)));
+	    V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
 	req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!sync));
 	req->l2t_idx = htons(idx);
 	req->vlan = htons(e->vlan);
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 
-	t4_wrq_tx(sc, wr);
+	commit_wrq_wr(wrq, req, &cookie);
 
 	if (sync && e->state != L2T_STATE_SWITCHING)
 		e->state = L2T_STATE_SYNC_WRITE;
@@ -173,9 +180,11 @@
 
 	e->vlan = vlan;
 	e->lport = port;
+	e->wrq = &sc->sge.mgmtq;
+	e->iqid = sc->sge.fwq.abs_id;
 	memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
 	mtx_lock(&e->lock);
-	rc = t4_write_l2e(sc, e, 0);
+	rc = t4_write_l2e(e, 0);
 	mtx_unlock(&e->lock);
 	return (rc);
 }
@@ -211,7 +220,6 @@
 	}
 
 	sc->l2t = d;
-	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
 
 	return (0);
 }
@@ -307,7 +315,6 @@
 		}
 
 		/*
-		 * XXX: e->ifp may not be around.
 		 * XXX: IPv6 addresses may not align properly in the output.
 		 */
 		sbuf_printf(sb, "\n%4u %-15s %02x:%02x:%02x:%02x:%02x:%02x %4d"
@@ -316,7 +323,7 @@
 			   e->dmac[3], e->dmac[4], e->dmac[5],
 			   e->vlan & 0xfff, vlan_prio(e), e->lport,
 			   l2e_state(e), atomic_load_acq_int(&e->refcnt),
-			   e->ifp->if_xname);
+			   e->ifp ? e->ifp->if_xname : "-");
 skip:
 		mtx_unlock(&e->lock);
 	}
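
The rewrite of t4_write_l2e() above replaces alloc_wrqe()/t4_wrq_tx() with a
reserve-fill-commit sequence on the entry's own queue.  Condensed, the pattern
looks like this (a sketch, assuming the start_wrq_wr()/commit_wrq_wr()
interface introduced by this change):

	struct wrq_cookie cookie;
	struct cpl_l2t_write_req *req;

	/* Reserve descriptor space directly on the ring; may return NULL. */
	req = start_wrq_wr(wrq, howmany(sizeof(*req), 16), &cookie);
	if (req == NULL)
		return (ENOMEM);	/* ring full; nothing to free or unwind */

	/* ... build the work request in place in the reserved slot ... */

	/* Publish the reserved slot to the hardware. */
	commit_wrq_wr(wrq, req, &cookie);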

Modified: trunk/sys/dev/cxgbe/t4_l2t.h
===================================================================
--- trunk/sys/dev/cxgbe/t4_l2t.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/t4_l2t.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2011 Chelsio Communications, Inc.
  * All rights reserved.
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/t4_l2t.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/t4_l2t.h 309442 2016-12-02 21:29:52Z jhb $
  *
  */
 
@@ -61,6 +62,8 @@
 	uint16_t state;			/* entry state */
 	uint16_t idx;			/* entry index */
 	uint32_t addr[4];		/* next hop IP or IPv6 address */
+	uint32_t iqid;			/* iqid for reply to write_l2e */
+	struct sge_wrq *wrq;		/* queue to use for write_l2e */
 	struct ifnet *ifp;		/* outgoing interface */
 	uint16_t smt_idx;		/* SMT index */
 	uint16_t vlan;			/* VLAN TCI (id: 0-11, prio: 13-15) */
@@ -90,7 +93,7 @@
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *);
 int t4_l2t_set_switching(struct adapter *, struct l2t_entry *, uint16_t,
     uint8_t, uint8_t *);
-int t4_write_l2e(struct adapter *, struct l2t_entry *, int);
+int t4_write_l2e(struct l2t_entry *, int);
 int do_l2t_write_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
 
 static inline void

Modified: trunk/sys/dev/cxgbe/t4_main.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_main.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/t4_main.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2011 Chelsio Communications, Inc.
  * All rights reserved.
@@ -26,8 +27,9 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/t4_main.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_main.c 331647 2018-03-27 20:49:47Z jhb $");
 
+#include "opt_ddb.h"
 #include "opt_inet.h"
 #include "opt_inet6.h"
 
@@ -36,6 +38,8 @@
 #include <sys/priv.h>
 #include <sys/kernel.h>
 #include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/counter.h>
 #include <sys/module.h>
 #include <sys/malloc.h>
 #include <sys/queue.h>
@@ -55,6 +59,17 @@
 #include <net/if_types.h>
 #include <net/if_dl.h>
 #include <net/if_vlan_var.h>
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+#if defined(__i386__) || defined(__amd64__)
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#endif
+#ifdef DDB
+#include <ddb/ddb.h>
+#include <ddb/db_lex.h>
+#endif
 
 #include "common/common.h"
 #include "common/t4_msg.h"
@@ -62,6 +77,7 @@
 #include "common/t4_regs_values.h"
 #include "t4_ioctl.h"
 #include "t4_l2t.h"
+#include "t4_mp_ring.h"
 
 /* T4 bus driver interface */
 static int t4_probe(device_t);
@@ -85,7 +101,7 @@
 static int cxgbe_probe(device_t);
 static int cxgbe_attach(device_t);
 static int cxgbe_detach(device_t);
-static device_method_t cxgbe_methods[] = {
+device_method_t cxgbe_methods[] = {
 	DEVMETHOD(device_probe,		cxgbe_probe),
 	DEVMETHOD(device_attach,	cxgbe_attach),
 	DEVMETHOD(device_detach,	cxgbe_detach),
@@ -97,19 +113,90 @@
 	sizeof(struct port_info)
 };
 
+/* T4 VI (vcxgbe) interface */
+static int vcxgbe_probe(device_t);
+static int vcxgbe_attach(device_t);
+static int vcxgbe_detach(device_t);
+static device_method_t vcxgbe_methods[] = {
+	DEVMETHOD(device_probe,		vcxgbe_probe),
+	DEVMETHOD(device_attach,	vcxgbe_attach),
+	DEVMETHOD(device_detach,	vcxgbe_detach),
+	{ 0, 0 }
+};
+static driver_t vcxgbe_driver = {
+	"vcxgbe",
+	vcxgbe_methods,
+	sizeof(struct vi_info)
+};
+
 static d_ioctl_t t4_ioctl;
-static d_open_t t4_open;
-static d_close_t t4_close;
 
 static struct cdevsw t4_cdevsw = {
        .d_version = D_VERSION,
-       .d_flags = 0,
-       .d_open = t4_open,
-       .d_close = t4_close,
        .d_ioctl = t4_ioctl,
        .d_name = "t4nex",
 };
 
+/* T5 bus driver interface */
+static int t5_probe(device_t);
+static device_method_t t5_methods[] = {
+	DEVMETHOD(device_probe,		t5_probe),
+	DEVMETHOD(device_attach,	t4_attach),
+	DEVMETHOD(device_detach,	t4_detach),
+
+	DEVMETHOD_END
+};
+static driver_t t5_driver = {
+	"t5nex",
+	t5_methods,
+	sizeof(struct adapter)
+};
+
+
+/* T5 port (cxl) interface */
+static driver_t cxl_driver = {
+	"cxl",
+	cxgbe_methods,
+	sizeof(struct port_info)
+};
+
+/* T5 VI (vcxl) interface */
+static driver_t vcxl_driver = {
+	"vcxl",
+	vcxgbe_methods,
+	sizeof(struct vi_info)
+};
+
+/* T6 bus driver interface */
+static int t6_probe(device_t);
+static device_method_t t6_methods[] = {
+	DEVMETHOD(device_probe,		t6_probe),
+	DEVMETHOD(device_attach,	t4_attach),
+	DEVMETHOD(device_detach,	t4_detach),
+
+	DEVMETHOD_END
+};
+static driver_t t6_driver = {
+	"t6nex",
+	t6_methods,
+	sizeof(struct adapter)
+};
+
+
+/* T6 port (cc) interface */
+static driver_t cc_driver = {
+	"cc",
+	cxgbe_methods,
+	sizeof(struct port_info)
+};
+
+/* T6 VI (vcc) interface */
+static driver_t vcc_driver = {
+	"vcc",
+	vcxgbe_methods,
+	sizeof(struct vi_info)
+};
+
 /* ifnet + media interface */
 static void cxgbe_init(void *);
 static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
@@ -118,21 +205,28 @@
 static int cxgbe_media_change(struct ifnet *);
 static void cxgbe_media_status(struct ifnet *, struct ifmediareq *);
 
-MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4 Ethernet driver and services");
+MALLOC_DEFINE(M_CXGBE, "cxgbe", "Chelsio T4/T5 Ethernet driver and services");
 
 /*
  * Correct lock order when you need to acquire multiple locks is t4_list_lock,
  * then ADAPTER_LOCK, then t4_uld_list_lock.
  */
-static struct mtx t4_list_lock;
-static SLIST_HEAD(, adapter) t4_list;
+static struct sx t4_list_lock;
+SLIST_HEAD(, adapter) t4_list;
 #ifdef TCP_OFFLOAD
-static struct mtx t4_uld_list_lock;
-static SLIST_HEAD(, uld_info) t4_uld_list;
+static struct sx t4_uld_list_lock;
+SLIST_HEAD(, uld_info) t4_uld_list;
 #endif
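
With the list locks converted from mutexes to sx locks here, the documented
ordering for the common two-lock case looks like this (illustrative sketch,
not code from this commit):

	sx_slock(&t4_list_lock);	/* adapter list first */
	ADAPTER_LOCK(sc);		/* then the per-adapter lock */
	/* ... */
	ADAPTER_UNLOCK(sc);
	sx_sunlock(&t4_list_lock);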
 
 /*
  * Tunables.  See tweak_tunables() too.
+ *
+ * Each tunable is set to a default value here if it's known at compile-time.
+ * Otherwise it is set to -n as an indication to tweak_tunables() that it should
+ * provide a reasonable default (up to n) when the driver is loaded.
+ *
+ * Tunables applicable to both T4 and T5 are under hw.cxgbe.  Those specific to
+ * T5 are under hw.cxl.
  */
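
The -n convention above might resolve as in this sketch; the helper is
hypothetical and the real logic lives in tweak_tunables():

	/* Hypothetical helper: a tunable left at -n becomes min(ncpus, n). */
	static void
	resolve_queue_tunable(int *t, int ncpus)
	{
		if (*t < 0)
			*t = imin(ncpus, -(*t));
	}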
 
 /*
@@ -139,80 +233,140 @@
  * Number of queues for tx and rx, 10G and 1G, NIC and offload.
  */
 #define NTXQ_10G 16
-static int t4_ntxq10g = -1;
+int t4_ntxq10g = -NTXQ_10G;
 TUNABLE_INT("hw.cxgbe.ntxq10g", &t4_ntxq10g);
 
 #define NRXQ_10G 8
-static int t4_nrxq10g = -1;
+int t4_nrxq10g = -NRXQ_10G;
 TUNABLE_INT("hw.cxgbe.nrxq10g", &t4_nrxq10g);
 
 #define NTXQ_1G 4
-static int t4_ntxq1g = -1;
+int t4_ntxq1g = -NTXQ_1G;
 TUNABLE_INT("hw.cxgbe.ntxq1g", &t4_ntxq1g);
 
 #define NRXQ_1G 2
-static int t4_nrxq1g = -1;
+int t4_nrxq1g = -NRXQ_1G;
 TUNABLE_INT("hw.cxgbe.nrxq1g", &t4_nrxq1g);
 
+#define NTXQ_VI 1
+static int t4_ntxq_vi = -NTXQ_VI;
+TUNABLE_INT("hw.cxgbe.ntxq_vi", &t4_ntxq_vi);
+
+#define NRXQ_VI 1
+static int t4_nrxq_vi = -NRXQ_VI;
+TUNABLE_INT("hw.cxgbe.nrxq_vi", &t4_nrxq_vi);
+
+static int t4_rsrv_noflowq = 0;
+TUNABLE_INT("hw.cxgbe.rsrv_noflowq", &t4_rsrv_noflowq);
+
 #ifdef TCP_OFFLOAD
 #define NOFLDTXQ_10G 8
-static int t4_nofldtxq10g = -1;
+static int t4_nofldtxq10g = -NOFLDTXQ_10G;
 TUNABLE_INT("hw.cxgbe.nofldtxq10g", &t4_nofldtxq10g);
 
 #define NOFLDRXQ_10G 2
-static int t4_nofldrxq10g = -1;
+static int t4_nofldrxq10g = -NOFLDRXQ_10G;
 TUNABLE_INT("hw.cxgbe.nofldrxq10g", &t4_nofldrxq10g);
 
 #define NOFLDTXQ_1G 2
-static int t4_nofldtxq1g = -1;
+static int t4_nofldtxq1g = -NOFLDTXQ_1G;
 TUNABLE_INT("hw.cxgbe.nofldtxq1g", &t4_nofldtxq1g);
 
 #define NOFLDRXQ_1G 1
-static int t4_nofldrxq1g = -1;
+static int t4_nofldrxq1g = -NOFLDRXQ_1G;
 TUNABLE_INT("hw.cxgbe.nofldrxq1g", &t4_nofldrxq1g);
+
+#define NOFLDTXQ_VI 1
+static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
+TUNABLE_INT("hw.cxgbe.nofldtxq_vi", &t4_nofldtxq_vi);
+
+#define NOFLDRXQ_VI 1
+static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
+TUNABLE_INT("hw.cxgbe.nofldrxq_vi", &t4_nofldrxq_vi);
 #endif
 
+#ifdef DEV_NETMAP
+#define NNMTXQ_VI 2
+static int t4_nnmtxq_vi = -NNMTXQ_VI;
+TUNABLE_INT("hw.cxgbe.nnmtxq_vi", &t4_nnmtxq_vi);
+
+#define NNMRXQ_VI 2
+static int t4_nnmrxq_vi = -NNMRXQ_VI;
+TUNABLE_INT("hw.cxgbe.nnmrxq_vi", &t4_nnmrxq_vi);
+#endif
+
 /*
  * Holdoff parameters for 10G and 1G ports.
  */
 #define TMR_IDX_10G 1
-static int t4_tmr_idx_10g = TMR_IDX_10G;
+int t4_tmr_idx_10g = TMR_IDX_10G;
 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_10G", &t4_tmr_idx_10g);
 
 #define PKTC_IDX_10G (-1)
-static int t4_pktc_idx_10g = PKTC_IDX_10G;
+int t4_pktc_idx_10g = PKTC_IDX_10G;
 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_10G", &t4_pktc_idx_10g);
 
 #define TMR_IDX_1G 1
-static int t4_tmr_idx_1g = TMR_IDX_1G;
+int t4_tmr_idx_1g = TMR_IDX_1G;
 TUNABLE_INT("hw.cxgbe.holdoff_timer_idx_1G", &t4_tmr_idx_1g);
 
 #define PKTC_IDX_1G (-1)
-static int t4_pktc_idx_1g = PKTC_IDX_1G;
+int t4_pktc_idx_1g = PKTC_IDX_1G;
 TUNABLE_INT("hw.cxgbe.holdoff_pktc_idx_1G", &t4_pktc_idx_1g);
 
 /*
  * Size (# of entries) of each tx and rx queue.
  */
-static unsigned int t4_qsize_txq = TX_EQ_QSIZE;
+unsigned int t4_qsize_txq = TX_EQ_QSIZE;
 TUNABLE_INT("hw.cxgbe.qsize_txq", &t4_qsize_txq);
 
-static unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
+unsigned int t4_qsize_rxq = RX_IQ_QSIZE;
 TUNABLE_INT("hw.cxgbe.qsize_rxq", &t4_qsize_rxq);
 
 /*
  * Interrupt types allowed (bits 0, 1, 2 = INTx, MSI, MSI-X respectively).
  */
-static int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
+int t4_intr_types = INTR_MSIX | INTR_MSI | INTR_INTX;
 TUNABLE_INT("hw.cxgbe.interrupt_types", &t4_intr_types);
 
 /*
  * Configuration file.
  */
-static char t4_cfg_file[32] = "default";
+#define DEFAULT_CF	"default"
+#define FLASH_CF	"flash"
+#define UWIRE_CF	"uwire"
+#define FPGA_CF		"fpga"
+static char t4_cfg_file[32] = DEFAULT_CF;
 TUNABLE_STR("hw.cxgbe.config_file", t4_cfg_file, sizeof(t4_cfg_file));
 
 /*
+ * PAUSE settings (bit 0, 1 = rx_pause, tx_pause respectively).
+ * rx_pause = 1 to heed incoming PAUSE frames, 0 to ignore them.
+ * tx_pause = 1 to emit PAUSE frames when the rx FIFO reaches its high water
+ *            mark or when signalled to do so, 0 to never emit PAUSE.
+ */
+static int t4_pause_settings = PAUSE_TX | PAUSE_RX;
+TUNABLE_INT("hw.cxgbe.pause_settings", &t4_pause_settings);
+
+/*
+ * Forward Error Correction settings (bit 0, 1, 2 = FEC_RS, FEC_BASER_RS,
+ * FEC_RESERVED respectively).
+ * -1 to run with the firmware default.
+ *  0 to disable FEC.
+ */
+static int t4_fec = -1;
+TUNABLE_INT("hw.cxgbe.fec", &t4_fec);
+
+/*
+ * Link autonegotiation.
+ * -1 to run with the firmware default.
+ *  0 to disable.
+ *  1 to enable.
+ */
+static int t4_autoneg = -1;
+TUNABLE_INT("hw.cxgbe.autoneg", &t4_autoneg);
+
+/*
  * Firmware auto-install by driver during attach (0, 1, 2 = prohibited, allowed,
  * encouraged respectively).
  */
@@ -223,9 +377,16 @@
  * ASIC features that will be used.  Disable the ones you don't want so that the
  * chip resources aren't wasted on features that will not be used.
  */
+static int t4_nbmcaps_allowed = 0;
+TUNABLE_INT("hw.cxgbe.nbmcaps_allowed", &t4_nbmcaps_allowed);
+
 static int t4_linkcaps_allowed = 0;	/* No DCBX, PPP, etc. by default */
 TUNABLE_INT("hw.cxgbe.linkcaps_allowed", &t4_linkcaps_allowed);
 
+static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
+    FW_CAPS_CONFIG_SWITCH_EGRESS;
+TUNABLE_INT("hw.cxgbe.switchcaps_allowed", &t4_switchcaps_allowed);
+
 static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC;
 TUNABLE_INT("hw.cxgbe.niccaps_allowed", &t4_niccaps_allowed);
 
@@ -232,29 +393,56 @@
 static int t4_toecaps_allowed = -1;
 TUNABLE_INT("hw.cxgbe.toecaps_allowed", &t4_toecaps_allowed);
 
-static int t4_rdmacaps_allowed = 0;
+static int t4_rdmacaps_allowed = -1;
 TUNABLE_INT("hw.cxgbe.rdmacaps_allowed", &t4_rdmacaps_allowed);
 
-static int t4_iscsicaps_allowed = 0;
+static int t4_cryptocaps_allowed = 0;
+TUNABLE_INT("hw.cxgbe.cryptocaps_allowed", &t4_cryptocaps_allowed);
+
+static int t4_iscsicaps_allowed = -1;
 TUNABLE_INT("hw.cxgbe.iscsicaps_allowed", &t4_iscsicaps_allowed);
 
 static int t4_fcoecaps_allowed = 0;
 TUNABLE_INT("hw.cxgbe.fcoecaps_allowed", &t4_fcoecaps_allowed);
 
+static int t5_write_combine = 0;
+TUNABLE_INT("hw.cxl.write_combine", &t5_write_combine);
+
+static int t4_num_vis = 1;
+TUNABLE_INT("hw.cxgbe.num_vis", &t4_num_vis);
+
+/* Functions used by extra VIs to obtain unique MAC addresses for each VI. */
+static int vi_mac_funcs[] = {
+	FW_VI_FUNC_OFLD,
+	FW_VI_FUNC_IWARP,
+	FW_VI_FUNC_OPENISCSI,
+	FW_VI_FUNC_OPENFCOE,
+	FW_VI_FUNC_FOISCSI,
+	FW_VI_FUNC_FOFCOE,
+};
+
 struct intrs_and_queues {
-	int intr_type;		/* INTx, MSI, or MSI-X */
-	int nirq;		/* Number of vectors */
-	int intr_flags;
-	int ntxq10g;		/* # of NIC txq's for each 10G port */
-	int nrxq10g;		/* # of NIC rxq's for each 10G port */
-	int ntxq1g;		/* # of NIC txq's for each 1G port */
-	int nrxq1g;		/* # of NIC rxq's for each 1G port */
-#ifdef TCP_OFFLOAD
-	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
-	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
-	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
-	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
-#endif
+	uint16_t intr_type;	/* INTx, MSI, or MSI-X */
+	uint16_t nirq;		/* Total # of vectors */
+	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
+	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
+	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
+	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
+	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
+	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
+	uint16_t rsrv_noflowq;	/* Flag whether to reserve queue 0 */
+	uint16_t nofldtxq10g;	/* # of TOE txq's for each 10G port */
+	uint16_t nofldrxq10g;	/* # of TOE rxq's for each 10G port */
+	uint16_t nofldtxq1g;	/* # of TOE txq's for each 1G port */
+	uint16_t nofldrxq1g;	/* # of TOE rxq's for each 1G port */
+
+	/* The vcxgbe/vcxl interfaces use these and not the ones above. */
+	uint16_t ntxq_vi;	/* # of NIC txq's */
+	uint16_t nrxq_vi;	/* # of NIC rxq's */
+	uint16_t nofldtxq_vi;	/* # of TOE txq's */
+	uint16_t nofldrxq_vi;	/* # of TOE rxq's */
+	uint16_t nnmtxq_vi;	/* # of netmap txq's */
+	uint16_t nnmrxq_vi;	/* # of netmap rxq's */
 };
 
 struct filter_entry {
@@ -267,66 +455,63 @@
         struct t4_filter_specification fs;
 };
 
-enum {
-	XGMAC_MTU	= (1 << 0),
-	XGMAC_PROMISC	= (1 << 1),
-	XGMAC_ALLMULTI	= (1 << 2),
-	XGMAC_VLANEX	= (1 << 3),
-	XGMAC_UCADDR	= (1 << 4),
-	XGMAC_MCADDRS	= (1 << 5),
-
-	XGMAC_ALL	= 0xffff
-};
-
-static int map_bars(struct adapter *);
 static void setup_memwin(struct adapter *);
-static int cfg_itype_and_nqueues(struct adapter *, int, int,
+static void position_memwin(struct adapter *, int, uint32_t);
+static int rw_via_memwin(struct adapter *, int, uint32_t, uint32_t *, int, int);
+static inline int read_via_memwin(struct adapter *, int, uint32_t, uint32_t *,
+    int);
+static inline int write_via_memwin(struct adapter *, int, uint32_t,
+    const uint32_t *, int);
+static int validate_mem_range(struct adapter *, uint32_t, int);
+static int fwmtype_to_hwmtype(int);
+static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
+    uint32_t *);
+static int fixup_devlog_params(struct adapter *);
+static int cfg_itype_and_nqueues(struct adapter *, int, int, int,
     struct intrs_and_queues *);
 static int prep_firmware(struct adapter *);
-static int upload_config_file(struct adapter *, const struct firmware *,
-    uint32_t *, uint32_t *);
-static int partition_resources(struct adapter *, const struct firmware *);
+static int partition_resources(struct adapter *, const struct firmware *,
+    const char *);
 static int get_params__pre_init(struct adapter *);
 static int get_params__post_init(struct adapter *);
 static int set_params__post_init(struct adapter *);
 static void t4_set_desc(struct adapter *);
-static void build_medialist(struct port_info *);
-static int update_mac_settings(struct port_info *, int);
-static int cxgbe_init_synchronized(struct port_info *);
-static int cxgbe_uninit_synchronized(struct port_info *);
-static int setup_intr_handlers(struct adapter *);
-static int adapter_full_init(struct adapter *);
-static int adapter_full_uninit(struct adapter *);
-static int port_full_init(struct port_info *);
-static int port_full_uninit(struct port_info *);
-static void quiesce_eq(struct adapter *, struct sge_eq *);
+static void build_medialist(struct port_info *, struct ifmedia *);
+static int cxgbe_init_synchronized(struct vi_info *);
+static int cxgbe_uninit_synchronized(struct vi_info *);
+static void quiesce_txq(struct adapter *, struct sge_txq *);
+static void quiesce_wrq(struct adapter *, struct sge_wrq *);
 static void quiesce_iq(struct adapter *, struct sge_iq *);
 static void quiesce_fl(struct adapter *, struct sge_fl *);
 static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
     driver_intr_t *, void *, char *);
 static int t4_free_irq(struct adapter *, struct irq *);
-static void reg_block_dump(struct adapter *, uint8_t *, unsigned int,
-    unsigned int);
-static void t4_get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
+static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
+static void vi_refresh_stats(struct adapter *, struct vi_info *);
+static void cxgbe_refresh_stats(struct adapter *, struct port_info *);
 static void cxgbe_tick(void *);
 static void cxgbe_vlan_config(void *, struct ifnet *, uint16_t);
-static int cpl_not_handled(struct sge_iq *, const struct rss_header *,
-    struct mbuf *);
-static int an_not_handled(struct sge_iq *, const struct rsp_ctrl *);
-static int fw_msg_not_handled(struct adapter *, const __be64 *);
-static int t4_sysctls(struct adapter *);
-static int cxgbe_sysctls(struct port_info *);
+static void cxgbe_sysctls(struct port_info *);
 static int sysctl_int_array(SYSCTL_HANDLER_ARGS);
 static int sysctl_bitfield(SYSCTL_HANDLER_ARGS);
+static int sysctl_btphy(SYSCTL_HANDLER_ARGS);
+static int sysctl_noflowq(SYSCTL_HANDLER_ARGS);
 static int sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS);
 static int sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS);
 static int sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS);
 static int sysctl_qsize_txq(SYSCTL_HANDLER_ARGS);
+static int sysctl_pause_settings(SYSCTL_HANDLER_ARGS);
+static int sysctl_fec(SYSCTL_HANDLER_ARGS);
+static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
 static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
+static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
 #ifdef SBUF_DRAIN
 static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
 static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
 static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
 static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
 static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
 static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
@@ -334,7 +519,10 @@
 static int sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS);
 static int sysctl_hw_sched(SYSCTL_HANDLER_ARGS);
 static int sysctl_lb_stats(SYSCTL_HANDLER_ARGS);
+static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
 static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
+static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
+static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
 static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
 static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
 static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
@@ -341,12 +529,23 @@
 static int sysctl_tcp_stats(SYSCTL_HANDLER_ARGS);
 static int sysctl_tids(SYSCTL_HANDLER_ARGS);
 static int sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS);
+static int sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS);
+static int sysctl_tp_la(SYSCTL_HANDLER_ARGS);
 static int sysctl_tx_rate(SYSCTL_HANDLER_ARGS);
+static int sysctl_ulprx_la(SYSCTL_HANDLER_ARGS);
+static int sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS);
+static int sysctl_tc_params(SYSCTL_HANDLER_ARGS);
 #endif
-static inline void txq_start(struct ifnet *, struct sge_txq *);
-static uint32_t fconf_to_mode(uint32_t);
+#ifdef TCP_OFFLOAD
+static int sysctl_tp_tick(SYSCTL_HANDLER_ARGS);
+static int sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS);
+static int sysctl_tp_timer(SYSCTL_HANDLER_ARGS);
+#endif
+static uint32_t fconf_iconf_to_mode(uint32_t, uint32_t);
 static uint32_t mode_to_fconf(uint32_t);
-static uint32_t fspec_to_fconf(struct t4_filter_specification *);
+static uint32_t mode_to_iconf(uint32_t);
+static int check_fspec_against_fconf_iconf(struct adapter *,
+    struct t4_filter_specification *);
 static int get_filter_mode(struct adapter *, uint32_t *);
 static int set_filter_mode(struct adapter *, uint32_t);
 static inline uint64_t get_filter_hits(struct adapter *, uint32_t);
@@ -356,16 +555,19 @@
 static void clear_filter(struct filter_entry *);
 static int set_filter_wr(struct adapter *, int);
 static int del_filter_wr(struct adapter *, int);
+static int set_tcb_rpl(struct sge_iq *, const struct rss_header *,
+    struct mbuf *);
 static int get_sge_context(struct adapter *, struct t4_sge_context *);
 static int load_fw(struct adapter *, struct t4_data *);
-static int read_card_mem(struct adapter *, struct t4_mem_range *);
+static int load_cfg(struct adapter *, struct t4_data *);
+static int read_card_mem(struct adapter *, int, struct t4_mem_range *);
 static int read_i2c(struct adapter *, struct t4_i2c_data *);
 #ifdef TCP_OFFLOAD
-static int toe_capability(struct port_info *, int);
+static int toe_capability(struct vi_info *, int);
 #endif
-static int t4_mod_event(module_t, int, void *);
+static int mod_event(module_t, int, void *);
 
-struct t4_pciids {
+struct {
 	uint16_t device;
 	char *desc;
 } t4_pciids[] = {
@@ -382,6 +584,53 @@
 	{0x4409, "Chelsio T420-BT"},
 	{0x440a, "Chelsio T404-BT"},
 	{0x440e, "Chelsio T440-LP-CR"},
+}, t5_pciids[] = {
+	{0xb000, "Chelsio Terminator 5 FPGA"},
+	{0x5400, "Chelsio T580-dbg"},
+	{0x5401,  "Chelsio T520-CR"},		/* 2 x 10G */
+	{0x5402,  "Chelsio T522-CR"},		/* 2 x 10G, 2 x 1G */
+	{0x5403,  "Chelsio T540-CR"},		/* 4 x 10G */
+	{0x5407,  "Chelsio T520-SO"},		/* 2 x 10G, nomem */
+	{0x5409,  "Chelsio T520-BT"},		/* 2 x 10GBaseT */
+	{0x540a,  "Chelsio T504-BT"},		/* 4 x 1G */
+	{0x540d,  "Chelsio T580-CR"},		/* 2 x 40G */
+	{0x540e,  "Chelsio T540-LP-CR"},	/* 4 x 10G */
+	{0x5410,  "Chelsio T580-LP-CR"},	/* 2 x 40G */
+	{0x5411,  "Chelsio T520-LL-CR"},	/* 2 x 10G */
+	{0x5412,  "Chelsio T560-CR"},		/* 1 x 40G, 2 x 10G */
+	{0x5414,  "Chelsio T580-LP-SO-CR"},	/* 2 x 40G, nomem */
+	{0x5415,  "Chelsio T502-BT"},		/* 2 x 1G */
+#ifdef notyet
+	{0x5404,  "Chelsio T520-BCH"},
+	{0x5405,  "Chelsio T540-BCH"},
+	{0x5406,  "Chelsio T540-CH"},
+	{0x5408,  "Chelsio T520-CX"},
+	{0x540b,  "Chelsio B520-SR"},
+	{0x540c,  "Chelsio B504-BT"},
+	{0x540f,  "Chelsio Amsterdam"},
+	{0x5413,  "Chelsio T580-CHR"},
+#endif
+}, t6_pciids[] = {
+	{0xc006, "Chelsio Terminator 6 FPGA"},	/* T6 PE10K6 FPGA (PF0) */
+	{0x6400, "Chelsio T6-DBG-25"},		/* 2 x 10/25G, debug */
+	{0x6401, "Chelsio T6225-CR"},		/* 2 x 10/25G */
+	{0x6402, "Chelsio T6225-SO-CR"},	/* 2 x 10/25G, nomem */
+	{0x6403, "Chelsio T6425-CR"},		/* 4 x 10/25G */
+	{0x6404, "Chelsio T6425-SO-CR"},	/* 4 x 10/25G, nomem */
+	{0x6405, "Chelsio T6225-OCP-SO"},	/* 2 x 10/25G, nomem */
+	{0x6406, "Chelsio T62100-OCP-SO"},	/* 2 x 40/50/100G, nomem */
+	{0x6407, "Chelsio T62100-LP-CR"},	/* 2 x 40/50/100G */
+	{0x6408, "Chelsio T62100-SO-CR"},	/* 2 x 40/50/100G, nomem */
+	{0x6409, "Chelsio T6210-BT"},		/* 2 x 10GBASE-T */
+	{0x640d, "Chelsio T62100-CR"},		/* 2 x 40/50/100G */
+	{0x6410, "Chelsio T6-DBG-100"},		/* 2 x 40/50/100G, debug */
+	{0x6411, "Chelsio T6225-LL-CR"},	/* 2 x 10/25G */
+	{0x6414, "Chelsio T61100-OCP-SO"},	/* 1 x 40/50/100G, nomem */
+	{0x6415, "Chelsio T6201-BT"},		/* 2 x 1000BASE-T */
+
+	/* Custom */
+	{0x6480, "Chelsio T6225 80"},
+	{0x6481, "Chelsio T62100 81"},
 };
 
 #ifdef TCP_OFFLOAD
@@ -392,11 +641,8 @@
 CTASSERT(offsetof(struct sge_ofld_rxq, iq) == offsetof(struct sge_rxq, iq));
 CTASSERT(offsetof(struct sge_ofld_rxq, fl) == offsetof(struct sge_rxq, fl));
 #endif
+CTASSERT(sizeof(struct cluster_metadata) <= CL_METADATA_SIZE);
 
-/* No easy way to include t4_msg.h before adapter.h so we check this way */
-CTASSERT(nitems(((struct adapter *)0)->cpl_handler) == NUM_CPL_CMDS);
-CTASSERT(nitems(((struct adapter *)0)->fw_msg_handler) == NUM_FW6_TYPES);
-
 static int
 t4_probe(device_t dev)
 {
@@ -423,19 +669,139 @@
 }
 
 static int
+t5_probe(device_t dev)
+{
+	int i;
+	uint16_t v = pci_get_vendor(dev);
+	uint16_t d = pci_get_device(dev);
+	uint8_t f = pci_get_function(dev);
+
+	if (v != PCI_VENDOR_ID_CHELSIO)
+		return (ENXIO);
+
+	/* Attach only to PF0 of the FPGA */
+	if (d == 0xb000 && f != 0)
+		return (ENXIO);
+
+	for (i = 0; i < nitems(t5_pciids); i++) {
+		if (d == t5_pciids[i].device) {
+			device_set_desc(dev, t5_pciids[i].desc);
+			return (BUS_PROBE_DEFAULT);
+		}
+	}
+
+	return (ENXIO);
+}
+
+static int
+t6_probe(device_t dev)
+{
+	int i;
+	uint16_t v = pci_get_vendor(dev);
+	uint16_t d = pci_get_device(dev);
+
+	if (v != PCI_VENDOR_ID_CHELSIO)
+		return (ENXIO);
+
+	for (i = 0; i < nitems(t6_pciids); i++) {
+		if (d == t6_pciids[i].device) {
+			device_set_desc(dev, t6_pciids[i].desc);
+			return (BUS_PROBE_DEFAULT);
+		}
+	}
+
+	return (ENXIO);
+}
+
+static void
+t5_attribute_workaround(device_t dev)
+{
+	device_t root_port;
+	uint32_t v;
+
+	/*
+	 * The T5 chips do not properly echo the No Snoop and Relaxed
+	 * Ordering attributes when replying to a TLP from a Root
+	 * Port.  As a workaround, find the parent Root Port and
+	 * disable No Snoop and Relaxed Ordering.  Note that this
+	 * affects all devices under this root port.
+	 */
+	root_port = pci_find_pcie_root_port(dev);
+	if (root_port == NULL) {
+		device_printf(dev, "Unable to find parent root port\n");
+		return;
+	}
+
+	v = pcie_adjust_config(root_port, PCIER_DEVICE_CTL,
+	    PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE, 0, 2);
+	if ((v & (PCIEM_CTL_RELAXED_ORD_ENABLE | PCIEM_CTL_NOSNOOP_ENABLE)) !=
+	    0)
+		device_printf(dev, "Disabled No Snoop/Relaxed Ordering on %s\n",
+		    device_get_nameunit(root_port));
+}
+
+static const struct devnames devnames[] = {
+	{
+		.nexus_name = "t4nex",
+		.ifnet_name = "cxgbe",
+		.vi_ifnet_name = "vcxgbe",
+		.pf03_drv_name = "t4iov",
+		.vf_nexus_name = "t4vf",
+		.vf_ifnet_name = "cxgbev"
+	}, {
+		.nexus_name = "t5nex",
+		.ifnet_name = "cxl",
+		.vi_ifnet_name = "vcxl",
+		.pf03_drv_name = "t5iov",
+		.vf_nexus_name = "t5vf",
+		.vf_ifnet_name = "cxlv"
+	}, {
+		.nexus_name = "t6nex",
+		.ifnet_name = "cc",
+		.vi_ifnet_name = "vcc",
+		.pf03_drv_name = "t6iov",
+		.vf_nexus_name = "t6vf",
+		.vf_ifnet_name = "ccv"
+	}
+};
+
+void
+t4_init_devnames(struct adapter *sc)
+{
+	int id;
+
+	id = chip_id(sc);
+	if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
+		sc->names = &devnames[id - CHELSIO_T4];
+	else {
+		device_printf(sc->dev, "chip id %d is not supported.\n", id);
+		sc->names = NULL;
+	}
+}
+
+static int
 t4_attach(device_t dev)
 {
 	struct adapter *sc;
-	int rc = 0, i, n10g, n1g, rqidx, tqidx;
+	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
+	struct make_dev_args mda;
 	struct intrs_and_queues iaq;
 	struct sge *s;
+	uint8_t *buf;
 #ifdef TCP_OFFLOAD
 	int ofld_rqidx, ofld_tqidx;
 #endif
+#ifdef DEV_NETMAP
+	int nm_rqidx, nm_tqidx;
+#endif
+	int num_vis;
 
 	sc = device_get_softc(dev);
 	sc->dev = dev;
+	TUNABLE_INT_FETCH("hw.cxgbe.dflags", &sc->debug_flags);
 
+	if ((pci_get_device(dev) & 0xff00) == 0x5400)
+		t5_attribute_workaround(dev);
 	pci_enable_busmaster(dev);
 	if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) {
 		uint32_t v;
@@ -444,23 +810,43 @@
 		v = pci_read_config(dev, i + PCIER_DEVICE_CTL, 2);
 		v |= PCIEM_CTL_RELAXED_ORD_ENABLE;
 		pci_write_config(dev, i + PCIER_DEVICE_CTL, v, 2);
+
+		sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
 	}
 
+	sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
+	sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
+	sc->traceq = -1;
+	snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
+	    device_get_nameunit(dev));
+	mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
+
 	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
 	    device_get_nameunit(dev));
 	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
-	mtx_lock(&t4_list_lock);
-	SLIST_INSERT_HEAD(&t4_list, sc, link);
-	mtx_unlock(&t4_list_lock);
+	t4_add_adapter(sc);
 
 	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
 	TAILQ_INIT(&sc->sfl);
-	callout_init(&sc->sfl_callout, CALLOUT_MPSAFE);
+	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
 
-	rc = map_bars(sc);
+	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
+
+	rc = t4_map_bars_0_and_4(sc);
 	if (rc != 0)
 		goto done; /* error message displayed already */
 
+	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+
+	/* Prepare the adapter for operation. */
+	buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
+	rc = -t4_prep_adapter(sc, buf);
+	free(buf, M_CXGBE);
+	if (rc != 0) {
+		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
+		goto done;
+	}
+
 	/*
 	 * This is the real PF# to which we're attaching.  Works from within PCI
 	 * passthrough environments too, where pci_get_function() could return a
@@ -467,22 +853,14 @@
 	 * different PF# depending on the passthrough configuration.  We need to
 	 * use the real PF# in all our communication with the firmware.
 	 */
-	sc->pf = G_SOURCEPF(t4_read_reg(sc, A_PL_WHOAMI));
+	j = t4_read_reg(sc, A_PL_WHOAMI);
+	sc->pf = chip_id(sc) <= CHELSIO_T5 ? G_SOURCEPF(j) : G_T6_SOURCEPF(j);
 	sc->mbox = sc->pf;
 
-	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
-	sc->an_handler = an_not_handled;
-	for (i = 0; i < nitems(sc->cpl_handler); i++)
-		sc->cpl_handler[i] = cpl_not_handled;
-	for (i = 0; i < nitems(sc->fw_msg_handler); i++)
-		sc->fw_msg_handler[i] = fw_msg_not_handled;
-	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
-
-	/* Prepare the adapter for operation */
-	rc = -t4_prep_adapter(sc);
-	if (rc != 0) {
-		device_printf(dev, "failed to prepare adapter: %d.\n", rc);
-		goto done;
+	t4_init_devnames(sc);
+	if (sc->names == NULL) {
+		rc = ENOTSUP;
+		goto done; /* error message displayed already */
 	}
 
 	/*
@@ -491,9 +869,18 @@
 	 * will work even in "recovery mode".
 	 */
 	setup_memwin(sc);
-	sc->cdev = make_dev(&t4_cdevsw, device_get_unit(dev), UID_ROOT,
-	    GID_WHEEL, 0600, "%s", device_get_nameunit(dev));
-	sc->cdev->si_drv1 = sc;
+	if (t4_init_devlog_params(sc, 0) == 0)
+		fixup_devlog_params(sc);
+	make_dev_args_init(&mda);
+	mda.mda_devsw = &t4_cdevsw;
+	mda.mda_uid = UID_ROOT;
+	mda.mda_gid = GID_WHEEL;
+	mda.mda_mode = 0600;
+	mda.mda_si_drv1 = sc;
+	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
+	if (rc != 0)
+		device_printf(dev, "failed to create nexus char device: %d.\n",
+		    rc);
 
 	/* Go no further if recovery mode has been requested. */
 	if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
@@ -501,70 +888,53 @@
 		goto done;
 	}
 
+#if defined(__i386__)
+	if ((cpu_feature & CPUID_CX8) == 0) {
+		device_printf(dev, "64 bit atomics not available.\n");
+		rc = ENOTSUP;
+		goto done;
+	}
+#endif
+
 	/* Prepare the firmware for operation */
 	rc = prep_firmware(sc);
 	if (rc != 0)
 		goto done; /* error message displayed already */
 
-	rc = get_params__pre_init(sc);
+	rc = get_params__post_init(sc);
 	if (rc != 0)
 		goto done; /* error message displayed already */
 
-	rc = t4_sge_init(sc);
+	rc = set_params__post_init(sc);
 	if (rc != 0)
 		goto done; /* error message displayed already */
 
-	if (sc->flags & MASTER_PF) {
-		/* get basic stuff going */
-		rc = -t4_fw_initialize(sc, sc->mbox);
-		if (rc != 0) {
-			device_printf(dev, "early init failed: %d.\n", rc);
-			goto done;
-		}
-	}
-
-	rc = get_params__post_init(sc);
+	rc = t4_map_bar_2(sc);
 	if (rc != 0)
 		goto done; /* error message displayed already */
 
-	rc = set_params__post_init(sc);
+	rc = t4_create_dma_tag(sc);
 	if (rc != 0)
 		goto done; /* error message displayed already */
 
-	if (sc->flags & MASTER_PF) {
-		uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
-
-		/* final tweaks to some settings */
-
-		t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd,
-		    sc->params.b_wnd);
-		/* 4K, 16K, 64K, 256K DDP "page sizes" */
-		t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(0) | V_HPZ1(2) |
-		    V_HPZ2(4) | V_HPZ3(6));
-		t4_set_reg_field(sc, A_ULP_RX_CTL, F_TDDPTAGTCB, F_TDDPTAGTCB);
-		t4_set_reg_field(sc, A_TP_PARA_REG5,
-		    V_INDICATESIZE(M_INDICATESIZE) |
-		    F_REARMDDPOFFSET | F_RESETDDPOFFSET,
-		    V_INDICATESIZE(indsz) |
-		    F_REARMDDPOFFSET | F_RESETDDPOFFSET);
-	} else {
-		/*
-		 * XXX: Verify that we can live with whatever the master driver
-		 * has done so far, and hope that it doesn't change any global
-		 * setting from underneath us in the future.
-		 */
+	/*
+	 * Number of VIs to create per-port.  The first VI is the "main" regular
+	 * VI for the port.  The rest are additional virtual interfaces on the
+	 * same physical port.  Note that the main VI does not have native
+	 * netmap support but the extra VIs do.
+	 *
+	 * Limit the number of VIs per port to the number of available
+	 * MAC addresses per port.
+	 */
+	if (t4_num_vis >= 1)
+		num_vis = t4_num_vis;
+	else
+		num_vis = 1;
+	if (num_vis > nitems(vi_mac_funcs)) {
+		num_vis = nitems(vi_mac_funcs);
+		device_printf(dev, "Number of VIs limited to %d\n", num_vis);
 	}
 
-	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
-	    A_TP_VLAN_PRI_MAP);
-
-	for (i = 0; i < NCHAN; i++)
-		sc->params.tp.tx_modq[i] = i;
-
-	rc = t4_create_dma_tag(sc);
-	if (rc != 0)
-		goto done; /* error message displayed already */
-
 	/*
 	 * First pass over all the ports - allocate VIs and initialize some
 	 * basic parameters like mac address, port type, etc.  We also figure
@@ -574,6 +944,7 @@
 	n10g = n1g = 0;
 	for_each_port(sc, i) {
 		struct port_info *pi;
+		struct link_config *lc;
 
 		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
 		sc->port[i] = pi;
@@ -581,37 +952,61 @@
 		/* These must be set before t4_port_init */
 		pi->adapter = sc;
 		pi->port_id = i;
+		/*
+		 * XXX: vi[0] is special so we can't delay this allocation until
+		 * pi->nvi's final value is known.
+		 */
+		pi->vi = malloc(sizeof(struct vi_info) * num_vis, M_CXGBE,
+		    M_ZERO | M_WAITOK);
 
-		/* Allocate the vi and initialize parameters like mac addr */
-		rc = -t4_port_init(pi, sc->mbox, sc->pf, 0);
+		/*
+		 * Allocate the "main" VI and initialize parameters
+		 * like mac addr.
+		 */
+		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
 		if (rc != 0) {
 			device_printf(dev, "unable to initialize port %d: %d\n",
 			    i, rc);
+			free(pi->vi, M_CXGBE);
 			free(pi, M_CXGBE);
 			sc->port[i] = NULL;
 			goto done;
 		}
 
+		lc = &pi->link_cfg;
+		lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
+		lc->requested_fc |= t4_pause_settings;
+		if (t4_fec != -1) {
+			lc->requested_fec = t4_fec &
+			    G_FW_PORT_CAP_FEC(lc->supported);
+		}
+		if (lc->supported & FW_PORT_CAP_ANEG && t4_autoneg != -1) {
+			lc->autoneg = t4_autoneg ? AUTONEG_ENABLE :
+			    AUTONEG_DISABLE;
+		}
+		lc->requested_speed = port_top_speed_raw(pi);
+
+		rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+		if (rc != 0) {
+			device_printf(dev, "port %d l1cfg failed: %d\n", i, rc);
+			free(pi->vi, M_CXGBE);
+			free(pi, M_CXGBE);
+			sc->port[i] = NULL;
+			goto done;
+		}
+
 		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
 		    device_get_nameunit(dev), i);
 		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
+		sc->chan_map[pi->tx_chan] = i;
 
-		if (is_10G_port(pi)) {
+		if (port_top_speed(pi) >= 10) {
 			n10g++;
-			pi->tmr_idx = t4_tmr_idx_10g;
-			pi->pktc_idx = t4_pktc_idx_10g;
 		} else {
 			n1g++;
-			pi->tmr_idx = t4_tmr_idx_1g;
-			pi->pktc_idx = t4_pktc_idx_1g;
 		}
 
-		pi->xact_addr_filt = -1;
-
-		pi->qsize_rxq = t4_qsize_rxq;
-		pi->qsize_txq = t4_qsize_txq;
-
-		pi->dev = device_add_child(dev, "cxgbe", -1);
+		pi->dev = device_add_child(dev, sc->names->ifnet_name, -1);
 		if (pi->dev == NULL) {
 			device_printf(dev,
 			    "failed to add device for port %d.\n", i);
@@ -618,6 +1013,7 @@
 			rc = ENXIO;
 			goto done;
 		}
+		pi->vi[0].dev = pi->dev;
 		device_set_softc(pi->dev, pi);
 	}
 
@@ -624,26 +1020,35 @@
 	/*
 	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
 	 */
-	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
+	rc = cfg_itype_and_nqueues(sc, n10g, n1g, num_vis, &iaq);
 	if (rc != 0)
 		goto done; /* error message displayed already */
+	if (iaq.nrxq_vi + iaq.nofldrxq_vi + iaq.nnmrxq_vi == 0)
+		num_vis = 1;
 
 	sc->intr_type = iaq.intr_type;
 	sc->intr_count = iaq.nirq;
-	sc->flags |= iaq.intr_flags;
 
 	s = &sc->sge;
 	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
 	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
+	if (num_vis > 1) {
+		s->nrxq += (n10g + n1g) * (num_vis - 1) * iaq.nrxq_vi;
+		s->ntxq += (n10g + n1g) * (num_vis - 1) * iaq.ntxq_vi;
+	}
 	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
 	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
 	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
-
 #ifdef TCP_OFFLOAD
 	if (is_offload(sc)) {
-
 		s->nofldrxq = n10g * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
 		s->nofldtxq = n10g * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
+		if (num_vis > 1) {
+			s->nofldrxq += (n10g + n1g) * (num_vis - 1) *
+			    iaq.nofldrxq_vi;
+			s->nofldtxq += (n10g + n1g) * (num_vis - 1) *
+			    iaq.nofldtxq_vi;
+		}
 		s->neq += s->nofldtxq + s->nofldrxq;
 		s->niq += s->nofldrxq;
 
@@ -653,7 +1058,20 @@
 		    M_CXGBE, M_ZERO | M_WAITOK);
 	}
 #endif
+#ifdef DEV_NETMAP
+	if (num_vis > 1) {
+		s->nnmrxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmrxq_vi;
+		s->nnmtxq = (n10g + n1g) * (num_vis - 1) * iaq.nnmtxq_vi;
+	}
+	s->neq += s->nnmtxq + s->nnmrxq;
+	s->niq += s->nnmrxq;
 
+	s->nm_rxq = malloc(s->nnmrxq * sizeof(struct sge_nm_rxq),
+	    M_CXGBE, M_ZERO | M_WAITOK);
+	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
+	    M_CXGBE, M_ZERO | M_WAITOK);
+#endif
+
 	s->ctrlq = malloc(sc->params.nports * sizeof(struct sge_wrq), M_CXGBE,
 	    M_ZERO | M_WAITOK);
 	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
@@ -669,6 +1087,7 @@
 	    M_ZERO | M_WAITOK);
 
 	t4_init_l2t(sc, M_WAITOK);
+	t4_init_tx_sched(sc);
 
 	/*
 	 * Second pass over the ports.  This time we know the number of rx and
@@ -678,43 +1097,78 @@
 #ifdef TCP_OFFLOAD
 	ofld_rqidx = ofld_tqidx = 0;
 #endif
+#ifdef DEV_NETMAP
+	nm_rqidx = nm_tqidx = 0;
+#endif
 	for_each_port(sc, i) {
 		struct port_info *pi = sc->port[i];
+		struct vi_info *vi;
 
 		if (pi == NULL)
 			continue;
 
-		pi->first_rxq = rqidx;
-		pi->first_txq = tqidx;
-		if (is_10G_port(pi)) {
-			pi->nrxq = iaq.nrxq10g;
-			pi->ntxq = iaq.ntxq10g;
-		} else {
-			pi->nrxq = iaq.nrxq1g;
-			pi->ntxq = iaq.ntxq1g;
-		}
+		pi->nvi = num_vis;
+		for_each_vi(pi, j, vi) {
+			vi->pi = pi;
+			vi->qsize_rxq = t4_qsize_rxq;
+			vi->qsize_txq = t4_qsize_txq;
 
-		rqidx += pi->nrxq;
-		tqidx += pi->ntxq;
+			vi->first_rxq = rqidx;
+			vi->first_txq = tqidx;
+			if (port_top_speed(pi) >= 10) {
+				vi->tmr_idx = t4_tmr_idx_10g;
+				vi->pktc_idx = t4_pktc_idx_10g;
+				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
+				vi->nrxq = j == 0 ? iaq.nrxq10g : iaq.nrxq_vi;
+				vi->ntxq = j == 0 ? iaq.ntxq10g : iaq.ntxq_vi;
+			} else {
+				vi->tmr_idx = t4_tmr_idx_1g;
+				vi->pktc_idx = t4_pktc_idx_1g;
+				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
+				vi->nrxq = j == 0 ? iaq.nrxq1g : iaq.nrxq_vi;
+				vi->ntxq = j == 0 ? iaq.ntxq1g : iaq.ntxq_vi;
+			}
+			rqidx += vi->nrxq;
+			tqidx += vi->ntxq;
 
+			if (j == 0 && vi->ntxq > 1)
+				vi->rsrv_noflowq = iaq.rsrv_noflowq ? 1 : 0;
+			else
+				vi->rsrv_noflowq = 0;
+
 #ifdef TCP_OFFLOAD
-		if (is_offload(sc)) {
-			pi->first_ofld_rxq = ofld_rqidx;
-			pi->first_ofld_txq = ofld_tqidx;
-			if (is_10G_port(pi)) {
-				pi->nofldrxq = iaq.nofldrxq10g;
-				pi->nofldtxq = iaq.nofldtxq10g;
+			vi->first_ofld_rxq = ofld_rqidx;
+			vi->first_ofld_txq = ofld_tqidx;
+			if (port_top_speed(pi) >= 10) {
+				vi->flags |= iaq.intr_flags_10g & INTR_OFLD_RXQ;
+				vi->nofldrxq = j == 0 ? iaq.nofldrxq10g :
+				    iaq.nofldrxq_vi;
+				vi->nofldtxq = j == 0 ? iaq.nofldtxq10g :
+				    iaq.nofldtxq_vi;
 			} else {
-				pi->nofldrxq = iaq.nofldrxq1g;
-				pi->nofldtxq = iaq.nofldtxq1g;
+				vi->flags |= iaq.intr_flags_1g & INTR_OFLD_RXQ;
+				vi->nofldrxq = j == 0 ? iaq.nofldrxq1g :
+				    iaq.nofldrxq_vi;
+				vi->nofldtxq = j == 0 ? iaq.nofldtxq1g :
+				    iaq.nofldtxq_vi;
 			}
-			ofld_rqidx += pi->nofldrxq;
-			ofld_tqidx += pi->nofldtxq;
+			ofld_rqidx += vi->nofldrxq;
+			ofld_tqidx += vi->nofldtxq;
+#endif
+#ifdef DEV_NETMAP
+			if (j > 0) {
+				vi->first_nm_rxq = nm_rqidx;
+				vi->first_nm_txq = nm_tqidx;
+				vi->nnmrxq = iaq.nnmrxq_vi;
+				vi->nnmtxq = iaq.nnmtxq_vi;
+				nm_rqidx += vi->nnmrxq;
+				nm_tqidx += vi->nnmtxq;
+			}
+#endif
 		}
-#endif
 	}
 
-	rc = setup_intr_handlers(sc);
+	rc = t4_setup_intr_handlers(sc);
 	if (rc != 0) {
 		device_printf(dev,
 		    "failed to setup interrupt handlers: %d\n", rc);
@@ -721,6 +1175,12 @@
 		goto done;
 	}
 
+	rc = bus_generic_probe(dev);
+	if (rc != 0) {
+		device_printf(dev, "failed to probe child drivers: %d\n", rc);
+		goto done;
+	}
+
 	rc = bus_generic_attach(dev);
 	if (rc != 0) {
 		device_printf(dev,
@@ -729,9 +1189,9 @@
 	}
 
 	device_printf(dev,
-	    "PCIe x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
-	    sc->params.pci.width, sc->params.nports, sc->intr_count,
-	    sc->intr_type == INTR_MSIX ? "MSI-X" :
+	    "PCIe gen%d x%d, %d ports, %d %s interrupt%s, %d eq, %d iq\n",
+	    sc->params.pci.speed, sc->params.pci.width, sc->params.nports,
+	    sc->intr_count, sc->intr_type == INTR_MSIX ? "MSI-X" :
 	    (sc->intr_type == INTR_MSI ? "MSI" : "INTx"),
 	    sc->intr_count > 1 ? "s" : "", sc->sge.neq, sc->sge.niq);
 
@@ -746,7 +1206,7 @@
 	}
 
 	if (rc != 0)
-		t4_detach(dev);
+		t4_detach_common(dev);
 	else
 		t4_sysctls(sc);
 
@@ -760,13 +1220,25 @@
 t4_detach(device_t dev)
 {
 	struct adapter *sc;
+
+	sc = device_get_softc(dev);
+
+	return (t4_detach_common(dev));
+}
+
+int
+t4_detach_common(device_t dev)
+{
+	struct adapter *sc;
 	struct port_info *pi;
 	int i, rc;
 
 	sc = device_get_softc(dev);
 
-	if (sc->flags & FULL_INIT_DONE)
-		t4_intr_disable(sc);
+	if (sc->flags & FULL_INIT_DONE) {
+		if (!(sc->flags & IS_VF))
+			t4_intr_disable(sc);
+	}
 
 	if (sc->cdev) {
 		destroy_dev(sc->cdev);
@@ -773,32 +1245,40 @@
 		sc->cdev = NULL;
 	}
 
-	rc = bus_generic_detach(dev);
-	if (rc) {
-		device_printf(dev,
-		    "failed to detach child devices: %d\n", rc);
-		return (rc);
+	if (device_is_attached(dev)) {
+		rc = bus_generic_detach(dev);
+		if (rc) {
+			device_printf(dev,
+			    "failed to detach child devices: %d\n", rc);
+			return (rc);
+		}
 	}
 
 	for (i = 0; i < sc->intr_count; i++)
 		t4_free_irq(sc, &sc->irq[i]);
 
+	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
+		t4_free_tx_sched(sc);
+
 	for (i = 0; i < MAX_NPORTS; i++) {
 		pi = sc->port[i];
 		if (pi) {
-			t4_free_vi(pi->adapter, sc->mbox, sc->pf, 0, pi->viid);
+			t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
 			if (pi->dev)
 				device_delete_child(dev, pi->dev);
 
 			mtx_destroy(&pi->pi_lock);
+			free(pi->vi, M_CXGBE);
 			free(pi, M_CXGBE);
 		}
 	}
 
+	device_delete_children(dev);
+
 	if (sc->flags & FULL_INIT_DONE)
 		adapter_full_uninit(sc);
 
-	if (sc->flags & FW_OK)
+	if ((sc->flags & (IS_VF | FW_OK)) == FW_OK)
 		t4_fw_bye(sc, sc->mbox);
 
 	if (sc->intr_type == INTR_MSI || sc->intr_type == INTR_MSIX)
@@ -808,6 +1288,10 @@
 		bus_release_resource(dev, SYS_RES_MEMORY, sc->regs_rid,
 		    sc->regs_res);
 
+	if (sc->udbs_res)
+		bus_release_resource(dev, SYS_RES_MEMORY, sc->udbs_rid,
+		    sc->udbs_res);
+
 	if (sc->msix_res)
 		bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_rid,
 		    sc->msix_res);
@@ -819,6 +1303,10 @@
 	free(sc->sge.ofld_rxq, M_CXGBE);
 	free(sc->sge.ofld_txq, M_CXGBE);
 #endif
+#ifdef DEV_NETMAP
+	free(sc->sge.nm_rxq, M_CXGBE);
+	free(sc->sge.nm_txq, M_CXGBE);
+#endif
 	free(sc->irq, M_CXGBE);
 	free(sc->sge.rxq, M_CXGBE);
 	free(sc->sge.txq, M_CXGBE);
@@ -828,23 +1316,34 @@
 	free(sc->tids.ftid_tab, M_CXGBE);
 	t4_destroy_dma_tag(sc);
 	if (mtx_initialized(&sc->sc_lock)) {
-		mtx_lock(&t4_list_lock);
+		sx_xlock(&t4_list_lock);
 		SLIST_REMOVE(&t4_list, sc, adapter, link);
-		mtx_unlock(&t4_list_lock);
+		sx_xunlock(&t4_list_lock);
 		mtx_destroy(&sc->sc_lock);
 	}
 
+	callout_drain(&sc->sfl_callout);
 	if (mtx_initialized(&sc->tids.ftid_lock))
 		mtx_destroy(&sc->tids.ftid_lock);
 	if (mtx_initialized(&sc->sfl_lock))
 		mtx_destroy(&sc->sfl_lock);
+	if (mtx_initialized(&sc->ifp_lock))
+		mtx_destroy(&sc->ifp_lock);
+	if (mtx_initialized(&sc->reg_lock))
+		mtx_destroy(&sc->reg_lock);
 
+	for (i = 0; i < NUM_MEMWIN; i++) {
+		struct memwin *mw = &sc->memwin[i];
+
+		if (rw_initialized(&mw->mw_lock))
+			rw_destroy(&mw->mw_lock);
+	}
+
 	bzero(sc, sizeof(*sc));
 
 	return (0);
 }
 
-
 static int
 cxgbe_probe(device_t dev)
 {
@@ -859,15 +1358,18 @@
 
 #define T4_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \
     IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \
-    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6)
+    IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6 | IFCAP_HWSTATS)
 #define T4_CAP_ENABLE (T4_CAP)
 
 static int
-cxgbe_attach(device_t dev)
+cxgbe_vi_attach(device_t dev, struct vi_info *vi)
 {
-	struct port_info *pi = device_get_softc(dev);
 	struct ifnet *ifp;
+	struct sbuf *sb;
 
+	vi->xact_addr_filt = -1;
+	callout_init(&vi->tick, 1);
+
 	/* Allocate an ifnet and set it up */
 	ifp = if_alloc(IFT_ETHER);
 	if (ifp == NULL) {
@@ -874,11 +1376,9 @@
 		device_printf(dev, "Cannot allocate ifnet\n");
 		return (ENOMEM);
 	}
-	pi->ifp = ifp;
-	ifp->if_softc = pi;
+	vi->ifp = ifp;
+	ifp->if_softc = vi;
 
-	callout_init(&pi->tick, CALLOUT_MPSAFE);
-
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 
@@ -889,79 +1389,136 @@
 
 	ifp->if_capabilities = T4_CAP;
 #ifdef TCP_OFFLOAD
-	if (is_offload(pi->adapter))
+	if (vi->nofldrxq != 0)
 		ifp->if_capabilities |= IFCAP_TOE;
 #endif
+#ifdef DEV_NETMAP
+	if (vi->nnmrxq != 0)
+		ifp->if_capabilities |= IFCAP_NETMAP;
+#endif
 	ifp->if_capenable = T4_CAP_ENABLE;
 	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6;
 
-	/* Initialize ifmedia for this port */
-	ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
+	ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+	ifp->if_hw_tsomaxsegcount = TX_SGL_SEGS;
+	ifp->if_hw_tsomaxsegsize = 65536;
+
+	/* Initialize ifmedia for this VI */
+	ifmedia_init(&vi->media, IFM_IMASK, cxgbe_media_change,
 	    cxgbe_media_status);
-	build_medialist(pi);
+	build_medialist(vi->pi, &vi->media);
 
-	pi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
+	vi->vlan_c = EVENTHANDLER_REGISTER(vlan_config, cxgbe_vlan_config, ifp,
 	    EVENTHANDLER_PRI_ANY);
 
-	ether_ifattach(ifp, pi->hw_addr);
-
+	ether_ifattach(ifp, vi->hw_addr);
+#ifdef DEV_NETMAP
+	if (ifp->if_capabilities & IFCAP_NETMAP)
+		cxgbe_nm_attach(vi);
+#endif
+	sb = sbuf_new_auto();
+	sbuf_printf(sb, "%d txq, %d rxq (NIC)", vi->ntxq, vi->nrxq);
 #ifdef TCP_OFFLOAD
-	if (is_offload(pi->adapter)) {
-		device_printf(dev,
-		    "%d txq, %d rxq (NIC); %d txq, %d rxq (TOE)\n",
-		    pi->ntxq, pi->nrxq, pi->nofldtxq, pi->nofldrxq);
-	} else
+	if (ifp->if_capabilities & IFCAP_TOE)
+		sbuf_printf(sb, "; %d txq, %d rxq (TOE)",
+		    vi->nofldtxq, vi->nofldrxq);
 #endif
-		device_printf(dev, "%d txq, %d rxq\n", pi->ntxq, pi->nrxq);
+#ifdef DEV_NETMAP
+	if (ifp->if_capabilities & IFCAP_NETMAP)
+		sbuf_printf(sb, "; %d txq, %d rxq (netmap)",
+		    vi->nnmtxq, vi->nnmrxq);
+#endif
+	sbuf_finish(sb);
+	device_printf(dev, "%s\n", sbuf_data(sb));
+	sbuf_delete(sb);
 
+	vi_sysctls(vi);
+
+	return (0);
+}
+
+static int
+cxgbe_attach(device_t dev)
+{
+	struct port_info *pi = device_get_softc(dev);
+	struct adapter *sc = pi->adapter;
+	struct vi_info *vi;
+	int i, rc;
+
+	callout_init_mtx(&pi->tick, &pi->pi_lock, 0);
+
+	rc = cxgbe_vi_attach(dev, &pi->vi[0]);
+	if (rc)
+		return (rc);
+
+	for_each_vi(pi, i, vi) {
+		if (i == 0)
+			continue;
+		vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
+		if (vi->dev == NULL) {
+			device_printf(dev, "failed to add VI %d\n", i);
+			continue;
+		}
+		device_set_softc(vi->dev, vi);
+	}
+
 	cxgbe_sysctls(pi);
 
+	bus_generic_attach(dev);
+
 	return (0);
 }
 
+static void
+cxgbe_vi_detach(struct vi_info *vi)
+{
+	struct ifnet *ifp = vi->ifp;
+
+	ether_ifdetach(ifp);
+
+	if (vi->vlan_c)
+		EVENTHANDLER_DEREGISTER(vlan_config, vi->vlan_c);
+
+	/* Let detach proceed even if these fail. */
+#ifdef DEV_NETMAP
+	if (ifp->if_capabilities & IFCAP_NETMAP)
+		cxgbe_nm_detach(vi);
+#endif
+	cxgbe_uninit_synchronized(vi);
+	callout_drain(&vi->tick);
+	vi_full_uninit(vi);
+
+	ifmedia_removeall(&vi->media);
+	if_free(vi->ifp);
+	vi->ifp = NULL;
+}
+
 static int
 cxgbe_detach(device_t dev)
 {
 	struct port_info *pi = device_get_softc(dev);
 	struct adapter *sc = pi->adapter;
-	struct ifnet *ifp = pi->ifp;
+	int rc;
 
-	/* Tell if_ioctl and if_init that the port is going away */
-	ADAPTER_LOCK(sc);
-	SET_DOOMED(pi);
-	wakeup(&sc->flags);
-	while (IS_BUSY(sc))
-		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
-	SET_BUSY(sc);
-#ifdef INVARIANTS
-	sc->last_op = "t4detach";
-	sc->last_op_thr = curthread;
-#endif
-	ADAPTER_UNLOCK(sc);
+	/* Detach the extra VIs first. */
+	rc = bus_generic_detach(dev);
+	if (rc)
+		return (rc);
+	device_delete_children(dev);
 
-	if (pi->vlan_c)
-		EVENTHANDLER_DEREGISTER(vlan_config, pi->vlan_c);
+	doom_vi(sc, &pi->vi[0]);
 
-	PORT_LOCK(pi);
-	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-	callout_stop(&pi->tick);
-	PORT_UNLOCK(pi);
+	if (pi->flags & HAS_TRACEQ) {
+		sc->traceq = -1;	/* cloner should not create ifnet */
+		t4_tracer_port_detach(sc);
+	}
+
+	cxgbe_vi_detach(&pi->vi[0]);
 	callout_drain(&pi->tick);
 
-	/* Let detach proceed even if these fail. */
-	cxgbe_uninit_synchronized(pi);
-	port_full_uninit(pi);
+	end_synchronized_op(sc, 0);
 
-	ifmedia_removeall(&pi->media);
-	ether_ifdetach(pi->ifp);
-	if_free(pi->ifp);
-
-	ADAPTER_LOCK(sc);
-	CLR_BUSY(sc);
-	wakeup(&sc->flags);
-	ADAPTER_UNLOCK(sc);
-
 	return (0);
 }
 
@@ -968,12 +1525,12 @@
 static void
 cxgbe_init(void *arg)
 {
-	struct port_info *pi = arg;
-	struct adapter *sc = pi->adapter;
+	struct vi_info *vi = arg;
+	struct adapter *sc = vi->pi->adapter;
 
-	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
+	if (begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4init") != 0)
 		return;
-	cxgbe_init_synchronized(pi);
+	cxgbe_init_synchronized(vi);
 	end_synchronized_op(sc, 0);
 }
 
@@ -980,9 +1537,9 @@
 static int
 cxgbe_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
 {
-	int rc = 0, mtu, flags;
-	struct port_info *pi = ifp->if_softc;
-	struct adapter *sc = pi->adapter;
+	int rc = 0, mtu, flags, can_sleep;
+	struct vi_info *vi = ifp->if_softc;
+	struct adapter *sc = vi->pi->adapter;
 	struct ifreq *ifr = (struct ifreq *)data;
 	uint32_t mask;
 
@@ -989,53 +1546,74 @@
 	switch (cmd) {
 	case SIOCSIFMTU:
 		mtu = ifr->ifr_mtu;
-		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
+		if (mtu < ETHERMIN || mtu > MAX_MTU)
 			return (EINVAL);
 
-		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
+		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4mtu");
 		if (rc)
 			return (rc);
 		ifp->if_mtu = mtu;
-		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		if (vi->flags & VI_INIT_DONE) {
 			t4_update_fl_bufsize(ifp);
-			rc = update_mac_settings(pi, XGMAC_MTU);
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				rc = update_mac_settings(ifp, XGMAC_MTU);
 		}
 		end_synchronized_op(sc, 0);
 		break;
 
 	case SIOCSIFFLAGS:
-		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
+		can_sleep = 0;
+redo_sifflags:
+		rc = begin_synchronized_op(sc, vi,
+		    can_sleep ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4flg");
 		if (rc)
 			return (rc);
 
 		if (ifp->if_flags & IFF_UP) {
 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-				flags = pi->if_flags;
+				flags = vi->if_flags;
 				if ((ifp->if_flags ^ flags) &
 				    (IFF_PROMISC | IFF_ALLMULTI)) {
-					rc = update_mac_settings(pi,
+					if (can_sleep == 1) {
+						end_synchronized_op(sc, 0);
+						can_sleep = 0;
+						goto redo_sifflags;
+					}
+					rc = update_mac_settings(ifp,
 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
 				}
-			} else
-				rc = cxgbe_init_synchronized(pi);
-			pi->if_flags = ifp->if_flags;
-		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-			rc = cxgbe_uninit_synchronized(pi);
-		end_synchronized_op(sc, 0);
+			} else {
+				if (can_sleep == 0) {
+					end_synchronized_op(sc, LOCK_HELD);
+					can_sleep = 1;
+					goto redo_sifflags;
+				}
+				rc = cxgbe_init_synchronized(vi);
+			}
+			vi->if_flags = ifp->if_flags;
+		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			if (can_sleep == 0) {
+				end_synchronized_op(sc, LOCK_HELD);
+				can_sleep = 1;
+				goto redo_sifflags;
+			}
+			rc = cxgbe_uninit_synchronized(vi);
+		}
+		end_synchronized_op(sc, can_sleep ? 0 : LOCK_HELD);
 		break;
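
The can_sleep dance above is deliberate: updating the MAC's promiscuous/
allmulti state must be done with the lock held (no sleeping), while bringing
the interface up or down must be able to sleep, so the handler releases the
synchronized op and retries in the other mode whenever it finds it entered
with the wrong one.  The flag passed to end_synchronized_op() has to match
how begin_synchronized_op() was entered, hence the LOCK_HELD bookkeeping.
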
 
-	case SIOCADDMULTI:	
+	case SIOCADDMULTI:
 	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
-		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
+		rc = begin_synchronized_op(sc, vi, HOLD_LOCK, "t4multi");
 		if (rc)
 			return (rc);
 		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-			rc = update_mac_settings(pi, XGMAC_MCADDRS);
+			rc = update_mac_settings(ifp, XGMAC_MCADDRS);
 		end_synchronized_op(sc, LOCK_HELD);
 		break;
 
 	case SIOCSIFCAP:
-		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
+		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4cap");
 		if (rc)
 			return (rc);
 
@@ -1097,7 +1675,7 @@
 			struct sge_rxq *rxq;
 
 			ifp->if_capenable ^= IFCAP_LRO;
-			for_each_rxq(pi, i, rxq) {
+			for_each_rxq(vi, i, rxq) {
 				if (ifp->if_capenable & IFCAP_LRO)
 					rxq->iq.flags |= IQ_LRO_ENABLED;
 				else
@@ -1109,7 +1687,7 @@
 		if (mask & IFCAP_TOE) {
 			int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE;
 
-			rc = toe_capability(pi, enable);
+			rc = toe_capability(vi, enable);
 			if (rc != 0)
 				goto fail;
 
@@ -1119,7 +1697,7 @@
 		if (mask & IFCAP_VLAN_HWTAGGING) {
 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
 			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-				rc = update_mac_settings(pi, XGMAC_VLANEX);
+				rc = update_mac_settings(ifp, XGMAC_VLANEX);
 		}
 		if (mask & IFCAP_VLAN_MTU) {
 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
@@ -1140,9 +1718,35 @@
 
 	case SIOCSIFMEDIA:
 	case SIOCGIFMEDIA:
-		ifmedia_ioctl(ifp, ifr, &pi->media, cmd);
+	case SIOCGIFXMEDIA:
+		ifmedia_ioctl(ifp, ifr, &vi->media, cmd);
 		break;
 
+	case SIOCGI2C: {
+		struct ifi2creq i2c;
+
+		rc = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+		if (rc != 0)
+			break;
+		if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) {
+			rc = EPERM;
+			break;
+		}
+		if (i2c.len > sizeof(i2c.data)) {
+			rc = EINVAL;
+			break;
+		}
+		rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
+		if (rc)
+			return (rc);
+		rc = -t4_i2c_rd(sc, sc->mbox, vi->pi->port_id, i2c.dev_addr,
+		    i2c.offset, i2c.len, &i2c.data[0]);
+		end_synchronized_op(sc, 0);
+		if (rc == 0)
+			rc = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+		break;
+	}
+
 	default:
 		rc = ether_ioctl(ifp, cmd, data);
 	}
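
The SIOCGI2C case added above only accepts the two module addresses defined
by SFF-8472: 0xA0 (serial ID EEPROM) and 0xA2 (diagnostic monitoring).  A
minimal userland sketch of driving the ioctl follows; it is illustrative
only, the interface name is hypothetical, and error handling is omitted.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>		/* struct ifreq, struct ifi2creq */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	struct ifi2creq i2c;
	int s;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	memset(&ifr, 0, sizeof(ifr));
	memset(&i2c, 0, sizeof(i2c));
	strlcpy(ifr.ifr_name, "cc0", sizeof(ifr.ifr_name)); /* hypothetical */
	i2c.dev_addr = 0xa0;	/* SFF-8472 serial ID page */
	i2c.offset = 0;
	i2c.len = 8;		/* must not exceed sizeof(i2c.data) */
	ifr.ifr_data = (caddr_t)&i2c;
	if (ioctl(s, SIOCGI2C, &ifr) == 0)
		printf("module ID byte: 0x%02x\n", i2c.data[0]);
	close(s);
	return (0);
}
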
@@ -1153,13 +1757,15 @@
 static int
 cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
 {
-	struct port_info *pi = ifp->if_softc;
+	struct vi_info *vi = ifp->if_softc;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
-	struct sge_txq *txq = &sc->sge.txq[pi->first_txq];
-	struct buf_ring *br;
+	struct sge_txq *txq;
+	void *items[1];
 	int rc;
 
 	M_ASSERTPKTHDR(m);
+	MPASS(m->m_nextpkt == NULL);	/* not quite ready for this yet */
 
 	if (__predict_false(pi->link_cfg.link_ok == 0)) {
 		m_freem(m);
@@ -1166,54 +1772,24 @@
 		return (ENETDOWN);
 	}
 
-	if (m->m_flags & M_FLOWID)
-		txq += (m->m_pkthdr.flowid % pi->ntxq);
-	br = txq->br;
-
-	if (TXQ_TRYLOCK(txq) == 0) {
-		struct sge_eq *eq = &txq->eq;
-
-		/*
-		 * It is possible that t4_eth_tx finishes up and releases the
-		 * lock between the TRYLOCK above and the drbr_enqueue here.  We
-		 * need to make sure that this mbuf doesn't just sit there in
-		 * the drbr.
-		 */
-
-		rc = drbr_enqueue(ifp, br, m);
-		if (rc == 0 && callout_pending(&eq->tx_callout) == 0 &&
-		    !(eq->flags & EQ_DOOMED))
-			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
+	rc = parse_pkt(sc, &m);
+	if (__predict_false(rc != 0)) {
+		MPASS(m == NULL);			/* was freed already */
+		atomic_add_int(&pi->tx_parse_error, 1);	/* rare, atomic is ok */
 		return (rc);
 	}
 
-	/*
-	 * txq->m is the mbuf that is held up due to a temporary shortage of
-	 * resources and it should be put on the wire first.  Then what's in
-	 * drbr and finally the mbuf that was just passed in to us.
-	 *
-	 * Return code should indicate the fate of the mbuf that was passed in
-	 * this time.
-	 */
+	/* Select a txq. */
+	txq = &sc->sge.txq[vi->first_txq];
+	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
+		txq += ((m->m_pkthdr.flowid % (vi->ntxq - vi->rsrv_noflowq)) +
+		    vi->rsrv_noflowq);
 
-	TXQ_LOCK_ASSERT_OWNED(txq);
-	if (drbr_needs_enqueue(ifp, br) || txq->m) {
+	items[0] = m;
+	rc = mp_ring_enqueue(txq->r, items, 1, 4096);
+	if (__predict_false(rc != 0))
+		m_freem(m);
 
-		/* Queued for transmission. */
-
-		rc = drbr_enqueue(ifp, br, m);
-		m = txq->m ? txq->m : drbr_dequeue(ifp, br);
-		(void) t4_eth_tx(ifp, txq, m);
-		TXQ_UNLOCK(txq);
-		return (rc);
-	}
-
-	/* Direct transmission. */
-	rc = t4_eth_tx(ifp, txq, m);
-	if (rc != 0 && txq->m)
-		rc = 0;	/* held, will be transmitted soon (hopefully) */
-
-	TXQ_UNLOCK(txq);
 	return (rc);
 }
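
Note how the rewritten transmit path reserves the first rsrv_noflowq txqs
for traffic without an RSS hash: unhashed packets stay on the VI's first
txq while hashed packets spread over the remaining ntxq - rsrv_noflowq
queues.  A minimal sketch of the index math (the helper is illustrative,
not part of this commit); e.g. with ntxq = 8 and rsrv_noflowq = 2, hashed
traffic lands on queues 2 through 7 relative to first_txq:

/* Illustrative helper mirroring the selection in cxgbe_transmit(). */
static inline int
pick_txq_idx(uint32_t flowid, int hashed, int ntxq, int rsrv_noflowq)
{
	if (!hashed)
		return (0);	/* the reserved no-flow queue */
	return (flowid % (ntxq - rsrv_noflowq) + rsrv_noflowq);
}
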
 
@@ -1220,20 +1796,23 @@
 static void
 cxgbe_qflush(struct ifnet *ifp)
 {
-	struct port_info *pi = ifp->if_softc;
+	struct vi_info *vi = ifp->if_softc;
 	struct sge_txq *txq;
 	int i;
-	struct mbuf *m;
 
-	/* queues do not exist if !PORT_INIT_DONE. */
-	if (pi->flags & PORT_INIT_DONE) {
-		for_each_txq(pi, i, txq) {
+	/* queues do not exist if !VI_INIT_DONE. */
+	if (vi->flags & VI_INIT_DONE) {
+		for_each_txq(vi, i, txq) {
 			TXQ_LOCK(txq);
-			m_freem(txq->m);
-			txq->m = NULL;
-			while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
-				m_freem(m);
+			txq->eq.flags |= EQ_QFLUSH;
 			TXQ_UNLOCK(txq);
+			while (!mp_ring_is_idle(txq->r)) {
+				mp_ring_check_drainage(txq->r, 0);
+				pause("qflush", 1);
+			}
+			TXQ_LOCK(txq);
+			txq->eq.flags &= ~EQ_QFLUSH;
+			TXQ_UNLOCK(txq);
 		}
 	}
 	if_qflush(ifp);
@@ -1242,9 +1821,9 @@
 static int
 cxgbe_media_change(struct ifnet *ifp)
 {
-	struct port_info *pi = ifp->if_softc;
+	struct vi_info *vi = ifp->if_softc;
 
-	device_printf(pi->dev, "%s unimplemented.\n", __func__);
+	device_printf(vi->dev, "%s unimplemented.\n", __func__);
 
 	return (EOPNOTSUPP);
 }
@@ -1252,15 +1831,12 @@
 static void
 cxgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
 {
-	struct port_info *pi = ifp->if_softc;
-	struct ifmedia_entry *cur = pi->media.ifm_cur;
+	struct vi_info *vi = ifp->if_softc;
+	struct port_info *pi = vi->pi;
+	struct ifmedia_entry *cur;
 	int speed = pi->link_cfg.speed;
-	int data = (pi->port_type << 8) | pi->mod_type;
 
-	if (cur->ifm_data != data) {
-		build_medialist(pi);
-		cur = pi->media.ifm_cur;
-	}
+	cur = vi->media.ifm_cur;
 
 	ifmr->ifm_status = IFM_AVALID;
 	if (!pi->link_cfg.link_ok)
@@ -1273,13 +1849,13 @@
 		return;
 
 	ifmr->ifm_active = IFM_ETHER | IFM_FDX;
-	if (speed == SPEED_10000)
+	if (speed == 10000)
 		ifmr->ifm_active |= IFM_10G_T;
-	else if (speed == SPEED_1000)
+	else if (speed == 1000)
 		ifmr->ifm_active |= IFM_1000_T;
-	else if (speed == SPEED_100)
+	else if (speed == 100)
 		ifmr->ifm_active |= IFM_100_TX;
-	else if (speed == SPEED_10)
+	else if (speed == 10)
 		ifmr->ifm_active |= IFM_10_T;
 	else
 		KASSERT(0, ("%s: link up but speed unknown (%u)", __func__,
@@ -1286,6 +1862,88 @@
 			    speed));
 }
 
+static int
+vcxgbe_probe(device_t dev)
+{
+	char buf[128];
+	struct vi_info *vi = device_get_softc(dev);
+
+	snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
+	    vi - vi->pi->vi);
+	device_set_desc_copy(dev, buf);
+
+	return (BUS_PROBE_DEFAULT);
+}
+
+static int
+vcxgbe_attach(device_t dev)
+{
+	struct vi_info *vi;
+	struct port_info *pi;
+	struct adapter *sc;
+	int func, index, rc;
+	u32 param, val;
+
+	vi = device_get_softc(dev);
+	pi = vi->pi;
+	sc = pi->adapter;
+
+	index = vi - pi->vi;
+	KASSERT(index < nitems(vi_mac_funcs),
+	    ("%s: VI %s doesn't have a MAC func", __func__,
+	    device_get_nameunit(dev)));
+	func = vi_mac_funcs[index];
+	rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
+	    vi->hw_addr, &vi->rss_size, func, 0);
+	if (rc < 0) {
+		device_printf(dev, "Failed to allocate virtual interface "
+		    "for port %d: %d\n", pi->port_id, -rc);
+		return (-rc);
+	}
+	vi->viid = rc;
+	if (chip_id(sc) <= CHELSIO_T5)
+		vi->smt_idx = (rc & 0x7f) << 1;
+	else
+		vi->smt_idx = (rc & 0x7f);
+
+	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_RSSINFO) |
+	    V_FW_PARAMS_PARAM_YZ(vi->viid);
+	rc = t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+	if (rc)
+		vi->rss_base = 0xffff;
+	else {
+		/* MPASS((val >> 16) == rss_size); */
+		vi->rss_base = val & 0xffff;
+	}
+
+	rc = cxgbe_vi_attach(dev, vi);
+	if (rc) {
+		t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
+		return (rc);
+	}
+	return (0);
+}
+
+static int
+vcxgbe_detach(device_t dev)
+{
+	struct vi_info *vi;
+	struct adapter *sc;
+
+	vi = device_get_softc(dev);
+	sc = vi->pi->adapter;
+
+	doom_vi(sc, vi);
+
+	cxgbe_vi_detach(vi);
+	t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
+
+	end_synchronized_op(sc, 0);
+
+	return (0);
+}
+
 void
 t4_fatal_err(struct adapter *sc)
 {
@@ -1295,9 +1953,17 @@
 	    device_get_nameunit(sc->dev));
 }
 
-static int
-map_bars(struct adapter *sc)
+void
+t4_add_adapter(struct adapter *sc)
 {
+	sx_xlock(&t4_list_lock);
+	SLIST_INSERT_HEAD(&t4_list, sc, link);
+	sx_xunlock(&t4_list_lock);
+}
+
+int
+t4_map_bars_0_and_4(struct adapter *sc)
+{
 	sc->regs_rid = PCIR_BAR(0);
 	sc->regs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
 	    &sc->regs_rid, RF_ACTIVE);
@@ -1308,6 +1974,7 @@
 	sc->bt = rman_get_bustag(sc->regs_res);
 	sc->bh = rman_get_bushandle(sc->regs_res);
 	sc->mmio_len = rman_get_size(sc->regs_res);
+	setbit(&sc->doorbells, DOORBELL_KDB);
 
 	sc->msix_rid = PCIR_BAR(4);
 	sc->msix_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
@@ -1320,39 +1987,433 @@
 	return (0);
 }
 
+int
+t4_map_bar_2(struct adapter *sc)
+{
+
+	/*
+	 * T4: only the iWARP driver uses the userspace doorbells.  There is
+	 * no need to map the BAR if RDMA is disabled.
+	 */
+	if (is_t4(sc) && sc->rdmacaps == 0)
+		return (0);
+
+	sc->udbs_rid = PCIR_BAR(2);
+	sc->udbs_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+	    &sc->udbs_rid, RF_ACTIVE);
+	if (sc->udbs_res == NULL) {
+		device_printf(sc->dev, "cannot map doorbell BAR.\n");
+		return (ENXIO);
+	}
+	sc->udbs_base = rman_get_virtual(sc->udbs_res);
+
+	if (chip_id(sc) >= CHELSIO_T5) {
+		setbit(&sc->doorbells, DOORBELL_UDB);
+#if defined(__i386__) || defined(__amd64__)
+		if (t5_write_combine) {
+			int rc, mode;
+
+			/*
+			 * Enable write combining on BAR2.  This is the
+			 * userspace doorbell BAR and is split into 128B
+			 * (UDBS_SEG_SIZE) doorbell regions, each associated
+			 * with an egress queue.  The first 64B has the doorbell
+			 * and the second 64B can be used to submit a tx work
+			 * request with an implicit doorbell.
+			 */
+
+			rc = pmap_change_attr((vm_offset_t)sc->udbs_base,
+			    rman_get_size(sc->udbs_res), PAT_WRITE_COMBINING);
+			if (rc == 0) {
+				clrbit(&sc->doorbells, DOORBELL_UDB);
+				setbit(&sc->doorbells, DOORBELL_WCWR);
+				setbit(&sc->doorbells, DOORBELL_UDBWC);
+			} else {
+				device_printf(sc->dev,
+				    "couldn't enable write combining: %d\n",
+				    rc);
+			}
+
+			mode = is_t5(sc) ? V_STATMODE(0) : V_T6_STATMODE(0);
+			t4_write_reg(sc, A_SGE_STAT_CFG,
+			    V_STATSOURCE_T5(7) | mode);
+		}
+#endif
+	}
+
+	return (0);
+}
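
To make the doorbell layout described in the comment concrete: BAR2 is
carved into 128B (UDBS_SEG_SIZE) segments, one per egress queue, with the
doorbell in the first 64B and room for an inline tx work request in the
second 64B when write combining (DOORBELL_WCWR) is usable.  A sketch of
the address math; the helper and the 64-byte offset constant are
assumptions for illustration, not driver API:

#define SEG_SIZE	128	/* one UDBS_SEG_SIZE segment per egress queue */
#define WR_OFF		64	/* assumed start of the inline-WR half */

/* Illustrative: where queue 'qid' finds its doorbell region in BAR2. */
static inline volatile char *
udb_segment(volatile char *udbs_base, unsigned int qid, int wcwr)
{
	return (udbs_base + (uintptr_t)qid * SEG_SIZE + (wcwr ? WR_OFF : 0));
}
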
+
+struct memwin_init {
+	uint32_t base;
+	uint32_t aperture;
+};
+
+static const struct memwin_init t4_memwin[NUM_MEMWIN] = {
+	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
+	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
+	{ MEMWIN2_BASE_T4, MEMWIN2_APERTURE_T4 }
+};
+
+static const struct memwin_init t5_memwin[NUM_MEMWIN] = {
+	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
+	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
+	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
+};
+
 static void
 setup_memwin(struct adapter *sc)
 {
+	const struct memwin_init *mw_init;
+	struct memwin *mw;
+	int i;
 	uint32_t bar0;
 
-	/*
-	 * Read low 32b of bar0 indirectly via the hardware backdoor mechanism.
-	 * Works from within PCI passthrough environments too, where
-	 * rman_get_start() can return a different value.  We need to program
-	 * the memory window decoders with the actual addresses that will be
-	 * coming across the PCIe link.
-	 */
-	bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
-	bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
+	if (is_t4(sc)) {
+		/*
+		 * Read low 32b of bar0 indirectly via the hardware backdoor
+		 * mechanism.  Works from within PCI passthrough environments
+		 * too, where rman_get_start() can return a different value.  We
+		 * need to program the T4 memory window decoders with the actual
+		 * addresses that will be coming across the PCIe link.
+		 */
+		bar0 = t4_hw_pci_read_cfg4(sc, PCIR_BAR(0));
+		bar0 &= (uint32_t) PCIM_BAR_MEM_BASE;
 
-	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
-	    	     (bar0 + MEMWIN0_BASE) | V_BIR(0) |
-		     V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+		mw_init = &t4_memwin[0];
+	} else {
+		/* T5+ use the relative offset inside the PCIe BAR */
+		bar0 = 0;
 
-	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
-		     (bar0 + MEMWIN1_BASE) | V_BIR(0) |
-		     V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+		mw_init = &t5_memwin[0];
+	}
 
-	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
-		     (bar0 + MEMWIN2_BASE) | V_BIR(0) |
-		     V_WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+	for (i = 0, mw = &sc->memwin[0]; i < NUM_MEMWIN; i++, mw_init++, mw++) {
+		rw_init(&mw->mw_lock, "memory window access");
+		mw->mw_base = mw_init->base;
+		mw->mw_aperture = mw_init->aperture;
+		mw->mw_curpos = 0;
+		t4_write_reg(sc,
+		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
+		    (mw->mw_base + bar0) | V_BIR(0) |
+		    V_WINDOW(ilog2(mw->mw_aperture) - 10));
+		rw_wlock(&mw->mw_lock);
+		position_memwin(sc, i, 0);
+		rw_wunlock(&mw->mw_lock);
+	}
 
 	/* flush */
 	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
 }
 
+/*
+ * Positions the memory window at the given address in the card's address space.
+ * There are some alignment requirements and the actual position may be at an
+ * address prior to the requested address.  mw->mw_curpos always has the actual
+ * position of the window.
+ */
+static void
+position_memwin(struct adapter *sc, int idx, uint32_t addr)
+{
+	struct memwin *mw;
+	uint32_t pf;
+	uint32_t reg;
+
+	MPASS(idx >= 0 && idx < NUM_MEMWIN);
+	mw = &sc->memwin[idx];
+	rw_assert(&mw->mw_lock, RA_WLOCKED);
+
+	if (is_t4(sc)) {
+		pf = 0;
+		mw->mw_curpos = addr & ~0xf;	/* start must be 16B aligned */
+	} else {
+		pf = V_PFNUM(sc->pf);
+		mw->mw_curpos = addr & ~0x7f;	/* start must be 128B aligned */
+	}
+	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
+	t4_write_reg(sc, reg, mw->mw_curpos | pf);
+	t4_read_reg(sc, reg);	/* flush */
+}
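
A worked example of the masking above: asking for address 0x12345 on a T4
leaves mw_curpos = 0x12340 (16B aligned) and the requested byte sits at
offset addr - mw_curpos = 5 within the window; on T5+ the same request
yields mw_curpos = 0x12300 (128B aligned) and offset 0x45.  The start
address reduces to:

/* Illustrative: the start-address masking done by position_memwin(). */
static inline uint32_t
memwin_start(struct adapter *sc, uint32_t addr)
{
	return (is_t4(sc) ? addr & ~0xfU : addr & ~0x7fU);
}
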
+
 static int
-cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
+rw_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
+    int len, int rw)
+{
+	struct memwin *mw;
+	uint32_t mw_end, v;
+
+	MPASS(idx >= 0 && idx < NUM_MEMWIN);
+
+	/* Memory can only be accessed in naturally aligned 4 byte units */
+	if (addr & 3 || len & 3 || len <= 0)
+		return (EINVAL);
+
+	mw = &sc->memwin[idx];
+	while (len > 0) {
+		rw_rlock(&mw->mw_lock);
+		mw_end = mw->mw_curpos + mw->mw_aperture;
+		if (addr >= mw_end || addr < mw->mw_curpos) {
+			/* Will need to reposition the window */
+			if (!rw_try_upgrade(&mw->mw_lock)) {
+				rw_runlock(&mw->mw_lock);
+				rw_wlock(&mw->mw_lock);
+			}
+			rw_assert(&mw->mw_lock, RA_WLOCKED);
+			position_memwin(sc, idx, addr);
+			rw_downgrade(&mw->mw_lock);
+			mw_end = mw->mw_curpos + mw->mw_aperture;
+		}
+		rw_assert(&mw->mw_lock, RA_RLOCKED);
+		while (addr < mw_end && len > 0) {
+			if (rw == 0) {
+				v = t4_read_reg(sc, mw->mw_base + addr -
+				    mw->mw_curpos);
+				*val++ = le32toh(v);
+			} else {
+				v = *val++;
+				t4_write_reg(sc, mw->mw_base + addr -
+				    mw->mw_curpos, htole32(v));
+			}
+			addr += 4;
+			len -= 4;
+		}
+		rw_runlock(&mw->mw_lock);
+	}
+
+	return (0);
+}
+
+static inline int
+read_via_memwin(struct adapter *sc, int idx, uint32_t addr, uint32_t *val,
+    int len)
+{
+
+	return (rw_via_memwin(sc, idx, addr, val, len, 0));
+}
+
+static inline int
+write_via_memwin(struct adapter *sc, int idx, uint32_t addr,
+    const uint32_t *val, int len)
+{
+
+	return (rw_via_memwin(sc, idx, addr, (void *)(uintptr_t)val, len, 1));
+}
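
A short usage sketch of the wrappers above (the target address is made up
for illustration); rw_via_memwin() rejects anything that is not a positive
multiple of 4 bytes at a 4-byte aligned address:

	uint32_t buf[4];
	int rc;

	rc = read_via_memwin(sc, 2, 0x1000, buf, sizeof(buf));
	if (rc != 0)
		device_printf(sc->dev, "memwin read failed: %d\n", rc);
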
+
+static int
+t4_range_cmp(const void *a, const void *b)
+{
+	return ((const struct t4_range *)a)->start -
+	       ((const struct t4_range *)b)->start;
+}
+
+/*
+ * Verify that the memory range specified by the addr/len pair is valid within
+ * the card's address space.
+ */
+static int
+validate_mem_range(struct adapter *sc, uint32_t addr, int len)
+{
+	struct t4_range mem_ranges[4], *r, *next;
+	uint32_t em, addr_len;
+	int i, n, remaining;
+
+	/* Memory can only be accessed in naturally aligned 4 byte units */
+	if (addr & 3 || len & 3 || len <= 0)
+		return (EINVAL);
+
+	/* Enabled memories */
+	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
+
+	r = &mem_ranges[0];
+	n = 0;
+	bzero(r, sizeof(mem_ranges));
+	if (em & F_EDRAM0_ENABLE) {
+		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
+		r->size = G_EDRAM0_SIZE(addr_len) << 20;
+		if (r->size > 0) {
+			r->start = G_EDRAM0_BASE(addr_len) << 20;
+			if (addr >= r->start &&
+			    addr + len <= r->start + r->size)
+				return (0);
+			r++;
+			n++;
+		}
+	}
+	if (em & F_EDRAM1_ENABLE) {
+		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
+		r->size = G_EDRAM1_SIZE(addr_len) << 20;
+		if (r->size > 0) {
+			r->start = G_EDRAM1_BASE(addr_len) << 20;
+			if (addr >= r->start &&
+			    addr + len <= r->start + r->size)
+				return (0);
+			r++;
+			n++;
+		}
+	}
+	if (em & F_EXT_MEM_ENABLE) {
+		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
+		r->size = G_EXT_MEM_SIZE(addr_len) << 20;
+		if (r->size > 0) {
+			r->start = G_EXT_MEM_BASE(addr_len) << 20;
+			if (addr >= r->start &&
+			    addr + len <= r->start + r->size)
+				return (0);
+			r++;
+			n++;
+		}
+	}
+	if (is_t5(sc) && em & F_EXT_MEM1_ENABLE) {
+		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+		r->size = G_EXT_MEM1_SIZE(addr_len) << 20;
+		if (r->size > 0) {
+			r->start = G_EXT_MEM1_BASE(addr_len) << 20;
+			if (addr >= r->start &&
+			    addr + len <= r->start + r->size)
+				return (0);
+			r++;
+			n++;
+		}
+	}
+	MPASS(n <= nitems(mem_ranges));
+
+	if (n > 1) {
+		/* Sort and merge the ranges. */
+		qsort(mem_ranges, n, sizeof(struct t4_range), t4_range_cmp);
+
+		/* Start from index 0 and examine the next n - 1 entries. */
+		r = &mem_ranges[0];
+		for (remaining = n - 1; remaining > 0; remaining--, r++) {
+
+			MPASS(r->size > 0);	/* r is a valid entry. */
+			next = r + 1;
+			MPASS(next->size > 0);	/* and so is the next one. */
+
+			while (r->start + r->size >= next->start) {
+				/* Merge the next one into the current entry. */
+				r->size = max(r->start + r->size,
+				    next->start + next->size) - r->start;
+				n--;	/* One fewer entry in total. */
+				if (--remaining == 0)
+					goto done;	/* short circuit */
+				next++;
+			}
+			if (next != r + 1) {
+				/*
+				 * Some entries were merged into r and next
+				 * points to the first valid entry that couldn't
+				 * be merged.
+				 */
+				MPASS(next->size > 0);	/* must be valid */
+				memcpy(r + 1, next, remaining * sizeof(*r));
+#ifdef INVARIANTS
+				/*
+				 * This is so that the foo->size assertions in
+				 * the next iteration of the loop do the right
+				 * thing for entries that were pulled up and
+				 * are no longer valid.
+				 */
+				MPASS(n < nitems(mem_ranges));
+				bzero(&mem_ranges[n], (nitems(mem_ranges) - n) *
+				    sizeof(struct t4_range));
+#endif
+			}
+		}
+done:
+		/* Done merging the ranges. */
+		MPASS(n > 0);
+		r = &mem_ranges[0];
+		for (i = 0; i < n; i++, r++) {
+			if (addr >= r->start &&
+			    addr + len <= r->start + r->size)
+				return (0);
+		}
+	}
+
+	return (EFAULT);
+}
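
A worked example of why the sort-and-merge matters (sizes are made up):
with EDC0 = [0x00000000, +256MB) and MC0 = [0x10000000, +512MB) neither
per-range check accepts an access that straddles 0x10000000, but the two
entries are exactly adjacent, so the '>=' in the merge condition coalesces
them into [0x00000000, +768MB) and such an access then validates.
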
+
+static int
+fwmtype_to_hwmtype(int mtype)
+{
+
+	switch (mtype) {
+	case FW_MEMTYPE_EDC0:
+		return (MEM_EDC0);
+	case FW_MEMTYPE_EDC1:
+		return (MEM_EDC1);
+	case FW_MEMTYPE_EXTMEM:
+		return (MEM_MC0);
+	case FW_MEMTYPE_EXTMEM1:
+		return (MEM_MC1);
+	default:
+		panic("%s: cannot translate fw mtype %d.", __func__, mtype);
+	}
+}
+
+/*
+ * Verify that the memory range specified by the memtype/offset/len pair is
+ * valid and lies entirely within the memtype specified.  The global address of
+ * the start of the range is returned in addr.
+ */
+static int
+validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
+    uint32_t *addr)
+{
+	uint32_t em, addr_len, maddr;
+
+	/* Memory can only be accessed in naturally aligned 4 byte units */
+	if (off & 3 || len & 3 || len == 0)
+		return (EINVAL);
+
+	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
+	switch (fwmtype_to_hwmtype(mtype)) {
+	case MEM_EDC0:
+		if (!(em & F_EDRAM0_ENABLE))
+			return (EINVAL);
+		addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
+		maddr = G_EDRAM0_BASE(addr_len) << 20;
+		break;
+	case MEM_EDC1:
+		if (!(em & F_EDRAM1_ENABLE))
+			return (EINVAL);
+		addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
+		maddr = G_EDRAM1_BASE(addr_len) << 20;
+		break;
+	case MEM_MC:
+		if (!(em & F_EXT_MEM_ENABLE))
+			return (EINVAL);
+		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
+		maddr = G_EXT_MEM_BASE(addr_len) << 20;
+		break;
+	case MEM_MC1:
+		if (!is_t5(sc) || !(em & F_EXT_MEM1_ENABLE))
+			return (EINVAL);
+		addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+		maddr = G_EXT_MEM1_BASE(addr_len) << 20;
+		break;
+	default:
+		return (EINVAL);
+	}
+
+	*addr = maddr + off;	/* global address */
+	return (validate_mem_range(sc, *addr, len));
+}
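
Usage sketch, mirroring what partition_resources() does further down with
the config file: translate the firmware's (memtype, offset) pair into a
global address, validate it, then write through memory window 2 (variable
names follow that caller):

	uint32_t addr;
	int rc;

	rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
	if (rc == 0)
		write_via_memwin(sc, 2, addr, cfdata, cflen);
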
+
+static int
+fixup_devlog_params(struct adapter *sc)
+{
+	struct devlog_params *dparams = &sc->params.devlog;
+	int rc;
+
+	rc = validate_mt_off_len(sc, dparams->memtype, dparams->start,
+	    dparams->size, &dparams->addr);
+
+	return (rc);
+}
+
+static int
+cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, int num_vis,
     struct intrs_and_queues *iaq)
 {
 	int rc, itype, navail, nrxq10g, nrxq1g, n;
@@ -1362,16 +2423,25 @@
 
 	iaq->ntxq10g = t4_ntxq10g;
 	iaq->ntxq1g = t4_ntxq1g;
+	iaq->ntxq_vi = t4_ntxq_vi;
 	iaq->nrxq10g = nrxq10g = t4_nrxq10g;
 	iaq->nrxq1g = nrxq1g = t4_nrxq1g;
+	iaq->nrxq_vi = t4_nrxq_vi;
+	iaq->rsrv_noflowq = t4_rsrv_noflowq;
 #ifdef TCP_OFFLOAD
 	if (is_offload(sc)) {
 		iaq->nofldtxq10g = t4_nofldtxq10g;
 		iaq->nofldtxq1g = t4_nofldtxq1g;
+		iaq->nofldtxq_vi = t4_nofldtxq_vi;
 		iaq->nofldrxq10g = nofldrxq10g = t4_nofldrxq10g;
 		iaq->nofldrxq1g = nofldrxq1g = t4_nofldrxq1g;
+		iaq->nofldrxq_vi = t4_nofldrxq_vi;
 	}
 #endif
+#ifdef DEV_NETMAP
+	iaq->nnmtxq_vi = t4_nnmtxq_vi;
+	iaq->nnmrxq_vi = t4_nnmrxq_vi;
+#endif
 
 	for (itype = INTR_MSIX; itype; itype >>= 1) {
 
@@ -1389,30 +2459,68 @@
 			continue;
 
 		iaq->intr_type = itype;
-		iaq->intr_flags = 0;
+		iaq->intr_flags_10g = 0;
+		iaq->intr_flags_1g = 0;
 
 		/*
 		 * Best option: an interrupt vector for errors, one for the
-		 * firmware event queue, and one each for each rxq (NIC as well
-		 * as offload).
+		 * firmware event queue, and one for every rxq (NIC and TOE) of
+		 * every VI.  The VIs that support netmap use the same
+		 * interrupts for the NIC rx queues and the netmap rx queues
+		 * because only one set of queues is active at a time.
 		 */
 		iaq->nirq = T4_EXTRA_INTR;
 		iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
 		iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
+		iaq->nirq += (n10g + n1g) * (num_vis - 1) *
+		    max(iaq->nrxq_vi, iaq->nnmrxq_vi);	/* See comment above. */
+		iaq->nirq += (n10g + n1g) * (num_vis - 1) * iaq->nofldrxq_vi;
 		if (iaq->nirq <= navail &&
 		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
-			iaq->intr_flags |= INTR_DIRECT;
+			iaq->intr_flags_10g = INTR_ALL;
+			iaq->intr_flags_1g = INTR_ALL;
 			goto allocate;
 		}
 
+		/* Disable the VIs (and netmap) if there aren't enough intrs */
+		if (num_vis > 1) {
+			device_printf(sc->dev, "virtual interfaces disabled "
+			    "because num_vis=%u with current settings "
+			    "(nrxq10g=%u, nrxq1g=%u, nofldrxq10g=%u, "
+			    "nofldrxq1g=%u, nrxq_vi=%u nofldrxq_vi=%u, "
+			    "nnmrxq_vi=%u) would need %u interrupts but "
+			    "only %u are available.\n", num_vis, nrxq10g,
+			    nrxq1g, nofldrxq10g, nofldrxq1g, iaq->nrxq_vi,
+			    iaq->nofldrxq_vi, iaq->nnmrxq_vi, iaq->nirq,
+			    navail);
+			num_vis = 1;
+			iaq->ntxq_vi = iaq->nrxq_vi = 0;
+			iaq->nofldtxq_vi = iaq->nofldrxq_vi = 0;
+			iaq->nnmtxq_vi = iaq->nnmrxq_vi = 0;
+			goto restart;
+		}
+
 		/*
-		 * Second best option: an interrupt vector for errors, one for
-		 * the firmware event queue, and one each for either NIC or
-		 * offload rxq's.
+		 * Second best option: a vector for errors, one for the firmware
+		 * event queue, and vectors for either all the NIC rx queues or
+		 * all the TOE rx queues.  The queues that don't get vectors
+		 * will forward their interrupts to those that do.
 		 */
 		iaq->nirq = T4_EXTRA_INTR;
-		iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
-		iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
+		if (nrxq10g >= nofldrxq10g) {
+			iaq->intr_flags_10g = INTR_RXQ;
+			iaq->nirq += n10g * nrxq10g;
+		} else {
+			iaq->intr_flags_10g = INTR_OFLD_RXQ;
+			iaq->nirq += n10g * nofldrxq10g;
+		}
+		if (nrxq1g >= nofldrxq1g) {
+			iaq->intr_flags_1g = INTR_RXQ;
+			iaq->nirq += n1g * nrxq1g;
+		} else {
+			iaq->intr_flags_1g = INTR_OFLD_RXQ;
+			iaq->nirq += n1g * nofldrxq1g;
+		}
 		if (iaq->nirq <= navail &&
 		    (itype != INTR_MSI || powerof2(iaq->nirq)))
 			goto allocate;
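
For a feel of the vector budget in this best case, assuming T4_EXTRA_INTR
is 2 (the error interrupt plus the firmware event queue, per the comment):
a hypothetical adapter with two 10G ports, nrxq10g = 8, nofldrxq10g = 2
and num_vis = 1 needs nirq = 2 + 2 * (8 + 2) = 22 vectors, and under MSI
the count would additionally have to be a power of 2 to be acceptable.
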
@@ -1419,9 +2527,9 @@
 
 		/*
 		 * Next best option: an interrupt vector for errors, one for the
-		 * firmware event queue, and at least one per port.  At this
-		 * point we know we'll have to downsize nrxq or nofldrxq to fit
-		 * what's available to us.
+		 * firmware event queue, and at least one per main-VI.  At this
+		 * point we know we'll have to downsize nrxq and/or nofldrxq to
+		 * fit what's available to us.
 		 */
 		iaq->nirq = T4_EXTRA_INTR;
 		iaq->nirq += n10g + n1g;
@@ -1431,6 +2539,9 @@
 			if (n10g > 0) {
 				int target = max(nrxq10g, nofldrxq10g);
 
+				iaq->intr_flags_10g = nrxq10g >= nofldrxq10g ?
+				    INTR_RXQ : INTR_OFLD_RXQ;
+
 				n = 1;
 				while (n < target && leftover >= n10g) {
 					leftover -= n10g;
@@ -1439,8 +2550,7 @@
 				}
 				iaq->nrxq10g = min(n, nrxq10g);
 #ifdef TCP_OFFLOAD
-				if (is_offload(sc))
-					iaq->nofldrxq10g = min(n, nofldrxq10g);
+				iaq->nofldrxq10g = min(n, nofldrxq10g);
 #endif
 			}
 
@@ -1447,6 +2557,9 @@
 			if (n1g > 0) {
 				int target = max(nrxq1g, nofldrxq1g);
 
+				iaq->intr_flags_1g = nrxq1g >= nofldrxq1g ?
+				    INTR_RXQ : INTR_OFLD_RXQ;
+
 				n = 1;
 				while (n < target && leftover >= n1g) {
 					leftover -= n1g;
@@ -1455,8 +2568,7 @@
 				}
 				iaq->nrxq1g = min(n, nrxq1g);
 #ifdef TCP_OFFLOAD
-				if (is_offload(sc))
-					iaq->nofldrxq1g = min(n, nofldrxq1g);
+				iaq->nofldrxq1g = min(n, nofldrxq1g);
 #endif
 			}
 
@@ -1468,11 +2580,11 @@
 		 * Least desirable option: one interrupt vector for everything.
 		 */
 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
+		iaq->intr_flags_10g = iaq->intr_flags_1g = 0;
 #ifdef TCP_OFFLOAD
 		if (is_offload(sc))
 			iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
 #endif
-
 allocate:
 		navail = iaq->nirq;
 		rc = 0;
@@ -1509,14 +2621,92 @@
 	return (ENXIO);
 }
 
+#define FW_VERSION(chip) ( \
+    V_FW_HDR_FW_VER_MAJOR(chip##FW_VERSION_MAJOR) | \
+    V_FW_HDR_FW_VER_MINOR(chip##FW_VERSION_MINOR) | \
+    V_FW_HDR_FW_VER_MICRO(chip##FW_VERSION_MICRO) | \
+    V_FW_HDR_FW_VER_BUILD(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (chip##FW_HDR_INTFVER_##intf)
+
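
FW_VERSION() packs the four bundled-version components into the single
32-bit word carried in struct fw_hdr; the G_FW_HDR_FW_VER_* macros used
elsewhere in this file unpack it again.  A sketch of the layout this
relies on (the authoritative shifts live in firmware/t4fw_interface.h;
the 8-bit fields below are an assumption for illustration):

/* Assumed layout: | major 31:24 | minor 23:16 | micro 15:8 | build 7:0 | */
static inline unsigned int fw_ver_major(uint32_t v) { return ((v >> 24) & 0xff); }
static inline unsigned int fw_ver_minor(uint32_t v) { return ((v >> 16) & 0xff); }
static inline unsigned int fw_ver_micro(uint32_t v) { return ((v >> 8) & 0xff); }
static inline unsigned int fw_ver_build(uint32_t v) { return (v & 0xff); }
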
+struct fw_info {
+	uint8_t chip;
+	char *kld_name;
+	char *fw_mod_name;
+	struct fw_hdr fw_hdr;	/* XXX: waste of space, need a sparse struct */
+} fw_info[] = {
+	{
+		.chip = CHELSIO_T4,
+		.kld_name = "t4fw_cfg",
+		.fw_mod_name = "t4fw",
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T4,
+			.fw_ver = htobe32_const(FW_VERSION(T4)),
+			.intfver_nic = FW_INTFVER(T4, NIC),
+			.intfver_vnic = FW_INTFVER(T4, VNIC),
+			.intfver_ofld = FW_INTFVER(T4, OFLD),
+			.intfver_ri = FW_INTFVER(T4, RI),
+			.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU),
+			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
+			.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU),
+			.intfver_fcoe = FW_INTFVER(T4, FCOE),
+		},
+	}, {
+		.chip = CHELSIO_T5,
+		.kld_name = "t5fw_cfg",
+		.fw_mod_name = "t5fw",
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T5,
+			.fw_ver = htobe32_const(FW_VERSION(T5)),
+			.intfver_nic = FW_INTFVER(T5, NIC),
+			.intfver_vnic = FW_INTFVER(T5, VNIC),
+			.intfver_ofld = FW_INTFVER(T5, OFLD),
+			.intfver_ri = FW_INTFVER(T5, RI),
+			.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU),
+			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
+			.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU),
+			.intfver_fcoe = FW_INTFVER(T5, FCOE),
+		},
+	}, {
+		.chip = CHELSIO_T6,
+		.kld_name = "t6fw_cfg",
+		.fw_mod_name = "t6fw",
+		.fw_hdr = {
+			.chip = FW_HDR_CHIP_T6,
+			.fw_ver = htobe32_const(FW_VERSION(T6)),
+			.intfver_nic = FW_INTFVER(T6, NIC),
+			.intfver_vnic = FW_INTFVER(T6, VNIC),
+			.intfver_ofld = FW_INTFVER(T6, OFLD),
+			.intfver_ri = FW_INTFVER(T6, RI),
+			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
+			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+			.intfver_fcoe = FW_INTFVER(T6, FCOE),
+		},
+	}
+};
+
+static struct fw_info *
+find_fw_info(int chip)
+{
+	int i;
+
+	for (i = 0; i < nitems(fw_info); i++) {
+		if (fw_info[i].chip == chip)
+			return (&fw_info[i]);
+	}
+	return (NULL);
+}
+
 /*
- * Is the given firmware compatible with the one the driver was compiled with?
+ * Is the given firmware API compatible with the one the driver was compiled
+ * with?
  */
 static int
-fw_compatible(const struct fw_hdr *hdr)
+fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
 {
 
-	if (hdr->fw_ver == htonl(FW_VERSION))
+	/* short circuit if it's the exact same firmware version */
+	if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
 		return (1);
 
 	/*
@@ -1523,42 +2713,123 @@
 	 * XXX: Is this too conservative?  Perhaps I should limit this to the
 	 * features that are supported in the driver.
 	 */
-	if (hdr->intfver_nic == FW_HDR_INTFVER_NIC &&
-	    hdr->intfver_vnic == FW_HDR_INTFVER_VNIC &&
-	    hdr->intfver_ofld == FW_HDR_INTFVER_OFLD &&
-	    hdr->intfver_ri == FW_HDR_INTFVER_RI &&
-	    hdr->intfver_iscsipdu == FW_HDR_INTFVER_ISCSIPDU &&
-	    hdr->intfver_iscsi == FW_HDR_INTFVER_ISCSI &&
-	    hdr->intfver_fcoepdu == FW_HDR_INTFVER_FCOEPDU &&
-	    hdr->intfver_fcoe == FW_HDR_INTFVER_FCOEPDU)
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+	if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+	    SAME_INTF(ofld) && SAME_INTF(ri) && SAME_INTF(iscsipdu) &&
+	    SAME_INTF(iscsi) && SAME_INTF(fcoepdu) && SAME_INTF(fcoe))
 		return (1);
+#undef SAME_INTF
 
 	return (0);
 }
 
 /*
- * Install a compatible firmware (if required), establish contact with it (by
- * saying hello), and reset the device.  If we end up as the master driver,
- * partition adapter resources by providing a configuration file to the
- * firmware.
+ * The firmware in the KLD is usable, but should it be installed?  This routine
+ * explains itself in detail if it indicates the KLD firmware should be
+ * installed.
  */
 static int
+should_install_kld_fw(struct adapter *sc, int card_fw_usable, int k, int c)
+{
+	const char *reason;
+
+	if (!card_fw_usable) {
+		reason = "incompatible or unusable";
+		goto install;
+	}
+
+	if (k > c) {
+		reason = "older than the version bundled with this driver";
+		goto install;
+	}
+
+	if (t4_fw_install == 2 && k != c) {
+		reason = "different than the version bundled with this driver";
+		goto install;
+	}
+
+	return (0);
+
+install:
+	if (t4_fw_install == 0) {
+		device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
+		    "but the driver is prohibited from installing a different "
+		    "firmware on the card.\n",
+		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
+		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason);
+
+		return (0);
+	}
+
+	device_printf(sc->dev, "firmware on card (%u.%u.%u.%u) is %s, "
+	    "installing firmware %u.%u.%u.%u on card.\n",
+	    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
+	    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c), reason,
+	    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
+	    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
+
+	return (1);
+}
+
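
In table form, with k = KLD firmware version and c = card firmware version
(prep_firmware() below only consults this routine when the KLD firmware is
usable and the chip is uninitialized):

	card fw usable?   version check   t4_fw_install   install KLD fw?
	no                (any)           1 or 2          yes
	yes               k > c           1 or 2          yes
	yes               k != c          2               yes
	(any row above)   (same)          0               no, prints why not
	yes               otherwise       (any)           no, silently
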
+/*
+ * Establish contact with the firmware and determine if we are the master driver
+ * or not, and whether we are responsible for chip initialization.
+ */
+static int
 prep_firmware(struct adapter *sc)
 {
-	const struct firmware *fw = NULL, *cfg = NULL, *default_cfg;
-	int rc, card_fw_usable, kld_fw_usable;
+	const struct firmware *fw = NULL, *default_cfg;
+	int rc, pf, card_fw_usable, kld_fw_usable, need_fw_reset = 1;
 	enum dev_state state;
-	struct fw_hdr *card_fw;
-	const struct fw_hdr *kld_fw;
+	struct fw_info *fw_info;
+	struct fw_hdr *card_fw;		/* fw on the card */
+	const struct fw_hdr *kld_fw;	/* fw in the KLD */
+	const struct fw_hdr *drv_fw;	/* fw header the driver was compiled
+					   against */
 
-	default_cfg = firmware_get(T4_CFGNAME);
+	/* Contact firmware. */
+	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
+	if (rc < 0 || state == DEV_STATE_ERR) {
+		rc = -rc;
+		device_printf(sc->dev,
+		    "failed to connect to the firmware: %d, %d.\n", rc, state);
+		return (rc);
+	}
+	pf = rc;
+	if (pf == sc->mbox)
+		sc->flags |= MASTER_PF;
+	else if (state == DEV_STATE_UNINIT) {
+		/*
+		 * We didn't get to be the master so we definitely won't be
+		 * configuring the chip.  It's a bug if someone else hasn't
+		 * configured it already.
+		 */
+		device_printf(sc->dev, "couldn't be master(%d), "
+		    "device not already initialized either(%d).\n", rc, state);
+		return (EDOOFUS);
+	}
 
+	/* This is the firmware whose headers the driver was compiled against */
+	fw_info = find_fw_info(chip_id(sc));
+	if (fw_info == NULL) {
+		device_printf(sc->dev,
+		    "unable to look up firmware information for chip %d.\n",
+		    chip_id(sc));
+		return (EINVAL);
+	}
+	drv_fw = &fw_info->fw_hdr;
+
+	/*
+	 * The firmware KLD contains many modules.  The KLD name is also the
+	 * name of the module that contains the default config file.
+	 */
+	default_cfg = firmware_get(fw_info->kld_name);
+
 	/* Read the header of the firmware on the card */
 	card_fw = malloc(sizeof(*card_fw), M_CXGBE, M_ZERO | M_WAITOK);
 	rc = -t4_read_flash(sc, FLASH_FW_START,
 	    sizeof (*card_fw) / sizeof (uint32_t), (uint32_t *)card_fw, 1);
 	if (rc == 0)
-		card_fw_usable = fw_compatible((const void*)card_fw);
+		card_fw_usable = fw_compatible(drv_fw, (const void*)card_fw);
 	else {
 		device_printf(sc->dev,
 		    "Unable to read card's firmware header: %d\n", rc);
@@ -1566,38 +2837,29 @@
 	}
 
 	/* This is the firmware in the KLD */
-	fw = firmware_get(T4_FWNAME);
+	fw = firmware_get(fw_info->fw_mod_name);
 	if (fw != NULL) {
 		kld_fw = (const void *)fw->data;
-		kld_fw_usable = fw_compatible(kld_fw);
+		kld_fw_usable = fw_compatible(drv_fw, kld_fw);
 	} else {
 		kld_fw = NULL;
 		kld_fw_usable = 0;
 	}
 
-	/*
-	 * Short circuit for the common case: the firmware on the card is an
-	 * exact match and the KLD is an exact match too, or it's
-	 * absent/incompatible, or we're prohibited from using it.  Note that
-	 * t4_fw_install = 2 is ignored here -- use cxgbetool loadfw if you want
-	 * to reinstall the same firmware as the one on the card.
-	 */
-	if (card_fw_usable && card_fw->fw_ver == htonl(FW_VERSION) &&
-	    (!kld_fw_usable || kld_fw->fw_ver == htonl(FW_VERSION) ||
-	    t4_fw_install == 0))
-		goto hello;
+	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+	    (!kld_fw_usable || kld_fw->fw_ver == drv_fw->fw_ver)) {
+		/*
+		 * Common case: the firmware on the card is an exact match and
+		 * the KLD is an exact match too, or the KLD is
+		 * absent/incompatible.  Note that t4_fw_install = 2 is ignored
+		 * here -- use cxgbetool loadfw if you want to reinstall the
+		 * same firmware as the one on the card.
+		 */
+	} else if (kld_fw_usable && state == DEV_STATE_UNINIT &&
+	    should_install_kld_fw(sc, card_fw_usable, be32toh(kld_fw->fw_ver),
+	    be32toh(card_fw->fw_ver))) {
 
-	if (kld_fw_usable && (!card_fw_usable ||
-	    ntohl(kld_fw->fw_ver) > ntohl(card_fw->fw_ver) ||
-	    (t4_fw_install == 2 && kld_fw->fw_ver != card_fw->fw_ver))) {
-		uint32_t v = ntohl(kld_fw->fw_ver);
-
-		device_printf(sc->dev,
-		    "installing firmware %d.%d.%d.%d on card.\n",
-		    G_FW_HDR_FW_VER_MAJOR(v), G_FW_HDR_FW_VER_MINOR(v),
-		    G_FW_HDR_FW_VER_MICRO(v), G_FW_HDR_FW_VER_BUILD(v));
-
-		rc = -t4_load_fw(sc, fw->data, fw->datasize);
+		rc = -t4_fw_upgrade(sc, sc->mbox, fw->data, fw->datasize, 0);
 		if (rc != 0) {
 			device_printf(sc->dev,
 			    "failed to install firmware: %d\n", rc);
@@ -1607,92 +2869,73 @@
 		/* Installed successfully, update the cached header too. */
 		memcpy(card_fw, kld_fw, sizeof(*card_fw));
 		card_fw_usable = 1;
+		need_fw_reset = 0;	/* already reset as part of load_fw */
 	}
 
 	if (!card_fw_usable) {
-		uint32_t c, k;
+		uint32_t d, c, k;
 
+		d = ntohl(drv_fw->fw_ver);
 		c = ntohl(card_fw->fw_ver);
 		k = kld_fw ? ntohl(kld_fw->fw_ver) : 0;
 
 		device_printf(sc->dev, "Cannot find a usable firmware: "
-		    "fw_install %d, driver compiled with %d.%d.%d.%d, "
+		    "fw_install %d, chip state %d, "
+		    "driver compiled with %d.%d.%d.%d, "
 		    "card has %d.%d.%d.%d, KLD has %d.%d.%d.%d\n",
-		    t4_fw_install,
-		    G_FW_HDR_FW_VER_MAJOR(FW_VERSION),
-		    G_FW_HDR_FW_VER_MINOR(FW_VERSION),
-		    G_FW_HDR_FW_VER_MICRO(FW_VERSION),
-		    G_FW_HDR_FW_VER_BUILD(FW_VERSION),
+		    t4_fw_install, state,
+		    G_FW_HDR_FW_VER_MAJOR(d), G_FW_HDR_FW_VER_MINOR(d),
+		    G_FW_HDR_FW_VER_MICRO(d), G_FW_HDR_FW_VER_BUILD(d),
 		    G_FW_HDR_FW_VER_MAJOR(c), G_FW_HDR_FW_VER_MINOR(c),
 		    G_FW_HDR_FW_VER_MICRO(c), G_FW_HDR_FW_VER_BUILD(c),
 		    G_FW_HDR_FW_VER_MAJOR(k), G_FW_HDR_FW_VER_MINOR(k),
 		    G_FW_HDR_FW_VER_MICRO(k), G_FW_HDR_FW_VER_BUILD(k));
+		rc = EINVAL;
 		goto done;
 	}
 
-hello:
-	/* We're using whatever's on the card and it's known to be good. */
-	sc->params.fw_vers = ntohl(card_fw->fw_ver);
-	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
-	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
-	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
-	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
-	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
-
-	/* Contact firmware.  */
-	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MAY, &state);
-	if (rc < 0) {
-		rc = -rc;
-		device_printf(sc->dev,
-		    "failed to connect to the firmware: %d.\n", rc);
-		goto done;
-	}
-	if (rc == sc->mbox)
-		sc->flags |= MASTER_PF;
-
 	/* Reset device */
-	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
-	if (rc != 0) {
+	if (need_fw_reset &&
+	    (rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST)) != 0) {
 		device_printf(sc->dev, "firmware reset failed: %d.\n", rc);
 		if (rc != ETIMEDOUT && rc != EIO)
 			t4_fw_bye(sc, sc->mbox);
 		goto done;
 	}
+	sc->flags |= FW_OK;
 
+	rc = get_params__pre_init(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
 	/* Partition adapter resources as specified in the config file. */
-	if (sc->flags & MASTER_PF) {
-		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s",
-		    pci_get_device(sc->dev) == 0x440a ? "uwire" : t4_cfg_file);
-		if (strncmp(sc->cfg_file, "default", sizeof(sc->cfg_file))) {
-			char s[32];
+	if (state == DEV_STATE_UNINIT) {
 
-			snprintf(s, sizeof(s), "t4fw_cfg_%s", sc->cfg_file);
-			cfg = firmware_get(s);
-			if (cfg == NULL) {
-				device_printf(sc->dev,
-				    "unable to locate %s module, "
-				    "will use default config file.\n", s);
-				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
-				    "%s", "default");
-			}
-		}
+		KASSERT(sc->flags & MASTER_PF,
+		    ("%s: trying to change chip settings when not master.",
+		    __func__));
 
-		rc = partition_resources(sc, cfg ? cfg : default_cfg);
+		rc = partition_resources(sc, default_cfg, fw_info->kld_name);
 		if (rc != 0)
 			goto done;	/* error message displayed already */
+
+		t4_tweak_chip_settings(sc);
+
+		/* get basic stuff going */
+		rc = -t4_fw_initialize(sc, sc->mbox);
+		if (rc != 0) {
+			device_printf(sc->dev, "fw init failed: %d.\n", rc);
+			goto done;
+		}
 	} else {
-		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", "notme");
-		sc->cfcsum = (u_int)-1;
+		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "pf%d", pf);
+		sc->cfcsum = 0;
 	}
 
-	sc->flags |= FW_OK;
-
 done:
 	free(card_fw, M_CXGBE);
 	if (fw != NULL)
 		firmware_put(fw, FIRMWARE_UNLOAD);
-	if (cfg != NULL)
-		firmware_put(cfg, FIRMWARE_UNLOAD);
 	if (default_cfg != NULL)
 		firmware_put(default_cfg, FIRMWARE_UNLOAD);
 
@@ -1707,115 +2950,127 @@
 	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
 
 /*
- * Upload configuration file to card's memory.
+ * Partition chip resources for use between various PFs, VFs, etc.
  */
 static int
-upload_config_file(struct adapter *sc, const struct firmware *fw, uint32_t *mt,
-    uint32_t *ma)
+partition_resources(struct adapter *sc, const struct firmware *default_cfg,
+    const char *name_prefix)
 {
-	int rc, i;
-	uint32_t param, val, mtype, maddr, bar, off, win, remaining;
-	const uint32_t *b;
+	const struct firmware *cfg = NULL;
+	int rc = 0;
+	struct fw_caps_config_cmd caps;
+	uint32_t mtype, moff, finicsum, cfcsum;
 
-	/* Figure out where the firmware wants us to upload it. */
-	param = FW_PARAM_DEV(CF);
-	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
-	if (rc != 0) {
-		/* Firmwares without config file support will fail this way */
-		device_printf(sc->dev,
-		    "failed to query config file location: %d.\n", rc);
-		return (rc);
+	/*
+	 * Figure out what configuration file to use.  Pick the default config
+	 * file for the card if the user hasn't specified one explicitly.
+	 */
+	snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", t4_cfg_file);
+	if (strncmp(t4_cfg_file, DEFAULT_CF, sizeof(t4_cfg_file)) == 0) {
+		/* Card specific overrides go here. */
+		if (pci_get_device(sc->dev) == 0x440a)
+			snprintf(sc->cfg_file, sizeof(sc->cfg_file), UWIRE_CF);
+		if (is_fpga(sc))
+			snprintf(sc->cfg_file, sizeof(sc->cfg_file), FPGA_CF);
 	}
-	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
-	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
 
-	if (maddr & 3) {
-		device_printf(sc->dev,
-		    "cannot upload config file (type %u, addr %x).\n",
-		    mtype, maddr);
-		return (EFAULT);
+	/*
+	 * We need to load another module if the profile is anything except
+	 * "default" or "flash".
+	 */
+	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) != 0 &&
+	    strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
+		char s[32];
+
+		snprintf(s, sizeof(s), "%s_%s", name_prefix, sc->cfg_file);
+		cfg = firmware_get(s);
+		if (cfg == NULL) {
+			if (default_cfg != NULL) {
+				device_printf(sc->dev,
+				    "unable to load module \"%s\" for "
+				    "configuration profile \"%s\", will use "
+				    "the default config file instead.\n",
+				    s, sc->cfg_file);
+				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
+				    "%s", DEFAULT_CF);
+			} else {
+				device_printf(sc->dev,
+				    "unable to load module \"%s\" for "
+				    "configuration profile \"%s\", will use "
+				    "the config file on the card's flash "
+				    "instead.\n", s, sc->cfg_file);
+				snprintf(sc->cfg_file, sizeof(sc->cfg_file),
+				    "%s", FLASH_CF);
+			}
+		}
 	}
 
-	/* Translate mtype/maddr to an address suitable for the PCIe window */
-	val = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
-	val &= F_EDRAM0_ENABLE | F_EDRAM1_ENABLE | F_EXT_MEM_ENABLE;
-	switch (mtype) {
-	case FW_MEMTYPE_CF_EDC0:
-		if (!(val & F_EDRAM0_ENABLE))
-			goto err;
-		bar = t4_read_reg(sc, A_MA_EDRAM0_BAR);
-		maddr += G_EDRAM0_BASE(bar) << 20;
-		break;
-
-	case FW_MEMTYPE_CF_EDC1:
-		if (!(val & F_EDRAM1_ENABLE))
-			goto err;
-		bar = t4_read_reg(sc, A_MA_EDRAM1_BAR);
-		maddr += G_EDRAM1_BASE(bar) << 20;
-		break;
-
-	case FW_MEMTYPE_CF_EXTMEM:
-		if (!(val & F_EXT_MEM_ENABLE))
-			goto err;
-		bar = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
-		maddr += G_EXT_MEM_BASE(bar) << 20;
-		break;
-
-	default:
-err:
+	if (strncmp(sc->cfg_file, DEFAULT_CF, sizeof(sc->cfg_file)) == 0 &&
+	    default_cfg == NULL) {
 		device_printf(sc->dev,
-		    "cannot upload config file (type %u, enabled %u).\n",
-		    mtype, val);
-		return (EFAULT);
+		    "default config file not available, will use the config "
+		    "file on the card's flash instead.\n");
+		snprintf(sc->cfg_file, sizeof(sc->cfg_file), "%s", FLASH_CF);
 	}
 
-	/*
-	 * Position the PCIe window (we use memwin2) to the 16B aligned area
-	 * just at/before the upload location.
-	 */
-	win = maddr & ~0xf;
-	off = maddr - win;  /* offset from the start of the window. */
-	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
-	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
+	if (strncmp(sc->cfg_file, FLASH_CF, sizeof(sc->cfg_file)) != 0) {
+		u_int cflen;
+		const uint32_t *cfdata;
+		uint32_t param, val, addr;
 
-	remaining = fw->datasize;
-	if (remaining > FLASH_CFG_MAX_SIZE ||
-	    remaining > MEMWIN2_APERTURE - off) {
-		device_printf(sc->dev, "cannot upload config file all at once "
-		    "(size %u, max %u, room %u).\n",
-		    remaining, FLASH_CFG_MAX_SIZE, MEMWIN2_APERTURE - off);
-		return (EFBIG);
-	}
+		KASSERT(cfg != NULL || default_cfg != NULL,
+		    ("%s: no config to upload", __func__));
 
-	/*
-	 * XXX: sheer laziness.  We deliberately added 4 bytes of useless
-	 * stuffing/comments at the end of the config file so it's ok to simply
-	 * throw away the last remaining bytes when the config file is not an
-	 * exact multiple of 4.
-	 */
-	b = fw->data;
-	for (i = 0; remaining >= 4; i += 4, remaining -= 4)
-		t4_write_reg(sc, MEMWIN2_BASE + off + i, *b++);
+		/*
+		 * Ask the firmware where it wants us to upload the config file.
+		 */
+		param = FW_PARAM_DEV(CF);
+		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+		if (rc != 0) {
+			/* No support for config file?  Shouldn't happen. */
+			device_printf(sc->dev,
+			    "failed to query config file location: %d.\n", rc);
+			goto done;
+		}
+		mtype = G_FW_PARAMS_PARAM_Y(val);
+		moff = G_FW_PARAMS_PARAM_Z(val) << 16;
 
-	return (rc);
-}
+		/*
+		 * XXX: sheer laziness.  We deliberately added 4 bytes of
+		 * useless stuffing/comments at the end of the config file so
+		 * it's ok to simply throw away the last remaining bytes when
+		 * the config file is not an exact multiple of 4.  This also
+		 * helps with the validate_mt_off_len check.
+		 */
+		if (cfg != NULL) {
+			cflen = cfg->datasize & ~3;
+			cfdata = cfg->data;
+		} else {
+			cflen = default_cfg->datasize & ~3;
+			cfdata = default_cfg->data;
+		}
 
-/*
- * Partition chip resources for use between various PFs, VFs, etc.  This is done
- * by uploading the firmware configuration file to the adapter and instructing
- * the firmware to process it.
- */
-static int
-partition_resources(struct adapter *sc, const struct firmware *cfg)
-{
-	int rc;
-	struct fw_caps_config_cmd caps;
-	uint32_t mtype, maddr, finicsum, cfcsum;
+		if (cflen > FLASH_CFG_MAX_SIZE) {
+			device_printf(sc->dev,
+			    "config file too long (%d, max allowed is %d).  "
+			    "Will try to use the config on the card, if any.\n",
+			    cflen, FLASH_CFG_MAX_SIZE);
+			goto use_config_on_flash;
+		}
 
-	rc = cfg ? upload_config_file(sc, cfg, &mtype, &maddr) : ENOENT;
-	if (rc != 0) {
-		mtype = FW_MEMTYPE_CF_FLASH;
-		maddr = t4_flash_cfg_addr(sc);
+		rc = validate_mt_off_len(sc, mtype, moff, cflen, &addr);
+		if (rc != 0) {
+			device_printf(sc->dev,
+			    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
+			    "Will try to use the config on the card, if any.\n",
+			    __func__, mtype, moff, cflen, rc);
+			goto use_config_on_flash;
+		}
+		write_via_memwin(sc, 2, addr, cfdata, cflen);
+	} else {
+use_config_on_flash:
+		mtype = FW_MEMTYPE_FLASH;
+		moff = t4_flash_cfg_addr(sc);
 	}
 
 	bzero(&caps, sizeof(caps));
@@ -1823,12 +3078,13 @@
 	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
 	caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
 	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
-	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
+	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) | FW_LEN16(caps));
 	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof(caps), &caps);
 	if (rc != 0) {
 		device_printf(sc->dev,
-		    "failed to pre-process config file: %d.\n", rc);
-		return (rc);
+		    "failed to pre-process config file: %d "
+		    "(mtype %d, moff 0x%x).\n", rc, mtype, moff);
+		goto done;
 	}
 
 	finicsum = be32toh(caps.finicsum);
@@ -1842,7 +3098,6 @@
 
 #define LIMIT_CAPS(x) do { \
 	caps.x &= htobe16(t4_##x##_allowed); \
-	sc->x = htobe16(caps.x); \
 } while (0)
 
 	/*
@@ -1849,10 +3104,13 @@
 	 * Let the firmware know what features will (not) be used so it can tune
 	 * things accordingly.
 	 */
+	LIMIT_CAPS(nbmcaps);
 	LIMIT_CAPS(linkcaps);
+	LIMIT_CAPS(switchcaps);
 	LIMIT_CAPS(niccaps);
 	LIMIT_CAPS(toecaps);
 	LIMIT_CAPS(rdmacaps);
+	LIMIT_CAPS(cryptocaps);
 	LIMIT_CAPS(iscsicaps);
 	LIMIT_CAPS(fcoecaps);
 #undef LIMIT_CAPS
@@ -1864,15 +3122,15 @@
 	if (rc != 0) {
 		device_printf(sc->dev,
 		    "failed to process config file: %d.\n", rc);
-		return (rc);
 	}
-
-	return (0);
+done:
+	if (cfg != NULL)
+		firmware_put(cfg, FIRMWARE_UNLOAD);
+	return (rc);
 }
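
The profile name driving all of this comes from the hw.cxgbe.config_file
loader tunable (t4_cfg_file above; "default" unless overridden).  Setting
hw.cxgbe.config_file="uwire" in /boot/loader.conf, for example, makes this
function look for a firmware module named t4fw_cfg_uwire (t5fw_cfg_uwire
on a T5), falling back to the default or on-flash config with the warnings
shown above.
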
 
 /*
- * Retrieve parameters that are needed (or nice to have) prior to calling
- * t4_sge_init and t4_fw_initialize.
+ * Retrieve parameters that are needed (or nice to have) very early.
  */
 static int
 get_params__pre_init(struct adapter *sc)
@@ -1879,9 +3137,33 @@
 {
 	int rc;
 	uint32_t param[2], val[2];
-	struct fw_devlog_cmd cmd;
-	struct devlog_params *dlog = &sc->params.devlog;
 
+	t4_get_version_info(sc);
+
+	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
+	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
+	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
+	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
+	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
+
+	snprintf(sc->bs_version, sizeof(sc->bs_version), "%u.%u.%u.%u",
+	    G_FW_HDR_FW_VER_MAJOR(sc->params.bs_vers),
+	    G_FW_HDR_FW_VER_MINOR(sc->params.bs_vers),
+	    G_FW_HDR_FW_VER_MICRO(sc->params.bs_vers),
+	    G_FW_HDR_FW_VER_BUILD(sc->params.bs_vers));
+
+	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
+	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
+	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
+	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
+	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
+
+	snprintf(sc->er_version, sizeof(sc->er_version), "%u.%u.%u.%u",
+	    G_FW_HDR_FW_VER_MAJOR(sc->params.er_vers),
+	    G_FW_HDR_FW_VER_MINOR(sc->params.er_vers),
+	    G_FW_HDR_FW_VER_MICRO(sc->params.er_vers),
+	    G_FW_HDR_FW_VER_BUILD(sc->params.er_vers));
+
 	param[0] = FW_PARAM_DEV(PORTVEC);
 	param[1] = FW_PARAM_DEV(CCLK);
 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
@@ -1896,21 +3178,13 @@
 	sc->params.vpd.cclk = val[1];
 
 	/* Read device log parameters. */
-	bzero(&cmd, sizeof(cmd));
-	cmd.op_to_write = htobe32(V_FW_CMD_OP(FW_DEVLOG_CMD) |
-	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
-	cmd.retval_len16 = htobe32(FW_LEN16(cmd));
-	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof(cmd), &cmd);
-	if (rc != 0) {
+	rc = -t4_init_devlog_params(sc, 1);
+	if (rc == 0)
+		fixup_devlog_params(sc);
+	else {
 		device_printf(sc->dev,
 		    "failed to get devlog parameters: %d.\n", rc);
-		bzero(dlog, sizeof (*dlog));
 		rc = 0;	/* devlog isn't critical for device operation */
-	} else {
-		val[0] = be32toh(cmd.memtype_devlog_memaddr16_devlog);
-		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
-		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
-		dlog->size = be32toh(cmd.memsize_devlog);
 	}
 
 	return (rc);
@@ -1944,6 +3218,8 @@
 	sc->sge.eq_start = val[1];
 	sc->tids.ftid_base = val[2];
 	sc->tids.nftids = val[3] - val[2] + 1;
+	sc->params.ftid_min = val[2];
+	sc->params.ftid_max = val[3];
 	sc->vres.l2t.start = val[4];
 	sc->vres.l2t.size = val[5] - val[4] + 1;
 	KASSERT(sc->vres.l2t.size <= L2T_SIZE,
@@ -1962,7 +3238,38 @@
 		return (rc);
 	}
 
-	if (caps.toecaps) {
+#define READ_CAPS(x) do { \
+	sc->x = htobe16(caps.x); \
+} while (0)
+	READ_CAPS(nbmcaps);
+	READ_CAPS(linkcaps);
+	READ_CAPS(switchcaps);
+	READ_CAPS(niccaps);
+	READ_CAPS(toecaps);
+	READ_CAPS(rdmacaps);
+	READ_CAPS(cryptocaps);
+	READ_CAPS(iscsicaps);
+	READ_CAPS(fcoecaps);
+
+	if (sc->niccaps & FW_CAPS_CONFIG_NIC_ETHOFLD) {
+		param[0] = FW_PARAM_PFVF(ETHOFLD_START);
+		param[1] = FW_PARAM_PFVF(ETHOFLD_END);
+		param[2] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 3, param, val);
+		if (rc != 0) {
+			device_printf(sc->dev,
+			    "failed to query NIC parameters: %d.\n", rc);
+			return (rc);
+		}
+		sc->tids.etid_base = val[0];
+		sc->params.etid_min = val[0];
+		sc->tids.netids = val[1] - val[0] + 1;
+		sc->params.netids = sc->tids.netids;
+		sc->params.eo_wr_cred = val[2];
+		sc->params.ethoffload = 1;
+	}
+
+	if (sc->toecaps) {
 		/* query offload-related parameters */
 		param[0] = FW_PARAM_DEV(NTID);
 		param[1] = FW_PARAM_PFVF(SERVER_START);
@@ -1985,7 +3292,7 @@
 		sc->params.ofldq_wr_cred = val[5];
 		sc->params.offload = 1;
 	}
-	if (caps.rdmacaps) {
+	if (sc->rdmacaps) {
 		param[0] = FW_PARAM_PFVF(STAG_START);
 		param[1] = FW_PARAM_PFVF(STAG_END);
 		param[2] = FW_PARAM_PFVF(RQ_START);
@@ -2011,7 +3318,7 @@
 		param[3] = FW_PARAM_PFVF(CQ_END);
 		param[4] = FW_PARAM_PFVF(OCQ_START);
 		param[5] = FW_PARAM_PFVF(OCQ_END);
-		rc = -t4_query_params(sc, 0, 0, 0, 6, param, val);
+		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
 		if (rc != 0) {
 			device_printf(sc->dev,
 			    "failed to query RDMA parameters(2): %d.\n", rc);
@@ -2023,8 +3330,23 @@
 		sc->vres.cq.size = val[3] - val[2] + 1;
 		sc->vres.ocq.start = val[4];
 		sc->vres.ocq.size = val[5] - val[4] + 1;
+
+		param[0] = FW_PARAM_PFVF(SRQ_START);
+		param[1] = FW_PARAM_PFVF(SRQ_END);
+		param[2] = FW_PARAM_DEV(MAXORDIRD_QP);
+		param[3] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 4, param, val);
+		if (rc != 0) {
+			device_printf(sc->dev,
+			    "failed to query RDMA parameters(3): %d.\n", rc);
+			return (rc);
+		}
+		sc->vres.srq.start = val[0];
+		sc->vres.srq.size = val[1] - val[0] + 1;
+		sc->params.max_ordird_qp = val[2];
+		sc->params.max_ird_adapter = val[3];
 	}
-	if (caps.iscsicaps) {
+	if (sc->iscsicaps) {
 		param[0] = FW_PARAM_PFVF(ISCSI_START);
 		param[1] = FW_PARAM_PFVF(ISCSI_END);
 		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
@@ -2037,12 +3359,14 @@
 		sc->vres.iscsi.size = val[1] - val[0] + 1;
 	}
 
-	/* These are finalized by FW initialization, load their values now */
-	val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
-	sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
-	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
-	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
+	t4_init_sge_params(sc);
 
+	/*
+	 * We've got the params we wanted to query via the firmware.  Now grab
+	 * some others directly from the chip.
+	 */
+	rc = t4_read_chip_settings(sc);
+
 	return (rc);
 }
 
@@ -2050,27 +3374,13 @@
 set_params__post_init(struct adapter *sc)
 {
 	uint32_t param, val;
-	int rc;
 
+	/* ask for encapsulated CPLs */
 	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
-	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
-	if (rc == 0) {
-		/* ask for encapsulated CPLs */
-		param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
-		val = 1;
-		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
-		if (rc != 0) {
-			device_printf(sc->dev,
-			    "failed to set parameter (post_init): %d.\n", rc);
-			return (rc);
-		}
-	} else if (rc != FW_EINVAL) {
-		device_printf(sc->dev,
-		    "failed to check for encapsulated CPLs: %d.\n", rc);
-	} else
-		rc = 0;	/* the firmware doesn't support the param, no worries */
+	val = 1;
+	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 
-	return (rc);
+	return (0);
 }
 
 #undef FW_PARAM_PFVF
@@ -2082,17 +3392,15 @@
 	char buf[128];
 	struct adapter_params *p = &sc->params;
 
-	snprintf(buf, sizeof(buf), "Chelsio %s %sNIC (rev %d), S/N:%s, E/C:%s",
-	    p->vpd.id, is_offload(sc) ? "R" : "", p->rev, p->vpd.sn, p->vpd.ec);
+	snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
 
 	device_set_desc_copy(sc->dev, buf);
 }
 
 static void
-build_medialist(struct port_info *pi)
+build_medialist(struct port_info *pi, struct ifmedia *media)
 {
-	struct ifmedia *media = &pi->media;
-	int data, m;
+	int m;
 
 	PORT_LOCK(pi);
 
@@ -2099,29 +3407,26 @@
 	ifmedia_removeall(media);
 
 	m = IFM_ETHER | IFM_FDX;
-	data = (pi->port_type << 8) | pi->mod_type;
 
 	switch(pi->port_type) {
 	case FW_PORT_TYPE_BT_XFI:
-		ifmedia_add(media, m | IFM_10G_T, data, NULL);
-		break;
-
 	case FW_PORT_TYPE_BT_XAUI:
-		ifmedia_add(media, m | IFM_10G_T, data, NULL);
+		ifmedia_add(media, m | IFM_10G_T, 0, NULL);
 		/* fall through */
 
 	case FW_PORT_TYPE_BT_SGMII:
-		ifmedia_add(media, m | IFM_1000_T, data, NULL);
-		ifmedia_add(media, m | IFM_100_TX, data, NULL);
-		ifmedia_add(media, IFM_ETHER | IFM_AUTO, data, NULL);
+		ifmedia_add(media, m | IFM_1000_T, 0, NULL);
+		ifmedia_add(media, m | IFM_100_TX, 0, NULL);
+		ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
 		ifmedia_set(media, IFM_ETHER | IFM_AUTO);
 		break;
 
 	case FW_PORT_TYPE_CX4:
-		ifmedia_add(media, m | IFM_10G_CX4, data, NULL);
+		ifmedia_add(media, m | IFM_10G_CX4, 0, NULL);
 		ifmedia_set(media, m | IFM_10G_CX4);
 		break;
 
+	case FW_PORT_TYPE_QSFP_10G:
 	case FW_PORT_TYPE_SFP:
 	case FW_PORT_TYPE_FIBER_XFI:
 	case FW_PORT_TYPE_FIBER_XAUI:
@@ -2128,29 +3433,29 @@
 		switch (pi->mod_type) {
 
 		case FW_PORT_MOD_TYPE_LR:
-			ifmedia_add(media, m | IFM_10G_LR, data, NULL);
+			ifmedia_add(media, m | IFM_10G_LR, 0, NULL);
 			ifmedia_set(media, m | IFM_10G_LR);
 			break;
 
 		case FW_PORT_MOD_TYPE_SR:
-			ifmedia_add(media, m | IFM_10G_SR, data, NULL);
+			ifmedia_add(media, m | IFM_10G_SR, 0, NULL);
 			ifmedia_set(media, m | IFM_10G_SR);
 			break;
 
 		case FW_PORT_MOD_TYPE_LRM:
-			ifmedia_add(media, m | IFM_10G_LRM, data, NULL);
+			ifmedia_add(media, m | IFM_10G_LRM, 0, NULL);
 			ifmedia_set(media, m | IFM_10G_LRM);
 			break;
 
 		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
 		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
-			ifmedia_add(media, m | IFM_10G_TWINAX, data, NULL);
+			ifmedia_add(media, m | IFM_10G_TWINAX, 0, NULL);
 			ifmedia_set(media, m | IFM_10G_TWINAX);
 			break;
 
 		case FW_PORT_MOD_TYPE_NONE:
 			m &= ~IFM_FDX;
-			ifmedia_add(media, m | IFM_NONE, data, NULL);
+			ifmedia_add(media, m | IFM_NONE, 0, NULL);
 			ifmedia_set(media, m | IFM_NONE);
 			break;
 
@@ -2157,17 +3462,123 @@
 		case FW_PORT_MOD_TYPE_NA:
 		case FW_PORT_MOD_TYPE_ER:
 		default:
-			ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
+			device_printf(pi->dev,
+			    "unknown port_type (%d), mod_type (%d)\n",
+			    pi->port_type, pi->mod_type);
+			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
 			ifmedia_set(media, m | IFM_UNKNOWN);
 			break;
 		}
 		break;
 
-	case FW_PORT_TYPE_KX4:
-	case FW_PORT_TYPE_KX:
-	case FW_PORT_TYPE_KR:
+	case FW_PORT_TYPE_CR_QSFP:
+	case FW_PORT_TYPE_SFP28:
+	case FW_PORT_TYPE_KR_SFP28:
+		switch (pi->mod_type) {
+
+		case FW_PORT_MOD_TYPE_SR:
+			ifmedia_add(media, m | IFM_25G_SR, 0, NULL);
+			ifmedia_set(media, m | IFM_25G_SR);
+			break;
+
+		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
+		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
+			ifmedia_add(media, m | IFM_25G_CR, 0, NULL);
+			ifmedia_set(media, m | IFM_25G_CR);
+			break;
+
+		case FW_PORT_MOD_TYPE_NONE:
+			m &= ~IFM_FDX;
+			ifmedia_add(media, m | IFM_NONE, 0, NULL);
+			ifmedia_set(media, m | IFM_NONE);
+			break;
+
+		default:
+			device_printf(pi->dev,
+			    "unknown port_type (%d), mod_type (%d)\n",
+			    pi->port_type, pi->mod_type);
+			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
+			ifmedia_set(media, m | IFM_UNKNOWN);
+			break;
+		}
+		break;
+
+	case FW_PORT_TYPE_QSFP:
+		switch (pi->mod_type) {
+
+		case FW_PORT_MOD_TYPE_LR:
+			ifmedia_add(media, m | IFM_40G_LR4, 0, NULL);
+			ifmedia_set(media, m | IFM_40G_LR4);
+			break;
+
+		case FW_PORT_MOD_TYPE_SR:
+			ifmedia_add(media, m | IFM_40G_SR4, 0, NULL);
+			ifmedia_set(media, m | IFM_40G_SR4);
+			break;
+
+		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
+		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
+			ifmedia_add(media, m | IFM_40G_CR4, 0, NULL);
+			ifmedia_set(media, m | IFM_40G_CR4);
+			break;
+
+		case FW_PORT_MOD_TYPE_NONE:
+			m &= ~IFM_FDX;
+			ifmedia_add(media, m | IFM_NONE, 0, NULL);
+			ifmedia_set(media, m | IFM_NONE);
+			break;
+
+		default:
+			device_printf(pi->dev,
+			    "unknown port_type (%d), mod_type (%d)\n",
+			    pi->port_type, pi->mod_type);
+			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
+			ifmedia_set(media, m | IFM_UNKNOWN);
+			break;
+		}
+		break;
+
+	case FW_PORT_TYPE_KR4_100G:
+	case FW_PORT_TYPE_CR4_QSFP:
+		switch (pi->mod_type) {
+
+		case FW_PORT_MOD_TYPE_LR:
+			ifmedia_add(media, m | IFM_100G_LR4, 0, NULL);
+			ifmedia_set(media, m | IFM_100G_LR4);
+			break;
+
+		case FW_PORT_MOD_TYPE_SR:
+			ifmedia_add(media, m | IFM_100G_SR4, 0, NULL);
+			ifmedia_set(media, m | IFM_100G_SR4);
+			break;
+
+		case FW_PORT_MOD_TYPE_TWINAX_PASSIVE:
+		case FW_PORT_MOD_TYPE_TWINAX_ACTIVE:
+			ifmedia_add(media, m | IFM_100G_CR4, 0, NULL);
+			ifmedia_set(media, m | IFM_100G_CR4);
+			break;
+
+		case FW_PORT_MOD_TYPE_NONE:
+			m &= ~IFM_FDX;
+			ifmedia_add(media, m | IFM_NONE, 0, NULL);
+			ifmedia_set(media, m | IFM_NONE);
+			break;
+
+		default:
+			device_printf(pi->dev,
+			    "unknown port_type (%d), mod_type (%d)\n",
+			    pi->port_type, pi->mod_type);
+			ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
+			ifmedia_set(media, m | IFM_UNKNOWN);
+			break;
+		}
+		break;
+
 	default:
-		ifmedia_add(media, m | IFM_UNKNOWN, data, NULL);
+		device_printf(pi->dev,
+		    "unknown port_type (%d), mod_type (%d)\n", pi->port_type,
+		    pi->mod_type);
+		ifmedia_add(media, m | IFM_UNKNOWN, 0, NULL);
 		ifmedia_set(media, m | IFM_UNKNOWN);
 		break;
 	}
@@ -2181,11 +3592,12 @@
  * Program the port's XGMAC based on parameters in ifnet.  The caller also
  * indicates which parameters should be programmed (the rest are left alone).
  */
-static int
-update_mac_settings(struct port_info *pi, int flags)
+int
+update_mac_settings(struct ifnet *ifp, int flags)
 {
-	int rc;
-	struct ifnet *ifp = pi->ifp;
+	int rc = 0;
+	struct vi_info *vi = ifp->if_softc;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
 
@@ -2204,11 +3616,14 @@
 	if (flags & XGMAC_VLANEX)
 		vlanex = ifp->if_capenable & IFCAP_VLAN_HWTAGGING ? 1 : 0;
 
-	rc = -t4_set_rxmode(sc, sc->mbox, pi->viid, mtu, promisc, allmulti, 1,
-	    vlanex, false);
-	if (rc) {
-		if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags, rc);
-		return (rc);
+	if (flags & (XGMAC_MTU|XGMAC_PROMISC|XGMAC_ALLMULTI|XGMAC_VLANEX)) {
+		rc = -t4_set_rxmode(sc, sc->mbox, vi->viid, mtu, promisc,
+		    allmulti, 1, vlanex, false);
+		if (rc) {
+			if_printf(ifp, "set_rxmode (%x) failed: %d\n", flags,
+			    rc);
+			return (rc);
+		}
 	}
 
 	if (flags & XGMAC_UCADDR) {
@@ -2215,7 +3630,7 @@
 		uint8_t ucaddr[ETHER_ADDR_LEN];
 
 		bcopy(IF_LLADDR(ifp), ucaddr, sizeof(ucaddr));
-		rc = t4_change_mac(sc, sc->mbox, pi->viid, pi->xact_addr_filt,
+		rc = t4_change_mac(sc, sc->mbox, vi->viid, vi->xact_addr_filt,
 		    ucaddr, true, true);
 		if (rc < 0) {
 			rc = -rc;
@@ -2222,7 +3637,7 @@
 			if_printf(ifp, "change_mac failed: %d\n", rc);
 			return (rc);
 		} else {
-			pi->xact_addr_filt = rc;
+			vi->xact_addr_filt = rc;
 			rc = 0;
 		}
 	}
@@ -2238,11 +3653,13 @@
 		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 			if (ifma->ifma_addr->sa_family != AF_LINK)
 				continue;
-			mcaddr[i++] =
+			mcaddr[i] =
 			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+			MPASS(ETHER_IS_MULTICAST(mcaddr[i]));
+			i++;
 
 			if (i == FW_MAC_EXACT_CHUNK) {
-				rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
+				rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid,
 				    del, i, mcaddr, NULL, &hash, 0);
 				if (rc < 0) {
 					rc = -rc;
@@ -2263,8 +3680,8 @@
 			}
 		}
 		if (i > 0) {
-			rc = t4_alloc_mac_filt(sc, sc->mbox, pi->viid,
-			    del, i, mcaddr, NULL, &hash, 0);
+			rc = t4_alloc_mac_filt(sc, sc->mbox, vi->viid, del, i,
+			    mcaddr, NULL, &hash, 0);
 			if (rc < 0) {
 				rc = -rc;
 				for (j = 0; j < i; j++) {
@@ -2281,7 +3698,7 @@
 			}
 		}
 
-		rc = -t4_set_addr_hash(sc, sc->mbox, pi->viid, 0, hash, 0);
+		rc = -t4_set_addr_hash(sc, sc->mbox, vi->viid, 0, hash, 0);
 		if (rc != 0)
 			if_printf(ifp, "failed to set mc address hash: %d", rc);
 mcfail:
@@ -2291,8 +3708,11 @@
 	return (rc);
 }
 
+/*
+ * {begin|end}_synchronized_op must be called from the same thread.
+ */
 int
-begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
+begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
     char *wmesg)
 {
 	int rc, pri;
@@ -2300,7 +3720,8 @@
 #ifdef WITNESS
 	/* the caller thinks it's ok to sleep, but is it really? */
 	if (flags & SLEEP_OK)
-		pause("t4slptst", 1);
+		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+		    "begin_synchronized_op");
 #endif
 
 	if (INTR_OK)
@@ -2311,7 +3732,7 @@
 	ADAPTER_LOCK(sc);
 	for (;;) {
 
-		if (pi && IS_DOOMED(pi)) {
+		if (vi && IS_DOOMED(vi)) {
 			rc = ENXIO;
 			goto done;
 		}
@@ -2337,6 +3758,7 @@
 #ifdef INVARIANTS
 	sc->last_op = wmesg;
 	sc->last_op_thr = curthread;
+	sc->last_op_flags = flags;
 #endif
 
 done:
@@ -2346,7 +3768,33 @@
 	return (rc);
 }
 
+/*
+ * Tell if_ioctl and if_init that the VI is going away.  This is a
+ * special variant of begin_synchronized_op and must be paired with a
+ * call to end_synchronized_op.
+ */
 void
+doom_vi(struct adapter *sc, struct vi_info *vi)
+{
+
+	ADAPTER_LOCK(sc);
+	SET_DOOMED(vi);
+	wakeup(&sc->flags);
+	while (IS_BUSY(sc))
+		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
+	SET_BUSY(sc);
+#ifdef INVARIANTS
+	sc->last_op = "t4detach";
+	sc->last_op_thr = curthread;
+	sc->last_op_flags = 0;
+#endif
+	ADAPTER_UNLOCK(sc);
+}
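
For illustration, a minimal sketch of how doom_vi() pairs with
end_synchronized_op() in a detach path (hypothetical caller written for this
note; the real detach code does more work in between):

	static int
	vi_detach_sketch(struct adapter *sc, struct vi_info *vi)
	{

		doom_vi(sc, vi);	/* marks the VI doomed, acquires BUSY */

		/* ... tear down the ifnet, queues, and sysctls here ... */

		end_synchronized_op(sc, 0);	/* drops BUSY, wakes waiters */
		return (0);
	}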
+
+/*
+ * {begin|end}_synchronized_op must be called from the same thread.
+ */
+void
 end_synchronized_op(struct adapter *sc, int flags)
 {
 
@@ -2362,54 +3810,72 @@
 }
 
 static int
-cxgbe_init_synchronized(struct port_info *pi)
+cxgbe_init_synchronized(struct vi_info *vi)
 {
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
-	struct ifnet *ifp = pi->ifp;
-	int rc = 0;
+	struct ifnet *ifp = vi->ifp;
+	int rc = 0, i;
+	struct sge_txq *txq;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
-	if (isset(&sc->open_device_map, pi->port_id)) {
-		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
-		    ("mismatch between open_device_map and if_drv_flags"));
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 		return (0);	/* already running */
-	}
 
 	if (!(sc->flags & FULL_INIT_DONE) &&
 	    ((rc = adapter_full_init(sc)) != 0))
 		return (rc);	/* error message displayed already */
 
-	if (!(pi->flags & PORT_INIT_DONE) &&
-	    ((rc = port_full_init(pi)) != 0))
+	if (!(vi->flags & VI_INIT_DONE) &&
+	    ((rc = vi_full_init(vi)) != 0))
 		return (rc); /* error message displayed already */
 
-	rc = update_mac_settings(pi, XGMAC_ALL);
+	rc = update_mac_settings(ifp, XGMAC_ALL);
 	if (rc)
 		goto done;	/* error message displayed already */
 
-	rc = -t4_link_start(sc, sc->mbox, pi->tx_chan, &pi->link_cfg);
+	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, true, true);
 	if (rc != 0) {
-		if_printf(ifp, "start_link failed: %d\n", rc);
+		if_printf(ifp, "enable_vi failed: %d\n", rc);
 		goto done;
 	}
 
-	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, true, true);
-	if (rc != 0) {
-		if_printf(ifp, "enable_vi failed: %d\n", rc);
-		goto done;
+	/*
+	 * Can't fail from this point onwards.  Review cxgbe_uninit_synchronized
+	 * if this changes.
+	 */
+
+	for_each_txq(vi, i, txq) {
+		TXQ_LOCK(txq);
+		txq->eq.flags |= EQ_ENABLED;
+		TXQ_UNLOCK(txq);
 	}
 
+	/*
+	 * The first iq of the first port to come up is used for tracing.
+	 */
+	if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
+		sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
+		t4_write_reg(sc, is_t4(sc) ?  A_MPS_TRC_RSS_CONTROL :
+		    A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
+		    V_QUEUENUMBER(sc->traceq));
+		pi->flags |= HAS_TRACEQ;
+	}
+
 	/* all ok */
-	setbit(&sc->open_device_map, pi->port_id);
 	PORT_LOCK(pi);
 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	pi->up_vis++;
+
+	if (pi->nvi > 1 || sc->flags & IS_VF)
+		callout_reset(&vi->tick, hz, vi_tick, vi);
+	else
+		callout_reset(&pi->tick, hz, cxgbe_tick, pi);
 	PORT_UNLOCK(pi);
-
-	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
 done:
 	if (rc != 0)
-		cxgbe_uninit_synchronized(pi);
+		cxgbe_uninit_synchronized(vi);
 
 	return (rc);
 }
@@ -2418,14 +3884,22 @@
  * Idempotent.
  */
 static int
-cxgbe_uninit_synchronized(struct port_info *pi)
+cxgbe_uninit_synchronized(struct vi_info *vi)
 {
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
-	struct ifnet *ifp = pi->ifp;
-	int rc;
+	struct ifnet *ifp = vi->ifp;
+	int rc, i;
+	struct sge_txq *txq;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
+	if (!(vi->flags & VI_INIT_DONE)) {
+		KASSERT(!(ifp->if_drv_flags & IFF_DRV_RUNNING),
+		    ("uninited VI is running"));
+		return (0);
+	}
+
 	/*
 	 * Disable the VI so that all its data in either direction is discarded
 	 * by the MPS.  Leave everything else (the queues, interrupts, and 1Hz
@@ -2433,19 +3907,38 @@
 	 * holding in its RAM (for an offloaded connection) even after the VI is
 	 * disabled.
 	 */
-	rc = -t4_enable_vi(sc, sc->mbox, pi->viid, false, false);
+	rc = -t4_enable_vi(sc, sc->mbox, vi->viid, false, false);
 	if (rc) {
 		if_printf(ifp, "disable_vi failed: %d\n", rc);
 		return (rc);
 	}
 
-	clrbit(&sc->open_device_map, pi->port_id);
+	for_each_txq(vi, i, txq) {
+		TXQ_LOCK(txq);
+		txq->eq.flags &= ~EQ_ENABLED;
+		TXQ_UNLOCK(txq);
+	}
+
 	PORT_LOCK(pi);
+	if (pi->nvi > 1 || sc->flags & IS_VF)
+		callout_stop(&vi->tick);
+	else
+		callout_stop(&pi->tick);
+	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+		PORT_UNLOCK(pi);
+		return (0);
+	}
 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+	pi->up_vis--;
+	if (pi->up_vis > 0) {
+		PORT_UNLOCK(pi);
+		return (0);
+	}
 	PORT_UNLOCK(pi);
 
 	pi->link_cfg.link_ok = 0;
 	pi->link_cfg.speed = 0;
+	pi->link_cfg.link_down_rc = 255;
 	t4_os_link_changed(sc, pi->port_id, 0);
 
 	return (0);
@@ -2455,17 +3948,22 @@
  * It is ok for this function to fail midway and return right away.  t4_detach
  * will walk the entire sc->irq list and clean up whatever is valid.
  */
-static int
-setup_intr_handlers(struct adapter *sc)
+int
+t4_setup_intr_handlers(struct adapter *sc)
 {
-	int rc, rid, p, q;
+	int rc, rid, p, q, v;
 	char s[8];
 	struct irq *irq;
 	struct port_info *pi;
+	struct vi_info *vi;
+	struct sge *sge = &sc->sge;
 	struct sge_rxq *rxq;
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
 #endif
+#ifdef DEV_NETMAP
+	struct sge_nm_rxq *nm_rxq;
+#endif
 
 	/*
 	 * Setup interrupts.
@@ -2472,94 +3970,109 @@
 	 */
 	irq = &sc->irq[0];
 	rid = sc->intr_type == INTR_INTX ? 0 : 1;
-	if (sc->intr_count == 1) {
-		KASSERT(!(sc->flags & INTR_DIRECT),
-		    ("%s: single interrupt && INTR_DIRECT?", __func__));
+	if (sc->intr_count == 1)
+		return (t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all"));
 
-		rc = t4_alloc_irq(sc, irq, rid, t4_intr_all, sc, "all");
-		if (rc != 0)
-			return (rc);
-	} else {
-		/* Multiple interrupts. */
+	/* Multiple interrupts. */
+	if (sc->flags & IS_VF)
+		KASSERT(sc->intr_count >= T4VF_EXTRA_INTR + sc->params.nports,
+		    ("%s: too few intr.", __func__));
+	else
 		KASSERT(sc->intr_count >= T4_EXTRA_INTR + sc->params.nports,
 		    ("%s: too few intr.", __func__));
 
-		/* The first one is always error intr */
+	/* The first one is always error intr on PFs */
+	if (!(sc->flags & IS_VF)) {
 		rc = t4_alloc_irq(sc, irq, rid, t4_intr_err, sc, "err");
 		if (rc != 0)
 			return (rc);
 		irq++;
 		rid++;
+	}
 
-		/* The second one is always the firmware event queue */
-		rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sc->sge.fwq,
-		    "evt");
-		if (rc != 0)
-			return (rc);
-		irq++;
-		rid++;
+	/* The second one is always the firmware event queue (first on VFs) */
+	rc = t4_alloc_irq(sc, irq, rid, t4_intr_evt, &sge->fwq, "evt");
+	if (rc != 0)
+		return (rc);
+	irq++;
+	rid++;
 
-		/*
-		 * Note that if INTR_DIRECT is not set then either the NIC rx
-		 * queues or (exclusive or) the TOE rx queueus will be taking
-		 * direct interrupts.
-		 *
-		 * There is no need to check for is_offload(sc) as nofldrxq
-		 * will be 0 if offload is disabled.
-		 */
-		for_each_port(sc, p) {
-			pi = sc->port[p];
+	for_each_port(sc, p) {
+		pi = sc->port[p];
+		for_each_vi(pi, v, vi) {
+			vi->first_intr = rid - 1;
 
-#ifdef TCP_OFFLOAD
-			/*
-			 * Skip over the NIC queues if they aren't taking direct
-			 * interrupts.
-			 */
-			if (!(sc->flags & INTR_DIRECT) &&
-			    pi->nofldrxq > pi->nrxq)
-				goto ofld_queues;
+			if (vi->nnmrxq > 0) {
+				int n = max(vi->nrxq, vi->nnmrxq);
+
+				MPASS(vi->flags & INTR_RXQ);
+
+				rxq = &sge->rxq[vi->first_rxq];
+#ifdef DEV_NETMAP
+				nm_rxq = &sge->nm_rxq[vi->first_nm_rxq];
 #endif
-			rxq = &sc->sge.rxq[pi->first_rxq];
-			for (q = 0; q < pi->nrxq; q++, rxq++) {
-				snprintf(s, sizeof(s), "%d.%d", p, q);
-				rc = t4_alloc_irq(sc, irq, rid, t4_intr, rxq,
-				    s);
-				if (rc != 0)
-					return (rc);
-				irq++;
-				rid++;
+				for (q = 0; q < n; q++) {
+					snprintf(s, sizeof(s), "%x%c%x", p,
+					    'a' + v, q);
+					if (q < vi->nrxq)
+						irq->rxq = rxq++;
+#ifdef DEV_NETMAP
+					if (q < vi->nnmrxq)
+						irq->nm_rxq = nm_rxq++;
+#endif
+					rc = t4_alloc_irq(sc, irq, rid,
+					    t4_vi_intr, irq, s);
+					if (rc != 0)
+						return (rc);
+					irq++;
+					rid++;
+					vi->nintr++;
+				}
+			} else if (vi->flags & INTR_RXQ) {
+				for_each_rxq(vi, q, rxq) {
+					snprintf(s, sizeof(s), "%x%c%x", p,
+					    'a' + v, q);
+					rc = t4_alloc_irq(sc, irq, rid,
+					    t4_intr, rxq, s);
+					if (rc != 0)
+						return (rc);
+					irq++;
+					rid++;
+					vi->nintr++;
+				}
 			}
-
 #ifdef TCP_OFFLOAD
-			/*
-			 * Skip over the offload queues if they aren't taking
-			 * direct interrupts.
-			 */
-			if (!(sc->flags & INTR_DIRECT))
-				continue;
-ofld_queues:
-			ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
-			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
-				snprintf(s, sizeof(s), "%d,%d", p, q);
-				rc = t4_alloc_irq(sc, irq, rid, t4_intr,
-				    ofld_rxq, s);
-				if (rc != 0)
-					return (rc);
-				irq++;
-				rid++;
+			if (vi->flags & INTR_OFLD_RXQ) {
+				for_each_ofld_rxq(vi, q, ofld_rxq) {
+					snprintf(s, sizeof(s), "%x%c%x", p,
+					    'A' + v, q);
+					rc = t4_alloc_irq(sc, irq, rid,
+					    t4_intr, ofld_rxq, s);
+					if (rc != 0)
+						return (rc);
+					irq++;
+					rid++;
+					vi->nintr++;
+				}
 			}
 #endif
 		}
 	}
+	MPASS(irq == &sc->irq[sc->intr_count]);
 
 	return (0);
 }
 
-static int
+int
 adapter_full_init(struct adapter *sc)
 {
 	int rc, i;
+#ifdef RSS
+	uint32_t raw_rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
+	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
+#endif
 
+	ASSERT_SYNCHRONIZED_OP(sc);
 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
 	KASSERT((sc->flags & FULL_INIT_DONE) == 0,
 	    ("%s: FULL_INIT_DONE already", __func__));
@@ -2583,8 +4096,17 @@
 		taskqueue_start_threads(&sc->tq[i], 1, PI_NET, "%s tq%d",
 		    device_get_nameunit(sc->dev), i);
 	}
+#ifdef RSS
+	MPASS(RSS_KEYSIZE == 40);
+	rss_getkey((void *)&raw_rss_key[0]);
+	for (i = 0; i < nitems(rss_key); i++) {
+		rss_key[i] = htobe32(raw_rss_key[nitems(rss_key) - 1 - i]);
+	}
+	t4_write_rss_key(sc, &rss_key[0], -1);
+#endif
 
-	t4_intr_enable(sc);
+	if (!(sc->flags & IS_VF))
+		t4_intr_enable(sc);
 	sc->flags |= FULL_INIT_DONE;
 done:
 	if (rc != 0)
@@ -2593,7 +4115,7 @@
 	return (rc);
 }
 
-static int
+int
 adapter_full_uninit(struct adapter *sc)
 {
 	int i;
@@ -2612,49 +4134,193 @@
 	return (0);
 }
 
+#ifdef RSS
+#define SUPPORTED_RSS_HASHTYPES (RSS_HASHTYPE_RSS_IPV4 | \
+    RSS_HASHTYPE_RSS_TCP_IPV4 | RSS_HASHTYPE_RSS_IPV6 | \
+    RSS_HASHTYPE_RSS_TCP_IPV6 | RSS_HASHTYPE_RSS_UDP_IPV4 | \
+    RSS_HASHTYPE_RSS_UDP_IPV6)
+
+/* Translates kernel hash types to hardware. */
 static int
-port_full_init(struct port_info *pi)
+hashconfig_to_hashen(int hashconfig)
 {
-	struct adapter *sc = pi->adapter;
-	struct ifnet *ifp = pi->ifp;
+	int hashen = 0;
+
+	if (hashconfig & RSS_HASHTYPE_RSS_IPV4)
+		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+	if (hashconfig & RSS_HASHTYPE_RSS_IPV6)
+		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4) {
+		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
+		    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+	}
+	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV6) {
+		hashen |= F_FW_RSS_VI_CONFIG_CMD_UDPEN |
+		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+	}
+	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV4)
+		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+	if (hashconfig & RSS_HASHTYPE_RSS_TCP_IPV6)
+		hashen |= F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+
+	return (hashen);
+}
+
+/* Translates hardware hash types to kernel. */
+static int
+hashen_to_hashconfig(int hashen)
+{
+	int hashconfig = 0;
+
+	if (hashen & F_FW_RSS_VI_CONFIG_CMD_UDPEN) {
+		/*
+		 * If UDP hashing was enabled it must have been enabled for
+		 * either IPv4 or IPv6 (inclusive or).  Enabling UDP without
+		 * enabling any 4-tuple hash is a nonsense configuration.
+		 */
+		MPASS(hashen & (F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+		    F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN));
+
+		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV4;
+		if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+			hashconfig |= RSS_HASHTYPE_RSS_UDP_IPV6;
+	}
+	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV4;
+	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+		hashconfig |= RSS_HASHTYPE_RSS_TCP_IPV6;
+	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+		hashconfig |= RSS_HASHTYPE_RSS_IPV4;
+	if (hashen & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+		hashconfig |= RSS_HASHTYPE_RSS_IPV6;
+
+	return (hashconfig);
+}
+#endif
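
A worked example of the asymmetry these two translators create (the values
follow directly from the code above): requesting UDP/IPv4 hashing alone forces
the IPv4 4-tuple enable on, so the reverse translation reports TCP/IPv4 too.

	int hashen, roundtrip;

	hashen = hashconfig_to_hashen(RSS_HASHTYPE_RSS_UDP_IPV4);
	/* == F_FW_RSS_VI_CONFIG_CMD_UDPEN |
	 *    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN */
	roundtrip = hashen_to_hashconfig(hashen);
	/* == RSS_HASHTYPE_RSS_UDP_IPV4 | RSS_HASHTYPE_RSS_TCP_IPV4 */

The difference between roundtrip and the requested config is exactly the
"extra" that vi_full_init() warns about below.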
+
+int
+vi_full_init(struct vi_info *vi)
+{
+	struct adapter *sc = vi->pi->adapter;
+	struct ifnet *ifp = vi->ifp;
 	uint16_t *rss;
 	struct sge_rxq *rxq;
-	int rc, i;
+	int rc, i, j, hashen;
+#ifdef RSS
+	int nbuckets = rss_getnumbuckets();
+	int hashconfig = rss_gethashconfig();
+	int extra;
+#endif
 
 	ASSERT_SYNCHRONIZED_OP(sc);
-	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
-	    ("%s: PORT_INIT_DONE already", __func__));
+	KASSERT((vi->flags & VI_INIT_DONE) == 0,
+	    ("%s: VI_INIT_DONE already", __func__));
 
-	sysctl_ctx_init(&pi->ctx);
-	pi->flags |= PORT_SYSCTL_CTX;
+	sysctl_ctx_init(&vi->ctx);
+	vi->flags |= VI_SYSCTL_CTX;
 
 	/*
-	 * Allocate tx/rx/fl queues for this port.
+	 * Allocate tx/rx/fl queues for this VI.
 	 */
-	rc = t4_setup_port_queues(pi);
+	rc = t4_setup_vi_queues(vi);
 	if (rc != 0)
 		goto done;	/* error message displayed already */
 
 	/*
-	 * Setup RSS for this port.
+	 * Setup RSS for this VI.  Save a copy of the RSS table for later use.
 	 */
-	rss = malloc(pi->nrxq * sizeof (*rss), M_CXGBE,
-	    M_ZERO | M_WAITOK);
-	for_each_rxq(pi, i, rxq) {
-		rss[i] = rxq->iq.abs_id;
+	if (vi->nrxq > vi->rss_size) {
+		if_printf(ifp, "nrxq (%d) > hw RSS table size (%d); "
+		    "some queues will never receive traffic.\n", vi->nrxq,
+		    vi->rss_size);
+	} else if (vi->rss_size % vi->nrxq) {
+		if_printf(ifp, "nrxq (%d), hw RSS table size (%d); "
+		    "expect uneven traffic distribution.\n", vi->nrxq,
+		    vi->rss_size);
 	}
-	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
-	    pi->rss_size, rss, pi->nrxq);
-	free(rss, M_CXGBE);
+#ifdef RSS
+	if (vi->nrxq != nbuckets) {
+		if_printf(ifp, "nrxq (%d) != kernel RSS buckets (%d);"
+		    "performance will be impacted.\n", vi->nrxq, nbuckets);
+	}
+#endif
+	rss = malloc(vi->rss_size * sizeof (*rss), M_CXGBE, M_ZERO | M_WAITOK);
+	for (i = 0; i < vi->rss_size;) {
+#ifdef RSS
+		j = rss_get_indirection_to_bucket(i);
+		j %= vi->nrxq;
+		rxq = &sc->sge.rxq[vi->first_rxq + j];
+		rss[i++] = rxq->iq.abs_id;
+#else
+		for_each_rxq(vi, j, rxq) {
+			rss[i++] = rxq->iq.abs_id;
+			if (i == vi->rss_size)
+				break;
+		}
+#endif
+	}
+
+	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss,
+	    vi->rss_size);
 	if (rc != 0) {
 		if_printf(ifp, "rss_config failed: %d\n", rc);
 		goto done;
 	}
 
-	pi->flags |= PORT_INIT_DONE;
+#ifdef RSS
+	hashen = hashconfig_to_hashen(hashconfig);
+
+	/*
+	 * We may have had to enable some hashes even though the global config
+	 * wants them disabled.  This is a potential problem that must be
+	 * reported to the user.
+	 */
+	extra = hashen_to_hashconfig(hashen) ^ hashconfig;
+
+	/*
+	 * If we consider only the supported hash types, then the enabled hashes
+	 * are a superset of the requested hashes.  In other words, there cannot
+	 * be any supported hash that was requested but not enabled, but there
+	 * can be hashes that were not requested but had to be enabled.
+	 */
+	extra &= SUPPORTED_RSS_HASHTYPES;
+	MPASS((extra & hashconfig) == 0);
+
+	if (extra) {
+		if_printf(ifp,
+		    "global RSS config (0x%x) cannot be accomodated.\n",
+		    hashconfig);
+	}
+	if (extra & RSS_HASHTYPE_RSS_IPV4)
+		if_printf(ifp, "IPv4 2-tuple hashing forced on.\n");
+	if (extra & RSS_HASHTYPE_RSS_TCP_IPV4)
+		if_printf(ifp, "TCP/IPv4 4-tuple hashing forced on.\n");
+	if (extra & RSS_HASHTYPE_RSS_IPV6)
+		if_printf(ifp, "IPv6 2-tuple hashing forced on.\n");
+	if (extra & RSS_HASHTYPE_RSS_TCP_IPV6)
+		if_printf(ifp, "TCP/IPv6 4-tuple hashing forced on.\n");
+	if (extra & RSS_HASHTYPE_RSS_UDP_IPV4)
+		if_printf(ifp, "UDP/IPv4 4-tuple hashing forced on.\n");
+	if (extra & RSS_HASHTYPE_RSS_UDP_IPV6)
+		if_printf(ifp, "UDP/IPv6 4-tuple hashing forced on.\n");
+#else
+	hashen = F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+	    F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+	    F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+	    F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN | F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+#endif
+	rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, hashen, rss[0], 0, 0);
+	if (rc != 0) {
+		if_printf(ifp, "rss hash/defaultq config failed: %d\n", rc);
+		goto done;
+	}
+
+	vi->rss = rss;
+	vi->flags |= VI_INIT_DONE;
 done:
 	if (rc != 0)
-		port_full_uninit(pi);
+		vi_full_uninit(vi);
 
 	return (rc);
 }
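
To make the non-RSS fill loop above concrete (illustrative numbers, not taken
from the patch), consider rss_size = 8 and nrxq = 3, with the queues' abs_ids
written as q0..q2:

	/*
	 * rss[] = { q0, q1, q2, q0, q1, q2, q0, q1 }
	 *
	 * q0 lands in 3 of the 8 slots while q2 gets only 2, which is the
	 * "uneven traffic distribution" the function warns about whenever
	 * rss_size % nrxq != 0.
	 */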
@@ -2662,9 +4328,10 @@
 /*
  * Idempotent.
  */
-static int
-port_full_uninit(struct port_info *pi)
+int
+vi_full_uninit(struct vi_info *vi)
 {
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
 	int i;
 	struct sge_rxq *rxq;
@@ -2674,60 +4341,82 @@
 	struct sge_wrq *ofld_txq;
 #endif
 
-	if (pi->flags & PORT_INIT_DONE) {
+	if (vi->flags & VI_INIT_DONE) {
 
-		/* Need to quiesce queues.  XXX: ctrl queues? */
+		/* Need to quiesce queues.  */
 
-		for_each_txq(pi, i, txq) {
-			quiesce_eq(sc, &txq->eq);
+		/* XXX: Only for the first VI? */
+		if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
+			quiesce_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
+
+		for_each_txq(vi, i, txq) {
+			quiesce_txq(sc, txq);
 		}
 
 #ifdef TCP_OFFLOAD
-		for_each_ofld_txq(pi, i, ofld_txq) {
-			quiesce_eq(sc, &ofld_txq->eq);
+		for_each_ofld_txq(vi, i, ofld_txq) {
+			quiesce_wrq(sc, ofld_txq);
 		}
 #endif
 
-		for_each_rxq(pi, i, rxq) {
+		for_each_rxq(vi, i, rxq) {
 			quiesce_iq(sc, &rxq->iq);
 			quiesce_fl(sc, &rxq->fl);
 		}
 
 #ifdef TCP_OFFLOAD
-		for_each_ofld_rxq(pi, i, ofld_rxq) {
+		for_each_ofld_rxq(vi, i, ofld_rxq) {
 			quiesce_iq(sc, &ofld_rxq->iq);
 			quiesce_fl(sc, &ofld_rxq->fl);
 		}
 #endif
+		free(vi->rss, M_CXGBE);
+		free(vi->nm_rss, M_CXGBE);
 	}
 
-	t4_teardown_port_queues(pi);
-	pi->flags &= ~PORT_INIT_DONE;
+	t4_teardown_vi_queues(vi);
+	vi->flags &= ~VI_INIT_DONE;
 
 	return (0);
 }
 
 static void
-quiesce_eq(struct adapter *sc, struct sge_eq *eq)
+quiesce_txq(struct adapter *sc, struct sge_txq *txq)
 {
-	EQ_LOCK(eq);
-	eq->flags |= EQ_DOOMED;
+	struct sge_eq *eq = &txq->eq;
+	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
 
-	/*
-	 * Wait for the response to a credit flush if one's
-	 * pending.
-	 */
-	while (eq->flags & EQ_CRFLUSHED)
-		mtx_sleep(eq, &eq->eq_lock, 0, "crflush", 0);
-	EQ_UNLOCK(eq);
+	(void) sc;	/* unused */
 
-	callout_drain(&eq->tx_callout);	/* XXX: iffy */
-	pause("callout", 10);		/* Still iffy */
+#ifdef INVARIANTS
+	TXQ_LOCK(txq);
+	MPASS((eq->flags & EQ_ENABLED) == 0);
+	TXQ_UNLOCK(txq);
+#endif
 
-	taskqueue_drain(sc->tq[eq->tx_chan], &eq->tx_task);
+	/* Wait for the mp_ring to empty. */
+	while (!mp_ring_is_idle(txq->r)) {
+		mp_ring_check_drainage(txq->r, 0);
+		pause("rquiesce", 1);
+	}
+
+	/* Then wait for the hardware to finish. */
+	while (spg->cidx != htobe16(eq->pidx))
+		pause("equiesce", 1);
+
+	/* Finally, wait for the driver to reclaim all descriptors. */
+	while (eq->cidx != eq->pidx)
+		pause("dquiesce", 1);
 }
 
 static void
+quiesce_wrq(struct adapter *sc, struct sge_wrq *wrq)
+{
+
+	/* XXXTX */
+}
+
+static void
 quiesce_iq(struct adapter *sc, struct sge_iq *iq)
 {
 	(void) sc;	/* unused */
@@ -2744,9 +4433,9 @@
 	FL_LOCK(fl);
 	fl->flags |= FL_DOOMED;
 	FL_UNLOCK(fl);
+	callout_stop(&sc->sfl_callout);
 	mtx_unlock(&sc->sfl_lock);
 
-	callout_drain(&sc->sfl_callout);
 	KASSERT((fl->flags & FL_STARVING) == 0,
 	    ("%s: still starving", __func__));
 }
@@ -2792,404 +4481,249 @@
 }
 
 static void
-reg_block_dump(struct adapter *sc, uint8_t *buf, unsigned int start,
-    unsigned int end)
+get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
 {
-	uint32_t *p = (uint32_t *)(buf + start);
 
-	for ( ; start <= end; start += sizeof(uint32_t))
-		*p++ = t4_read_reg(sc, start);
+	regs->version = chip_id(sc) | chip_rev(sc) << 10;
+	t4_get_regs(sc, buf, regs->len);
 }
 
-static void
-t4_get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
+#define	A_PL_INDIR_CMD	0x1f8
+
+#define	S_PL_AUTOINC	31
+#define	M_PL_AUTOINC	0x1U
+#define	V_PL_AUTOINC(x)	((x) << S_PL_AUTOINC)
+#define	G_PL_AUTOINC(x)	(((x) >> S_PL_AUTOINC) & M_PL_AUTOINC)
+
+#define	S_PL_VFID	20
+#define	M_PL_VFID	0xffU
+#define	V_PL_VFID(x)	((x) << S_PL_VFID)
+#define	G_PL_VFID(x)	(((x) >> S_PL_VFID) & M_PL_VFID)
+
+#define	S_PL_ADDR	0
+#define	M_PL_ADDR	0xfffffU
+#define	V_PL_ADDR(x)	((x) << S_PL_ADDR)
+#define	G_PL_ADDR(x)	(((x) >> S_PL_ADDR) & M_PL_ADDR)
+
+#define	A_PL_INDIR_DATA	0x1fc
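
These S_/M_/V_/G_ quadruples follow the shift/mask convention used throughout
the driver: S_ is the field's bit offset, M_ its unshifted mask, V_(x) places
a value into the field, and G_(x) extracts it.  A quick sanity check with
made-up values:

	uint32_t cmd;

	cmd = V_PL_AUTOINC(1) | V_PL_VFID(7) | V_PL_ADDR(0x4800);
	MPASS(G_PL_AUTOINC(cmd) == 1);
	MPASS(G_PL_VFID(cmd) == 7);
	MPASS(G_PL_ADDR(cmd) == 0x4800);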
+
+static uint64_t
+read_vf_stat(struct adapter *sc, unsigned int viid, int reg)
 {
-	int i;
-	static const unsigned int reg_ranges[] = {
-		0x1008, 0x1108,
-		0x1180, 0x11b4,
-		0x11fc, 0x123c,
-		0x1300, 0x173c,
-		0x1800, 0x18fc,
-		0x3000, 0x30d8,
-		0x30e0, 0x5924,
-		0x5960, 0x59d4,
-		0x5a00, 0x5af8,
-		0x6000, 0x6098,
-		0x6100, 0x6150,
-		0x6200, 0x6208,
-		0x6240, 0x6248,
-		0x6280, 0x6338,
-		0x6370, 0x638c,
-		0x6400, 0x643c,
-		0x6500, 0x6524,
-		0x6a00, 0x6a38,
-		0x6a60, 0x6a78,
-		0x6b00, 0x6b84,
-		0x6bf0, 0x6c84,
-		0x6cf0, 0x6d84,
-		0x6df0, 0x6e84,
-		0x6ef0, 0x6f84,
-		0x6ff0, 0x7084,
-		0x70f0, 0x7184,
-		0x71f0, 0x7284,
-		0x72f0, 0x7384,
-		0x73f0, 0x7450,
-		0x7500, 0x7530,
-		0x7600, 0x761c,
-		0x7680, 0x76cc,
-		0x7700, 0x7798,
-		0x77c0, 0x77fc,
-		0x7900, 0x79fc,
-		0x7b00, 0x7c38,
-		0x7d00, 0x7efc,
-		0x8dc0, 0x8e1c,
-		0x8e30, 0x8e78,
-		0x8ea0, 0x8f6c,
-		0x8fc0, 0x9074,
-		0x90fc, 0x90fc,
-		0x9400, 0x9458,
-		0x9600, 0x96bc,
-		0x9800, 0x9808,
-		0x9820, 0x983c,
-		0x9850, 0x9864,
-		0x9c00, 0x9c6c,
-		0x9c80, 0x9cec,
-		0x9d00, 0x9d6c,
-		0x9d80, 0x9dec,
-		0x9e00, 0x9e6c,
-		0x9e80, 0x9eec,
-		0x9f00, 0x9f6c,
-		0x9f80, 0x9fec,
-		0xd004, 0xd03c,
-		0xdfc0, 0xdfe0,
-		0xe000, 0xea7c,
-		0xf000, 0x11190,
-		0x19040, 0x1906c,
-		0x19078, 0x19080,
-		0x1908c, 0x19124,
-		0x19150, 0x191b0,
-		0x191d0, 0x191e8,
-		0x19238, 0x1924c,
-		0x193f8, 0x19474,
-		0x19490, 0x194f8,
-		0x19800, 0x19f30,
-		0x1a000, 0x1a06c,
-		0x1a0b0, 0x1a120,
-		0x1a128, 0x1a138,
-		0x1a190, 0x1a1c4,
-		0x1a1fc, 0x1a1fc,
-		0x1e040, 0x1e04c,
-		0x1e284, 0x1e28c,
-		0x1e2c0, 0x1e2c0,
-		0x1e2e0, 0x1e2e0,
-		0x1e300, 0x1e384,
-		0x1e3c0, 0x1e3c8,
-		0x1e440, 0x1e44c,
-		0x1e684, 0x1e68c,
-		0x1e6c0, 0x1e6c0,
-		0x1e6e0, 0x1e6e0,
-		0x1e700, 0x1e784,
-		0x1e7c0, 0x1e7c8,
-		0x1e840, 0x1e84c,
-		0x1ea84, 0x1ea8c,
-		0x1eac0, 0x1eac0,
-		0x1eae0, 0x1eae0,
-		0x1eb00, 0x1eb84,
-		0x1ebc0, 0x1ebc8,
-		0x1ec40, 0x1ec4c,
-		0x1ee84, 0x1ee8c,
-		0x1eec0, 0x1eec0,
-		0x1eee0, 0x1eee0,
-		0x1ef00, 0x1ef84,
-		0x1efc0, 0x1efc8,
-		0x1f040, 0x1f04c,
-		0x1f284, 0x1f28c,
-		0x1f2c0, 0x1f2c0,
-		0x1f2e0, 0x1f2e0,
-		0x1f300, 0x1f384,
-		0x1f3c0, 0x1f3c8,
-		0x1f440, 0x1f44c,
-		0x1f684, 0x1f68c,
-		0x1f6c0, 0x1f6c0,
-		0x1f6e0, 0x1f6e0,
-		0x1f700, 0x1f784,
-		0x1f7c0, 0x1f7c8,
-		0x1f840, 0x1f84c,
-		0x1fa84, 0x1fa8c,
-		0x1fac0, 0x1fac0,
-		0x1fae0, 0x1fae0,
-		0x1fb00, 0x1fb84,
-		0x1fbc0, 0x1fbc8,
-		0x1fc40, 0x1fc4c,
-		0x1fe84, 0x1fe8c,
-		0x1fec0, 0x1fec0,
-		0x1fee0, 0x1fee0,
-		0x1ff00, 0x1ff84,
-		0x1ffc0, 0x1ffc8,
-		0x20000, 0x2002c,
-		0x20100, 0x2013c,
-		0x20190, 0x201c8,
-		0x20200, 0x20318,
-		0x20400, 0x20528,
-		0x20540, 0x20614,
-		0x21000, 0x21040,
-		0x2104c, 0x21060,
-		0x210c0, 0x210ec,
-		0x21200, 0x21268,
-		0x21270, 0x21284,
-		0x212fc, 0x21388,
-		0x21400, 0x21404,
-		0x21500, 0x21518,
-		0x2152c, 0x2153c,
-		0x21550, 0x21554,
-		0x21600, 0x21600,
-		0x21608, 0x21628,
-		0x21630, 0x2163c,
-		0x21700, 0x2171c,
-		0x21780, 0x2178c,
-		0x21800, 0x21c38,
-		0x21c80, 0x21d7c,
-		0x21e00, 0x21e04,
-		0x22000, 0x2202c,
-		0x22100, 0x2213c,
-		0x22190, 0x221c8,
-		0x22200, 0x22318,
-		0x22400, 0x22528,
-		0x22540, 0x22614,
-		0x23000, 0x23040,
-		0x2304c, 0x23060,
-		0x230c0, 0x230ec,
-		0x23200, 0x23268,
-		0x23270, 0x23284,
-		0x232fc, 0x23388,
-		0x23400, 0x23404,
-		0x23500, 0x23518,
-		0x2352c, 0x2353c,
-		0x23550, 0x23554,
-		0x23600, 0x23600,
-		0x23608, 0x23628,
-		0x23630, 0x2363c,
-		0x23700, 0x2371c,
-		0x23780, 0x2378c,
-		0x23800, 0x23c38,
-		0x23c80, 0x23d7c,
-		0x23e00, 0x23e04,
-		0x24000, 0x2402c,
-		0x24100, 0x2413c,
-		0x24190, 0x241c8,
-		0x24200, 0x24318,
-		0x24400, 0x24528,
-		0x24540, 0x24614,
-		0x25000, 0x25040,
-		0x2504c, 0x25060,
-		0x250c0, 0x250ec,
-		0x25200, 0x25268,
-		0x25270, 0x25284,
-		0x252fc, 0x25388,
-		0x25400, 0x25404,
-		0x25500, 0x25518,
-		0x2552c, 0x2553c,
-		0x25550, 0x25554,
-		0x25600, 0x25600,
-		0x25608, 0x25628,
-		0x25630, 0x2563c,
-		0x25700, 0x2571c,
-		0x25780, 0x2578c,
-		0x25800, 0x25c38,
-		0x25c80, 0x25d7c,
-		0x25e00, 0x25e04,
-		0x26000, 0x2602c,
-		0x26100, 0x2613c,
-		0x26190, 0x261c8,
-		0x26200, 0x26318,
-		0x26400, 0x26528,
-		0x26540, 0x26614,
-		0x27000, 0x27040,
-		0x2704c, 0x27060,
-		0x270c0, 0x270ec,
-		0x27200, 0x27268,
-		0x27270, 0x27284,
-		0x272fc, 0x27388,
-		0x27400, 0x27404,
-		0x27500, 0x27518,
-		0x2752c, 0x2753c,
-		0x27550, 0x27554,
-		0x27600, 0x27600,
-		0x27608, 0x27628,
-		0x27630, 0x2763c,
-		0x27700, 0x2771c,
-		0x27780, 0x2778c,
-		0x27800, 0x27c38,
-		0x27c80, 0x27d7c,
-		0x27e00, 0x27e04
-	};
+	u32 stats[2];
 
-	regs->version = 4 | (sc->params.rev << 10);
-	for (i = 0; i < nitems(reg_ranges); i += 2)
-		reg_block_dump(sc, buf, reg_ranges[i], reg_ranges[i + 1]);
+	mtx_assert(&sc->reg_lock, MA_OWNED);
+	if (sc->flags & IS_VF) {
+		stats[0] = t4_read_reg(sc, VF_MPS_REG(reg));
+		stats[1] = t4_read_reg(sc, VF_MPS_REG(reg + 4));
+	} else {
+		t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
+		    V_PL_VFID(G_FW_VIID_VIN(viid)) |
+		    V_PL_ADDR(VF_MPS_REG(reg)));
+		stats[0] = t4_read_reg(sc, A_PL_INDIR_DATA);
+		stats[1] = t4_read_reg(sc, A_PL_INDIR_DATA);
+	}
+	return (((uint64_t)stats[1]) << 32 | stats[0]);
 }
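
The two back-to-back reads of A_PL_INDIR_DATA return the _L and then the _H
half of the 64-bit counter; V_PL_AUTOINC(1) is what advances the indirect
address between them.  A hypothetical caller (the locking requirement comes
from the mtx_assert above):

	uint64_t v;

	mtx_lock(&sc->reg_lock);
	v = read_vf_stat(sc, vi->viid, A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L);
	mtx_unlock(&sc->reg_lock);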
 
 static void
-cxgbe_tick(void *arg)
+t4_get_vi_stats(struct adapter *sc, unsigned int viid,
+    struct fw_vi_stats_vf *stats)
 {
-	struct port_info *pi = arg;
-	struct ifnet *ifp = pi->ifp;
-	struct sge_txq *txq;
-	int i, drops;
-	struct port_stats *s = &pi->stats;
 
-	PORT_LOCK(pi);
-	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
-		PORT_UNLOCK(pi);
-		return;	/* without scheduling another callout */
-	}
+#define GET_STAT(name) \
+	read_vf_stat(sc, viid, A_MPS_VF_STAT_##name##_L)
 
-	t4_get_port_stats(pi->adapter, pi->tx_chan, s);
+	stats->tx_bcast_bytes    = GET_STAT(TX_VF_BCAST_BYTES);
+	stats->tx_bcast_frames   = GET_STAT(TX_VF_BCAST_FRAMES);
+	stats->tx_mcast_bytes    = GET_STAT(TX_VF_MCAST_BYTES);
+	stats->tx_mcast_frames   = GET_STAT(TX_VF_MCAST_FRAMES);
+	stats->tx_ucast_bytes    = GET_STAT(TX_VF_UCAST_BYTES);
+	stats->tx_ucast_frames   = GET_STAT(TX_VF_UCAST_FRAMES);
+	stats->tx_drop_frames    = GET_STAT(TX_VF_DROP_FRAMES);
+	stats->tx_offload_bytes  = GET_STAT(TX_VF_OFFLOAD_BYTES);
+	stats->tx_offload_frames = GET_STAT(TX_VF_OFFLOAD_FRAMES);
+	stats->rx_bcast_bytes    = GET_STAT(RX_VF_BCAST_BYTES);
+	stats->rx_bcast_frames   = GET_STAT(RX_VF_BCAST_FRAMES);
+	stats->rx_mcast_bytes    = GET_STAT(RX_VF_MCAST_BYTES);
+	stats->rx_mcast_frames   = GET_STAT(RX_VF_MCAST_FRAMES);
+	stats->rx_ucast_bytes    = GET_STAT(RX_VF_UCAST_BYTES);
+	stats->rx_ucast_frames   = GET_STAT(RX_VF_UCAST_FRAMES);
+	stats->rx_err_frames     = GET_STAT(RX_VF_ERR_FRAMES);
 
-	ifp->if_opackets = s->tx_frames - s->tx_pause;
-	ifp->if_ipackets = s->rx_frames - s->rx_pause;
-	ifp->if_obytes = s->tx_octets - s->tx_pause * 64;
-	ifp->if_ibytes = s->rx_octets - s->rx_pause * 64;
-	ifp->if_omcasts = s->tx_mcast_frames - s->tx_pause;
-	ifp->if_imcasts = s->rx_mcast_frames - s->rx_pause;
-	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
-	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
-	    s->rx_trunc3;
+#undef GET_STAT
+}
 
-	drops = s->tx_drop;
-	for_each_txq(pi, i, txq)
-		drops += txq->br->br_drops;
-	ifp->if_snd.ifq_drops = drops;
+static void
+t4_clr_vi_stats(struct adapter *sc, unsigned int viid)
+{
+	int reg;
 
-	ifp->if_oerrors = s->tx_error_frames;
-	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
-	    s->rx_fcs_err + s->rx_len_err;
-
-	callout_schedule(&pi->tick, hz);
-	PORT_UNLOCK(pi);
+	t4_write_reg(sc, A_PL_INDIR_CMD, V_PL_AUTOINC(1) |
+	    V_PL_VFID(G_FW_VIID_VIN(viid)) |
+	    V_PL_ADDR(VF_MPS_REG(A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L)));
+	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
+	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
+		t4_write_reg(sc, A_PL_INDIR_DATA, 0);
 }
 
 static void
-cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
+vi_refresh_stats(struct adapter *sc, struct vi_info *vi)
 {
-	struct ifnet *vlan;
+	struct ifnet *ifp = vi->ifp;
+	struct sge_txq *txq;
+	int i, drops;
+	struct fw_vi_stats_vf *s = &vi->stats;
+	struct timeval tv;
+	const struct timeval interval = {0, 250000};	/* 250ms */
 
-	if (arg != ifp || ifp->if_type != IFT_ETHER)
+	if (!(vi->flags & VI_INIT_DONE))
 		return;
 
-	vlan = VLAN_DEVAT(ifp, vid);
-	VLAN_SETCOOKIE(vlan, ifp);
-}
+	getmicrotime(&tv);
+	timevalsub(&tv, &interval);
+	if (timevalcmp(&tv, &vi->last_refreshed, <))
+		return;
 
-static int
-cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
-{
+	mtx_lock(&sc->reg_lock);
+	t4_get_vi_stats(sc, vi->viid, &vi->stats);
 
-#ifdef INVARIANTS
-	panic("%s: opcode 0x%02x on iq %p with payload %p",
-	    __func__, rss->opcode, iq, m);
-#else
-	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
-	    __func__, rss->opcode, iq, m);
-	m_freem(m);
-#endif
-	return (EDOOFUS);
+	ifp->if_ipackets = s->rx_bcast_frames + s->rx_mcast_frames +
+	    s->rx_ucast_frames;
+	ifp->if_ierrors = s->rx_err_frames;
+	ifp->if_opackets = s->tx_bcast_frames + s->tx_mcast_frames +
+	    s->tx_ucast_frames + s->tx_offload_frames;
+	ifp->if_oerrors = s->tx_drop_frames;
+	ifp->if_ibytes = s->rx_bcast_bytes + s->rx_mcast_bytes +
+	    s->rx_ucast_bytes;
+	ifp->if_obytes = s->tx_bcast_bytes + s->tx_mcast_bytes +
+	    s->tx_ucast_bytes + s->tx_offload_bytes;
+	ifp->if_imcasts = s->rx_mcast_frames;
+	ifp->if_omcasts = s->tx_mcast_frames;
+
+	drops = 0;
+	for_each_txq(vi, i, txq)
+		drops += counter_u64_fetch(txq->r->drops);
+	ifp->if_snd.ifq_drops = drops;
+
+	getmicrotime(&vi->last_refreshed);
+	mtx_unlock(&sc->reg_lock);
 }
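
vi_refresh_stats() above and cxgbe_refresh_stats() below share a small
throttling idiom: subtract the interval from the current time and bail out if
the last refresh is newer than the result.  Reduced to its essentials (field
names as used in this file):

	struct timeval tv;
	const struct timeval interval = {0, 250000};	/* 250ms */

	getmicrotime(&tv);
	timevalsub(&tv, &interval);		/* tv = now - 250ms */
	if (timevalcmp(&tv, &vi->last_refreshed, <))
		return;				/* refreshed recently, skip */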
 
-int
-t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
+static void
+cxgbe_refresh_stats(struct adapter *sc, struct port_info *pi)
 {
-	uintptr_t *loc, new;
+	struct vi_info *vi = &pi->vi[0];
+	struct ifnet *ifp = vi->ifp;
+	struct sge_txq *txq;
+	int i, drops;
+	struct port_stats *s = &pi->stats;
+	struct timeval tv;
+	const struct timeval interval = {0, 250000};	/* 250ms */
 
-	if (opcode >= nitems(sc->cpl_handler))
-		return (EINVAL);
+	getmicrotime(&tv);
+	timevalsub(&tv, &interval);
+	if (timevalcmp(&tv, &pi->last_refreshed, <))
+		return;
 
-	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
-	loc = (uintptr_t *) &sc->cpl_handler[opcode];
-	atomic_store_rel_ptr(loc, new);
+	t4_get_port_stats(sc, pi->tx_chan, s);
 
-	return (0);
-}
+	ifp->if_opackets = s->tx_frames;
+	ifp->if_ipackets = s->rx_frames;
+	ifp->if_obytes = s->tx_octets;
+	ifp->if_ibytes = s->rx_octets;
+	ifp->if_omcasts = s->tx_mcast_frames;
+	ifp->if_imcasts = s->rx_mcast_frames;
+	ifp->if_iqdrops = s->rx_ovflow0 + s->rx_ovflow1 + s->rx_ovflow2 +
+	    s->rx_ovflow3 + s->rx_trunc0 + s->rx_trunc1 + s->rx_trunc2 +
+	    s->rx_trunc3;
+	for (i = 0; i < sc->chip_params->nchan; i++) {
+		if (pi->rx_chan_map & (1 << i)) {
+			uint32_t v;
 
-static int
-an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
-{
+			mtx_lock(&sc->reg_lock);
+			t4_read_indirect(sc, A_TP_MIB_INDEX, A_TP_MIB_DATA, &v,
+			    1, A_TP_MIB_TNL_CNG_DROP_0 + i);
+			mtx_unlock(&sc->reg_lock);
+			ifp->if_iqdrops += v;
+		}
+	}
 
-#ifdef INVARIANTS
-	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
-#else
-	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
-	    __func__, iq, ctrl);
-#endif
-	return (EDOOFUS);
+	drops = s->tx_drop;
+	for_each_txq(vi, i, txq)
+		drops += counter_u64_fetch(txq->r->drops);
+	ifp->if_snd.ifq_drops = drops;
+
+	ifp->if_oerrors = s->tx_error_frames;
+	ifp->if_ierrors = s->rx_jabber + s->rx_runt + s->rx_too_long +
+	    s->rx_fcs_err + s->rx_len_err;
+
+	getmicrotime(&pi->last_refreshed);
 }
 
-int
-t4_register_an_handler(struct adapter *sc, an_handler_t h)
+static void
+cxgbe_tick(void *arg)
 {
-	uintptr_t *loc, new;
+	struct port_info *pi = arg;
+	struct adapter *sc = pi->adapter;
 
-	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
-	loc = (uintptr_t *) &sc->an_handler;
-	atomic_store_rel_ptr(loc, new);
+	PORT_LOCK_ASSERT_OWNED(pi);
+	cxgbe_refresh_stats(sc, pi);
 
-	return (0);
+	callout_schedule(&pi->tick, hz);
 }
 
-static int
-fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
+void
+vi_tick(void *arg)
 {
-	const struct cpl_fw6_msg *cpl =
-	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
+	struct vi_info *vi = arg;
+	struct adapter *sc = vi->pi->adapter;
 
-#ifdef INVARIANTS
-	panic("%s: fw_msg type %d", __func__, cpl->type);
-#else
-	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
-#endif
-	return (EDOOFUS);
+	vi_refresh_stats(sc, vi);
+
+	callout_schedule(&vi->tick, hz);
 }
 
-int
-t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
+static void
+cxgbe_vlan_config(void *arg, struct ifnet *ifp, uint16_t vid)
 {
-	uintptr_t *loc, new;
+	struct ifnet *vlan;
 
-	if (type >= nitems(sc->fw_msg_handler))
-		return (EINVAL);
+	if (arg != ifp || ifp->if_type != IFT_ETHER)
+		return;
 
-	/*
-	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
-	 * handler dispatch table.  Reject any attempt to install a handler for
-	 * this subtype.
-	 */
-	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
-		return (EINVAL);
+	vlan = VLAN_DEVAT(ifp, vid);
+	VLAN_SETCOOKIE(vlan, ifp);
+}
 
-	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
-	loc = (uintptr_t *) &sc->fw_msg_handler[type];
-	atomic_store_rel_ptr(loc, new);
+/*
+ * Should match fw_caps_config_<foo> enums in t4fw_interface.h
+ */
+static char *caps_decoder[] = {
+	"\20\001IPMI\002NCSI",				/* 0: NBM */
+	"\20\001PPP\002QFC\003DCBX",			/* 1: link */
+	"\20\001INGRESS\002EGRESS",			/* 2: switch */
+	"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL"	/* 3: NIC */
+	    "\006HASHFILTER\007ETHOFLD",
+	"\20\001TOE",					/* 4: TOE */
+	"\20\001RDDP\002RDMAC",				/* 5: RDMA */
+	"\20\001INITIATOR_PDU\002TARGET_PDU"		/* 6: iSCSI */
+	    "\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
+	    "\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
+	    "\007T10DIF"
+	    "\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
+	"\20\001LOOKASIDE\002TLSKEYS",			/* 7: Crypto */
+	"\20\001INITIATOR\002TARGET\003CTRL_OFLD"	/* 8: FCoE */
+		    "\004PO_INITIATOR\005PO_TARGET",
+};
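
These strings are in the kernel's %b bit-field format, which sysctl_bitfield
is expected to render: the first character is the numeric base for the raw
value (\20, i.e. 16) and each subsequent entry is a 1-origin bit number
followed by that bit's name.  For example:

	/* value 0x3 with "\20\001IPMI\002NCSI" renders roughly as
	 * "3<IPMI,NCSI>" */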
 
-	return (0);
-}
-
-static int
+void
 t4_sysctls(struct adapter *sc)
 {
 	struct sysctl_ctx_list *ctx;
 	struct sysctl_oid *oid;
 	struct sysctl_oid_list *children, *c0;
-	static char *caps[] = {
-		"\20\1PPP\2QFC\3DCBX",			/* caps[0] linkcaps */
-		"\20\1NIC\2VM\3IDS\4UM\5UM_ISGL",	/* caps[1] niccaps */
-		"\20\1TOE",				/* caps[2] toecaps */
-		"\20\1RDDP\2RDMAC",			/* caps[3] rdmacaps */
-		"\20\1INITIATOR_PDU\2TARGET_PDU"	/* caps[4] iscsicaps */
-		    "\3INITIATOR_CNXOFLD\4TARGET_CNXOFLD"
-		    "\5INITIATOR_SSNOFLD\6TARGET_SSNOFLD",
-		"\20\1INITIATOR\2TARGET\3CTRL_OFLD"	/* caps[5] fcoecaps */
-	};
+	static char *doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"};
 
 	ctx = device_get_sysctl_ctx(sc->dev);
 
@@ -3199,58 +4733,104 @@
 	oid = device_get_sysctl_tree(sc->dev);
 	c0 = children = SYSCTL_CHILDREN(oid);
 
-	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD,
-	    &sc->params.nports, 0, "# of ports");
+	sc->sc_do_rxcopy = 1;
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "do_rx_copy", CTLFLAG_RW,
+	    &sc->sc_do_rxcopy, 1, "Do RX copy of small frames");
 
-	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
-	    &sc->params.rev, 0, "chip hardware revision");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nports", CTLFLAG_RD, NULL,
+	    sc->params.nports, "# of ports");
 
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "doorbells",
+	    CTLTYPE_STRING | CTLFLAG_RD, doorbells, sc->doorbells,
+	    sysctl_bitfield, "A", "available doorbells");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD, NULL,
+	    sc->params.vpd.cclk, "core clock frequency (in KHz)");
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.timer_val,
+	    sizeof(sc->params.sge.timer_val), sysctl_int_array, "A",
+	    "interrupt holdoff timer values (us)");
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc->params.sge.counter_val,
+	    sizeof(sc->params.sge.counter_val), sysctl_int_array, "A",
+	    "interrupt holdoff packet counter values");
+
+	t4_sge_sysctls(sc, ctx, children);
+
+	sc->lro_timeout = 100;
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "lro_timeout", CTLFLAG_RW,
+	    &sc->lro_timeout, 0, "lro inactive-flush timeout (in us)");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dflags", CTLFLAG_RW,
+	    &sc->debug_flags, 0, "flags to enable runtime debugging");
+
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "tp_version",
+	    CTLFLAG_RD, sc->tp_version, 0, "TP microcode version");
+
 	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version",
-	    CTLFLAG_RD, &sc->fw_version, 0, "firmware version");
+	    CTLFLAG_RD, sc->fw_version, 0, "firmware version");
 
-	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
-	    CTLFLAG_RD, &sc->cfg_file, 0, "configuration file");
+	if (sc->flags & IS_VF)
+		return;
 
-	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD,
-	    &sc->cfcsum, 0, "config file checksum");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "hw_revision", CTLFLAG_RD,
+	    NULL, chip_rev(sc), "chip hardware revision");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkcaps",
-	    CTLTYPE_STRING | CTLFLAG_RD, caps[0], sc->linkcaps,
-	    sysctl_bitfield, "A", "available link capabilities");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "sn",
+	    CTLFLAG_RD, sc->params.vpd.sn, 0, "serial number");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
-	    CTLTYPE_STRING | CTLFLAG_RD, caps[1], sc->niccaps,
-	    sysctl_bitfield, "A", "available NIC capabilities");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "pn",
+	    CTLFLAG_RD, sc->params.vpd.pn, 0, "part number");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "toecaps",
-	    CTLTYPE_STRING | CTLFLAG_RD, caps[2], sc->toecaps,
-	    sysctl_bitfield, "A", "available TCP offload capabilities");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "ec",
+	    CTLFLAG_RD, sc->params.vpd.ec, 0, "engineering change");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdmacaps",
-	    CTLTYPE_STRING | CTLFLAG_RD, caps[3], sc->rdmacaps,
-	    sysctl_bitfield, "A", "available RDMA capabilities");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "na",
+	    CTLFLAG_RD, sc->params.vpd.na, 0, "network address");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "iscsicaps",
-	    CTLTYPE_STRING | CTLFLAG_RD, caps[4], sc->iscsicaps,
-	    sysctl_bitfield, "A", "available iSCSI capabilities");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "er_version", CTLFLAG_RD,
+	    sc->er_version, 0, "expansion ROM version");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoecaps",
-	    CTLTYPE_STRING | CTLFLAG_RD, caps[5], sc->fcoecaps,
-	    sysctl_bitfield, "A", "available FCoE capabilities");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "bs_version", CTLFLAG_RD,
+	    sc->bs_version, 0, "bootstrap firmware version");
 
-	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "core_clock", CTLFLAG_RD,
-	    &sc->params.vpd.cclk, 0, "core clock frequency (in KHz)");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "scfg_version", CTLFLAG_RD,
+	    NULL, sc->params.scfg_vers, "serial config version");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_timers",
-	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.timer_val,
-	    sizeof(sc->sge.timer_val), sysctl_int_array, "A",
-	    "interrupt holdoff timer values (us)");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "vpd_version", CTLFLAG_RD,
+	    NULL, sc->params.vpd_vers, "VPD version");
 
-	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pkt_counts",
-	    CTLTYPE_STRING | CTLFLAG_RD, sc->sge.counter_val,
-	    sizeof(sc->sge.counter_val), sysctl_int_array, "A",
-	    "interrupt holdoff packet counter values");
+	SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "cf",
+	    CTLFLAG_RD, sc->cfg_file, 0, "configuration file");
 
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cfcsum", CTLFLAG_RD, NULL,
+	    sc->cfcsum, "config file checksum");
+
+#define SYSCTL_CAP(name, n, text) \
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, #name, \
+	    CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[n], sc->name, \
+	    sysctl_bitfield, "A", "available " text " capabilities")
+
+	SYSCTL_CAP(nbmcaps, 0, "NBM");
+	SYSCTL_CAP(linkcaps, 1, "link");
+	SYSCTL_CAP(switchcaps, 2, "switch");
+	SYSCTL_CAP(niccaps, 3, "NIC");
+	SYSCTL_CAP(toecaps, 4, "TCP offload");
+	SYSCTL_CAP(rdmacaps, 5, "RDMA");
+	SYSCTL_CAP(iscsicaps, 6, "iSCSI");
+	SYSCTL_CAP(cryptocaps, 7, "crypto");
+	SYSCTL_CAP(fcoecaps, 8, "FCoE");
+#undef SYSCTL_CAP
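
For reference, one invocation expanded by hand (this follows mechanically from
the macro definition above):

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "niccaps",
	    CTLTYPE_STRING | CTLFLAG_RD, caps_decoder[3], sc->niccaps,
	    sysctl_bitfield, "A", "available NIC capabilities");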
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nfilters", CTLFLAG_RD,
+	    NULL, sc->tids.nftids, "number of filters");
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature", CTLTYPE_INT |
+	    CTLFLAG_RD, sc, 0, sysctl_temperature, "I",
+	    "chip temperature (in Celsius)");
+
 #ifdef SBUF_DRAIN
 	/*
 	 * dev.t4nex.X.misc.  Marked CTLFLAG_SKIP to avoid information overload.
@@ -3290,8 +4870,13 @@
 
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
-	    sysctl_cim_la, "A", "CIM logic analyzer");
+	    chip_id(sc) <= CHELSIO_T5 ? sysctl_cim_la : sysctl_cim_la_t6,
+	    "A", "CIM logic analyzer");
 
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+	    sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
+
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0 + CIM_NUM_IBQ,
 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
@@ -3316,6 +4901,20 @@
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 5 + CIM_NUM_IBQ,
 	    sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
 
+	if (chip_id(sc) > CHELSIO_T4) {
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, 6 + CIM_NUM_IBQ,
+		    sysctl_cim_ibq_obq, "A", "CIM OBQ 6 (SGE0-RX)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, 7 + CIM_NUM_IBQ,
+		    sysctl_cim_ibq_obq, "A", "CIM OBQ 7 (SGE1-RX)");
+	}
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+	    sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
+
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
 	    sysctl_cim_qcfg, "A", "CIM queue configuration");
@@ -3326,7 +4925,7 @@
 
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ddp_stats",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
-	    sysctl_ddp_stats, "A", "DDP statistics");
+	    sysctl_ddp_stats, "A", "non-TCP DDP statistics");
 
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
@@ -3352,6 +4951,11 @@
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
 	    sysctl_meminfo, "A", "memory regions");
 
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+	    chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
+	    "A", "MPS TCAM entries");
+
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
 	    sysctl_path_mtus, "A", "path MTUs");
@@ -3376,9 +4980,27 @@
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
 	    sysctl_tp_err_stats, "A", "TP error statistics");
 
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la_mask",
+	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_tp_la_mask, "I",
+	    "TP logic analyzer event capture mask");
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tp_la",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+	    sysctl_tp_la, "A", "TP logic analyzer");
+
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_rate",
 	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
 	    sysctl_tx_rate, "A", "Tx rate");
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "ulprx_la",
+	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+	    sysctl_ulprx_la, "A", "ULPRX logic analyzer");
+
+	if (chip_id(sc) >= CHELSIO_T5) {
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "wcwr_stats",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
+		    sysctl_wcwr_stats, "A", "write combined work requests");
+	}
 #endif
 
 #ifdef TCP_OFFLOAD
@@ -3406,78 +5028,227 @@
 		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2));
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ddp_thres", CTLFLAG_RW,
 		    &sc->tt.ddp_thres, 0, "DDP threshold");
+
+		sc->tt.rx_coalesce = 1;
+		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
+		    CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
+
+		sc->tt.tx_align = 1;
+		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
+		    CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
+		    "TP timer tick (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timestamp_tick",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, 1, sysctl_tp_tick, "A",
+		    "TCP timestamp tick (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_tick",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, 2, sysctl_tp_tick, "A",
+		    "DACK tick (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dack_timer",
+		    CTLTYPE_UINT | CTLFLAG_RD, sc, 0, sysctl_tp_dack_timer,
+		    "IU", "DACK timer (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_min",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MIN,
+		    sysctl_tp_timer, "LU", "Retransmit min (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rexmt_max",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_RXT_MAX,
+		    sysctl_tp_timer, "LU", "Retransmit max (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_min",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MIN,
+		    sysctl_tp_timer, "LU", "Persist timer min (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "persist_max",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_PERS_MAX,
+		    sysctl_tp_timer, "LU", "Persist timer max (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_idle",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_IDLE,
+		    sysctl_tp_timer, "LU", "Keepalive idle timer (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "keepalive_intvl",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_KEEP_INTVL,
+		    sysctl_tp_timer, "LU", "Keepalive interval (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "initial_srtt",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_INIT_SRTT,
+		    sysctl_tp_timer, "LU", "Initial SRTT (us)");
+
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "finwait2_timer",
+		    CTLTYPE_ULONG | CTLFLAG_RD, sc, A_TP_FINWAIT2_TIMER,
+		    sysctl_tp_timer, "LU", "FINWAIT2 timer (us)");
 	}
 #endif
-
-
-	return (0);
 }
 
-static int
-cxgbe_sysctls(struct port_info *pi)
+void
+vi_sysctls(struct vi_info *vi)
 {
 	struct sysctl_ctx_list *ctx;
 	struct sysctl_oid *oid;
 	struct sysctl_oid_list *children;
 
-	ctx = device_get_sysctl_ctx(pi->dev);
+	ctx = device_get_sysctl_ctx(vi->dev);
 
 	/*
-	 * dev.cxgbe.X.
+	 * dev.v?(cxgbe|cxl).X.
 	 */
-	oid = device_get_sysctl_tree(pi->dev);
+	oid = device_get_sysctl_tree(vi->dev);
 	children = SYSCTL_CHILDREN(oid);
 
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "viid", CTLFLAG_RD, NULL,
+	    vi->viid, "VI identifier");
 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nrxq", CTLFLAG_RD,
-	    &pi->nrxq, 0, "# of rx queues");
+	    &vi->nrxq, 0, "# of rx queues");
 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "ntxq", CTLFLAG_RD,
-	    &pi->ntxq, 0, "# of tx queues");
+	    &vi->ntxq, 0, "# of tx queues");
 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_rxq", CTLFLAG_RD,
-	    &pi->first_rxq, 0, "index of first rx queue");
+	    &vi->first_rxq, 0, "index of first rx queue");
 	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_txq", CTLFLAG_RD,
-	    &pi->first_txq, 0, "index of first tx queue");
+	    &vi->first_txq, 0, "index of first tx queue");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rss_size", CTLFLAG_RD, NULL,
+	    vi->rss_size, "size of RSS indirection table");
 
+	if (IS_MAIN_VI(vi)) {
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rsrv_noflowq",
+		    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_noflowq, "IU",
+		    "Reserve queue 0 for non-flowid packets");
+	}
+
 #ifdef TCP_OFFLOAD
-	if (is_offload(pi->adapter)) {
+	if (vi->nofldrxq != 0) {
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldrxq", CTLFLAG_RD,
-		    &pi->nofldrxq, 0,
+		    &vi->nofldrxq, 0,
 		    "# of rx queues for offloaded TCP connections");
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nofldtxq", CTLFLAG_RD,
-		    &pi->nofldtxq, 0,
+		    &vi->nofldtxq, 0,
 		    "# of tx queues for offloaded TCP connections");
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_rxq",
-		    CTLFLAG_RD, &pi->first_ofld_rxq, 0,
+		    CTLFLAG_RD, &vi->first_ofld_rxq, 0,
 		    "index of first TOE rx queue");
 		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_ofld_txq",
-		    CTLFLAG_RD, &pi->first_ofld_txq, 0,
+		    CTLFLAG_RD, &vi->first_ofld_txq, 0,
 		    "index of first TOE tx queue");
 	}
 #endif
+#ifdef DEV_NETMAP
+	if (vi->nnmrxq != 0) {
+		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmrxq", CTLFLAG_RD,
+		    &vi->nnmrxq, 0, "# of netmap rx queues");
+		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "nnmtxq", CTLFLAG_RD,
+		    &vi->nnmtxq, 0, "# of netmap tx queues");
+		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_rxq",
+		    CTLFLAG_RD, &vi->first_nm_rxq, 0,
+		    "index of first netmap rx queue");
+		SYSCTL_ADD_INT(ctx, children, OID_AUTO, "first_nm_txq",
+		    CTLFLAG_RD, &vi->first_nm_txq, 0,
+		    "index of first netmap tx queue");
+	}
+#endif
 
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_tmr_idx",
-	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_tmr_idx, "I",
+	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_tmr_idx, "I",
 	    "holdoff timer index");
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "holdoff_pktc_idx",
-	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_holdoff_pktc_idx, "I",
+	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_holdoff_pktc_idx, "I",
 	    "holdoff packet counter index");
 
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_rxq",
-	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_rxq, "I",
+	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_rxq, "I",
 	    "rx queue size");
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "qsize_txq",
-	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_qsize_txq, "I",
+	    CTLTYPE_INT | CTLFLAG_RW, vi, 0, sysctl_qsize_txq, "I",
 	    "tx queue size");
+}
 
+static void
+cxgbe_sysctls(struct port_info *pi)
+{
+	struct sysctl_ctx_list *ctx;
+	struct sysctl_oid *oid;
+	struct sysctl_oid_list *children, *children2;
+	struct adapter *sc = pi->adapter;
+	int i;
+	char name[16];
+
+	ctx = device_get_sysctl_ctx(pi->dev);
+
 	/*
+	 * dev.cxgbe.X.
+	 */
+	oid = device_get_sysctl_tree(pi->dev);
+	children = SYSCTL_CHILDREN(oid);
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "linkdnrc", CTLTYPE_STRING |
+	   CTLFLAG_RD, pi, 0, sysctl_linkdnrc, "A", "reason why link is down");
+	if (pi->port_type == FW_PORT_TYPE_BT_XAUI) {
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "temperature",
+		    CTLTYPE_INT | CTLFLAG_RD, pi, 0, sysctl_btphy, "I",
+		    "PHY temperature (in Celsius)");
+		SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fw_version",
+		    CTLTYPE_INT | CTLFLAG_RD, pi, 1, sysctl_btphy, "I",
+		    "PHY firmware version");
+	}
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pause_settings",
+	    CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_pause_settings, "A",
+	    "PAUSE settings (bit 0 = rx_pause, bit 1 = tx_pause)");
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fec",
+	    CTLTYPE_STRING | CTLFLAG_RW, pi, 0, sysctl_fec, "A",
+	    "Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "autoneg",
+	    CTLTYPE_INT | CTLFLAG_RW, pi, 0, sysctl_autoneg, "I",
+	    "autonegotiation (-1 = not supported)");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "max_speed", CTLFLAG_RD, NULL,
+	    port_top_speed(pi), "max speed (in Gbps)");
+
+	if (sc->flags & IS_VF)
+		return;
+
+	/*
+	 * dev.(cxgbe|cxl).X.tc.
+	 */
+	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "tc", CTLFLAG_RD, NULL,
+	    "Tx scheduler traffic classes (cl_rl)");
+	for (i = 0; i < sc->chip_params->nsched_cls; i++) {
+		struct tx_cl_rl_params *tc = &pi->sched_params->cl_rl[i];
+
+		snprintf(name, sizeof(name), "%d", i);
+		children2 = SYSCTL_CHILDREN(SYSCTL_ADD_NODE(ctx,
+		    SYSCTL_CHILDREN(oid), OID_AUTO, name, CTLFLAG_RD, NULL,
+		    "traffic class"));
+		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "flags", CTLFLAG_RD,
+		    &tc->flags, 0, "flags");
+		SYSCTL_ADD_UINT(ctx, children2, OID_AUTO, "refcount",
+		    CTLFLAG_RD, &tc->refcount, 0, "references to this class");
+#ifdef SBUF_DRAIN
+		SYSCTL_ADD_PROC(ctx, children2, OID_AUTO, "params",
+		    CTLTYPE_STRING | CTLFLAG_RD, sc, (pi->port_id << 16) | i,
+		    sysctl_tc_params, "A", "traffic class parameters");
+#endif
+	}
+
+	/*
 	 * dev.cxgbe.X.stats.
 	 */
 	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
 	    NULL, "port statistics");
 	children = SYSCTL_CHILDREN(oid);
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "tx_parse_error", CTLFLAG_RD,
+	    &pi->tx_parse_error, 0,
+	    "# of tx packets with invalid length or # of segments");
 
 #define SYSCTL_ADD_T4_REG64(pi, name, desc, reg) \
 	SYSCTL_ADD_OID(ctx, children, OID_AUTO, name, \
-	    CTLTYPE_U64 | CTLFLAG_RD, pi->adapter, reg, \
+	    CTLTYPE_U64 | CTLFLAG_RD, sc, reg, \
 	    sysctl_handle_t4_reg64, "QU", desc)
 
 	SYSCTL_ADD_T4_REG64(pi, "tx_octets", "# of octets in good frames",
@@ -3623,20 +5394,21 @@
 	    "# of buffer-group 3 truncated packets");
 
 #undef SYSCTL_ADD_T4_PORTSTAT
-
-	return (0);
 }
 
 static int
 sysctl_int_array(SYSCTL_HANDLER_ARGS)
 {
-	int rc, *i;
+	int rc, *i, space = 0;
 	struct sbuf sb;
 
 	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
-	for (i = arg1; arg2; arg2 -= sizeof(int), i++)
-		sbuf_printf(&sb, "%d ", *i);
-	sbuf_trim(&sb);
+	for (i = arg1; arg2; arg2 -= sizeof(int), i++) {
+		if (space)
+			sbuf_printf(&sb, " ");
+		sbuf_printf(&sb, "%d", *i);
+		space = 1;
+	}
 	sbuf_finish(&sb);
 	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
 	sbuf_delete(&sb);
@@ -3665,15 +5437,62 @@
 }
 
 static int
-sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
+sysctl_btphy(SYSCTL_HANDLER_ARGS)
 {
 	struct port_info *pi = arg1;
+	int op = arg2;
 	struct adapter *sc = pi->adapter;
+	u_int v;
+	int rc;
+
+	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
+	if (rc)
+		return (rc);
+	/* XXX: magic numbers */
+	rc = -t4_mdio_rd(sc, sc->mbox, pi->mdio_addr, 0x1e, op ? 0x20 : 0xc820,
+	    &v);
+	end_synchronized_op(sc, 0);
+	if (rc)
+		return (rc);
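+	/*
+	 * Op 0 is the temperature read; the register value appears to be
+	 * scaled by 256 (see the XXX above about magic numbers).
+	 */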
+	if (op == 0)
+		v /= 256;
+
+	rc = sysctl_handle_int(oidp, &v, 0, req);
+	return (rc);
+}
+
+static int
+sysctl_noflowq(SYSCTL_HANDLER_ARGS)
+{
+	struct vi_info *vi = arg1;
+	int rc, val;
+
+	val = vi->rsrv_noflowq;
+	rc = sysctl_handle_int(oidp, &val, 0, req);
+	if (rc != 0 || req->newptr == NULL)
+		return (rc);
+
+	if ((val >= 1) && (vi->ntxq > 1))
+		vi->rsrv_noflowq = 1;
+	else
+		vi->rsrv_noflowq = 0;
+
+	return (rc);
+}
+
+static int
+sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_ARGS)
+{
+	struct vi_info *vi = arg1;
+	struct adapter *sc = vi->pi->adapter;
 	int idx, rc, i;
 	struct sge_rxq *rxq;
+#ifdef TCP_OFFLOAD
+	struct sge_ofld_rxq *ofld_rxq;
+#endif
 	uint8_t v;
 
-	idx = pi->tmr_idx;
+	idx = vi->tmr_idx;
 
 	rc = sysctl_handle_int(oidp, &idx, 0, req);
 	if (rc != 0 || req->newptr == NULL)
@@ -3682,13 +5501,13 @@
 	if (idx < 0 || idx >= SGE_NTIMERS)
 		return (EINVAL);
 
-	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 	    "t4tmr");
 	if (rc)
 		return (rc);
 
-	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
-	for_each_rxq(pi, i, rxq) {
+	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(vi->pktc_idx != -1);
+	for_each_rxq(vi, i, rxq) {
 #ifdef atomic_store_rel_8
 		atomic_store_rel_8(&rxq->iq.intr_params, v);
 #else
@@ -3695,7 +5514,16 @@
 		rxq->iq.intr_params = v;
 #endif
 	}
-	pi->tmr_idx = idx;
+#ifdef TCP_OFFLOAD
+	for_each_ofld_rxq(vi, i, ofld_rxq) {
+#ifdef atomic_store_rel_8
+		atomic_store_rel_8(&ofld_rxq->iq.intr_params, v);
+#else
+		ofld_rxq->iq.intr_params = v;
+#endif
+	}
+#endif
+	vi->tmr_idx = idx;
 
 	end_synchronized_op(sc, LOCK_HELD);
 	return (0);
@@ -3704,11 +5532,11 @@
 static int
 sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_ARGS)
 {
-	struct port_info *pi = arg1;
-	struct adapter *sc = pi->adapter;
+	struct vi_info *vi = arg1;
+	struct adapter *sc = vi->pi->adapter;
 	int idx, rc;
 
-	idx = pi->pktc_idx;
+	idx = vi->pktc_idx;
 
 	rc = sysctl_handle_int(oidp, &idx, 0, req);
 	if (rc != 0 || req->newptr == NULL)
@@ -3717,15 +5545,15 @@
 	if (idx < -1 || idx >= SGE_NCOUNTERS)
 		return (EINVAL);
 
-	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 	    "t4pktc");
 	if (rc)
 		return (rc);
 
-	if (pi->flags & PORT_INIT_DONE)
+	if (vi->flags & VI_INIT_DONE)
 		rc = EBUSY; /* cannot be changed once the queues are created */
 	else
-		pi->pktc_idx = idx;
+		vi->pktc_idx = idx;
 
 	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
@@ -3734,11 +5562,11 @@
 static int
 sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
 {
-	struct port_info *pi = arg1;
-	struct adapter *sc = pi->adapter;
+	struct vi_info *vi = arg1;
+	struct adapter *sc = vi->pi->adapter;
 	int qsize, rc;
 
-	qsize = pi->qsize_rxq;
+	qsize = vi->qsize_rxq;
 
 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
 	if (rc != 0 || req->newptr == NULL)
@@ -3747,15 +5575,15 @@
 	if (qsize < 128 || (qsize & 7))
 		return (EINVAL);
 
-	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 	    "t4rxqs");
 	if (rc)
 		return (rc);
 
-	if (pi->flags & PORT_INIT_DONE)
+	if (vi->flags & VI_INIT_DONE)
 		rc = EBUSY; /* cannot be changed once the queues are created */
 	else
-		pi->qsize_rxq = qsize;
+		vi->qsize_rxq = qsize;
 
 	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
@@ -3764,29 +5592,28 @@
 static int
 sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
 {
-	struct port_info *pi = arg1;
-	struct adapter *sc = pi->adapter;
+	struct vi_info *vi = arg1;
+	struct adapter *sc = vi->pi->adapter;
 	int qsize, rc;
 
-	qsize = pi->qsize_txq;
+	qsize = vi->qsize_txq;
 
 	rc = sysctl_handle_int(oidp, &qsize, 0, req);
 	if (rc != 0 || req->newptr == NULL)
 		return (rc);
 
-	/* bufring size must be powerof2 */
-	if (qsize < 128 || !powerof2(qsize))
+	if (qsize < 128 || qsize > 65536)
 		return (EINVAL);
 
-	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	rc = begin_synchronized_op(sc, vi, HOLD_LOCK | SLEEP_OK | INTR_OK,
 	    "t4txqs");
 	if (rc)
 		return (rc);
 
-	if (pi->flags & PORT_INIT_DONE)
+	if (vi->flags & VI_INIT_DONE)
 		rc = EBUSY; /* cannot be changed once the queues are created */
 	else
-		pi->qsize_txq = qsize;
+		vi->qsize_txq = qsize;
 
 	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
@@ -3793,6 +5620,160 @@
 }
 
 static int
+sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
+{
+	struct port_info *pi = arg1;
+	struct adapter *sc = pi->adapter;
+	struct link_config *lc = &pi->link_cfg;
+	int rc;
+
+	if (req->newptr == NULL) {
+		struct sbuf *sb;
+		static char *bits = "\20\1PAUSE_RX\2PAUSE_TX";
+
+		rc = sysctl_wire_old_buffer(req, 0);
+		if (rc != 0)
+			return (rc);
+
+		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+		if (sb == NULL)
+			return (ENOMEM);
+
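+		/*
+		 * %b output: the leading \20 selects hex and each \<n>NAME
+		 * entry labels (1-based) bit n, i.e. PAUSE_RX is bit 1 and
+		 * PAUSE_TX is bit 2.
+		 */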
+		sbuf_printf(sb, "%b", lc->fc & (PAUSE_TX | PAUSE_RX), bits);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+	} else {
+		char s[2];
+		int n;
+
+		s[0] = '0' + (lc->requested_fc & (PAUSE_TX | PAUSE_RX));
+		s[1] = 0;
+
+		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
+		if (rc != 0)
+			return (rc);
+
+		if (s[1] != 0)
+			return (EINVAL);
+		if (s[0] < '0' || s[0] > '9')
+			return (EINVAL);	/* not a number */
+		n = s[0] - '0';
+		if (n & ~(PAUSE_TX | PAUSE_RX))
+			return (EINVAL);	/* some other bit is set too */
+
+		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
+		    "t4PAUSE");
+		if (rc)
+			return (rc);
+		if ((lc->requested_fc & (PAUSE_TX | PAUSE_RX)) != n) {
+			lc->requested_fc &= ~(PAUSE_TX | PAUSE_RX);
+			lc->requested_fc |= n;
+			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+		}
+		end_synchronized_op(sc, 0);
+	}
+
+	return (rc);
+}
+
+static int
+sysctl_fec(SYSCTL_HANDLER_ARGS)
+{
+	struct port_info *pi = arg1;
+	struct adapter *sc = pi->adapter;
+	struct link_config *lc = &pi->link_cfg;
+	int rc;
+
+	if (req->newptr == NULL) {
+		struct sbuf *sb;
+		static char *bits = "\20\1RS\2BASER_RS\3RESERVED";
+
+		rc = sysctl_wire_old_buffer(req, 0);
+		if (rc != 0)
+			return (rc);
+
+		sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+		if (sb == NULL)
+			return (ENOMEM);
+
+		sbuf_printf(sb, "%b", lc->fec & M_FW_PORT_CAP_FEC, bits);
+		rc = sbuf_finish(sb);
+		sbuf_delete(sb);
+	} else {
+		char s[2];
+		int n;
+
+		s[0] = '0' + (lc->requested_fec & M_FW_PORT_CAP_FEC);
+		s[1] = 0;
+
+		rc = sysctl_handle_string(oidp, s, sizeof(s), req);
+		if (rc != 0)
+			return (rc);
+
+		if (s[1] != 0)
+			return (EINVAL);
+		if (s[0] < '0' || s[0] > '9')
+			return (EINVAL);	/* not a number */
+		n = s[0] - '0';
+		if (n & ~M_FW_PORT_CAP_FEC)
+			return (EINVAL);	/* some other bit is set too */
+
+		rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
+		    "t4fec");
+		if (rc)
+			return (rc);
+		if ((lc->requested_fec & M_FW_PORT_CAP_FEC) != n) {
+			lc->requested_fec = n &
+			    G_FW_PORT_CAP_FEC(lc->supported);
+			rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+		}
+		end_synchronized_op(sc, 0);
+	}
+
+	return (rc);
+}
+
+static int
+sysctl_autoneg(SYSCTL_HANDLER_ARGS)
+{
+	struct port_info *pi = arg1;
+	struct adapter *sc = pi->adapter;
+	struct link_config *lc = &pi->link_cfg;
+	int rc, val, old;
+
+	if (lc->supported & FW_PORT_CAP_ANEG)
+		val = lc->autoneg == AUTONEG_ENABLE ? 1 : 0;
+	else
+		val = -1;
+	rc = sysctl_handle_int(oidp, &val, 0, req);
+	if (rc != 0 || req->newptr == NULL)
+		return (rc);
+	if ((lc->supported & FW_PORT_CAP_ANEG) == 0)
+		return (ENOTSUP);
+
+	if (val == 0)
+		val = AUTONEG_DISABLE;
+	else if (val == 1)
+		val = AUTONEG_ENABLE;
+	else
+		return (EINVAL);
+	if (lc->autoneg == val)
+		return (0);	/* no change */
+
+	rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK,
+	    "t4aneg");
+	if (rc)
+		return (rc);
+	old = lc->autoneg;
+	lc->autoneg = val;
+	rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
+	if (rc != 0)
+		lc->autoneg = old;
+	end_synchronized_op(sc, 0);
+	return (rc);
+}
+
+static int
 sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
 {
 	struct adapter *sc = arg1;
@@ -3804,6 +5785,31 @@
 	return (sysctl_handle_64(oidp, &val, 0, req));
 }
 
+static int
+sysctl_temperature(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	int rc, t;
+	uint32_t param, val;
+
+	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
+	if (rc)
+		return (rc);
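+	/* The temperature is queried as a DEV_DIAG firmware parameter. */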
+	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
+	    V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_DIAG_TMP);
+	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+	end_synchronized_op(sc, 0);
+	if (rc)
+		return (rc);
+
+	/* unknown is returned as 0 but we display -1 in that case */
+	t = val == 0 ? -1 : val;
+
+	rc = sysctl_handle_int(oidp, &t, 0, req);
+	return (rc);
+}
+
 #ifdef SBUF_DRAIN
 static int
 sysctl_cctrl(SYSCTL_HANDLER_ARGS)
@@ -3843,9 +5849,10 @@
 	return (rc);
 }
 
-static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ] = {
+static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
 	"TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI",	/* ibq's */
-	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI"	/* obq's */
+	"ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI",	/* obq's */
+	"SGE0-RX", "SGE1-RX"	/* additional obq's (T5 onwards) */
 };
 
 static int
@@ -3856,8 +5863,9 @@
 	int rc, i, n, qid = arg2;
 	uint32_t *buf, *p;
 	char *qtype;
+	u_int cim_num_obq = sc->chip_params->cim_num_obq;
 
-	KASSERT(qid >= 0 && qid < nitems(qname),
+	KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
 	    ("%s: bad qid %d\n", __func__, qid));
 
 	if (qid < CIM_NUM_IBQ) {
@@ -3870,7 +5878,7 @@
 		/* outbound queue */
 		qtype = "OBQ";
 		qid -= CIM_NUM_IBQ;
-		n = 4 * 6 * CIM_OBQ_SIZE;
+		n = 4 * cim_num_obq * CIM_OBQ_SIZE;
 		buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
 		rc = t4_read_cim_obq(sc, qid, buf, n);
 	}
@@ -3885,7 +5893,7 @@
 	if (rc != 0)
 		goto done;
 
-	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
 	if (sb == NULL) {
 		rc = ENOMEM;
 		goto done;
@@ -3912,6 +5920,8 @@
 	uint32_t *buf, *p;
 	int rc;
 
+	MPASS(chip_id(sc) <= CHELSIO_T5);
+
 	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
 	if (rc != 0)
 		return (rc);
@@ -3935,10 +5945,7 @@
 	    cfg & F_UPDBGLACAPTPCONLY ? "" :
 	    "     LS0Stat  LS0Addr             LS0Data");
 
-	KASSERT((sc->params.cim_la_size & 7) == 0,
-	    ("%s: p will walk off the end of buf", __func__));
-
-	for (p = buf; p < &buf[sc->params.cim_la_size]; p += 8) {
+	for (p = buf; p <= &buf[sc->params.cim_la_size - 8]; p += 8) {
 		if (cfg & F_UPDBGLACAPTPCONLY) {
 			sbuf_printf(sb, "\n  %02x   %08x %08x", p[5] & 0xff,
 			    p[6], p[7]);
@@ -3966,21 +5973,180 @@
 }
 
 static int
+sysctl_cim_la_t6(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	u_int cfg;
+	struct sbuf *sb;
+	uint32_t *buf, *p;
+	int rc;
+
+	MPASS(chip_id(sc) > CHELSIO_T5);
+
+	rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+	if (rc != 0)
+		return (rc);
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	buf = malloc(sc->params.cim_la_size * sizeof(uint32_t), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
+	rc = -t4_cim_read_la(sc, buf, NULL);
+	if (rc != 0)
+		goto done;
+
+	sbuf_printf(sb, "Status   Inst    Data      PC%s",
+	    cfg & F_UPDBGLACAPTPCONLY ? "" :
+	    "     LS0Stat  LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data");
+
+	for (p = buf; p <= &buf[sc->params.cim_la_size - 10]; p += 10) {
+		if (cfg & F_UPDBGLACAPTPCONLY) {
+			sbuf_printf(sb, "\n  %02x   %08x %08x %08x",
+			    p[3] & 0xff, p[2], p[1], p[0]);
+			sbuf_printf(sb, "\n  %02x   %02x%06x %02x%06x %02x%06x",
+			    (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+			    p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x",
+			    (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+			    p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+			    p[6] >> 16);
+		} else {
+			sbuf_printf(sb, "\n  %02x   %04x%04x %04x%04x %04x%04x "
+			    "%08x %08x %08x %08x %08x %08x",
+			    (p[9] >> 16) & 0xff,
+			    p[9] & 0xffff, p[8] >> 16,
+			    p[8] & 0xffff, p[7] >> 16,
+			    p[7] & 0xffff, p[6] >> 16,
+			    p[2], p[1], p[0], p[5], p[4], p[3]);
+		}
+	}
+
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+done:
+	free(buf, M_CXGBE);
+	return (rc);
+}
+
+static int
+sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	u_int i;
+	struct sbuf *sb;
+	uint32_t *buf, *p;
+	int rc;
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
+	t4_cim_read_ma_la(sc, buf, buf + 5 * CIM_MALA_SIZE);
+	p = buf;
+
+	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
+		sbuf_printf(sb, "\n%02x%08x%08x%08x%08x", p[4], p[3], p[2],
+		    p[1], p[0]);
+	}
+
+	sbuf_printf(sb, "\n\nCnt ID Tag UE       Data       RDY VLD");
+	for (i = 0; i < CIM_MALA_SIZE; i++, p += 5) {
+		sbuf_printf(sb, "\n%3u %2u  %x   %u %08x%08x  %u   %u",
+		    (p[2] >> 10) & 0xff, (p[2] >> 7) & 7,
+		    (p[2] >> 3) & 0xf, (p[2] >> 2) & 1,
+		    (p[1] >> 2) | ((p[2] & 3) << 30),
+		    (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1,
+		    p[0] & 1);
+	}
+
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+	free(buf, M_CXGBE);
+	return (rc);
+}
+
+static int
+sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	u_int i;
+	struct sbuf *sb;
+	uint32_t *buf, *p;
+	int rc;
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
+	t4_cim_read_pif_la(sc, buf, buf + 6 * CIM_PIFLA_SIZE, NULL, NULL);
+	p = buf;
+
+	sbuf_printf(sb, "Cntl ID DataBE   Addr                 Data");
+	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
+		sbuf_printf(sb, "\n %02x  %02x  %04x  %08x %08x%08x%08x%08x",
+		    (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, p[5] & 0xffff,
+		    p[4], p[3], p[2], p[1], p[0]);
+	}
+
+	sbuf_printf(sb, "\n\nCntl ID               Data");
+	for (i = 0; i < CIM_PIFLA_SIZE; i++, p += 6) {
+		sbuf_printf(sb, "\n %02x  %02x %08x%08x%08x%08x",
+		    (p[4] >> 6) & 0xff, p[4] & 0x3f, p[3], p[2], p[1], p[0]);
+	}
+
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+	free(buf, M_CXGBE);
+	return (rc);
+}
+
+static int
 sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
 {
 	struct adapter *sc = arg1;
 	struct sbuf *sb;
 	int rc, i;
-	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ];
-	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ];
+	uint16_t base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
+	uint16_t size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5];
 	uint16_t thres[CIM_NUM_IBQ];
-	uint32_t obq_wr[2 * CIM_NUM_OBQ], *wr = obq_wr;
-	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ)], *p = stat;
+	uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
+	uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
+	u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
 
-	rc = -t4_cim_read(sc, A_UP_IBQ_0_RDADDR, nitems(stat), stat);
+	cim_num_obq = sc->chip_params->cim_num_obq;
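+	/* T5 onwards the queue pointers are read through shadow registers. */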
+	if (is_t4(sc)) {
+		ibq_rdaddr = A_UP_IBQ_0_RDADDR;
+		obq_rdaddr = A_UP_OBQ_0_REALADDR;
+	} else {
+		ibq_rdaddr = A_UP_IBQ_0_SHADOW_RDADDR;
+		obq_rdaddr = A_UP_OBQ_0_SHADOW_REALADDR;
+	}
+	nq = CIM_NUM_IBQ + cim_num_obq;
+
+	rc = -t4_cim_read(sc, ibq_rdaddr, 4 * nq, stat);
 	if (rc == 0)
-		rc = -t4_cim_read(sc, A_UP_OBQ_0_REALADDR, nitems(obq_wr),
-		    obq_wr);
+		rc = -t4_cim_read(sc, obq_rdaddr, 2 * cim_num_obq, obq_wr);
 	if (rc != 0)
 		return (rc);
 
@@ -3990,19 +6156,20 @@
 	if (rc != 0)
 		return (rc);
 
-	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
 	if (sb == NULL)
 		return (ENOMEM);
 
-	sbuf_printf(sb, "Queue  Base  Size Thres RdPtr WrPtr  SOP  EOP Avail");
+	sbuf_printf(sb,
+	    "  Queue  Base  Size Thres  RdPtr WrPtr  SOP  EOP Avail");
 
 	for (i = 0; i < CIM_NUM_IBQ; i++, p += 4)
-		sbuf_printf(sb, "\n%5s %5x %5u %4u %6x  %4x %4u %4u %5u",
+		sbuf_printf(sb, "\n%7s %5x %5u %5u %6x  %4x %4u %4u %5u",
 		    qname[i], base[i], size[i], thres[i], G_IBQRDADDR(p[0]),
 		    G_IBQWRADDR(p[1]), G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
 		    G_QUEREMFLITS(p[2]) * 16);
-	for ( ; i < CIM_NUM_IBQ + CIM_NUM_OBQ; i++, p += 4, wr += 2)
-		sbuf_printf(sb, "\n%5s %5x %5u %11x  %4x %4u %4u %5u", qname[i],
+	for ( ; i < nq; i++, p += 4, wr += 2)
+		sbuf_printf(sb, "\n%7s %5x %5u %12x  %4x %4u %4u %5u", qname[i],
 		    base[i], size[i], G_QUERDADDR(p[0]) & 0x3fff,
 		    wr[0] - base[i], G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
 		    G_QUEREMFLITS(p[2]) * 16);
@@ -4029,14 +6196,24 @@
 	if (sb == NULL)
 		return (ENOMEM);
 
+	mtx_lock(&sc->reg_lock);
 	t4_tp_get_cpl_stats(sc, &stats);
+	mtx_unlock(&sc->reg_lock);
 
-	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
-	    "channel 3\n");
-	sbuf_printf(sb, "CPL requests:   %10u %10u %10u %10u\n",
-		   stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
-	sbuf_printf(sb, "CPL responses:  %10u %10u %10u %10u",
-		   stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
+	if (sc->chip_params->nchan > 2) {
+		sbuf_printf(sb, "                 channel 0  channel 1"
+		    "  channel 2  channel 3");
+		sbuf_printf(sb, "\nCPL requests:   %10u %10u %10u %10u",
+		    stats.req[0], stats.req[1], stats.req[2], stats.req[3]);
+		sbuf_printf(sb, "\nCPL responses:  %10u %10u %10u %10u",
+		    stats.rsp[0], stats.rsp[1], stats.rsp[2], stats.rsp[3]);
+	} else {
+		sbuf_printf(sb, "                 channel 0  channel 1");
+		sbuf_printf(sb, "\nCPL requests:   %10u %10u",
+		    stats.req[0], stats.req[1]);
+		sbuf_printf(sb, "\nCPL responses:  %10u %10u",
+		    stats.rsp[0], stats.rsp[1]);
+	}
 
 	rc = sbuf_finish(sb);
 	sbuf_delete(sb);
@@ -4072,7 +6249,7 @@
 	return (rc);
 }
 
-const char *devlog_level_strings[] = {
+static const char * const devlog_level_strings[] = {
 	[FW_DEVLOG_LEVEL_EMERG]		= "EMERG",
 	[FW_DEVLOG_LEVEL_CRIT]		= "CRIT",
 	[FW_DEVLOG_LEVEL_ERR]		= "ERR",
@@ -4081,8 +6258,9 @@
 	[FW_DEVLOG_LEVEL_DEBUG]		= "DEBUG"
 };
 
-const char *devlog_facility_strings[] = {
+static const char * const devlog_facility_strings[] = {
 	[FW_DEVLOG_FACILITY_CORE]	= "CORE",
+	[FW_DEVLOG_FACILITY_CF]		= "CF",
 	[FW_DEVLOG_FACILITY_SCHED]	= "SCHED",
 	[FW_DEVLOG_FACILITY_TIMER]	= "TIMER",
 	[FW_DEVLOG_FACILITY_RES]	= "RES",
@@ -4104,7 +6282,8 @@
 	[FW_DEVLOG_FACILITY_ISCSI]	= "ISCSI",
 	[FW_DEVLOG_FACILITY_FCOE]	= "FCOE",
 	[FW_DEVLOG_FACILITY_FOISCSI]	= "FOISCSI",
-	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE"
+	[FW_DEVLOG_FACILITY_FOFCOE]	= "FOFCOE",
+	[FW_DEVLOG_FACILITY_CHNET]	= "CHNET",
 };
 
 static int
@@ -4117,20 +6296,18 @@
 	struct sbuf *sb;
 	uint64_t ftstamp = UINT64_MAX;
 
-	if (dparams->start == 0)
+	if (dparams->addr == 0)
 		return (ENXIO);
 
-	nentries = dparams->size / sizeof(struct fw_devlog_e);
-
 	buf = malloc(dparams->size, M_CXGBE, M_NOWAIT);
 	if (buf == NULL)
 		return (ENOMEM);
 
-	rc = -t4_mem_read(sc, dparams->memtype, dparams->start, dparams->size,
-	    (void *)buf);
+	rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf, dparams->size);
 	if (rc != 0)
 		goto done;
 
+	nentries = dparams->size / sizeof(struct fw_devlog_e);
 	for (i = 0; i < nentries; i++) {
 		e = &buf[i];
 
@@ -4196,7 +6373,8 @@
 	struct adapter *sc = arg1;
 	struct sbuf *sb;
 	int rc;
-	struct tp_fcoe_stats stats[4];
+	struct tp_fcoe_stats stats[MAX_NCHAN];
+	int i, nchan = sc->chip_params->nchan;
 
 	rc = sysctl_wire_old_buffer(req, 0);
 	if (rc != 0)
@@ -4206,21 +6384,30 @@
 	if (sb == NULL)
 		return (ENOMEM);
 
-	t4_get_fcoe_stats(sc, 0, &stats[0]);
-	t4_get_fcoe_stats(sc, 1, &stats[1]);
-	t4_get_fcoe_stats(sc, 2, &stats[2]);
-	t4_get_fcoe_stats(sc, 3, &stats[3]);
+	for (i = 0; i < nchan; i++)
+		t4_get_fcoe_stats(sc, i, &stats[i]);
 
-	sbuf_printf(sb, "                   channel 0        channel 1        "
-	    "channel 2        channel 3\n");
-	sbuf_printf(sb, "octetsDDP:  %16ju %16ju %16ju %16ju\n",
-	    stats[0].octetsDDP, stats[1].octetsDDP, stats[2].octetsDDP,
-	    stats[3].octetsDDP);
-	sbuf_printf(sb, "framesDDP:  %16u %16u %16u %16u\n", stats[0].framesDDP,
-	    stats[1].framesDDP, stats[2].framesDDP, stats[3].framesDDP);
-	sbuf_printf(sb, "framesDrop: %16u %16u %16u %16u",
-	    stats[0].framesDrop, stats[1].framesDrop, stats[2].framesDrop,
-	    stats[3].framesDrop);
+	if (nchan > 2) {
+		sbuf_printf(sb, "                   channel 0        channel 1"
+		    "        channel 2        channel 3");
+		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju %16ju %16ju",
+		    stats[0].octets_ddp, stats[1].octets_ddp,
+		    stats[2].octets_ddp, stats[3].octets_ddp);
+		sbuf_printf(sb, "\nframesDDP:  %16u %16u %16u %16u",
+		    stats[0].frames_ddp, stats[1].frames_ddp,
+		    stats[2].frames_ddp, stats[3].frames_ddp);
+		sbuf_printf(sb, "\nframesDrop: %16u %16u %16u %16u",
+		    stats[0].frames_drop, stats[1].frames_drop,
+		    stats[2].frames_drop, stats[3].frames_drop);
+	} else {
+		sbuf_printf(sb, "                   channel 0        channel 1");
+		sbuf_printf(sb, "\noctetsDDP:  %16ju %16ju",
+		    stats[0].octets_ddp, stats[1].octets_ddp);
+		sbuf_printf(sb, "\nframesDDP:  %16u %16u",
+		    stats[0].frames_ddp, stats[1].frames_ddp);
+		sbuf_printf(sb, "\nframesDrop: %16u %16u",
+		    stats[0].frames_drop, stats[1].frames_drop);
+	}
 
 	rc = sbuf_finish(sb);
 	sbuf_delete(sb);
@@ -4306,7 +6493,7 @@
 
 	memset(s, 0, sizeof(s));
 
-	for (i = 0; i < 4; i += 2) {
+	for (i = 0; i < sc->chip_params->nchan; i += 2) {
 		t4_get_lb_stats(sc, i, &s[0]);
 		t4_get_lb_stats(sc, i + 1, &s[1]);
 
@@ -4326,6 +6513,32 @@
 	return (rc);
 }
 
+static int
+sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
+{
+	int rc = 0;
+	struct port_info *pi = arg1;
+	struct link_config *lc = &pi->link_cfg;
+	struct sbuf *sb;
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+	sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
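+	/* A link_down_rc of 255 is the "no reason recorded" sentinel. */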
+	if (lc->link_ok || lc->link_down_rc == 255)
+		sbuf_printf(sb, "n/a");
+	else
+		sbuf_printf(sb, "%s", t4_link_down_rc_str(lc->link_down_rc));
+
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+
+	return (rc);
+}
+
 struct mem_desc {
 	unsigned int base;
 	unsigned int limit;
@@ -4345,6 +6558,9 @@
 {
 	unsigned int size;
 
+	if (from == to)
+		return;
+
 	size = to - from + 1;
 	if (size == 0)
 		return;
@@ -4359,17 +6575,18 @@
 	struct adapter *sc = arg1;
 	struct sbuf *sb;
 	int rc, i, n;
-	uint32_t lo, hi;
-	static const char *memory[] = { "EDC0:", "EDC1:", "MC:" };
+	uint32_t lo, hi, used, alloc;
+	static const char *memory[] = {"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:"};
 	static const char *region[] = {
 		"DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
 		"Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
 		"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
 		"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
-		"RQUDP region:", "PBL region:", "TXPBL region:", "ULPRX state:",
-		"ULPTX state:", "On-chip queues:"
+		"RQUDP region:", "PBL region:", "TXPBL region:",
+		"DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+		"On-chip queues:"
 	};
-	struct mem_desc avail[3];
+	struct mem_desc avail[4];
 	struct mem_desc mem[nitems(region) + 3];	/* up to 3 holes */
 	struct mem_desc *md = mem;
 
@@ -4406,10 +6623,19 @@
 	if (lo & F_EXT_MEM_ENABLE) {
 		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
 		avail[i].base = G_EXT_MEM_BASE(hi) << 20;
-		avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
-		avail[i].idx = 2;
+		avail[i].limit = avail[i].base +
+		    (G_EXT_MEM_SIZE(hi) << 20);
+		avail[i].idx = is_t5(sc) ? 3 : 2;	/* Call it MC0 for T5 */
 		i++;
 	}
+	if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
+		hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+		avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
+		avail[i].limit = avail[i].base +
+		    (G_EXT_MEM1_SIZE(hi) << 20);
+		avail[i].idx = 4;
+		i++;
+	}
 	if (!i)                                    /* no memory available */
 		return 0;
 	qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
@@ -4438,9 +6664,11 @@
 	md++;
 
 	if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
-		hi = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
-		md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
-		md->limit = (sc->tids.ntids - hi) * 16 + md->base - 1;
+		if (chip_id(sc) <= CHELSIO_T5)
+			md->base = t4_read_reg(sc, A_LE_DB_HASH_TID_BASE);
+		else
+			md->base = t4_read_reg(sc, A_LE_DB_HASH_TBL_BASE_ADDR);
+		md->limit = 0;
 	} else {
 		md->base = 0;
 		md->idx = nitems(region);  /* hide it */
@@ -4461,11 +6689,32 @@
 	ulp_region(TX_PBL);
 #undef ulp_region
 
+	md->base = 0;
+	md->idx = nitems(region);
+	if (!is_t4(sc)) {
+		uint32_t size = 0;
+		uint32_t sge_ctrl = t4_read_reg(sc, A_SGE_CONTROL2);
+		uint32_t fifo_size = t4_read_reg(sc, A_SGE_DBVFIFO_SIZE);
+
+		if (is_t5(sc)) {
+			if (sge_ctrl & F_VFIFO_ENABLE)
+				size = G_DBVFIFO_SIZE(fifo_size);
+		} else
+			size = G_T6_DBVFIFO_SIZE(fifo_size);
+
+		if (size) {
+			md->base = G_BASEADDR(t4_read_reg(sc,
+			    A_SGE_DBVFIFO_BADDR));
+			md->limit = md->base + (size << 2) - 1;
+		}
+	}
+	md++;
+
 	md->base = t4_read_reg(sc, A_ULP_RX_CTX_BASE);
-	md->limit = md->base + sc->tids.ntids - 1;
+	md->limit = 0;
 	md++;
 	md->base = t4_read_reg(sc, A_ULP_TX_ERR_TABLE_BASE);
-	md->limit = md->base + sc->tids.ntids - 1;
+	md->limit = 0;
 	md++;
 
 	md->base = sc->vres.ocq.start;
@@ -4524,15 +6773,37 @@
 		   t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT));
 
 	for (i = 0; i < 4; i++) {
-		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
+		if (chip_id(sc) > CHELSIO_T5)
+			lo = t4_read_reg(sc, A_MPS_RX_MAC_BG_PG_CNT0 + i * 4);
+		else
+			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV0 + i * 4);
+		if (is_t5(sc)) {
+			used = G_T5_USED(lo);
+			alloc = G_T5_ALLOC(lo);
+		} else {
+			used = G_USED(lo);
+			alloc = G_ALLOC(lo);
+		}
+		/* For T6 these are MAC buffer groups */
 		sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
-			   i, G_USED(lo), G_ALLOC(lo));
+		    i, used, alloc);
 	}
-	for (i = 0; i < 4; i++) {
-		lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
+	for (i = 0; i < sc->chip_params->nchan; i++) {
+		if (chip_id(sc) > CHELSIO_T5)
+			lo = t4_read_reg(sc, A_MPS_RX_LPBK_BG_PG_CNT0 + i * 4);
+		else
+			lo = t4_read_reg(sc, A_MPS_RX_PG_RSV4 + i * 4);
+		if (is_t5(sc)) {
+			used = G_T5_USED(lo);
+			alloc = G_T5_ALLOC(lo);
+		} else {
+			used = G_USED(lo);
+			alloc = G_ALLOC(lo);
+		}
+		/* For T6 these are MAC buffer groups */
 		sbuf_printf(sb,
-			   "\nLoopback %d using %u pages out of %u allocated",
-			   i, G_USED(lo), G_ALLOC(lo));
+		    "\nLoopback %d using %u pages out of %u allocated",
+		    i, used, alloc);
 	}
 
 	rc = sbuf_finish(sb);
@@ -4541,7 +6812,258 @@
 	return (rc);
 }
 
+static inline void
+tcamxy2valmask(uint64_t x, uint64_t y, uint8_t *addr, uint64_t *mask)
+{
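+	/*
+	 * Each TCAM bit is stored as an (x, y) pair: (0, 0) is a wildcard,
+	 * (0, 1) matches 1, (1, 0) matches 0, and (1, 1) marks an unused
+	 * entry (callers skip entries with tcamx & tcamy != 0).  The value
+	 * is therefore y and the mask of significant bits is x | y.
+	 */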
+	*mask = x | y;
+	y = htobe64(y);
+	memcpy(addr, (char *)&y + 2, ETHER_ADDR_LEN);
+}
+
 static int
+sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	struct sbuf *sb;
+	int rc, i;
+
+	MPASS(chip_id(sc) <= CHELSIO_T5);
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	sbuf_printf(sb,
+	    "Idx  Ethernet address     Mask     Vld Ports PF"
+	    "  VF              Replication             P0 P1 P2 P3  ML");
+	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
+		uint64_t tcamx, tcamy, mask;
+		uint32_t cls_lo, cls_hi;
+		uint8_t addr[ETHER_ADDR_LEN];
+
+		tcamy = t4_read_reg64(sc, MPS_CLS_TCAM_Y_L(i));
+		tcamx = t4_read_reg64(sc, MPS_CLS_TCAM_X_L(i));
+		if (tcamx & tcamy)
+			continue;
+		tcamxy2valmask(tcamx, tcamy, addr, &mask);
+		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+		sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x %012jx"
+			   "  %c   %#x%4u%4d", i, addr[0], addr[1], addr[2],
+			   addr[3], addr[4], addr[5], (uintmax_t)mask,
+			   (cls_lo & F_SRAM_VLD) ? 'Y' : 'N',
+			   G_PORTMAP(cls_hi), G_PF(cls_lo),
+			   (cls_lo & F_VF_VALID) ? G_VF(cls_lo) : -1);
+
+		if (cls_lo & F_REPLICATE) {
+			struct fw_ldst_cmd ldst_cmd;
+
+			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+			ldst_cmd.op_to_addrspace =
+			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+				F_FW_CMD_REQUEST | F_FW_CMD_READ |
+				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
+			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
+			ldst_cmd.u.mps.rplc.fid_idx =
+			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+				V_FW_LDST_CMD_IDX(i));
+
+			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+			    "t4mps");
+			if (rc)
+				break;
+			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+			    sizeof(ldst_cmd), &ldst_cmd);
+			end_synchronized_op(sc, 0);
+
+			if (rc != 0) {
+				sbuf_printf(sb, "%36d", rc);
+				rc = 0;
+			} else {
+				sbuf_printf(sb, " %08x %08x %08x %08x",
+				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
+			}
+		} else
+			sbuf_printf(sb, "%36s", "");
+
+		sbuf_printf(sb, "%4u%3u%3u%3u %#3x", G_SRAM_PRIO0(cls_lo),
+		    G_SRAM_PRIO1(cls_lo), G_SRAM_PRIO2(cls_lo),
+		    G_SRAM_PRIO3(cls_lo), (cls_lo >> S_MULTILISTEN0) & 0xf);
+	}
+
+	if (rc)
+		(void) sbuf_finish(sb);
+	else
+		rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+
+	return (rc);
+}
+
+static int
+sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	struct sbuf *sb;
+	int rc, i;
+
+	MPASS(chip_id(sc) > CHELSIO_T5);
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	sbuf_printf(sb, "Idx  Ethernet address     Mask       VNI   Mask"
+	    "   IVLAN Vld DIP_Hit   Lookup  Port Vld Ports PF  VF"
+	    "                           Replication"
+	    "                                    P0 P1 P2 P3  ML\n");
+
+	for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
+		uint8_t dip_hit, vlan_vld, lookup_type, port_num;
+		uint16_t ivlan;
+		uint64_t tcamx, tcamy, val, mask;
+		uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
+		uint8_t addr[ETHER_ADDR_LEN];
+
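+		/*
+		 * The T6 TCAM is read indirectly: CTL selects entry i
+		 * (CTLTCAMSEL picks the 256-entry bank) and CTLXYBITSEL
+		 * selects the y or x half of the entry.
+		 */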
+		ctl = V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0);
+		if (i < 256)
+			ctl |= V_CTLTCAMINDEX(i) | V_CTLTCAMSEL(0);
+		else
+			ctl |= V_CTLTCAMINDEX(i - 256) | V_CTLTCAMSEL(1);
+		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
+		tcamy = G_DMACH(val) << 32;
+		tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
+		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+		lookup_type = G_DATALKPTYPE(data2);
+		port_num = G_DATAPORTNUM(data2);
+		if (lookup_type && lookup_type != M_DATALKPTYPE) {
+			/* Inner header VNI */
+			vniy = ((data2 & F_DATAVIDH2) << 23) |
+				       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
+			dip_hit = data2 & F_DATADIPHIT;
+			vlan_vld = 0;
+		} else {
+			vniy = 0;
+			dip_hit = 0;
+			vlan_vld = data2 & F_DATAVIDH2;
+			ivlan = G_VIDL(val);
+		}
+
+		ctl |= V_CTLXYBITSEL(1);
+		t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+		val = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA1_REQ_ID1);
+		tcamx = G_DMACH(val) << 32;
+		tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA0_REQ_ID1);
+		data2 = t4_read_reg(sc, A_MPS_CLS_TCAM_RDATA2_REQ_ID1);
+		if (lookup_type && lookup_type != M_DATALKPTYPE) {
+			/* Inner header VNI mask */
+			vnix = ((data2 & F_DATAVIDH2) << 23) |
+			       (G_DATAVIDH1(data2) << 16) | G_VIDL(val);
+		} else
+			vnix = 0;
+
+		if (tcamx & tcamy)
+			continue;
+		tcamxy2valmask(tcamx, tcamy, addr, &mask);
+
+		cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+		cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+
+		if (lookup_type && lookup_type != M_DATALKPTYPE) {
+			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+			    "%012jx %06x %06x    -    -   %3c"
+			    "      'I'  %4x   %3c   %#x%4u%4d", i, addr[0],
+			    addr[1], addr[2], addr[3], addr[4], addr[5],
+			    (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
+			    port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+		} else {
+			sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+			    "%012jx    -       -   ", i, addr[0], addr[1],
+			    addr[2], addr[3], addr[4], addr[5],
+			    (uintmax_t)mask);
+
+			if (vlan_vld)
+				sbuf_printf(sb, "%4u   Y     ", ivlan);
+			else
+				sbuf_printf(sb, "  -    N     ");
+
+			sbuf_printf(sb, "-      %3c  %4x   %3c   %#x%4u%4d",
+			    lookup_type ? 'I' : 'O', port_num,
+			    cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+			    G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+			    cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+		}
+
+		if (cls_lo & F_T6_REPLICATE) {
+			struct fw_ldst_cmd ldst_cmd;
+
+			memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+			ldst_cmd.op_to_addrspace =
+			    htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+				F_FW_CMD_REQUEST | F_FW_CMD_READ |
+				V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
+			ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
+			ldst_cmd.u.mps.rplc.fid_idx =
+			    htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+				V_FW_LDST_CMD_IDX(i));
+
+			rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+			    "t6mps");
+			if (rc)
+				break;
+			rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+			    sizeof(ldst_cmd), &ldst_cmd);
+			end_synchronized_op(sc, 0);
+
+			if (rc != 0) {
+				sbuf_printf(sb, "%72d", rc);
+				rc = 0;
+			} else {
+				sbuf_printf(sb, " %08x %08x %08x %08x"
+				    " %08x %08x %08x %08x",
+				    be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
+				    be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
+			}
+		} else
+			sbuf_printf(sb, "%72s", "");
+
+		sbuf_printf(sb, "%4u%3u%3u%3u %#x",
+		    G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
+		    G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
+		    (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
+	}
+
+	if (rc)
+		(void) sbuf_finish(sb);
+	else
+		rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+
+	return (rc);
+}
+
+static int
 sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
 {
 	struct adapter *sc = arg1;
@@ -4576,11 +7098,16 @@
 	struct adapter *sc = arg1;
 	struct sbuf *sb;
 	int rc, i;
-	uint32_t tx_cnt[PM_NSTATS], rx_cnt[PM_NSTATS];
-	uint64_t tx_cyc[PM_NSTATS], rx_cyc[PM_NSTATS];
-	static const char *pm_stats[] = {
-		"Read:", "Write bypass:", "Write mem:", "Flush:", "FIFO wait:"
+	uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
+	uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
+	static const char *tx_stats[MAX_PM_NSTATS] = {
+		"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
+		"Tx FIFO wait", NULL, "Tx latency"
 	};
+	static const char *rx_stats[MAX_PM_NSTATS] = {
+		"Read:", "Write bypass:", "Write mem:", "Flush:",
+		"Rx FIFO wait", NULL, "Rx latency"
+	};
 
 	rc = sysctl_wire_old_buffer(req, 0);
 	if (rc != 0)
@@ -4593,12 +7120,37 @@
 	t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
 	t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
 
-	sbuf_printf(sb, "                Tx count            Tx cycles    "
-	    "Rx count            Rx cycles");
-	for (i = 0; i < PM_NSTATS; i++)
-		sbuf_printf(sb, "\n%-13s %10u %20ju  %10u %20ju",
-		    pm_stats[i], tx_cnt[i], tx_cyc[i], rx_cnt[i], rx_cyc[i]);
+	sbuf_printf(sb, "                Tx pcmds             Tx bytes");
+	for (i = 0; i < 4; i++) {
+		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
+		    tx_cyc[i]);
+	}
 
+	sbuf_printf(sb, "\n                Rx pcmds             Rx bytes");
+	for (i = 0; i < 4; i++) {
+		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
+		    rx_cyc[i]);
+	}
+
+	if (chip_id(sc) > CHELSIO_T5) {
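+		/*
+		 * T6 keeps two extra sample pairs: index 4 (FIFO wait) and
+		 * index 6 (latency); slot 5 of the stat tables is unused.
+		 */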
+		sbuf_printf(sb,
+		    "\n              Total wait      Total occupancy");
+		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
+		    tx_cyc[i]);
+		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
+		    rx_cyc[i]);
+
+		i += 2;
+		MPASS(i < nitems(tx_stats));
+
+		sbuf_printf(sb,
+		    "\n                   Reads           Total wait");
+		sbuf_printf(sb, "\n%-13s %10u %20ju", tx_stats[i], tx_cnt[i],
+		    tx_cyc[i]);
+		sbuf_printf(sb, "\n%-13s %10u %20ju", rx_stats[i], rx_cnt[i],
+		    rx_cyc[i]);
+	}
+
 	rc = sbuf_finish(sb);
 	sbuf_delete(sb);
 
@@ -4621,7 +7173,10 @@
 	if (sb == NULL)
 		return (ENOMEM);
 
+	mtx_lock(&sc->reg_lock);
 	t4_tp_get_rdma_stats(sc, &stats);
+	mtx_unlock(&sc->reg_lock);
+
 	sbuf_printf(sb, "NoRQEModDefferals: %u\n", stats.rqe_dfr_mod);
 	sbuf_printf(sb, "NoRQEPktDefferals: %u", stats.rqe_dfr_pkt);
 
@@ -4647,17 +7202,20 @@
 	if (sb == NULL)
 		return (ENOMEM);
 
+	mtx_lock(&sc->reg_lock);
 	t4_tp_get_tcp_stats(sc, &v4, &v6);
+	mtx_unlock(&sc->reg_lock);
+
 	sbuf_printf(sb,
 	    "                                IP                 IPv6\n");
 	sbuf_printf(sb, "OutRsts:      %20u %20u\n",
-	    v4.tcpOutRsts, v6.tcpOutRsts);
+	    v4.tcp_out_rsts, v6.tcp_out_rsts);
 	sbuf_printf(sb, "InSegs:       %20ju %20ju\n",
-	    v4.tcpInSegs, v6.tcpInSegs);
+	    v4.tcp_in_segs, v6.tcp_in_segs);
 	sbuf_printf(sb, "OutSegs:      %20ju %20ju\n",
-	    v4.tcpOutSegs, v6.tcpOutSegs);
+	    v4.tcp_out_segs, v6.tcp_out_segs);
 	sbuf_printf(sb, "RetransSegs:  %20ju %20ju",
-	    v4.tcpRetransSegs, v6.tcpRetransSegs);
+	    v4.tcp_retrans_segs, v6.tcp_retrans_segs);
 
 	rc = sbuf_finish(sb);
 	sbuf_delete(sb);
@@ -4687,20 +7245,23 @@
 	}
 
 	if (t->ntids) {
+		sbuf_printf(sb, "TID range: ");
 		if (t4_read_reg(sc, A_LE_DB_CONFIG) & F_HASHEN) {
-			uint32_t b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
+			uint32_t b, hb;
 
-			if (b) {
-				sbuf_printf(sb, "TID range: 0-%u, %u-%u", b - 1,
-				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
-				    t->ntids - 1);
+			if (chip_id(sc) <= CHELSIO_T5) {
+				b = t4_read_reg(sc, A_LE_DB_SERVER_INDEX) / 4;
+				hb = t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4;
 			} else {
-				sbuf_printf(sb, "TID range: %u-%u",
-				    t4_read_reg(sc, A_LE_DB_TID_HASHBASE) / 4,
-				    t->ntids - 1);
+				b = t4_read_reg(sc, A_LE_DB_SRVR_START_INDEX);
+				hb = t4_read_reg(sc, A_T6_LE_DB_HASH_TID_BASE);
 			}
+
+			if (b)
+				sbuf_printf(sb, "0-%u, ", b - 1);
+			sbuf_printf(sb, "%u-%u", hb, t->ntids - 1);
 		} else
-			sbuf_printf(sb, "TID range: 0-%u", t->ntids - 1);
+			sbuf_printf(sb, "0-%u", t->ntids - 1);
 		sbuf_printf(sb, ", in use: %u\n",
 		    atomic_load_acq_int(&t->tids_in_use));
 	}
@@ -4715,6 +7276,11 @@
 		    t->ftid_base + t->nftids - 1);
 	}
 
+	if (t->netids) {
+		sbuf_printf(sb, "ETID range: %u-%u\n", t->etid_base,
+		    t->etid_base + t->netids - 1);
+	}
+
 	sbuf_printf(sb, "HW TID usage: %u IP users, %u IPv6 users",
 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV4),
 	    t4_read_reg(sc, A_LE_DB_ACT_CNT_IPV6));
@@ -4741,36 +7307,59 @@
 	if (sb == NULL)
 		return (ENOMEM);
 
+	mtx_lock(&sc->reg_lock);
 	t4_tp_get_err_stats(sc, &stats);
+	mtx_unlock(&sc->reg_lock);
 
-	sbuf_printf(sb, "                 channel 0  channel 1  channel 2  "
-		      "channel 3\n");
-	sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
-	    stats.macInErrs[0], stats.macInErrs[1], stats.macInErrs[2],
-	    stats.macInErrs[3]);
-	sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
-	    stats.hdrInErrs[0], stats.hdrInErrs[1], stats.hdrInErrs[2],
-	    stats.hdrInErrs[3]);
-	sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
-	    stats.tcpInErrs[0], stats.tcpInErrs[1], stats.tcpInErrs[2],
-	    stats.tcpInErrs[3]);
-	sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
-	    stats.tcp6InErrs[0], stats.tcp6InErrs[1], stats.tcp6InErrs[2],
-	    stats.tcp6InErrs[3]);
-	sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
-	    stats.tnlCongDrops[0], stats.tnlCongDrops[1], stats.tnlCongDrops[2],
-	    stats.tnlCongDrops[3]);
-	sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
-	    stats.tnlTxDrops[0], stats.tnlTxDrops[1], stats.tnlTxDrops[2],
-	    stats.tnlTxDrops[3]);
-	sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
-	    stats.ofldVlanDrops[0], stats.ofldVlanDrops[1],
-	    stats.ofldVlanDrops[2], stats.ofldVlanDrops[3]);
-	sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
-	    stats.ofldChanDrops[0], stats.ofldChanDrops[1],
-	    stats.ofldChanDrops[2], stats.ofldChanDrops[3]);
+	if (sc->chip_params->nchan > 2) {
+		sbuf_printf(sb, "                 channel 0  channel 1"
+		    "  channel 2  channel 3\n");
+		sbuf_printf(sb, "macInErrs:      %10u %10u %10u %10u\n",
+		    stats.mac_in_errs[0], stats.mac_in_errs[1],
+		    stats.mac_in_errs[2], stats.mac_in_errs[3]);
+		sbuf_printf(sb, "hdrInErrs:      %10u %10u %10u %10u\n",
+		    stats.hdr_in_errs[0], stats.hdr_in_errs[1],
+		    stats.hdr_in_errs[2], stats.hdr_in_errs[3]);
+		sbuf_printf(sb, "tcpInErrs:      %10u %10u %10u %10u\n",
+		    stats.tcp_in_errs[0], stats.tcp_in_errs[1],
+		    stats.tcp_in_errs[2], stats.tcp_in_errs[3]);
+		sbuf_printf(sb, "tcp6InErrs:     %10u %10u %10u %10u\n",
+		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1],
+		    stats.tcp6_in_errs[2], stats.tcp6_in_errs[3]);
+		sbuf_printf(sb, "tnlCongDrops:   %10u %10u %10u %10u\n",
+		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1],
+		    stats.tnl_cong_drops[2], stats.tnl_cong_drops[3]);
+		sbuf_printf(sb, "tnlTxDrops:     %10u %10u %10u %10u\n",
+		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1],
+		    stats.tnl_tx_drops[2], stats.tnl_tx_drops[3]);
+		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u %10u %10u\n",
+		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1],
+		    stats.ofld_vlan_drops[2], stats.ofld_vlan_drops[3]);
+		sbuf_printf(sb, "ofldChanDrops:  %10u %10u %10u %10u\n\n",
+		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1],
+		    stats.ofld_chan_drops[2], stats.ofld_chan_drops[3]);
+	} else {
+		sbuf_printf(sb, "                 channel 0  channel 1\n");
+		sbuf_printf(sb, "macInErrs:      %10u %10u\n",
+		    stats.mac_in_errs[0], stats.mac_in_errs[1]);
+		sbuf_printf(sb, "hdrInErrs:      %10u %10u\n",
+		    stats.hdr_in_errs[0], stats.hdr_in_errs[1]);
+		sbuf_printf(sb, "tcpInErrs:      %10u %10u\n",
+		    stats.tcp_in_errs[0], stats.tcp_in_errs[1]);
+		sbuf_printf(sb, "tcp6InErrs:     %10u %10u\n",
+		    stats.tcp6_in_errs[0], stats.tcp6_in_errs[1]);
+		sbuf_printf(sb, "tnlCongDrops:   %10u %10u\n",
+		    stats.tnl_cong_drops[0], stats.tnl_cong_drops[1]);
+		sbuf_printf(sb, "tnlTxDrops:     %10u %10u\n",
+		    stats.tnl_tx_drops[0], stats.tnl_tx_drops[1]);
+		sbuf_printf(sb, "ofldVlanDrops:  %10u %10u\n",
+		    stats.ofld_vlan_drops[0], stats.ofld_vlan_drops[1]);
+		sbuf_printf(sb, "ofldChanDrops:  %10u %10u\n\n",
+		    stats.ofld_chan_drops[0], stats.ofld_chan_drops[1]);
+	}
+
 	sbuf_printf(sb, "ofldNoNeigh:    %u\nofldCongDefer:  %u",
-	    stats.ofldNoNeigh, stats.ofldCongDefer);
+	    stats.ofld_no_neigh, stats.ofld_cong_defer);
 
 	rc = sbuf_finish(sb);
 	sbuf_delete(sb);
@@ -4779,12 +7368,268 @@
 }
 
 static int
+sysctl_tp_la_mask(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	struct tp_params *tpp = &sc->params.tp;
+	u_int mask;
+	int rc;
+
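+	/* The capture mask sits in the top 16 bits of A_TP_DBG_LA_CONFIG. */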
+	mask = tpp->la_mask >> 16;
+	rc = sysctl_handle_int(oidp, &mask, 0, req);
+	if (rc != 0 || req->newptr == NULL)
+		return (rc);
+	if (mask > 0xffff)
+		return (EINVAL);
+	tpp->la_mask = mask << 16;
+	t4_set_reg_field(sc, A_TP_DBG_LA_CONFIG, 0xffff0000U, tpp->la_mask);
+
+	return (0);
+}
+
+struct field_desc {
+	const char *name;
+	u_int start;
+	u_int width;
+};
+
+static void
+field_desc_show(struct sbuf *sb, uint64_t v, const struct field_desc *f)
+{
+	char buf[32];
+	int line_size = 0;
+
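+	/*
+	 * Walk the NULL-terminated field table, printing each field as
+	 * "name: value" and wrapping the output before column 79.
+	 */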
+	while (f->name) {
+		uint64_t mask = (1ULL << f->width) - 1;
+		int len = snprintf(buf, sizeof(buf), "%s: %ju", f->name,
+		    ((uintmax_t)v >> f->start) & mask);
+
+		if (line_size + len >= 79) {
+			line_size = 8;
+			sbuf_printf(sb, "\n        ");
+		}
+		sbuf_printf(sb, "%s ", buf);
+		line_size += len + 1;
+		f++;
+	}
+	sbuf_printf(sb, "\n");
+}
+
+static const struct field_desc tp_la0[] = {
+	{ "RcfOpCodeOut", 60, 4 },
+	{ "State", 56, 4 },
+	{ "WcfState", 52, 4 },
+	{ "RcfOpcSrcOut", 50, 2 },
+	{ "CRxError", 49, 1 },
+	{ "ERxError", 48, 1 },
+	{ "SanityFailed", 47, 1 },
+	{ "SpuriousMsg", 46, 1 },
+	{ "FlushInputMsg", 45, 1 },
+	{ "FlushInputCpl", 44, 1 },
+	{ "RssUpBit", 43, 1 },
+	{ "RssFilterHit", 42, 1 },
+	{ "Tid", 32, 10 },
+	{ "InitTcb", 31, 1 },
+	{ "LineNumber", 24, 7 },
+	{ "Emsg", 23, 1 },
+	{ "EdataOut", 22, 1 },
+	{ "Cmsg", 21, 1 },
+	{ "CdataOut", 20, 1 },
+	{ "EreadPdu", 19, 1 },
+	{ "CreadPdu", 18, 1 },
+	{ "TunnelPkt", 17, 1 },
+	{ "RcfPeerFin", 16, 1 },
+	{ "RcfReasonOut", 12, 4 },
+	{ "TxCchannel", 10, 2 },
+	{ "RcfTxChannel", 8, 2 },
+	{ "RxEchannel", 6, 2 },
+	{ "RcfRxChannel", 5, 1 },
+	{ "RcfDataOutSrdy", 4, 1 },
+	{ "RxDvld", 3, 1 },
+	{ "RxOoDvld", 2, 1 },
+	{ "RxCongestion", 1, 1 },
+	{ "TxCongestion", 0, 1 },
+	{ NULL }
+};
+
+static const struct field_desc tp_la1[] = {
+	{ "CplCmdIn", 56, 8 },
+	{ "CplCmdOut", 48, 8 },
+	{ "ESynOut", 47, 1 },
+	{ "EAckOut", 46, 1 },
+	{ "EFinOut", 45, 1 },
+	{ "ERstOut", 44, 1 },
+	{ "SynIn", 43, 1 },
+	{ "AckIn", 42, 1 },
+	{ "FinIn", 41, 1 },
+	{ "RstIn", 40, 1 },
+	{ "DataIn", 39, 1 },
+	{ "DataInVld", 38, 1 },
+	{ "PadIn", 37, 1 },
+	{ "RxBufEmpty", 36, 1 },
+	{ "RxDdp", 35, 1 },
+	{ "RxFbCongestion", 34, 1 },
+	{ "TxFbCongestion", 33, 1 },
+	{ "TxPktSumSrdy", 32, 1 },
+	{ "RcfUlpType", 28, 4 },
+	{ "Eread", 27, 1 },
+	{ "Ebypass", 26, 1 },
+	{ "Esave", 25, 1 },
+	{ "Static0", 24, 1 },
+	{ "Cread", 23, 1 },
+	{ "Cbypass", 22, 1 },
+	{ "Csave", 21, 1 },
+	{ "CPktOut", 20, 1 },
+	{ "RxPagePoolFull", 18, 2 },
+	{ "RxLpbkPkt", 17, 1 },
+	{ "TxLpbkPkt", 16, 1 },
+	{ "RxVfValid", 15, 1 },
+	{ "SynLearned", 14, 1 },
+	{ "SetDelEntry", 13, 1 },
+	{ "SetInvEntry", 12, 1 },
+	{ "CpcmdDvld", 11, 1 },
+	{ "CpcmdSave", 10, 1 },
+	{ "RxPstructsFull", 8, 2 },
+	{ "EpcmdDvld", 7, 1 },
+	{ "EpcmdFlush", 6, 1 },
+	{ "EpcmdTrimPrefix", 5, 1 },
+	{ "EpcmdTrimPostfix", 4, 1 },
+	{ "ERssIp4Pkt", 3, 1 },
+	{ "ERssIp6Pkt", 2, 1 },
+	{ "ERssTcpUdpPkt", 1, 1 },
+	{ "ERssFceFipPkt", 0, 1 },
+	{ NULL }
+};
+
+static const struct field_desc tp_la2[] = {
+	{ "CplCmdIn", 56, 8 },
+	{ "MpsVfVld", 55, 1 },
+	{ "MpsPf", 52, 3 },
+	{ "MpsVf", 44, 8 },
+	{ "SynIn", 43, 1 },
+	{ "AckIn", 42, 1 },
+	{ "FinIn", 41, 1 },
+	{ "RstIn", 40, 1 },
+	{ "DataIn", 39, 1 },
+	{ "DataInVld", 38, 1 },
+	{ "PadIn", 37, 1 },
+	{ "RxBufEmpty", 36, 1 },
+	{ "RxDdp", 35, 1 },
+	{ "RxFbCongestion", 34, 1 },
+	{ "TxFbCongestion", 33, 1 },
+	{ "TxPktSumSrdy", 32, 1 },
+	{ "RcfUlpType", 28, 4 },
+	{ "Eread", 27, 1 },
+	{ "Ebypass", 26, 1 },
+	{ "Esave", 25, 1 },
+	{ "Static0", 24, 1 },
+	{ "Cread", 23, 1 },
+	{ "Cbypass", 22, 1 },
+	{ "Csave", 21, 1 },
+	{ "CPktOut", 20, 1 },
+	{ "RxPagePoolFull", 18, 2 },
+	{ "RxLpbkPkt", 17, 1 },
+	{ "TxLpbkPkt", 16, 1 },
+	{ "RxVfValid", 15, 1 },
+	{ "SynLearned", 14, 1 },
+	{ "SetDelEntry", 13, 1 },
+	{ "SetInvEntry", 12, 1 },
+	{ "CpcmdDvld", 11, 1 },
+	{ "CpcmdSave", 10, 1 },
+	{ "RxPstructsFull", 8, 2 },
+	{ "EpcmdDvld", 7, 1 },
+	{ "EpcmdFlush", 6, 1 },
+	{ "EpcmdTrimPrefix", 5, 1 },
+	{ "EpcmdTrimPostfix", 4, 1 },
+	{ "ERssIp4Pkt", 3, 1 },
+	{ "ERssIp6Pkt", 2, 1 },
+	{ "ERssTcpUdpPkt", 1, 1 },
+	{ "ERssFceFipPkt", 0, 1 },
+	{ NULL }
+};
+
+static void
+tp_la_show(struct sbuf *sb, uint64_t *p, int idx)
+{
+
+	field_desc_show(sb, *p, tp_la0);
+}
+
+static void
+tp_la_show2(struct sbuf *sb, uint64_t *p, int idx)
+{
+
+	if (idx)
+		sbuf_printf(sb, "\n");
+	field_desc_show(sb, p[0], tp_la0);
+	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+		field_desc_show(sb, p[1], tp_la0);
+}
+
+static void
+tp_la_show3(struct sbuf *sb, uint64_t *p, int idx)
+{
+
+	if (idx)
+		sbuf_printf(sb, "\n");
+	field_desc_show(sb, p[0], tp_la0);
+	if (idx < (TPLA_SIZE / 2 - 1) || p[1] != ~0ULL)
+		field_desc_show(sb, p[1], (p[0] & (1 << 17)) ? tp_la2 : tp_la1);
+}
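
The tp_la0/tp_la1/tp_la2 arrays above are pure lookup tables: each entry
names a field, its least significant bit, and its width within a captured
64-bit TP logic-analyzer word, and field_desc_show() (defined earlier in
t4_main.c) masks the fields out one by one.  A minimal userland sketch of
that decode, with an assumed struct layout and a made-up sample word:

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed layout; the driver's struct field_desc lives elsewhere
	 * in t4_main.c and may differ in detail. */
	struct field_desc {
		const char *name;
		unsigned int start;	/* lsb position in the word */
		unsigned int width;	/* field width in bits */
	};

	static void
	field_desc_show(uint64_t v, const struct field_desc *f)
	{
		for (; f->name != NULL; f++) {
			uint64_t mask = f->width < 64 ?
			    (1ULL << f->width) - 1 : ~0ULL;

			printf("%s=%#jx ", f->name,
			    (uintmax_t)((v >> f->start) & mask));
		}
		printf("\n");
	}

	int
	main(void)
	{
		static const struct field_desc demo[] = {
			{ "Tid", 32, 10 },
			{ "TunnelPkt", 17, 1 },
			{ NULL, 0, 0 }
		};

		/* Made-up LA word: Tid = 0x2a, TunnelPkt = 1. */
		field_desc_show(((uint64_t)0x2a << 32) | (1ULL << 17), demo);
		return (0);
	}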
+
+static int
+sysctl_tp_la(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	struct sbuf *sb;
+	uint64_t *buf, *p;
+	int rc;
+	u_int i, inc;
+	void (*show_func)(struct sbuf *, uint64_t *, int);
+
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	buf = malloc(TPLA_SIZE * sizeof(uint64_t), M_CXGBE, M_ZERO | M_WAITOK);
+
+	t4_tp_read_la(sc, buf, NULL);
+	p = buf;
+
+	switch (G_DBGLAMODE(t4_read_reg(sc, A_TP_DBG_LA_CONFIG))) {
+	case 2:
+		inc = 2;
+		show_func = tp_la_show2;
+		break;
+	case 3:
+		inc = 2;
+		show_func = tp_la_show3;
+		break;
+	default:
+		inc = 1;
+		show_func = tp_la_show;
+	}
+
+	for (i = 0; i < TPLA_SIZE / inc; i++, p += inc)
+		(*show_func)(sb, p, i);
+
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+	free(buf, M_CXGBE);
+	return (rc);
+}
+
+static int
 sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
 {
 	struct adapter *sc = arg1;
 	struct sbuf *sb;
 	int rc;
-	u64 nrate[NCHAN], orate[NCHAN];
+	u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
 
 	rc = sysctl_wire_old_buffer(req, 0);
 	if (rc != 0)
@@ -4795,90 +7640,268 @@
 		return (ENOMEM);
 
 	t4_get_chan_txrate(sc, nrate, orate);
-	sbuf_printf(sb, "              channel 0   channel 1   channel 2   "
-		 "channel 3\n");
-	sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
-	    nrate[0], nrate[1], nrate[2], nrate[3]);
-	sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
-	    orate[0], orate[1], orate[2], orate[3]);
 
+	if (sc->chip_params->nchan > 2) {
+		sbuf_printf(sb, "              channel 0   channel 1"
+		    "   channel 2   channel 3\n");
+		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju  %10ju  %10ju\n",
+		    nrate[0], nrate[1], nrate[2], nrate[3]);
+		sbuf_printf(sb, "Offload B/s: %10ju  %10ju  %10ju  %10ju",
+		    orate[0], orate[1], orate[2], orate[3]);
+	} else {
+		sbuf_printf(sb, "              channel 0   channel 1\n");
+		sbuf_printf(sb, "NIC B/s:     %10ju  %10ju\n",
+		    nrate[0], nrate[1]);
+		sbuf_printf(sb, "Offload B/s: %10ju  %10ju",
+		    orate[0], orate[1]);
+	}
+
 	rc = sbuf_finish(sb);
 	sbuf_delete(sb);
 
 	return (rc);
 }
-#endif
 
-static inline void
-txq_start(struct ifnet *ifp, struct sge_txq *txq)
+static int
+sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
 {
-	struct buf_ring *br;
-	struct mbuf *m;
+	struct adapter *sc = arg1;
+	struct sbuf *sb;
+	uint32_t *buf, *p;
+	int rc, i;
 
-	TXQ_LOCK_ASSERT_OWNED(txq);
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
 
-	br = txq->br;
-	m = txq->m ? txq->m : drbr_dequeue(ifp, br);
-	if (m)
-		t4_eth_tx(ifp, txq, m);
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	buf = malloc(ULPRX_LA_SIZE * 8 * sizeof(uint32_t), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
+	t4_ulprx_read_la(sc, buf);
+	p = buf;
+
+	sbuf_printf(sb, "      Pcmd        Type   Message"
+	    "                Data");
+	for (i = 0; i < ULPRX_LA_SIZE; i++, p += 8) {
+		sbuf_printf(sb, "\n%08x%08x  %4x  %08x  %08x%08x%08x%08x",
+		    p[1], p[0], p[2], p[3], p[7], p[6], p[5], p[4]);
+	}
+
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+	free(buf, M_CXGBE);
+	return (rc);
 }
 
-void
-t4_tx_callout(void *arg)
+static int
+sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
 {
-	struct sge_eq *eq = arg;
-	struct adapter *sc;
+	struct adapter *sc = arg1;
+	struct sbuf *sb;
+	int rc, v;
 
-	if (EQ_TRYLOCK(eq) == 0)
-		goto reschedule;
+	MPASS(chip_id(sc) >= CHELSIO_T5);
 
-	if (eq->flags & EQ_STALLED && !can_resume_tx(eq)) {
-		EQ_UNLOCK(eq);
-reschedule:
-		if (__predict_true(!(eq->flags && EQ_DOOMED)))
-			callout_schedule(&eq->tx_callout, 1);
-		return;
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
+
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
+
+	v = t4_read_reg(sc, A_SGE_STAT_CFG);
+	if (G_STATSOURCE_T5(v) == 7) {
+		int mode;
+
+		mode = is_t5(sc) ? G_STATMODE(v) : G_T6_STATMODE(v);
+		if (mode == 0) {
+			sbuf_printf(sb, "total %d, incomplete %d",
+			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
+			    t4_read_reg(sc, A_SGE_STAT_MATCH));
+		} else if (mode == 1) {
+			sbuf_printf(sb, "total %d, data overflow %d",
+			    t4_read_reg(sc, A_SGE_STAT_TOTAL),
+			    t4_read_reg(sc, A_SGE_STAT_MATCH));
+		} else {
+			sbuf_printf(sb, "unknown mode %d", mode);
+		}
 	}
+	rc = sbuf_finish(sb);
+	sbuf_delete(sb);
 
-	EQ_LOCK_ASSERT_OWNED(eq);
+	return (rc);
+}
 
-	if (__predict_true((eq->flags & EQ_DOOMED) == 0)) {
+static int
+sysctl_tc_params(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	struct tx_cl_rl_params tc;
+	struct sbuf *sb;
+	int i, rc, port_id, mbps, gbps;
 
-		if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
-			struct sge_txq *txq = arg;
-			struct port_info *pi = txq->ifp->if_softc;
+	rc = sysctl_wire_old_buffer(req, 0);
+	if (rc != 0)
+		return (rc);
 
-			sc = pi->adapter;
-		} else {
-			struct sge_wrq *wrq = arg;
+	sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+	if (sb == NULL)
+		return (ENOMEM);
 
-			sc = wrq->adapter;
+	port_id = arg2 >> 16;
+	MPASS(port_id < sc->params.nports);
+	MPASS(sc->port[port_id] != NULL);
+	i = arg2 & 0xffff;
+	MPASS(i < sc->chip_params->nsched_cls);
+
+	mtx_lock(&sc->tc_lock);
+	tc = sc->port[port_id]->sched_params->cl_rl[i];
+	mtx_unlock(&sc->tc_lock);
+
+	if (tc.flags & TX_CLRL_ERROR) {
+		sbuf_printf(sb, "error");
+		goto done;
+	}
+
+	if (tc.ratemode == SCHED_CLASS_RATEMODE_REL) {
+		/* XXX: top speed or actual link speed? */
+		gbps = port_top_speed(sc->port[port_id]);
+		sbuf_printf(sb, " %u%% of %uGbps", tc.maxrate, gbps);
+	} else if (tc.ratemode == SCHED_CLASS_RATEMODE_ABS) {
+		switch (tc.rateunit) {
+		case SCHED_CLASS_RATEUNIT_BITS:
+			mbps = tc.maxrate / 1000;
+			gbps = tc.maxrate / 1000000;
+			if (tc.maxrate == gbps * 1000000)
+				sbuf_printf(sb, " %uGbps", gbps);
+			else if (tc.maxrate == mbps * 1000)
+				sbuf_printf(sb, " %uMbps", mbps);
+			else
+				sbuf_printf(sb, " %uKbps", tc.maxrate);
+			break;
+		case SCHED_CLASS_RATEUNIT_PKTS:
+			sbuf_printf(sb, " %upps", tc.maxrate);
+			break;
+		default:
+			rc = ENXIO;
+			goto done;
 		}
+	}
 
-		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
+	switch (tc.mode) {
+	case SCHED_CLASS_MODE_CLASS:
+		sbuf_printf(sb, " aggregate");
+		break;
+	case SCHED_CLASS_MODE_FLOW:
+		sbuf_printf(sb, " per-flow");
+		break;
+	default:
+		rc = ENXIO;
+		goto done;
 	}
 
-	EQ_UNLOCK(eq);
+done:
+	if (rc == 0)
+		rc = sbuf_finish(sb);
+	sbuf_delete(sb);
+
+	return (rc);
 }
+#endif
 
-void
-t4_tx_task(void *arg, int count)
+#ifdef TCP_OFFLOAD
+static void
+unit_conv(char *buf, size_t len, u_int val, u_int factor)
 {
-	struct sge_eq *eq = arg;
+	u_int rem = val % factor;
 
-	EQ_LOCK(eq);
-	if ((eq->flags & EQ_TYPEMASK) == EQ_ETH) {
-		struct sge_txq *txq = arg;
-		txq_start(txq->ifp, txq);
-	} else {
-		struct sge_wrq *wrq = arg;
-		t4_wrq_tx_locked(wrq->adapter, wrq, NULL);
+	if (rem == 0)
+		snprintf(buf, len, "%u", val / factor);
+	else {
+		while (rem % 10 == 0)
+			rem /= 10;
+		snprintf(buf, len, "%u.%u", val / factor, rem);
 	}
-	EQ_UNLOCK(eq);
 }
 
+static int
+sysctl_tp_tick(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	char buf[16];
+	u_int res, re;
+	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
+
+	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
+	switch (arg2) {
+	case 0:
+		/* timer_tick */
+		re = G_TIMERRESOLUTION(res);
+		break;
+	case 1:
+		/* TCP timestamp tick */
+		re = G_TIMESTAMPRESOLUTION(res);
+		break;
+	case 2:
+		/* DACK tick */
+		re = G_DELAYEDACKRESOLUTION(res);
+		break;
+	default:
+		return (EDOOFUS);
+	}
+
+	unit_conv(buf, sizeof(buf), (cclk_ps << re), 1000000);
+
+	return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
+}
+
+static int
+sysctl_tp_dack_timer(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	u_int res, dack_re, v;
+	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
+
+	res = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
+	dack_re = G_DELAYEDACKRESOLUTION(res);
+	v = ((cclk_ps << dack_re) / 1000000) * t4_read_reg(sc, A_TP_DACK_TIMER);
+
+	return (sysctl_handle_int(oidp, &v, 0, req));
+}
+
+static int
+sysctl_tp_timer(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *sc = arg1;
+	int reg = arg2;
+	u_int tre;
+	u_long tp_tick_us, v;
+	u_int cclk_ps = 1000000000 / sc->params.vpd.cclk;
+
+	MPASS(reg == A_TP_RXT_MIN || reg == A_TP_RXT_MAX ||
+	    reg == A_TP_PERS_MIN  || reg == A_TP_PERS_MAX ||
+	    reg == A_TP_KEEP_IDLE || reg == A_TP_KEEP_INTVL ||
+	    reg == A_TP_INIT_SRTT || reg == A_TP_FINWAIT2_TIMER);
+
+	tre = G_TIMERRESOLUTION(t4_read_reg(sc, A_TP_TIMER_RESOLUTION));
+	tp_tick_us = (cclk_ps << tre) / 1000000;
+
+	if (reg == A_TP_INIT_SRTT)
+		v = tp_tick_us * G_INITSRTT(t4_read_reg(sc, reg));
+	else
+		v = tp_tick_us * t4_read_reg(sc, reg);
+
+	return (sysctl_handle_long(oidp, &v, 0, req));
+}
+#endif
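
To make the arithmetic in sysctl_tp_tick()/sysctl_tp_timer() concrete:
assuming the VPD core clock is stored in kHz (which makes 1000000000/cclk
the clock period in picoseconds), a 250 MHz part with an assumed timer
resolution of 10 has a (4000 << 10) / 1000000 = 4.096 us timer tick.  A
quick standalone check (all values assumed, not read from hardware):

	#include <stdio.h>

	int
	main(void)
	{
		unsigned cclk = 250000;			/* kHz, assumed */
		unsigned cclk_ps = 1000000000 / cclk;	/* 4000 ps/cycle */
		unsigned tre = 10;			/* assumed resolution */
		unsigned tick_ps = cclk_ps << tre;	/* 4096000 ps */

		printf("tick = %u.%03u us\n", tick_ps / 1000000,
		    tick_ps % 1000000 / 1000);		/* 4.096 us */
		return (0);
	}

(unit_conv() above would display this particular value as "4.96", since
it trims but does not zero-pad the fractional remainder.)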
+
 static uint32_t
-fconf_to_mode(uint32_t fconf)
+fconf_iconf_to_mode(uint32_t fconf, uint32_t iconf)
 {
 	uint32_t mode;
 
@@ -4906,8 +7929,11 @@
 	if (fconf & F_VLAN)
 		mode |= T4_FILTER_VLAN;
 
-	if (fconf & F_VNIC_ID)
+	if (fconf & F_VNIC_ID) {
 		mode |= T4_FILTER_VNIC;
+		if (iconf & F_VNIC)
+			mode |= T4_FILTER_IC_VNIC;
+	}
 
 	if (fconf & F_PORT)
 		mode |= T4_FILTER_PORT;
@@ -4957,8 +7983,18 @@
 }
 
 static uint32_t
-fspec_to_fconf(struct t4_filter_specification *fs)
+mode_to_iconf(uint32_t mode)
 {
+
+	if (mode & T4_FILTER_IC_VNIC)
+		return (F_VNIC);
+	return (0);
+}
+
+static int check_fspec_against_fconf_iconf(struct adapter *sc,
+    struct t4_filter_specification *fs)
+{
+	struct tp_params *tpp = &sc->params.tp;
 	uint32_t fconf = 0;
 
 	if (fs->val.frag || fs->mask.frag)
@@ -4982,9 +8018,18 @@
 	if (fs->val.vlan_vld || fs->mask.vlan_vld)
 		fconf |= F_VLAN;
 
-	if (fs->val.vnic_vld || fs->mask.vnic_vld)
+	if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
 		fconf |= F_VNIC_ID;
+		if (tpp->ingress_config & F_VNIC)
+			return (EINVAL);
+	}
 
+	if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+		fconf |= F_VNIC_ID;
+		if ((tpp->ingress_config & F_VNIC) == 0)
+			return (EINVAL);
+	}
+
 	if (fs->val.iport || fs->mask.iport)
 		fconf |= F_PORT;
 
@@ -4991,32 +8036,24 @@
 	if (fs->val.fcoe || fs->mask.fcoe)
 		fconf |= F_FCOE;
 
-	return (fconf);
+	if ((tpp->vlan_pri_map | fconf) != tpp->vlan_pri_map)
+		return (E2BIG);
+
+	return (0);
 }
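
The E2BIG check at the end is a bitwise subset test: OR-ing the fields a
filter wants to match into the configured compressed filter mode must
leave the mode unchanged, otherwise the filter requests a field the
hardware is not extracting.  The same idiom in isolation (masks made up):

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint32_t configured = 0x0f;	/* fields hw matches on */
		uint32_t requested = 0x05;	/* fields the filter uses */

		/* Subset: OK. */
		assert((configured | requested) == configured);

		requested = 0x15;		/* bit 4 is not configured */
		assert((configured | requested) != configured); /* E2BIG */
		return (0);
	}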
 
 static int
 get_filter_mode(struct adapter *sc, uint32_t *mode)
 {
-	int rc;
-	uint32_t fconf;
+	struct tp_params *tpp = &sc->params.tp;
 
-	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
-	    "t4getfm");
-	if (rc)
-		return (rc);
+	/*
+	 * We trust the cached values of the relevant TP registers.  This means
+	 * things work reliably only if writes to those registers are always via
+	 * t4_set_filter_mode.
+	 */
+	*mode = fconf_iconf_to_mode(tpp->vlan_pri_map, tpp->ingress_config);
 
-	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
-	    A_TP_VLAN_PRI_MAP);
-
-	if (sc->filter_mode != fconf) {
-		log(LOG_WARNING, "%s: cached filter mode out of sync %x %x.\n",
-		    device_get_nameunit(sc->dev), sc->filter_mode, fconf);
-		sc->filter_mode = fconf;
-	}
-
-	*mode = fconf_to_mode(sc->filter_mode);
-
-	end_synchronized_op(sc, LOCK_HELD);
 	return (0);
 }
 
@@ -5023,9 +8060,21 @@
 static int
 set_filter_mode(struct adapter *sc, uint32_t mode)
 {
-	uint32_t fconf;
+	struct tp_params *tpp = &sc->params.tp;
+	uint32_t fconf, iconf;
 	int rc;
 
+	iconf = mode_to_iconf(mode);
+	if ((iconf ^ tpp->ingress_config) & F_VNIC) {
+		/*
+		 * For now we just complain if A_TP_INGRESS_CONFIG is not
+		 * already set to the correct value for the requested filter
+		 * mode.  It's not clear if it's safe to write to this register
+		 * on the fly.  (And we trust the cached value of the register).
+		 */
+		return (EBUSY);
+	}
+
 	fconf = mode_to_fconf(mode);
 
 	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
@@ -5039,20 +8088,13 @@
 	}
 
 #ifdef TCP_OFFLOAD
-	if (sc->offload_map) {
+	if (uld_active(sc, ULD_TOM)) {
 		rc = EBUSY;
 		goto done;
 	}
 #endif
 
-#ifdef notyet
 	rc = -t4_set_filter_mode(sc, fconf);
-	if (rc == 0)
-		sc->filter_mode = fconf;
-#else
-	rc = ENOTSUP;
-#endif
-
 done:
 	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
@@ -5061,15 +8103,22 @@
 static inline uint64_t
 get_filter_hits(struct adapter *sc, uint32_t fid)
 {
-	uint32_t tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
-	uint64_t hits;
+	uint32_t tcb_addr;
 
-	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0),
-	    tcb_base + (fid + sc->tids.ftid_base) * TCB_SIZE);
-	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 0));
-	hits = t4_read_reg64(sc, MEMWIN0_BASE + 16);
+	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) +
+	    (fid + sc->tids.ftid_base) * TCB_SIZE;
 
-	return (be64toh(hits));
+	if (is_t4(sc)) {
+		uint64_t hits;
+
+		read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&hits, 8);
+		return (be64toh(hits));
+	} else {
+		uint32_t hits;
+
+		read_via_memwin(sc, 0, tcb_addr + 24, &hits, 4);
+		return (be32toh(hits));
+	}
 }
 
 static int
@@ -5130,21 +8179,15 @@
 		goto done;
 	}
 
-	if (!(sc->flags & FULL_INIT_DONE)) {
-		rc = EAGAIN;
-		goto done;
-	}
-
 	if (t->idx >= nfilters) {
 		rc = EINVAL;
 		goto done;
 	}
 
-	/* Validate against the global filter mode */
-	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
-		rc = E2BIG;
+	/* Validate against the global filter mode and ingress config */
+	rc = check_fspec_against_fconf_iconf(sc, &t->fs);
+	if (rc != 0)
 		goto done;
-	}
 
 	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
 		rc = EINVAL;
@@ -5169,6 +8212,10 @@
 		goto done;
 	}
 
+	if (!(sc->flags & FULL_INIT_DONE) &&
+	    ((rc = adapter_full_init(sc)) != 0))
+		goto done;
+
 	if (sc->tids.ftid_tab == NULL) {
 		KASSERT(sc->tids.ftids_in_use == 0,
 		    ("%s: no memory allocated but filters_in_use > 0",
@@ -5306,9 +8353,9 @@
 set_filter_wr(struct adapter *sc, int fidx)
 {
 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
-	struct wrqe *wr;
 	struct fw_filter_wr *fwr;
-	unsigned int ftid;
+	unsigned int ftid, vnic_vld, vnic_vld_mask;
+	struct wrq_cookie cookie;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
@@ -5325,15 +8372,25 @@
 		}
 	}
 
+	/* Already validated against fconf, iconf */
+	MPASS((f->fs.val.pfvf_vld & f->fs.val.ovlan_vld) == 0);
+	MPASS((f->fs.mask.pfvf_vld & f->fs.mask.ovlan_vld) == 0);
+	if (f->fs.val.pfvf_vld || f->fs.val.ovlan_vld)
+		vnic_vld = 1;
+	else
+		vnic_vld = 0;
+	if (f->fs.mask.pfvf_vld || f->fs.mask.ovlan_vld)
+		vnic_vld_mask = 1;
+	else
+		vnic_vld_mask = 0;
+
 	ftid = sc->tids.ftid_base + fidx;
 
-	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
-	if (wr == NULL)
+	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
+	if (fwr == NULL)
 		return (ENOMEM);
+	bzero(fwr, sizeof(*fwr));
 
-	fwr = wrtod(wr);
-	bzero(fwr, sizeof (*fwr));
-
 	fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
 	fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
 	fwr->tid_to_iq =
@@ -5364,9 +8421,9 @@
 	    (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
 		V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
 		V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
-		V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.vnic_vld) |
+		V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
 		V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
-		V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.vnic_vld));
+		V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
 	fwr->smac_sel = 0;
 	fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
 	    V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
@@ -5401,7 +8458,7 @@
 	f->pending = 1;
 	sc->tids.ftids_in_use++;
 
-	t4_wrq_tx(sc, wr);
+	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
 	return (0);
 }
 
@@ -5409,22 +8466,21 @@
 del_filter_wr(struct adapter *sc, int fidx)
 {
 	struct filter_entry *f = &sc->tids.ftid_tab[fidx];
-	struct wrqe *wr;
 	struct fw_filter_wr *fwr;
 	unsigned int ftid;
+	struct wrq_cookie cookie;
 
 	ftid = sc->tids.ftid_base + fidx;
 
-	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
-	if (wr == NULL)
+	fwr = start_wrq_wr(&sc->sge.mgmtq, howmany(sizeof(*fwr), 16), &cookie);
+	if (fwr == NULL)
 		return (ENOMEM);
-	fwr = wrtod(wr);
 	bzero(fwr, sizeof (*fwr));
 
 	t4_mk_filtdelwr(ftid, fwr, sc->sge.fwq.abs_id);
 
 	f->pending = 1;
-	t4_wrq_tx(sc, wr);
+	commit_wrq_wr(&sc->sge.mgmtq, fwr, &cookie);
 	return (0);
 }
 
@@ -5434,41 +8490,59 @@
 	struct adapter *sc = iq->adapter;
 	const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
 	unsigned int idx = GET_TID(rpl);
+	unsigned int rc;
+	struct filter_entry *f;
 
 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
 	    rss->opcode));
+	MPASS(iq == &sc->sge.fwq);
+	MPASS(is_ftid(sc, idx));
 
-	if (idx >= sc->tids.ftid_base &&
-	    (idx -= sc->tids.ftid_base) < sc->tids.nftids) {
-		unsigned int rc = G_COOKIE(rpl->cookie);
-		struct filter_entry *f = &sc->tids.ftid_tab[idx];
+	idx -= sc->tids.ftid_base;
+	f = &sc->tids.ftid_tab[idx];
+	rc = G_COOKIE(rpl->cookie);
 
-		mtx_lock(&sc->tids.ftid_lock);
-		if (rc == FW_FILTER_WR_FLT_ADDED) {
-			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
-			    __func__, idx));
-			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
-			f->pending = 0;  /* asynchronous setup completed */
-			f->valid = 1;
-		} else {
-			if (rc != FW_FILTER_WR_FLT_DELETED) {
-				/* Add or delete failed, display an error */
-				log(LOG_ERR,
-				    "filter %u setup failed with error %u\n",
-				    idx, rc);
-			}
+	mtx_lock(&sc->tids.ftid_lock);
+	if (rc == FW_FILTER_WR_FLT_ADDED) {
+		KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
+		    __func__, idx));
+		f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
+		f->pending = 0;  /* asynchronous setup completed */
+		f->valid = 1;
+	} else {
+		if (rc != FW_FILTER_WR_FLT_DELETED) {
+			/* Add or delete failed, display an error */
+			log(LOG_ERR,
+			    "filter %u setup failed with error %u\n",
+			    idx, rc);
+		}
 
-			clear_filter(f);
-			sc->tids.ftids_in_use--;
-		}
-		wakeup(&sc->tids.ftid_tab);
-		mtx_unlock(&sc->tids.ftid_lock);
+		clear_filter(f);
+		sc->tids.ftids_in_use--;
 	}
+	wakeup(&sc->tids.ftid_tab);
+	mtx_unlock(&sc->tids.ftid_lock);
 
 	return (0);
 }
 
 static int
+set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+
+	MPASS(iq->set_tcb_rpl != NULL);
+	return (iq->set_tcb_rpl(iq, rss, m));
+}
+
+static int
+l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+
+	MPASS(iq->l2t_write_rpl != NULL);
+	return (iq->l2t_write_rpl(iq, rss, m));
+}
+
+static int
 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
 {
 	int rc;
@@ -5533,82 +8607,72 @@
 }
 
 static int
-read_card_mem(struct adapter *sc, struct t4_mem_range *mr)
+load_cfg(struct adapter *sc, struct t4_data *cfg)
 {
-	uint32_t base, size, lo, hi, win, off, remaining, i, n;
-	uint32_t *buf, *b;
 	int rc;
+	uint8_t *cfg_data = NULL;
 
-	/* reads are in multiples of 32 bits */
-	if (mr->addr & 3 || mr->len & 3 || mr->len == 0)
-		return (EINVAL);
+	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ldcf");
+	if (rc)
+		return (rc);
 
-	/*
-	 * We don't want to deal with potential holes so we mandate that the
-	 * requested region must lie entirely within one of the 3 memories.
-	 */
-	lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
-	if (lo & F_EDRAM0_ENABLE) {
-		hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
-		base = G_EDRAM0_BASE(hi) << 20;
-		size = G_EDRAM0_SIZE(hi) << 20;
-		if (size > 0 &&
-		    mr->addr >= base && mr->addr < base + size &&
-		    mr->addr + mr->len <= base + size)
-			goto proceed;
+	if (cfg->len == 0) {
+		/* clear */
+		rc = -t4_load_cfg(sc, NULL, 0);
+		goto done;
 	}
-	if (lo & F_EDRAM1_ENABLE) {
-		hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
-		base = G_EDRAM1_BASE(hi) << 20;
-		size = G_EDRAM1_SIZE(hi) << 20;
-		if (size > 0 &&
-		    mr->addr >= base && mr->addr < base + size &&
-		    mr->addr + mr->len <= base + size)
-			goto proceed;
+
+	cfg_data = malloc(cfg->len, M_CXGBE, M_WAITOK);
+	if (cfg_data == NULL) {
+		rc = ENOMEM;
+		goto done;
 	}
-	if (lo & F_EXT_MEM_ENABLE) {
-		hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
-		base = G_EXT_MEM_BASE(hi) << 20;
-		size = G_EXT_MEM_SIZE(hi) << 20;
-		if (size > 0 &&
-		    mr->addr >= base && mr->addr < base + size &&
-		    mr->addr + mr->len <= base + size)
-			goto proceed;
-	}
-	return (ENXIO);
 
-proceed:
-	buf = b = malloc(mr->len, M_CXGBE, M_WAITOK);
+	rc = copyin(cfg->data, cfg_data, cfg->len);
+	if (rc == 0)
+		rc = -t4_load_cfg(sc, cfg_data, cfg->len);
 
-	/*
-	 * Position the PCIe window (we use memwin2) to the 16B aligned area
-	 * just at/before the requested region.
-	 */
-	win = mr->addr & ~0xf;
-	off = mr->addr - win;  /* offset of the requested region in the win */
+	free(cfg_data, M_CXGBE);
+done:
+	end_synchronized_op(sc, 0);
+	return (rc);
+}
+
+#define MAX_READ_BUF_SIZE (128 * 1024)
+static int
+read_card_mem(struct adapter *sc, int win, struct t4_mem_range *mr)
+{
+	uint32_t addr, remaining, n;
+	uint32_t *buf;
+	int rc;
+	uint8_t *dst;
+
+	rc = validate_mem_range(sc, mr->addr, mr->len);
+	if (rc != 0)
+		return (rc);
+
+	buf = malloc(min(mr->len, MAX_READ_BUF_SIZE), M_CXGBE, M_WAITOK);
+	addr = mr->addr;
 	remaining = mr->len;
+	dst = (void *)mr->data;
 
 	while (remaining) {
-		t4_write_reg(sc,
-		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2), win);
-		t4_read_reg(sc,
-		    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
+		n = min(remaining, MAX_READ_BUF_SIZE);
+		read_via_memwin(sc, 2, addr, buf, n);
 
-		/* number of bytes that we'll copy in the inner loop */
-		n = min(remaining, MEMWIN2_APERTURE - off);
+		rc = copyout(buf, dst, n);
+		if (rc != 0)
+			break;
 
-		for (i = 0; i < n; i += 4, remaining -= 4)
-			*b++ = t4_read_reg(sc, MEMWIN2_BASE + off + i);
-
-		win += MEMWIN2_APERTURE;
-		off = 0;
+		dst += n;
+		remaining -= n;
+		addr += n;
 	}
 
-	rc = copyout(buf, mr->data, mr->len);
 	free(buf, M_CXGBE);
-
 	return (rc);
 }
+#undef MAX_READ_BUF_SIZE
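
read_card_mem() now stages the transfer through a bounce buffer capped at
MAX_READ_BUF_SIZE instead of allocating mr->len bytes in one go, copying
out one window at a time.  The same chunking pattern in a self-contained
userland form (read_source() is a stand-in for read_via_memwin(), and
memcpy() stands in for copyout()):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define CHUNK 8		/* stand-in for MAX_READ_BUF_SIZE */

	static void
	read_source(uint32_t addr, uint8_t *buf, uint32_t n)
	{
		for (uint32_t i = 0; i < n; i++)	/* fake device */
			buf[i] = (uint8_t)(addr + i);
	}

	int
	main(void)
	{
		uint8_t staging[CHUNK], dst[20];
		uint32_t addr = 0, remaining = sizeof(dst);
		uint8_t *p = dst;

		while (remaining > 0) {
			uint32_t n = remaining < CHUNK ? remaining : CHUNK;

			read_source(addr, staging, n);
			memcpy(p, staging, n);
			p += n;
			remaining -= n;
			addr += n;
		}
		printf("dst[19] = %u\n", dst[19]);	/* 19 */
		return (0);
	}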
 
 static int
 read_i2c(struct adapter *sc, struct t4_i2c_data *i2cd)
@@ -5618,16 +8682,14 @@
 	if (i2cd->len == 0 || i2cd->port_id >= sc->params.nports)
 		return (EINVAL);
 
-	if (i2cd->len > 1) {
-		/* XXX: need fw support for longer reads in one go */
-		return (ENOTSUP);
-	}
+	if (i2cd->len > sizeof(i2cd->data))
+		return (EFBIG);
 
 	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4i2crd");
 	if (rc)
 		return (rc);
 	rc = -t4_i2c_rd(sc, sc->mbox, i2cd->port_id, i2cd->dev_addr,
-	    i2cd->offset, &i2cd->data[0]);
+	    i2cd->offset, i2cd->len, &i2cd->data[0]);
 	end_synchronized_op(sc, 0);
 
 	return (rc);
@@ -5671,21 +8733,29 @@
 t4_os_portmod_changed(const struct adapter *sc, int idx)
 {
 	struct port_info *pi = sc->port[idx];
+	struct vi_info *vi;
+	struct ifnet *ifp;
+	int v;
 	static const char *mod_str[] = {
 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
 	};
 
+	for_each_vi(pi, v, vi) {
+		build_medialist(pi, &vi->media);
+	}
+
+	ifp = pi->vi[0].ifp;
 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
-		if_printf(pi->ifp, "transceiver unplugged.\n");
+		if_printf(ifp, "transceiver unplugged.\n");
 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
-		if_printf(pi->ifp, "unknown transceiver inserted.\n");
+		if_printf(ifp, "unknown transceiver inserted.\n");
 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
-		if_printf(pi->ifp, "unsupported transceiver inserted.\n");
+		if_printf(ifp, "unsupported transceiver inserted.\n");
 	else if (pi->mod_type > 0 && pi->mod_type < nitems(mod_str)) {
-		if_printf(pi->ifp, "%s transceiver inserted.\n",
+		if_printf(ifp, "%s transceiver inserted.\n",
 		    mod_str[pi->mod_type]);
 	} else {
-		if_printf(pi->ifp, "transceiver (type %d) inserted.\n",
+		if_printf(ifp, "transceiver (type %d) inserted.\n",
 		    pi->mod_type);
 	}
 }
@@ -5694,13 +8764,22 @@
 t4_os_link_changed(struct adapter *sc, int idx, int link_stat)
 {
 	struct port_info *pi = sc->port[idx];
-	struct ifnet *ifp = pi->ifp;
+	struct vi_info *vi;
+	struct ifnet *ifp;
+	int v;
 
-	if (link_stat) {
-		ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
-		if_link_state_change(ifp, LINK_STATE_UP);
-	} else
-		if_link_state_change(ifp, LINK_STATE_DOWN);
+	for_each_vi(pi, v, vi) {
+		ifp = vi->ifp;
+		if (ifp == NULL)
+			continue;
+
+		if (link_stat) {
+			ifp->if_baudrate = IF_Mbps(pi->link_cfg.speed);
+			if_link_state_change(ifp, LINK_STATE_UP);
+		} else {
+			if_link_state_change(ifp, LINK_STATE_DOWN);
+		}
+	}
 }
 
 void
@@ -5708,7 +8787,7 @@
 {
 	struct adapter *sc;
 
-	mtx_lock(&t4_list_lock);
+	sx_slock(&t4_list_lock);
 	SLIST_FOREACH(sc, &t4_list, link) {
 		/*
 		 * func should not make any assumptions about what state sc is
@@ -5716,22 +8795,10 @@
 		 */
 		func(sc, arg);
 	}
-	mtx_unlock(&t4_list_lock);
+	sx_sunlock(&t4_list_lock);
 }
 
 static int
-t4_open(struct cdev *dev, int flags, int type, struct thread *td)
-{
-       return (0);
-}
-
-static int
-t4_close(struct cdev *dev, int flags, int type, struct thread *td)
-{
-       return (0);
-}
-
-static int
 t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
     struct thread *td)
 {
@@ -5776,7 +8843,7 @@
 	}
 	case CHELSIO_T4_REGDUMP: {
 		struct t4_regdump *regs = (struct t4_regdump *)data;
-		int reglen = T4_REGDUMP_SIZE;
+		int reglen = t4_get_regs_len(sc);
 		uint8_t *buf;
 
 		if (regs->len < reglen) {
@@ -5786,7 +8853,7 @@
 
 		regs->len = reglen;
 		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
-		t4_get_regs(sc, regs, buf);
+		get_regs(sc, regs, buf);
 		rc = copyout(buf, regs->data, reglen);
 		free(buf, M_CXGBE);
 		break;
@@ -5813,77 +8880,136 @@
 		rc = load_fw(sc, (struct t4_data *)data);
 		break;
 	case CHELSIO_T4_GET_MEM:
-		rc = read_card_mem(sc, (struct t4_mem_range *)data);
+		rc = read_card_mem(sc, 2, (struct t4_mem_range *)data);
 		break;
 	case CHELSIO_T4_GET_I2C:
 		rc = read_i2c(sc, (struct t4_i2c_data *)data);
 		break;
 	case CHELSIO_T4_CLEAR_STATS: {
-		int i;
+		int i, v;
 		u_int port_id = *(uint32_t *)data;
 		struct port_info *pi;
+		struct vi_info *vi;
 
 		if (port_id >= sc->params.nports)
 			return (EINVAL);
+		pi = sc->port[port_id];
+		if (pi == NULL)
+			return (EIO);
 
 		/* MAC stats */
-		t4_clr_port_stats(sc, port_id);
+		t4_clr_port_stats(sc, pi->tx_chan);
+		pi->tx_parse_error = 0;
+		mtx_lock(&sc->reg_lock);
+		for_each_vi(pi, v, vi) {
+			if (vi->flags & VI_INIT_DONE)
+				t4_clr_vi_stats(sc, vi->viid);
+		}
+		mtx_unlock(&sc->reg_lock);
 
-		pi = sc->port[port_id];
-		if (pi->flags & PORT_INIT_DONE) {
-			struct sge_rxq *rxq;
-			struct sge_txq *txq;
-			struct sge_wrq *wrq;
+		/*
+		 * Since this command accepts a port, clear stats for
+		 * all VIs on this port.
+		 */
+		for_each_vi(pi, v, vi) {
+			if (vi->flags & VI_INIT_DONE) {
+				struct sge_rxq *rxq;
+				struct sge_txq *txq;
+				struct sge_wrq *wrq;
 
-			for_each_rxq(pi, i, rxq) {
+				for_each_rxq(vi, i, rxq) {
 #if defined(INET) || defined(INET6)
-				rxq->lro.lro_queued = 0;
-				rxq->lro.lro_flushed = 0;
+					rxq->lro.lro_queued = 0;
+					rxq->lro.lro_flushed = 0;
 #endif
-				rxq->rxcsum = 0;
-				rxq->vlan_extraction = 0;
-			}
+					rxq->rxcsum = 0;
+					rxq->vlan_extraction = 0;
+				}
 
-			for_each_txq(pi, i, txq) {
-				txq->txcsum = 0;
-				txq->tso_wrs = 0;
-				txq->vlan_insertion = 0;
-				txq->imm_wrs = 0;
-				txq->sgl_wrs = 0;
-				txq->txpkt_wrs = 0;
-				txq->txpkts_wrs = 0;
-				txq->txpkts_pkts = 0;
-				txq->br->br_drops = 0;
-				txq->no_dmamap = 0;
-				txq->no_desc = 0;
-			}
+				for_each_txq(vi, i, txq) {
+					txq->txcsum = 0;
+					txq->tso_wrs = 0;
+					txq->vlan_insertion = 0;
+					txq->imm_wrs = 0;
+					txq->sgl_wrs = 0;
+					txq->txpkt_wrs = 0;
+					txq->txpkts0_wrs = 0;
+					txq->txpkts1_wrs = 0;
+					txq->txpkts0_pkts = 0;
+					txq->txpkts1_pkts = 0;
+					mp_ring_reset_stats(txq->r);
+				}
 
 #ifdef TCP_OFFLOAD
-			/* nothing to clear for each ofld_rxq */
+				/* nothing to clear for each ofld_rxq */
 
-			for_each_ofld_txq(pi, i, wrq) {
-				wrq->tx_wrs = 0;
-				wrq->no_desc = 0;
+				for_each_ofld_txq(vi, i, wrq) {
+					wrq->tx_wrs_direct = 0;
+					wrq->tx_wrs_copied = 0;
+				}
+#endif
+
+				if (IS_MAIN_VI(vi)) {
+					wrq = &sc->sge.ctrlq[pi->port_id];
+					wrq->tx_wrs_direct = 0;
+					wrq->tx_wrs_copied = 0;
+				}
 			}
-#endif
-			wrq = &sc->sge.ctrlq[pi->port_id];
-			wrq->tx_wrs = 0;
-			wrq->no_desc = 0;
 		}
 		break;
 	}
+	case CHELSIO_T4_SCHED_CLASS:
+		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
+		break;
+	case CHELSIO_T4_SCHED_QUEUE:
+		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
+		break;
+	case CHELSIO_T4_GET_TRACER:
+		rc = t4_get_tracer(sc, (struct t4_tracer *)data);
+		break;
+	case CHELSIO_T4_SET_TRACER:
+		rc = t4_set_tracer(sc, (struct t4_tracer *)data);
+		break;
+	case CHELSIO_T4_LOAD_CFG:
+		rc = load_cfg(sc, (struct t4_data *)data);
+		break;
 	default:
-		rc = EINVAL;
+		rc = ENOTTY;
 	}
 
 	return (rc);
 }
 
+void
+t4_db_full(struct adapter *sc)
+{
+
+	CXGBE_UNIMPLEMENTED(__func__);
+}
+
+void
+t4_db_dropped(struct adapter *sc)
+{
+
+	CXGBE_UNIMPLEMENTED(__func__);
+}
+
 #ifdef TCP_OFFLOAD
+void
+t4_iscsi_init(struct adapter *sc, u_int tag_mask, const u_int *pgsz_order)
+{
+
+	t4_write_reg(sc, A_ULP_RX_ISCSI_TAGMASK, tag_mask);
+	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, V_HPZ0(pgsz_order[0]) |
+		V_HPZ1(pgsz_order[1]) | V_HPZ2(pgsz_order[2]) |
+		V_HPZ3(pgsz_order[3]));
+}
+
 static int
-toe_capability(struct port_info *pi, int enable)
+toe_capability(struct vi_info *vi, int enable)
 {
 	int rc;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
@@ -5892,16 +9018,34 @@
 		return (ENODEV);
 
 	if (enable) {
-		if (!(sc->flags & FULL_INIT_DONE)) {
-			rc = cxgbe_init_synchronized(pi);
+		if ((vi->ifp->if_capenable & IFCAP_TOE) != 0) {
+			/* TOE is already enabled. */
+			return (0);
+		}
+
+		/*
+		 * We need the port's queues around so that we're able to send
+		 * and receive CPLs to/from the TOE even if the ifnet for this
+		 * port has never been UP'd administratively.
+		 */
+		if (!(vi->flags & VI_INIT_DONE)) {
+			rc = vi_full_init(vi);
 			if (rc)
 				return (rc);
 		}
+		if (!(pi->vi[0].flags & VI_INIT_DONE)) {
+			rc = vi_full_init(&pi->vi[0]);
+			if (rc)
+				return (rc);
+		}
 
-		if (isset(&sc->offload_map, pi->port_id))
+		if (isset(&sc->offload_map, pi->port_id)) {
+			/* TOE is enabled on another VI of this port. */
+			pi->uld_vis++;
 			return (0);
+		}
 
-		if (!(sc->flags & TOM_INIT_DONE)) {
+		if (!uld_active(sc, ULD_TOM)) {
 			rc = t4_activate_uld(sc, ULD_TOM);
 			if (rc == EAGAIN) {
 				log(LOG_WARNING,
@@ -5912,16 +9056,25 @@
 				return (rc);
 			KASSERT(sc->tom_softc != NULL,
 			    ("%s: TOM activated but softc NULL", __func__));
-			KASSERT(sc->flags & TOM_INIT_DONE,
+			KASSERT(uld_active(sc, ULD_TOM),
 			    ("%s: TOM activated but flag not set", __func__));
 		}
 
+		/* Activate iWARP and iSCSI too, if the modules are loaded. */
+		if (!uld_active(sc, ULD_IWARP))
+			(void) t4_activate_uld(sc, ULD_IWARP);
+		if (!uld_active(sc, ULD_ISCSI))
+			(void) t4_activate_uld(sc, ULD_ISCSI);
+
+		pi->uld_vis++;
 		setbit(&sc->offload_map, pi->port_id);
 	} else {
-		if (!isset(&sc->offload_map, pi->port_id))
+		pi->uld_vis--;
+
+		if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
 			return (0);
 
-		KASSERT(sc->flags & TOM_INIT_DONE,
+		KASSERT(uld_active(sc, ULD_TOM),
 		    ("%s: TOM never initialized?", __func__));
 		clrbit(&sc->offload_map, pi->port_id);
 	}
@@ -5938,7 +9091,7 @@
 	int rc = 0;
 	struct uld_info *u;
 
-	mtx_lock(&t4_uld_list_lock);
+	sx_xlock(&t4_uld_list_lock);
 	SLIST_FOREACH(u, &t4_uld_list, link) {
 	    if (u->uld_id == ui->uld_id) {
 		    rc = EEXIST;
@@ -5949,7 +9102,7 @@
 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
 	ui->refcount = 0;
 done:
-	mtx_unlock(&t4_uld_list_lock);
+	sx_xunlock(&t4_uld_list_lock);
 	return (rc);
 }
 
@@ -5959,7 +9112,7 @@
 	int rc = EINVAL;
 	struct uld_info *u;
 
-	mtx_lock(&t4_uld_list_lock);
+	sx_xlock(&t4_uld_list_lock);
 
 	SLIST_FOREACH(u, &t4_uld_list, link) {
 	    if (u == ui) {
@@ -5974,7 +9127,7 @@
 	    }
 	}
 done:
-	mtx_unlock(&t4_uld_list_lock);
+	sx_xunlock(&t4_uld_list_lock);
 	return (rc);
 }
 
@@ -5981,24 +9134,36 @@
 int
 t4_activate_uld(struct adapter *sc, int id)
 {
-	int rc = EAGAIN;
+	int rc;
 	struct uld_info *ui;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
-	mtx_lock(&t4_uld_list_lock);
+	if (id < 0 || id > ULD_MAX)
+		return (EINVAL);
+	rc = EAGAIN;	/* kldload the module with this ULD and try again. */
 
+	sx_slock(&t4_uld_list_lock);
+
 	SLIST_FOREACH(ui, &t4_uld_list, link) {
 		if (ui->uld_id == id) {
+			if (!(sc->flags & FULL_INIT_DONE)) {
+				rc = adapter_full_init(sc);
+				if (rc != 0)
+					break;
+			}
+
 			rc = ui->activate(sc);
-			if (rc == 0)
+			if (rc == 0) {
+				setbit(&sc->active_ulds, id);
 				ui->refcount++;
-			goto done;
+			}
+			break;
 		}
 	}
-done:
-	mtx_unlock(&t4_uld_list_lock);
 
+	sx_sunlock(&t4_uld_list_lock);
+
 	return (rc);
 }
 
@@ -6005,29 +9170,60 @@
 int
 t4_deactivate_uld(struct adapter *sc, int id)
 {
-	int rc = EINVAL;
+	int rc;
 	struct uld_info *ui;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
-	mtx_lock(&t4_uld_list_lock);
+	if (id < 0 || id > ULD_MAX)
+		return (EINVAL);
+	rc = ENXIO;
 
+	sx_slock(&t4_uld_list_lock);
+
 	SLIST_FOREACH(ui, &t4_uld_list, link) {
 		if (ui->uld_id == id) {
 			rc = ui->deactivate(sc);
-			if (rc == 0)
+			if (rc == 0) {
+				clrbit(&sc->active_ulds, id);
 				ui->refcount--;
-			goto done;
+			}
+			break;
 		}
 	}
-done:
-	mtx_unlock(&t4_uld_list_lock);
 
+	sx_sunlock(&t4_uld_list_lock);
+
 	return (rc);
 }
+
+int
+uld_active(struct adapter *sc, int uld_id)
+{
+
+	MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
+
+	return (isset(&sc->active_ulds, uld_id));
+}
 #endif
 
 /*
+ * t  = ptr to tunable.
+ * nc = number of CPUs.
+ * c  = compiled in default for that tunable.
+ */
+static void
+calculate_nqueues(int *t, int nc, const int c)
+{
+	int nq;
+
+	if (*t > 0)
+		return;
+	nq = *t < 0 ? -*t : c;
+	*t = min(nc, nq);
+}
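
The sign convention here: a positive tunable is an explicit user choice
and is left alone, zero selects the compiled-in default, and a negative
value requests -*t queues; the latter two are capped at the CPU count.
A standalone check of all three cases (counts made up):

	#include <stdio.h>

	static void
	calculate_nqueues(int *t, int nc, const int c)
	{
		int nq;

		if (*t > 0)
			return;
		nq = *t < 0 ? -*t : c;
		*t = nq < nc ? nq : nc;	/* min(nc, nq) */
	}

	int
	main(void)
	{
		int a = 4, b = 0, d = -16;

		calculate_nqueues(&a, 8, 16);	/* explicit: stays 4 */
		calculate_nqueues(&b, 8, 16);	/* default 16 -> capped 8 */
		calculate_nqueues(&d, 8, 16);	/* wants 16 -> capped 8 */
		printf("%d %d %d\n", a, b, d);	/* 4 8 8 */
		return (0);
	}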
+
+/*
  * Come up with reasonable defaults for some of the tunables, provided they're
  * not set by the user (in which case we'll use the values as is).
  */
@@ -6036,38 +9232,81 @@
 {
 	int nc = mp_ncpus;	/* our snapshot of the number of CPUs */
 
-	if (t4_ntxq10g < 1)
-		t4_ntxq10g = min(nc, NTXQ_10G);
+	if (t4_ntxq10g < 1) {
+#ifdef RSS
+		t4_ntxq10g = rss_getnumbuckets();
+#else
+		calculate_nqueues(&t4_ntxq10g, nc, NTXQ_10G);
+#endif
+	}
 
-	if (t4_ntxq1g < 1)
-		t4_ntxq1g = min(nc, NTXQ_1G);
+	if (t4_ntxq1g < 1) {
+#ifdef RSS
+		/* XXX: way too many for 1GbE? */
+		t4_ntxq1g = rss_getnumbuckets();
+#else
+		calculate_nqueues(&t4_ntxq1g, nc, NTXQ_1G);
+#endif
+	}
 
-	if (t4_nrxq10g < 1)
-		t4_nrxq10g = min(nc, NRXQ_10G);
+	calculate_nqueues(&t4_ntxq_vi, nc, NTXQ_VI);
 
-	if (t4_nrxq1g < 1)
-		t4_nrxq1g = min(nc, NRXQ_1G);
+	if (t4_nrxq10g < 1) {
+#ifdef RSS
+		t4_nrxq10g = rss_getnumbuckets();
+#else
+		calculate_nqueues(&t4_nrxq10g, nc, NRXQ_10G);
+#endif
+	}
 
-#ifdef TCP_OFFLOAD
-	if (t4_nofldtxq10g < 1)
-		t4_nofldtxq10g = min(nc, NOFLDTXQ_10G);
+	if (t4_nrxq1g < 1) {
+#ifdef RSS
+		/* XXX: way too many for 1GbE? */
+		t4_nrxq1g = rss_getnumbuckets();
+#else
+		calculate_nqueues(&t4_nrxq1g, nc, NRXQ_1G);
+#endif
+	}
 
-	if (t4_nofldtxq1g < 1)
-		t4_nofldtxq1g = min(nc, NOFLDTXQ_1G);
+	calculate_nqueues(&t4_nrxq_vi, nc, NRXQ_VI);
 
-	if (t4_nofldrxq10g < 1)
-		t4_nofldrxq10g = min(nc, NOFLDRXQ_10G);
+#ifdef TCP_OFFLOAD
+	calculate_nqueues(&t4_nofldtxq10g, nc, NOFLDTXQ_10G);
+	calculate_nqueues(&t4_nofldtxq1g, nc, NOFLDTXQ_1G);
+	calculate_nqueues(&t4_nofldtxq_vi, nc, NOFLDTXQ_VI);
+	calculate_nqueues(&t4_nofldrxq10g, nc, NOFLDRXQ_10G);
+	calculate_nqueues(&t4_nofldrxq1g, nc, NOFLDRXQ_1G);
+	calculate_nqueues(&t4_nofldrxq_vi, nc, NOFLDRXQ_VI);
 
-	if (t4_nofldrxq1g < 1)
-		t4_nofldrxq1g = min(nc, NOFLDRXQ_1G);
-
 	if (t4_toecaps_allowed == -1)
 		t4_toecaps_allowed = FW_CAPS_CONFIG_TOE;
+
+	if (t4_rdmacaps_allowed == -1) {
+		t4_rdmacaps_allowed = FW_CAPS_CONFIG_RDMA_RDDP |
+		    FW_CAPS_CONFIG_RDMA_RDMAC;
+	}
+
+	if (t4_iscsicaps_allowed == -1) {
+		t4_iscsicaps_allowed = FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU |
+		    FW_CAPS_CONFIG_ISCSI_TARGET_PDU |
+		    FW_CAPS_CONFIG_ISCSI_T10DIF;
+	}
 #else
 	if (t4_toecaps_allowed == -1)
 		t4_toecaps_allowed = 0;
+
+	if (t4_rdmacaps_allowed == -1)
+		t4_rdmacaps_allowed = 0;
+
+	if (t4_iscsicaps_allowed == -1)
+		t4_iscsicaps_allowed = 0;
 #endif
 
+#ifdef DEV_NETMAP
+	calculate_nqueues(&t4_nnmtxq_vi, nc, NNMTXQ_VI);
+	calculate_nqueues(&t4_nnmrxq_vi, nc, NNMRXQ_VI);
+#endif
+
 	if (t4_tmr_idx_10g < 0 || t4_tmr_idx_10g >= SGE_NTIMERS)
 		t4_tmr_idx_10g = TMR_IDX_10G;
 
@@ -6091,42 +9330,255 @@
 	t4_intr_types &= INTR_MSIX | INTR_MSI | INTR_INTX;
 }
 
+#ifdef DDB
+static void
+t4_dump_tcb(struct adapter *sc, int tid)
+{
+	uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
+
+	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
+	save = t4_read_reg(sc, reg);
+	base = sc->memwin[2].mw_base;
+
+	/* Dump TCB for the tid */
+	tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
+	tcb_addr += tid * TCB_SIZE;
+
+	if (is_t4(sc)) {
+		pf = 0;
+		win_pos = tcb_addr & ~0xf;	/* start must be 16B aligned */
+	} else {
+		pf = V_PFNUM(sc->pf);
+		win_pos = tcb_addr & ~0x7f;	/* start must be 128B aligned */
+	}
+	t4_write_reg(sc, reg, win_pos | pf);
+	t4_read_reg(sc, reg);
+
+	off = tcb_addr - win_pos;
+	for (i = 0; i < 4; i++) {
+		uint32_t buf[8];
+		for (j = 0; j < 8; j++, off += 4)
+			buf[j] = htonl(t4_read_reg(sc, base + off));
+
+		db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
+		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
+		    buf[7]);
+	}
+
+	t4_write_reg(sc, reg, save);
+	t4_read_reg(sc, reg);
+}
+
+static void
+t4_dump_devlog(struct adapter *sc)
+{
+	struct devlog_params *dparams = &sc->params.devlog;
+	struct fw_devlog_e e;
+	int i, first, j, m, nentries, rc;
+	uint64_t ftstamp = UINT64_MAX;
+
+	if (dparams->start == 0) {
+		db_printf("devlog params not valid\n");
+		return;
+	}
+
+	nentries = dparams->size / sizeof(struct fw_devlog_e);
+	m = fwmtype_to_hwmtype(dparams->memtype);
+
+	/* Find the first entry. */
+	first = -1;
+	for (i = 0; i < nentries && !db_pager_quit; i++) {
+		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
+		    sizeof(e), (void *)&e);
+		if (rc != 0)
+			break;
+
+		if (e.timestamp == 0)
+			break;
+
+		e.timestamp = be64toh(e.timestamp);
+		if (e.timestamp < ftstamp) {
+			ftstamp = e.timestamp;
+			first = i;
+		}
+	}
+
+	if (first == -1)
+		return;
+
+	i = first;
+	do {
+		rc = -t4_mem_read(sc, m, dparams->start + i * sizeof(e),
+		    sizeof(e), (void *)&e);
+		if (rc != 0)
+			return;
+
+		if (e.timestamp == 0)
+			return;
+
+		e.timestamp = be64toh(e.timestamp);
+		e.seqno = be32toh(e.seqno);
+		for (j = 0; j < 8; j++)
+			e.params[j] = be32toh(e.params[j]);
+
+		db_printf("%10d  %15ju  %8s  %8s  ",
+		    e.seqno, e.timestamp,
+		    (e.level < nitems(devlog_level_strings) ?
+			devlog_level_strings[e.level] : "UNKNOWN"),
+		    (e.facility < nitems(devlog_facility_strings) ?
+			devlog_facility_strings[e.facility] : "UNKNOWN"));
+		db_printf(e.fmt, e.params[0], e.params[1], e.params[2],
+		    e.params[3], e.params[4], e.params[5], e.params[6],
+		    e.params[7]);
+
+		if (++i == nentries)
+			i = 0;
+	} while (i != first && !db_pager_quit);
+}
+
+static struct command_table db_t4_table = LIST_HEAD_INITIALIZER(db_t4_table);
+_DB_SET(_show, t4, NULL, db_show_table, 0, &db_t4_table);
+
+DB_FUNC(devlog, db_show_devlog, db_t4_table, CS_OWN, NULL)
+{
+	device_t dev;
+	int t;
+	bool valid;
+
+	valid = false;
+	t = db_read_token();
+	if (t == tIDENT) {
+		dev = device_lookup_by_name(db_tok_string);
+		valid = true;
+	}
+	db_skip_to_eol();
+	if (!valid) {
+		db_printf("usage: show t4 devlog <nexus>\n");
+		return;
+	}
+
+	if (dev == NULL) {
+		db_printf("device not found\n");
+		return;
+	}
+
+	t4_dump_devlog(device_get_softc(dev));
+}
+
+DB_FUNC(tcb, db_show_t4tcb, db_t4_table, CS_OWN, NULL)
+{
+	device_t dev;
+	int radix, tid, t;
+	bool valid;
+
+	valid = false;
+	radix = db_radix;
+	db_radix = 10;
+	t = db_read_token();
+	if (t == tIDENT) {
+		dev = device_lookup_by_name(db_tok_string);
+		t = db_read_token();
+		if (t == tNUMBER) {
+			tid = db_tok_number;
+			valid = true;
+		}
+	}
+	db_radix = radix;
+	db_skip_to_eol();
+	if (!valid) {
+		db_printf("usage: show t4 tcb <nexus> <tid>\n");
+		return;
+	}
+
+	if (dev == NULL) {
+		db_printf("device not found\n");
+		return;
+	}
+	if (tid < 0) {
+		db_printf("invalid tid\n");
+		return;
+	}
+
+	t4_dump_tcb(device_get_softc(dev), tid);
+}
+#endif
+
+static struct sx mlu;	/* mod load unload */
+SX_SYSINIT(cxgbe_mlu, &mlu, "cxgbe mod load/unload");
+
 static int
-t4_mod_event(module_t mod, int cmd, void *arg)
+mod_event(module_t mod, int cmd, void *arg)
 {
 	int rc = 0;
+	static int loaded = 0;
 
 	switch (cmd) {
 	case MOD_LOAD:
-		t4_sge_modload();
-		mtx_init(&t4_list_lock, "T4 adapters", 0, MTX_DEF);
-		SLIST_INIT(&t4_list);
+		sx_xlock(&mlu);
+		if (loaded++ == 0) {
+			t4_sge_modload();
+			t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl);
+			t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl);
+			t4_register_cpl_handler(CPL_TRACE_PKT, t4_trace_pkt);
+			t4_register_cpl_handler(CPL_T5_TRACE_PKT, t5_trace_pkt);
+			sx_init(&t4_list_lock, "T4/T5 adapters");
+			SLIST_INIT(&t4_list);
 #ifdef TCP_OFFLOAD
-		mtx_init(&t4_uld_list_lock, "T4 ULDs", 0, MTX_DEF);
-		SLIST_INIT(&t4_uld_list);
+			sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
+			SLIST_INIT(&t4_uld_list);
 #endif
-		tweak_tunables();
+			t4_tracer_modload();
+			tweak_tunables();
+		}
+		sx_xunlock(&mlu);
 		break;
 
 	case MOD_UNLOAD:
+		sx_xlock(&mlu);
+		if (--loaded == 0) {
+			int tries;
+
+			sx_slock(&t4_list_lock);
+			if (!SLIST_EMPTY(&t4_list)) {
+				rc = EBUSY;
+				sx_sunlock(&t4_list_lock);
+				goto done_unload;
+			}
 #ifdef TCP_OFFLOAD
-		mtx_lock(&t4_uld_list_lock);
-		if (!SLIST_EMPTY(&t4_uld_list)) {
-			rc = EBUSY;
-			mtx_unlock(&t4_uld_list_lock);
-			break;
-		}
-		mtx_unlock(&t4_uld_list_lock);
-		mtx_destroy(&t4_uld_list_lock);
+			sx_slock(&t4_uld_list_lock);
+			if (!SLIST_EMPTY(&t4_uld_list)) {
+				rc = EBUSY;
+				sx_sunlock(&t4_uld_list_lock);
+				sx_sunlock(&t4_list_lock);
+				goto done_unload;
+			}
 #endif
-		mtx_lock(&t4_list_lock);
-		if (!SLIST_EMPTY(&t4_list)) {
-			rc = EBUSY;
-			mtx_unlock(&t4_list_lock);
-			break;
+			tries = 0;
+			while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
+				uprintf("%ju clusters with custom free routine "
+				    "still is use.\n", t4_sge_extfree_refs());
+				pause("t4unload", 2 * hz);
+			}
+#ifdef TCP_OFFLOAD
+			sx_sunlock(&t4_uld_list_lock);
+#endif
+			sx_sunlock(&t4_list_lock);
+
+			if (t4_sge_extfree_refs() == 0) {
+				t4_tracer_modunload();
+#ifdef TCP_OFFLOAD
+				sx_destroy(&t4_uld_list_lock);
+#endif
+				sx_destroy(&t4_list_lock);
+				t4_sge_modunload();
+				loaded = 0;
+			} else {
+				rc = EBUSY;
+				loaded++;	/* undo earlier decrement */
+			}
 		}
-		mtx_unlock(&t4_list_lock);
-		mtx_destroy(&t4_list_lock);
+done_unload:
+		sx_xunlock(&mlu);
 		break;
 	}
 
@@ -6133,11 +9585,39 @@
 	return (rc);
 }
 
-static devclass_t t4_devclass;
-static devclass_t cxgbe_devclass;
+static devclass_t t4_devclass, t5_devclass, t6_devclass;
+static devclass_t cxgbe_devclass, cxl_devclass, cc_devclass;
+static devclass_t vcxgbe_devclass, vcxl_devclass, vcc_devclass;
 
-DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, t4_mod_event, 0);
+DRIVER_MODULE(t4nex, pci, t4_driver, t4_devclass, mod_event, 0);
 MODULE_VERSION(t4nex, 1);
+MODULE_DEPEND(t4nex, firmware, 1, 1, 1);
 
+DRIVER_MODULE(t5nex, pci, t5_driver, t5_devclass, mod_event, 0);
+MODULE_VERSION(t5nex, 1);
+MODULE_DEPEND(t5nex, firmware, 1, 1, 1);
+
+DRIVER_MODULE(t6nex, pci, t6_driver, t6_devclass, mod_event, 0);
+MODULE_VERSION(t6nex, 1);
+MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
 DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, cxgbe_devclass, 0, 0);
 MODULE_VERSION(cxgbe, 1);
+
+DRIVER_MODULE(cxl, t5nex, cxl_driver, cxl_devclass, 0, 0);
+MODULE_VERSION(cxl, 1);
+
+DRIVER_MODULE(cc, t6nex, cc_driver, cc_devclass, 0, 0);
+MODULE_VERSION(cc, 1);
+
+DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, vcxgbe_devclass, 0, 0);
+MODULE_VERSION(vcxgbe, 1);
+
+DRIVER_MODULE(vcxl, cxl, vcxl_driver, vcxl_devclass, 0, 0);
+MODULE_VERSION(vcxl, 1);
+
+DRIVER_MODULE(vcc, cc, vcc_driver, vcc_devclass, 0, 0);
+MODULE_VERSION(vcc, 1);

Added: trunk/sys/dev/cxgbe/t4_mp_ring.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_mp_ring.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/t4_mp_ring.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,370 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_mp_ring.c 284052 2015-06-06 09:28:40Z np $");
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/counter.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <machine/cpu.h>
+
+#include "t4_mp_ring.h"
+
+#if defined(__i386__)
+#define atomic_cmpset_acq_64 atomic_cmpset_64
+#define atomic_cmpset_rel_64 atomic_cmpset_64
+#endif
+
+union ring_state {
+	struct {
+		uint16_t pidx_head;
+		uint16_t pidx_tail;
+		uint16_t cidx;
+		uint16_t flags;
+	};
+	uint64_t state;
+};
+
+enum {
+	IDLE = 0,	/* consumer ran to completion, nothing more to do. */
+	BUSY,		/* consumer is running already, or will be shortly. */
+	STALLED,	/* consumer stopped due to lack of resources. */
+	ABDICATED,	/* consumer stopped even though there was work to be
+			   done because it wants another thread to take over. */
+};
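
union ring_state packs the producer head, producer tail, consumer index,
and flags into a single 64-bit word, so the entire ring state advances
atomically with one 64-bit compare-and-swap.  A userland sketch of that
update pattern (C11 stdatomic standing in for the kernel's
atomic_cmpset_64; the reservation size is made up, and the driver
additionally wraps the index at r->size via increment_idx()):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	union ring_state {
		struct {
			uint16_t pidx_head;
			uint16_t pidx_tail;
			uint16_t cidx;
			uint16_t flags;
		};
		uint64_t state;
	};

	int
	main(void)
	{
		_Atomic uint64_t state = 0;
		union ring_state os, ns;

		/* Producer-side reservation: bump pidx_head by 3 slots,
		 * retrying if another producer raced us. */
		do {
			os.state = ns.state = atomic_load(&state);
			ns.pidx_head += 3;
		} while (!atomic_compare_exchange_weak(&state, &os.state,
		    ns.state));

		os.state = atomic_load(&state);
		printf("head=%u tail=%u\n", os.pidx_head, os.pidx_tail);
		return (0);			/* head=3 tail=0 */
	}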
+
+static inline uint16_t
+space_available(struct mp_ring *r, union ring_state s)
+{
+	uint16_t x = r->size - 1;
+
+	if (s.cidx == s.pidx_head)
+		return (x);
+	else if (s.cidx > s.pidx_head)
+		return (s.cidx - s.pidx_head - 1);
+	else
+		return (x - s.pidx_head + s.cidx);
+}
+
+static inline uint16_t
+increment_idx(struct mp_ring *r, uint16_t idx, uint16_t n)
+{
+	int x = r->size - idx;
+
+	MPASS(x > 0);
+	return (x > n ? idx + n : n - x);
+}
+
+/* Consumer is about to update the ring's state to s */
+static inline uint16_t
+state_to_flags(union ring_state s, int abdicate)
+{
+
+	if (s.cidx == s.pidx_tail)
+		return (IDLE);
+	else if (abdicate && s.pidx_tail != s.pidx_head)
+		return (ABDICATED);
+
+	return (BUSY);
+}
+
+/*
+ * Caller passes in a state, with a guarantee that there is work to do and that
+ * all items up to the pidx_tail in the state are visible.
+ */
+static void
+drain_ring(struct mp_ring *r, union ring_state os, uint16_t prev, int budget)
+{
+	union ring_state ns;
+	int n, pending, total;
+	uint16_t cidx = os.cidx;
+	uint16_t pidx = os.pidx_tail;
+
+	MPASS(os.flags == BUSY);
+	MPASS(cidx != pidx);
+
+	if (prev == IDLE)
+		counter_u64_add(r->starts, 1);
+	pending = 0;
+	total = 0;
+
+	while (cidx != pidx) {
+
+		/* Items from cidx to pidx are available for consumption. */
+		n = r->drain(r, cidx, pidx);
+		if (n == 0) {
+			critical_enter();
+			do {
+				os.state = ns.state = r->state;
+				ns.cidx = cidx;
+				ns.flags = STALLED;
+			} while (atomic_cmpset_64(&r->state, os.state,
+			    ns.state) == 0);
+			critical_exit();
+			if (prev != STALLED)
+				counter_u64_add(r->stalls, 1);
+			else if (total > 0) {
+				counter_u64_add(r->restarts, 1);
+				counter_u64_add(r->stalls, 1);
+			}
+			break;
+		}
+		cidx = increment_idx(r, cidx, n);
+		pending += n;
+		total += n;
+
+		/*
+		 * We update the cidx only if we've caught up with the pidx, the
+		 * real cidx is getting too far ahead of the one visible to
+		 * everyone else, or we have exceeded our budget.
+		 */
+		if (cidx != pidx && pending < 64 && total < budget)
+			continue;
+		critical_enter();
+		do {
+			os.state = ns.state = r->state;
+			ns.cidx = cidx;
+			ns.flags = state_to_flags(ns, total >= budget);
+		} while (atomic_cmpset_acq_64(&r->state, os.state, ns.state) == 0);
+		critical_exit();
+
+		if (ns.flags == ABDICATED)
+			counter_u64_add(r->abdications, 1);
+		if (ns.flags != BUSY) {
+			/* Wrong loop exit if we're going to stall. */
+			MPASS(ns.flags != STALLED);
+			if (prev == STALLED) {
+				MPASS(total > 0);
+				counter_u64_add(r->restarts, 1);
+			}
+			break;
+		}
+
+		/*
+		 * The acquire style atomic above guarantees visibility of items
+		 * associated with any pidx change that we notice here.
+		 */
+		pidx = ns.pidx_tail;
+		pending = 0;
+	}
+}
+
+int
+mp_ring_alloc(struct mp_ring **pr, int size, void *cookie, ring_drain_t drain,
+    ring_can_drain_t can_drain, struct malloc_type *mt, int flags)
+{
+	struct mp_ring *r;
+
+	/* All idx are 16b so size can be 65536 at most */
+	if (pr == NULL || size < 2 || size > 65536 || drain == NULL ||
+	    can_drain == NULL)
+		return (EINVAL);
+	*pr = NULL;
+	flags &= M_NOWAIT | M_WAITOK;
+	MPASS(flags != 0);
+
+	r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
+	if (r == NULL)
+		return (ENOMEM);
+	r->size = size;
+	r->cookie = cookie;
+	r->mt = mt;
+	r->drain = drain;
+	r->can_drain = can_drain;
+	r->enqueues = counter_u64_alloc(flags);
+	r->drops = counter_u64_alloc(flags);
+	r->starts = counter_u64_alloc(flags);
+	r->stalls = counter_u64_alloc(flags);
+	r->restarts = counter_u64_alloc(flags);
+	r->abdications = counter_u64_alloc(flags);
+	if (r->enqueues == NULL || r->drops == NULL || r->starts == NULL ||
+	    r->stalls == NULL || r->restarts == NULL ||
+	    r->abdications == NULL) {
+		mp_ring_free(r);
+		return (ENOMEM);
+	}
+
+	*pr = r;
+	return (0);
+}
+
+void
+mp_ring_free(struct mp_ring *r)
+{
+
+	if (r == NULL)
+		return;
+
+	if (r->enqueues != NULL)
+		counter_u64_free(r->enqueues);
+	if (r->drops != NULL)
+		counter_u64_free(r->drops);
+	if (r->starts != NULL)
+		counter_u64_free(r->starts);
+	if (r->stalls != NULL)
+		counter_u64_free(r->stalls);
+	if (r->restarts != NULL)
+		counter_u64_free(r->restarts);
+	if (r->abdications != NULL)
+		counter_u64_free(r->abdications);
+
+	free(r, r->mt);
+}
+
+/*
+ * Enqueue n items and maybe drain the ring for some time.
+ *
+ * Returns an errno.
+ */
+int
+mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
+{
+	union ring_state os, ns;
+	uint16_t pidx_start, pidx_stop;
+	int i;
+
+	MPASS(items != NULL);
+	MPASS(n > 0);
+
+	/*
+	 * Reserve room for the new items.  Our reservation, if successful, is
+	 * from 'pidx_start' to 'pidx_stop'.
+	 */
+	for (;;) {
+		os.state = r->state;
+		if (n >= space_available(r, os)) {
+			counter_u64_add(r->drops, n);
+			MPASS(os.flags != IDLE);
+			if (os.flags == STALLED)
+				mp_ring_check_drainage(r, 0);
+			return (ENOBUFS);
+		}
+		ns.state = os.state;
+		ns.pidx_head = increment_idx(r, os.pidx_head, n);
+		critical_enter();
+		if (atomic_cmpset_64(&r->state, os.state, ns.state))
+			break;
+		critical_exit();
+		cpu_spinwait();
+	}
+	pidx_start = os.pidx_head;
+	pidx_stop = ns.pidx_head;
+
+	/*
+	 * Wait for other producers who got in ahead of us to enqueue their
+	 * items, one producer at a time.  It is our turn when the ring's
+	 * pidx_tail reaches the beginning of our reservation (pidx_start).
+	 */
+	while (ns.pidx_tail != pidx_start) {
+		cpu_spinwait();
+		ns.state = r->state;
+	}
+
+	/* Now it is our turn to fill up the area we reserved earlier. */
+	i = pidx_start;
+	do {
+		r->items[i] = *items++;
+		if (__predict_false(++i == r->size))
+			i = 0;
+	} while (i != pidx_stop);
+
+	/*
+	 * Update the ring's pidx_tail.  The release style atomic guarantees
+	 * that the items are visible to any thread that sees the updated pidx.
+	 */
+	do {
+		os.state = ns.state = r->state;
+		ns.pidx_tail = pidx_stop;
+		ns.flags = BUSY;
+	} while (atomic_cmpset_rel_64(&r->state, os.state, ns.state) == 0);
+	critical_exit();
+	counter_u64_add(r->enqueues, n);
+
+	/*
+	 * Turn into a consumer if some other thread isn't active as a consumer
+	 * already.
+	 */
+	if (os.flags != BUSY)
+		drain_ring(r, ns, os.flags, budget);
+
+	return (0);
+}
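
A hedged, kernel-side sketch of how a driver might wire this API up; the
demo_* names are hypothetical (the real callbacks live in t4_sge.c), and
the fragment assumes t4_mp_ring.h plus the driver's M_CXGBE malloc type:

	#include "t4_mp_ring.h"

	static void demo_consume_item(void *);	/* hypothetical consumer */

	static u_int
	demo_drain(struct mp_ring *r, u_int cidx, u_int pidx)
	{
		u_int n = 0;

		/* Consume items[cidx..pidx), wrapping at r->size.  A
		 * return of 0 parks the ring in STALLED until someone
		 * calls mp_ring_check_drainage(). */
		while (cidx != pidx) {
			demo_consume_item(r->items[cidx]);
			if (++cidx == r->size)
				cidx = 0;
			n++;
		}
		return (n);
	}

	static u_int
	demo_can_drain(struct mp_ring *r)
	{

		return (1);	/* sketch: resources always available */
	}

	static int
	demo_send(void *cookie, void *item)
	{
		struct mp_ring *r;
		int rc;

		rc = mp_ring_alloc(&r, 512, cookie, demo_drain,
		    demo_can_drain, M_CXGBE, M_WAITOK);
		if (rc != 0)
			return (rc);
		rc = mp_ring_enqueue(r, &item, 1, 4096 /* budget */);
		mp_ring_check_drainage(r, 4096);	/* kick if stalled */
		return (rc);
	}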
+
+void
+mp_ring_check_drainage(struct mp_ring *r, int budget)
+{
+	union ring_state os, ns;
+
+	os.state = r->state;
+	if (os.flags != STALLED || os.pidx_head != os.pidx_tail ||
+	    r->can_drain(r) == 0)
+		return;
+
+	MPASS(os.cidx != os.pidx_tail);	/* implied by STALLED */
+	ns.state = os.state;
+	ns.flags = BUSY;
+
+	/*
+	 * The acquire style atomic guarantees visibility of items associated
+	 * with the pidx that we read here.
+	 */
+	if (!atomic_cmpset_acq_64(&r->state, os.state, ns.state))
+		return;
+
+	drain_ring(r, ns, os.flags, budget);
+}
+
+void
+mp_ring_reset_stats(struct mp_ring *r)
+{
+
+	counter_u64_zero(r->enqueues);
+	counter_u64_zero(r->drops);
+	counter_u64_zero(r->starts);
+	counter_u64_zero(r->stalls);
+	counter_u64_zero(r->restarts);
+	counter_u64_zero(r->abdications);
+}
+
+int
+mp_ring_is_idle(struct mp_ring *r)
+{
+	union ring_state s;
+
+	s.state = r->state;
+	if (s.pidx_head == s.pidx_tail && s.pidx_tail == s.cidx &&
+	    s.flags == IDLE)
+		return (1);
+
+	return (0);
+}


Property changes on: trunk/sys/dev/cxgbe/t4_mp_ring.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/t4_mp_ring.h
===================================================================
--- trunk/sys/dev/cxgbe/t4_mp_ring.h	                        (rev 0)
+++ trunk/sys/dev/cxgbe/t4_mp_ring.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,69 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/cxgbe/t4_mp_ring.h 284052 2015-06-06 09:28:40Z np $
+ *
+ */
+
+#ifndef __CXGBE_MP_RING_H
+#define __CXGBE_MP_RING_H
+
+#ifndef _KERNEL
+#error "no user-serviceable parts inside"
+#endif
+
+struct mp_ring;
+typedef u_int (*ring_drain_t)(struct mp_ring *, u_int, u_int);
+typedef u_int (*ring_can_drain_t)(struct mp_ring *);
+
+struct mp_ring {
+	volatile uint64_t	state __aligned(CACHE_LINE_SIZE);
+
+	int			size __aligned(CACHE_LINE_SIZE);
+	void *			cookie;
+	struct malloc_type *	mt;
+	ring_drain_t		drain;
+	ring_can_drain_t	can_drain;	/* cheap, may be unreliable */
+	counter_u64_t		enqueues;
+	counter_u64_t		drops;
+	counter_u64_t		starts;
+	counter_u64_t		stalls;
+	counter_u64_t		restarts;	/* recovered after stalling */
+	counter_u64_t		abdications;
+
+	void * volatile		items[] __aligned(CACHE_LINE_SIZE);
+};
+
+int mp_ring_alloc(struct mp_ring **, int, void *, ring_drain_t,
+    ring_can_drain_t, struct malloc_type *, int);
+void mp_ring_free(struct mp_ring *);
+int mp_ring_enqueue(struct mp_ring *, void **, int, int);
+void mp_ring_check_drainage(struct mp_ring *, int);
+void mp_ring_reset_stats(struct mp_ring *);
+int mp_ring_is_idle(struct mp_ring *);
+
+#endif


Property changes on: trunk/sys/dev/cxgbe/t4_mp_ring.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
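
For readers new to this API: a minimal sketch of how a producer might use the
ring declared above (illustrative only; the callbacks, ring size, budget, and
cookie are placeholders, and the final argument to mp_ring_alloc is assumed to
be malloc(9) flags):

    /* Hypothetical callbacks; the real ones live in t4_sge.c. */
    static u_int my_drain(struct mp_ring *r, u_int cidx, u_int pidx);
    static u_int my_can_drain(struct mp_ring *r);

    struct mp_ring *r;
    int rc;

    rc = mp_ring_alloc(&r, 512, txq /* cookie */, my_drain, my_can_drain,
        M_CXGBE, M_WAITOK);
    if (rc == 0) {
        void *item = m;                /* e.g. an mbuf chain */

        rc = mp_ring_enqueue(r, &item, 1, 4096); /* 1 item, drain budget */
        mp_ring_check_drainage(r, 4096);         /* restart a stalled ring */
        mp_ring_free(r);
    }
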
Added: trunk/sys/dev/cxgbe/t4_netmap.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_netmap.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/t4_netmap.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,1026 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2014 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_netmap.c 318826 2017-05-24 21:54:04Z np $");
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#ifdef DEV_NETMAP
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/lock.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/selinfo.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <machine/bus.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_var.h>
+#include <net/if_clone.h>
+#include <net/if_types.h>
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+
+extern int fl_pad;	/* XXXNM */
+
+SYSCTL_NODE(_hw, OID_AUTO, cxgbe, CTLFLAG_RD, 0, "cxgbe netmap parameters");
+
+/*
+ * 0 = normal netmap rx
+ * 1 = black hole
+ * 2 = supermassive black hole (buffer packing enabled)
+ */
+int black_hole = 0;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RDTUN, &black_hole, 0,
+    "Sink incoming packets.");
+
+int rx_ndesc = 256;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN,
+    &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated.");
+
+int holdoff_tmr_idx = 2;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN,
+    &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues.");
+
+/*
+ * Congestion drops.
+ * -1: no congestion feedback (not recommended).
+ *  0: backpressure the channel instead of dropping packets right away.
+ *  1: no backpressure, drop packets for the congested queue immediately.
+ */
+static int nm_cong_drop = 1;
+TUNABLE_INT("hw.cxgbe.nm_cong_drop", &nm_cong_drop);
+
+static int
+alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
+{
+	int rc, cntxt_id, i;
+	__be32 v;
+	struct adapter *sc = vi->pi->adapter;
+	struct sge_params *sp = &sc->params.sge;
+	struct netmap_adapter *na = NA(vi->ifp);
+	struct fw_iq_cmd c;
+
+	MPASS(na != NULL);
+	MPASS(nm_rxq->iq_desc != NULL);
+	MPASS(nm_rxq->fl_desc != NULL);
+
+	bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE);
+	bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len);
+
+	bzero(&c, sizeof(c));
+	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) |
+	    V_FW_IQ_CMD_VFN(0));
+	c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
+	    FW_LEN16(c));
+	if (vi->flags & INTR_RXQ) {
+		KASSERT(nm_rxq->intr_idx < sc->intr_count,
+		    ("%s: invalid direct intr_idx %d", __func__,
+		    nm_rxq->intr_idx));
+		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx);
+	} else {
+		CXGBE_UNIMPLEMENTED(__func__);	/* XXXNM: needs review */
+		v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx) |
+		    F_FW_IQ_CMD_IQANDST;
+	}
+	c.type_to_iqandstindex = htobe32(v |
+	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+	    V_FW_IQ_CMD_VIID(vi->viid) |
+	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
+	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(vi->pi->tx_chan) |
+	    F_FW_IQ_CMD_IQGTSMODE |
+	    V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
+	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
+	c.iqsize = htobe16(vi->qsize_rxq);
+	c.iqaddr = htobe64(nm_rxq->iq_ba);
+	if (cong >= 0) {
+		c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
+		    V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF |
+		    F_FW_IQ_CMD_FL0CONGEN);
+	}
+	c.iqns_to_fl0congen |=
+	    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
+		F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
+		(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
+		(black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0));
+	c.fl0dcaen_to_fl0cidxfthresh =
+	    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
+		X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
+		V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
+		X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
+	c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE);
+	c.fl0addr = htobe64(nm_rxq->fl_ba);
+
+	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
+	if (rc != 0) {
+		device_printf(sc->dev,
+		    "failed to create netmap ingress queue: %d\n", rc);
+		return (rc);
+	}
+
+	nm_rxq->iq_cidx = 0;
+	MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE);
+	nm_rxq->iq_gen = F_RSPD_GEN;
+	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
+	nm_rxq->iq_abs_id = be16toh(c.physiqid);
+	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
+	if (cntxt_id >= sc->sge.niq) {
+		panic("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
+		    __func__, cntxt_id, sc->sge.niq - 1);
+	}
+	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;
+
+	nm_rxq->fl_cntxt_id = be16toh(c.fl0id);
+	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
+	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
+	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
+	if (cntxt_id >= sc->sge.neq) {
+		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
+		    __func__, cntxt_id, sc->sge.neq - 1);
+	}
+	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;
+
+	nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) |
+	    sc->chip_params->sge_fl_db;
+
+	if (chip_id(sc) >= CHELSIO_T5 && cong >= 0) {
+		uint32_t param, val;
+
+		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+		    V_FW_PARAMS_PARAM_YZ(nm_rxq->iq_cntxt_id);
+		if (cong == 0)
+			val = 1 << 19;
+		else {
+			val = 2 << 19;
+			for (i = 0; i < 4; i++) {
+				if (cong & (1 << i))
+					val |= 1 << (i << 2);
+			}
+		}
+
+		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+		if (rc != 0) {
+			/* report error but carry on */
+			device_printf(sc->dev,
+			    "failed to set congestion manager context for "
+			    "ingress queue %d: %d\n", nm_rxq->iq_cntxt_id, rc);
+		}
+	}
+
+	t4_write_reg(sc, sc->sge_gts_reg,
+	    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
+	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
+
+	return (rc);
+}
+
+static int
+free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
+{
+	struct adapter *sc = vi->pi->adapter;
+	int rc;
+
+	rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+	    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
+	if (rc != 0)
+		device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n",
+		    __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc);
+	return (rc);
+}
+
+static int
+alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
+{
+	int rc, cntxt_id;
+	size_t len;
+	struct adapter *sc = vi->pi->adapter;
+	struct netmap_adapter *na = NA(vi->ifp);
+	struct fw_eq_eth_cmd c;
+
+	MPASS(na != NULL);
+	MPASS(nm_txq->desc != NULL);
+
+	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
+	bzero(nm_txq->desc, len);
+
+	bzero(&c, sizeof(c));
+	c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
+	    V_FW_EQ_ETH_CMD_VFN(0));
+	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
+	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
+	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
+	c.fetchszm_to_iqid =
+	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+		V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+		V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
+	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+		      V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE));
+	c.eqaddr = htobe64(nm_txq->ba);
+
+	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
+	if (rc != 0) {
+		device_printf(vi->dev,
+		    "failed to create netmap egress queue: %d\n", rc);
+		return (rc);
+	}
+
+	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
+	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
+	if (cntxt_id >= sc->sge.neq)
+	    panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
+		cntxt_id, sc->sge.neq - 1);
+	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;
+
+	nm_txq->pidx = nm_txq->cidx = 0;
+	MPASS(nm_txq->sidx == na->num_tx_desc);
+	nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0;
+
+	nm_txq->doorbells = sc->doorbells;
+	if (isset(&nm_txq->doorbells, DOORBELL_UDB) ||
+	    isset(&nm_txq->doorbells, DOORBELL_UDBWC) ||
+	    isset(&nm_txq->doorbells, DOORBELL_WCWR)) {
+		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
+		uint32_t mask = (1 << s_qpp) - 1;
+		volatile uint8_t *udb;
+
+		udb = sc->udbs_base + UDBS_DB_OFFSET;
+		udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT;
+		nm_txq->udb_qid = nm_txq->cntxt_id & mask;
+		if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
+			clrbit(&nm_txq->doorbells, DOORBELL_WCWR);
+		else {
+			udb += nm_txq->udb_qid << UDBS_SEG_SHIFT;
+			nm_txq->udb_qid = 0;
+		}
+		nm_txq->udb = (volatile void *)udb;
+	}
+
+	return (rc);
+}
+
+static int
+free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
+{
+	struct adapter *sc = vi->pi->adapter;
+	int rc;
+
+	rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
+	if (rc != 0)
+		device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__,
+		    nm_txq->cntxt_id, rc);
+	return (rc);
+}
+
+static int
+cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
+    struct netmap_adapter *na)
+{
+	struct netmap_slot *slot;
+	struct sge_nm_rxq *nm_rxq;
+	struct sge_nm_txq *nm_txq;
+	int rc, i, j, hwidx;
+	struct hw_buf_info *hwb;
+
+	ASSERT_SYNCHRONIZED_OP(sc);
+
+	if ((vi->flags & VI_INIT_DONE) == 0 ||
+	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+		return (EAGAIN);
+
+	hwb = &sc->sge.hw_buf_info[0];
+	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
+		if (hwb->size == NETMAP_BUF_SIZE(na))
+			break;
+	}
+	if (i >= SGE_FLBUF_SIZES) {
+		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
+		    NETMAP_BUF_SIZE(na));
+		return (ENXIO);
+	}
+	hwidx = i;
+
+	/* Must set caps before calling netmap_reset */
+	nm_set_native_flags(na);
+
+	for_each_nm_rxq(vi, i, nm_rxq) {
+		struct irq *irq = &sc->irq[vi->first_intr + i];
+
+		alloc_nm_rxq_hwq(vi, nm_rxq, tnl_cong(vi->pi, nm_cong_drop));
+		nm_rxq->fl_hwidx = hwidx;
+		slot = netmap_reset(na, NR_RX, i, 0);
+		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
+
+		/* We deal with 8 bufs at a time */
+		MPASS((na->num_rx_desc & 7) == 0);
+		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
+		for (j = 0; j < nm_rxq->fl_sidx; j++) {
+			uint64_t ba;
+
+			PNMB(na, &slot[j], &ba);
+			MPASS(ba != 0);
+			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
+		}
+		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
+		MPASS((j & 7) == 0);
+		j /= 8;	/* driver pidx to hardware pidx */
+		wmb();
+		t4_write_reg(sc, sc->sge_kdoorbell_reg,
+		    nm_rxq->fl_db_val | V_PIDX(j));
+
+		atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
+	}
+
+	for_each_nm_txq(vi, i, nm_txq) {
+		alloc_nm_txq_hwq(vi, nm_txq);
+		slot = netmap_reset(na, NR_TX, i, 0);
+		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
+	}
+
+	if (vi->nm_rss == NULL) {
+		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
+		    M_ZERO | M_WAITOK);
+	}
+	for (i = 0; i < vi->rss_size;) {
+		for_each_nm_rxq(vi, j, nm_rxq) {
+			vi->nm_rss[i++] = nm_rxq->iq_abs_id;
+			if (i == vi->rss_size)
+				break;
+		}
+	}
+	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
+	    vi->nm_rss, vi->rss_size);
+	if (rc != 0)
+		if_printf(ifp, "netmap rss_config failed: %d\n", rc);
+
+	return (rc);
+}
+
+static int
+cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
+    struct netmap_adapter *na)
+{
+	int rc, i;
+	struct sge_nm_txq *nm_txq;
+	struct sge_nm_rxq *nm_rxq;
+
+	ASSERT_SYNCHRONIZED_OP(sc);
+
+	if ((vi->flags & VI_INIT_DONE) == 0)
+		return (0);
+
+	rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size,
+	    vi->rss, vi->rss_size);
+	if (rc != 0)
+		if_printf(ifp, "failed to restore RSS config: %d\n", rc);
+	nm_clear_native_flags(na);
+
+	for_each_nm_txq(vi, i, nm_txq) {
+		struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
+
+		/* Wait for hw pidx to catch up ... */
+		while (be16toh(nm_txq->pidx) != spg->pidx)
+			pause("nmpidx", 1);
+
+		/* ... and then for the cidx. */
+		while (spg->pidx != spg->cidx)
+			pause("nmcidx", 1);
+
+		free_nm_txq_hwq(vi, nm_txq);
+	}
+	for_each_nm_rxq(vi, i, nm_rxq) {
+		struct irq *irq = &sc->irq[vi->first_intr + i];
+
+		while (!atomic_cmpset_int(&irq->nm_state, NM_ON, NM_OFF))
+			pause("nmst", 1);
+
+		free_nm_rxq_hwq(vi, nm_rxq);
+	}
+
+	return (rc);
+}
+
+static int
+cxgbe_netmap_reg(struct netmap_adapter *na, int on)
+{
+	struct ifnet *ifp = na->ifp;
+	struct vi_info *vi = ifp->if_softc;
+	struct adapter *sc = vi->pi->adapter;
+	int rc;
+
+	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
+	if (rc != 0)
+		return (rc);
+	if (on)
+		rc = cxgbe_netmap_on(sc, vi, ifp, na);
+	else
+		rc = cxgbe_netmap_off(sc, vi, ifp, na);
+	end_synchronized_op(sc, 0);
+
+	return (rc);
+}
+
+/* How many packets can a single type1 WR carry in n descriptors */
+static inline int
+ndesc_to_npkt(const int n)
+{
+
+	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);
+
+	return (n * 2 - 1);
+}
+#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))
+
+/* Space (in descriptors) needed for a type1 WR that carries n packets */
+static inline int
+npkt_to_ndesc(const int n)
+{
+
+	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);
+
+	return ((n + 2) / 2);
+}
+
+/* Space (in 16B units) needed for a type1 WR that carries n packets */
+static inline int
+npkt_to_len16(const int n)
+{
+
+	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);
+
+	return (n * 2 + 1);
+}
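+
+/*
+ * The three helpers above encode a single invariant: a type1 WR spends 16B
+ * (one len16 unit) on the WR header plus 32B per packet (a cpl_tx_pkt_core
+ * and a one-entry ulptx_sgl), and each 64B descriptor holds four len16
+ * units.  An illustrative self-check, assuming SGE_MAX_WR_NDESC == 8 (a
+ * 512B work request built from 64B descriptors):
+ *
+ *	for (n = 1; n <= 8; n++) {
+ *		MPASS(ndesc_to_npkt(n) == 2 * n - 1);
+ *		MPASS(npkt_to_ndesc(ndesc_to_npkt(n)) == n);
+ *		MPASS(npkt_to_len16(ndesc_to_npkt(n)) <= 4 * n);
+ *	}
+ */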
+
+#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)
+
+static void
+ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
+{
+	int n;
+	u_int db = nm_txq->doorbells;
+
+	MPASS(nm_txq->pidx != nm_txq->dbidx);
+
+	n = NMIDXDIFF(nm_txq, dbidx);
+	if (n > 1)
+		clrbit(&db, DOORBELL_WCWR);
+	wmb();
+
+	switch (ffs(db) - 1) {
+	case DOORBELL_UDB:
+		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
+		break;
+
+	case DOORBELL_WCWR: {
+		volatile uint64_t *dst, *src;
+
+		/*
+		 * Queues whose 128B doorbell segment fits in the page do not
+		 * use relative qid (udb_qid is always 0).  Only queues with
+		 * doorbell segments can do WCWR.
+		 */
+		KASSERT(nm_txq->udb_qid == 0 && n == 1,
+		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
+		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));
+
+		dst = (volatile void *)((uintptr_t)nm_txq->udb +
+		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
+		src = (void *)&nm_txq->desc[nm_txq->dbidx];
+		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
+			*dst++ = *src++;
+		wmb();
+		break;
+	}
+
+	case DOORBELL_UDBWC:
+		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
+		wmb();
+		break;
+
+	case DOORBELL_KDB:
+		t4_write_reg(sc, sc->sge_kdoorbell_reg,
+		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
+		break;
+	}
+	nm_txq->dbidx = nm_txq->pidx;
+}
+
+int lazy_tx_credit_flush = 1;
+
+/*
+ * Write work requests to send 'npkt' frames and ring the doorbell to send them
+ * on their way.  No need to check for wraparound.
+ */
+static void
+cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
+    struct netmap_kring *kring, int npkt, int npkt_remaining, int txcsum)
+{
+	struct netmap_ring *ring = kring->ring;
+	struct netmap_slot *slot;
+	const u_int lim = kring->nkr_num_slots - 1;
+	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
+	uint16_t len;
+	uint64_t ba;
+	struct cpl_tx_pkt_core *cpl;
+	struct ulptx_sgl *usgl;
+	int i, n;
+
+	while (npkt) {
+		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
+		len = 0;
+
+		wr = (void *)&nm_txq->desc[nm_txq->pidx];
+		wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
+		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
+		wr->npkt = n;
+		wr->r3 = 0;
+		wr->type = 1;
+		cpl = (void *)(wr + 1);
+
+		for (i = 0; i < n; i++) {
+			slot = &ring->slot[kring->nr_hwcur];
+			PNMB(kring->na, slot, &ba);
+			MPASS(ba != 0);
+
+			cpl->ctrl0 = nm_txq->cpl_ctrl0;
+			cpl->pack = 0;
+			cpl->len = htobe16(slot->len);
+			/*
+			 * netmap(4) says "netmap does not use features such as
+			 * checksum offloading, TCP segmentation offloading,
+			 * encryption, VLAN encapsulation/decapsulation, etc."
+			 *
+			 * So the ncxl interfaces have tx hardware checksumming
+			 * disabled by default.  But you can override netmap by
+			 * enabling IFCAP_TXCSUM on the interface manually.
+			 */
+			cpl->ctrl1 = txcsum ? 0 :
+			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);
+
+			usgl = (void *)(cpl + 1);
+			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+			    V_ULPTX_NSGE(1));
+			usgl->len0 = htobe32(slot->len);
+			usgl->addr0 = htobe64(ba);
+
+			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
+			cpl = (void *)(usgl + 1);
+			MPASS(slot->len + len <= UINT16_MAX);
+			len += slot->len;
+			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
+		}
+		wr->plen = htobe16(len);
+
+		npkt -= n;
+		nm_txq->pidx += npkt_to_ndesc(n);
+		MPASS(nm_txq->pidx <= nm_txq->sidx);
+		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
+			/*
+			 * This routine doesn't know how to write WRs that wrap
+			 * around.  Make sure it wasn't asked to.
+			 */
+			MPASS(npkt == 0);
+			nm_txq->pidx = 0;
+		}
+
+		if (npkt == 0 && npkt_remaining == 0) {
+			/* All done. */
+			if (lazy_tx_credit_flush == 0) {
+				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
+				    F_FW_WR_EQUIQ);
+				nm_txq->equeqidx = nm_txq->pidx;
+				nm_txq->equiqidx = nm_txq->pidx;
+			}
+			ring_nm_txq_db(sc, nm_txq);
+			return;
+		}
+
+		if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
+			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
+			    F_FW_WR_EQUIQ);
+			nm_txq->equeqidx = nm_txq->pidx;
+			nm_txq->equiqidx = nm_txq->pidx;
+		} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
+			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
+			nm_txq->equeqidx = nm_txq->pidx;
+		}
+		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
+			ring_nm_txq_db(sc, nm_txq);
+	}
+
+	/* Will get called again. */
+	MPASS(npkt_remaining);
+}
+
+/* How many contiguous free descriptors starting at pidx */
+static inline int
+contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
+{
+
+	if (nm_txq->cidx > nm_txq->pidx)
+		return (nm_txq->cidx - nm_txq->pidx - 1);
+	else if (nm_txq->cidx > 0)
+		return (nm_txq->sidx - nm_txq->pidx);
+	else
+		return (nm_txq->sidx - nm_txq->pidx - 1);
+}
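+
+/*
+ * Worked cases for the three branches above (illustrative, with sidx == 16):
+ *
+ *	cidx 10, pidx 4:  10 - 4 - 1  = 5	stop one short of cidx
+ *	cidx 2,  pidx 10: 16 - 10     = 6	run to the end, wrap next call
+ *	cidx 0,  pidx 10: 16 - 10 - 1 = 5	pidx must not wrap onto cidx
+ */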
+
+static int
+reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
+{
+	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
+	uint16_t hw_cidx = spg->cidx;	/* snapshot */
+	struct fw_eth_tx_pkts_wr *wr;
+	int n = 0;
+
+	hw_cidx = be16toh(hw_cidx);
+
+	while (nm_txq->cidx != hw_cidx) {
+		wr = (void *)&nm_txq->desc[nm_txq->cidx];
+
+		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)));
+		MPASS(wr->type == 1);
+		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);
+
+		n += wr->npkt;
+		nm_txq->cidx += npkt_to_ndesc(wr->npkt);
+
+		/*
+		 * We never sent a WR that wrapped around so the credits coming
+		 * back, WR by WR, should never cause the cidx to wrap around
+		 * either.
+		 */
+		MPASS(nm_txq->cidx <= nm_txq->sidx);
+		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
+			nm_txq->cidx = 0;
+	}
+
+	return (n);
+}
+
+static int
+cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
+{
+	struct netmap_adapter *na = kring->na;
+	struct ifnet *ifp = na->ifp;
+	struct vi_info *vi = ifp->if_softc;
+	struct adapter *sc = vi->pi->adapter;
+	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
+	const u_int head = kring->rhead;
+	u_int reclaimed = 0;
+	int n, d, npkt_remaining, ndesc_remaining, txcsum;
+
+	/*
+	 * Tx was at kring->nr_hwcur last time around and now we need to advance
+	 * to kring->rhead.  Note that the driver's pidx moves independently of
+	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
+	 * between descriptors and frames isn't 1:1).
+	 */
+
+	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
+	    kring->nkr_num_slots - kring->nr_hwcur + head;
+	txcsum = ifp->if_capenable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6);
+	while (npkt_remaining) {
+		reclaimed += reclaim_nm_tx_desc(nm_txq);
+		ndesc_remaining = contiguous_ndesc_available(nm_txq);
+		/* Can't run out of descriptors with packets still remaining */
+		MPASS(ndesc_remaining > 0);
+
+		/* # of desc needed to tx all remaining packets */
+		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
+		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
+			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);
+
+		if (d <= ndesc_remaining)
+			n = npkt_remaining;
+		else {
+			/* Can't send all, calculate how many can be sent */
+			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
+			    MAX_NPKT_IN_TYPE1_WR;
+			if (ndesc_remaining % SGE_MAX_WR_NDESC)
+				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
+		}
+
+		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
+		npkt_remaining -= n;
+		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining, txcsum);
+	}
+	MPASS(npkt_remaining == 0);
+	MPASS(kring->nr_hwcur == head);
+	MPASS(nm_txq->dbidx == nm_txq->pidx);
+
+	/*
+	 * Second part: reclaim buffers for completed transmissions.
+	 */
+	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
+		reclaimed += reclaim_nm_tx_desc(nm_txq);
+		kring->nr_hwtail += reclaimed;
+		if (kring->nr_hwtail >= kring->nkr_num_slots)
+			kring->nr_hwtail -= kring->nkr_num_slots;
+	}
+
+	nm_txsync_finalize(kring);
+
+	return (0);
+}
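+
+/*
+ * Example of the split above (illustrative, with MAX_NPKT_IN_TYPE1_WR == 15
+ * and SGE_MAX_WR_NDESC == 8): 40 remaining packets need two full WRs (30
+ * packets in 16 descriptors) plus npkt_to_ndesc(10) == 6 descriptors for the
+ * tail, 22 in all.  If fewer than 22 contiguous descriptors are free, the
+ * reverse calculation caps n at whatever number of packets does fit.
+ */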
+
+static int
+cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
+{
+	struct netmap_adapter *na = kring->na;
+	struct netmap_ring *ring = kring->ring;
+	struct ifnet *ifp = na->ifp;
+	struct vi_info *vi = ifp->if_softc;
+	struct adapter *sc = vi->pi->adapter;
+	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
+	u_int const head = nm_rxsync_prologue(kring);
+	u_int n;
+	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
+
+	if (black_hole)
+		return (0);	/* No updates ever. */
+
+	if (netmap_no_pendintr || force_update) {
+		kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx);
+		kring->nr_kflags &= ~NKR_PENDINTR;
+	}
+
+	/* Userspace done with buffers from kring->nr_hwcur to head */
+	n = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
+	    kring->nkr_num_slots - kring->nr_hwcur + head;
+	n &= ~7U;
+	if (n > 0) {
+		u_int fl_pidx = nm_rxq->fl_pidx;
+		struct netmap_slot *slot = &ring->slot[fl_pidx];
+		uint64_t ba;
+		int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx;
+
+		/*
+		 * We always deal with 8 buffers at a time.  We must have
+		 * stopped at an 8-buffer boundary (fl_pidx) last time around
+		 * and we must have a multiple of 8 buffers to give to the
+		 * freelist.
+		 */
+		MPASS((fl_pidx & 7) == 0);
+		MPASS((n & 7) == 0);
+
+		IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots);
+		IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx);
+
+		while (n > 0) {
+			for (i = 0; i < 8; i++, fl_pidx++, slot++) {
+				PNMB(na, slot, &ba);
+				MPASS(ba != 0);
+				nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx);
+				slot->flags &= ~NS_BUF_CHANGED;
+				MPASS(fl_pidx <= nm_rxq->fl_sidx);
+			}
+			n -= 8;
+			if (fl_pidx == nm_rxq->fl_sidx) {
+				fl_pidx = 0;
+				slot = &ring->slot[0];
+			}
+			if (++dbinc == 8 && n >= 32) {
+				wmb();
+				t4_write_reg(sc, sc->sge_kdoorbell_reg,
+				    nm_rxq->fl_db_val | V_PIDX(dbinc));
+				dbinc = 0;
+			}
+		}
+		MPASS(nm_rxq->fl_pidx == fl_pidx);
+
+		if (dbinc > 0) {
+			wmb();
+			t4_write_reg(sc, sc->sge_kdoorbell_reg,
+			    nm_rxq->fl_db_val | V_PIDX(dbinc));
+		}
+	}
+
+	nm_rxsync_finalize(kring);
+
+	return (0);
+}
+
+void
+cxgbe_nm_attach(struct vi_info *vi)
+{
+	struct port_info *pi;
+	struct adapter *sc;
+	struct netmap_adapter na;
+
+	MPASS(vi->nnmrxq > 0);
+	MPASS(vi->ifp != NULL);
+
+	pi = vi->pi;
+	sc = pi->adapter;
+
+	bzero(&na, sizeof(na));
+
+	na.ifp = vi->ifp;
+	na.na_flags = NAF_BDG_MAYSLEEP;
+
+	/* Netmap doesn't know about the space reserved for the status page. */
+	na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE;
+
+	/*
+	 * The freelist's cidx/pidx drives netmap's rx cidx/pidx.  So
+	 * num_rx_desc is based on the number of buffers that can be held in the
+	 * freelist, and not the number of entries in the iq.  (These two are
+	 * not exactly the same due to the space taken up by the status page).
+	 */
+	na.num_rx_desc = (vi->qsize_rxq / 8) * 8;
+	na.nm_txsync = cxgbe_netmap_txsync;
+	na.nm_rxsync = cxgbe_netmap_rxsync;
+	na.nm_register = cxgbe_netmap_reg;
+	na.num_tx_rings = vi->nnmtxq;
+	na.num_rx_rings = vi->nnmrxq;
+	netmap_attach(&na);
+}
+
+void
+cxgbe_nm_detach(struct vi_info *vi)
+{
+
+	MPASS(vi->nnmrxq > 0);
+	MPASS(vi->ifp != NULL);
+
+	netmap_detach(vi->ifp);
+}
+
+static inline const void *
+unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
+{
+
+	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);
+
+	/* data[0] is RSS header */
+	return (&cpl->data[1]);
+}
+
+static void
+handle_nm_sge_egr_update(struct adapter *sc, struct ifnet *ifp,
+    const struct cpl_sge_egr_update *egr)
+{
+	uint32_t oq;
+	struct sge_nm_txq *nm_txq;
+
+	oq = be32toh(egr->opcode_qid);
+	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
+	nm_txq = (void *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];
+
+	netmap_tx_irq(ifp, nm_txq->nid);
+}
+
+void
+t4_nm_intr(void *arg)
+{
+	struct sge_nm_rxq *nm_rxq = arg;
+	struct vi_info *vi = nm_rxq->vi;
+	struct adapter *sc = vi->pi->adapter;
+	struct ifnet *ifp = vi->ifp;
+	struct netmap_adapter *na = NA(ifp);
+	struct netmap_kring *kring = &na->rx_rings[nm_rxq->nid];
+	struct netmap_ring *ring = kring->ring;
+	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
+	const void *cpl;
+	uint32_t lq;
+	u_int n = 0, work = 0;
+	uint8_t opcode;
+	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
+	u_int fl_credits = fl_cidx & 7;
+
+	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {
+
+		rmb();
+
+		lq = be32toh(d->rsp.pldbuflen_qid);
+		opcode = d->rss.opcode;
+		cpl = &d->cpl[0];
+
+		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
+		case X_RSPD_TYPE_FLBUF:
+			if (black_hole != 2) {
+				/* No buffer packing so new buf every time */
+				MPASS(lq & F_RSPD_NEWBUF);
+			}
+
+			/* fall through */
+
+		case X_RSPD_TYPE_CPL:
+			MPASS(opcode < NUM_CPL_CMDS);
+
+			switch (opcode) {
+			case CPL_FW4_MSG:
+			case CPL_FW6_MSG:
+				cpl = unwrap_nm_fw6_msg(cpl);
+				/* fall through */
+			case CPL_SGE_EGR_UPDATE:
+				handle_nm_sge_egr_update(sc, ifp, cpl);
+				break;
+			case CPL_RX_PKT:
+				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
+				    sc->params.sge.fl_pktshift;
+				ring->slot[fl_cidx].flags = kring->nkr_slot_flags;
+				fl_cidx += (lq & F_RSPD_NEWBUF) ? 1 : 0;
+				fl_credits += (lq & F_RSPD_NEWBUF) ? 1 : 0;
+				if (__predict_false(fl_cidx == nm_rxq->fl_sidx))
+					fl_cidx = 0;
+				break;
+			default:
+				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
+				    __func__, opcode, nm_rxq);
+			}
+			break;
+
+		case X_RSPD_TYPE_INTR:
+			/* Not equipped to handle forwarded interrupts. */
+			panic("%s: netmap queue received interrupt for iq %u\n",
+			    __func__, lq);
+
+		default:
+			panic("%s: illegal response type %d on nm_rxq %p",
+			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
+		}
+
+		d++;
+		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
+			nm_rxq->iq_cidx = 0;
+			d = &nm_rxq->iq_desc[0];
+			nm_rxq->iq_gen ^= F_RSPD_GEN;
+		}
+
+		if (__predict_false(++n == rx_ndesc)) {
+			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
+			if (black_hole && fl_credits >= 8) {
+				fl_credits /= 8;
+				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
+				    nm_rxq->fl_sidx);
+				t4_write_reg(sc, sc->sge_kdoorbell_reg,
+				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
+				fl_credits = fl_cidx & 7;
+			} else if (!black_hole) {
+				netmap_rx_irq(ifp, nm_rxq->nid, &work);
+				MPASS(work != 0);
+			}
+			t4_write_reg(sc, sc->sge_gts_reg,
+			    V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
+			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
+			n = 0;
+		}
+	}
+
+	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
+	if (black_hole) {
+		fl_credits /= 8;
+		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
+		t4_write_reg(sc, sc->sge_kdoorbell_reg,
+		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
+	} else
+		netmap_rx_irq(ifp, nm_rxq->nid, &work);
+
+	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(n) |
+	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
+	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
+}
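+
+/*
+ * A note on the free-list credit math above (illustrative): the hardware
+ * pidx of a netmap freelist moves in units of 8 buffers, so the interrupt
+ * handler accumulates consumed buffers in fl_credits, rings the doorbell
+ * with the whole groups of 8 (fl_credits / 8), and carries the remainder
+ * (fl_cidx & 7) forward to the next pass.
+ */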
+#endif


Property changes on: trunk/sys/dev/cxgbe/t4_netmap.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/t4_sched.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_sched.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/t4_sched.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,464 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2017 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_sched.c 318851 2017-05-25 01:43:28Z np $");
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/taskqueue.h>
+#include <sys/sysctl.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_msg.h"
+
+
+static int
+in_range(int val, int lo, int hi)
+{
+
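+	/* A negative value means "parameter not specified" and always passes. */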
+	return (val < 0 || (val <= hi && val >= lo));
+}
+
+static int
+set_sched_class_config(struct adapter *sc, int minmax)
+{
+	int rc;
+
+	if (minmax < 0)
+		return (EINVAL);
+
+	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sscc");
+	if (rc)
+		return (rc);
+	rc = -t4_sched_config(sc, FW_SCHED_TYPE_PKTSCHED, minmax, 1);
+	end_synchronized_op(sc, 0);
+
+	return (rc);
+}
+
+static int
+set_sched_class_params(struct adapter *sc, struct t4_sched_class_params *p,
+    int sleep_ok)
+{
+	int rc, top_speed, fw_level, fw_mode, fw_rateunit, fw_ratemode;
+	struct port_info *pi;
+	struct tx_cl_rl_params *tc;
+
+	if (p->level == SCHED_CLASS_LEVEL_CL_RL)
+		fw_level = FW_SCHED_PARAMS_LEVEL_CL_RL;
+	else if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
+		fw_level = FW_SCHED_PARAMS_LEVEL_CL_WRR;
+	else if (p->level == SCHED_CLASS_LEVEL_CH_RL)
+		fw_level = FW_SCHED_PARAMS_LEVEL_CH_RL;
+	else
+		return (EINVAL);
+
+	if (p->mode == SCHED_CLASS_MODE_CLASS)
+		fw_mode = FW_SCHED_PARAMS_MODE_CLASS;
+	else if (p->mode == SCHED_CLASS_MODE_FLOW)
+		fw_mode = FW_SCHED_PARAMS_MODE_FLOW;
+	else
+		return (EINVAL);
+
+	if (p->rateunit == SCHED_CLASS_RATEUNIT_BITS)
+		fw_rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
+	else if (p->rateunit == SCHED_CLASS_RATEUNIT_PKTS)
+		fw_rateunit = FW_SCHED_PARAMS_UNIT_PKTRATE;
+	else
+		return (EINVAL);
+
+	if (p->ratemode == SCHED_CLASS_RATEMODE_REL)
+		fw_ratemode = FW_SCHED_PARAMS_RATE_REL;
+	else if (p->ratemode == SCHED_CLASS_RATEMODE_ABS)
+		fw_ratemode = FW_SCHED_PARAMS_RATE_ABS;
+	else
+		return (EINVAL);
+
+	/* Vet our parameters ... */
+	if (!in_range(p->channel, 0, sc->chip_params->nchan - 1))
+		return (ERANGE);
+
+	pi = sc->port[sc->chan_map[p->channel]];
+	if (pi == NULL)
+		return (ENXIO);
+	MPASS(pi->tx_chan == p->channel);
+	top_speed = port_top_speed(pi) * 1000000; /* Gbps -> Kbps */
+
+	if (!in_range(p->cl, 0, sc->chip_params->nsched_cls - 1) ||
+	    !in_range(p->minrate, 0, top_speed) ||
+	    !in_range(p->maxrate, 0, top_speed) ||
+	    !in_range(p->weight, 0, 100))
+		return (ERANGE);
+
+	/*
+	 * Translate any unset parameters into the firmware's
+	 * nomenclature and/or fail the call if the parameters
+	 * are required ...
+	 */
+	if (p->rateunit < 0 || p->ratemode < 0 || p->channel < 0 || p->cl < 0)
+		return (EINVAL);
+
+	if (p->minrate < 0)
+		p->minrate = 0;
+	if (p->maxrate < 0) {
+		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
+		    p->level == SCHED_CLASS_LEVEL_CH_RL)
+			return (EINVAL);
+		else
+			p->maxrate = 0;
+	}
+	if (p->weight < 0) {
+		if (p->level == SCHED_CLASS_LEVEL_CL_WRR)
+			return (EINVAL);
+		else
+			p->weight = 0;
+	}
+	if (p->pktsize < 0) {
+		if (p->level == SCHED_CLASS_LEVEL_CL_RL ||
+		    p->level == SCHED_CLASS_LEVEL_CH_RL)
+			return (EINVAL);
+		else
+			p->pktsize = 0;
+	}
+
+	rc = begin_synchronized_op(sc, NULL,
+	    sleep_ok ? (SLEEP_OK | INTR_OK) : HOLD_LOCK, "t4sscp");
+	if (rc)
+		return (rc);
+	if (p->level == SCHED_CLASS_LEVEL_CL_RL) {
+		tc = &pi->sched_params->cl_rl[p->cl];
+		if (tc->refcount > 0) {
+			rc = EBUSY;
+			goto done;
+		} else {
+			tc->ratemode = fw_ratemode;
+			tc->rateunit = fw_rateunit;
+			tc->mode = fw_mode;
+			tc->maxrate = p->maxrate;
+			tc->pktsize = p->pktsize;
+		}
+	}
+	rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED, fw_level, fw_mode,
+	    fw_rateunit, fw_ratemode, p->channel, p->cl, p->minrate, p->maxrate,
+	    p->weight, p->pktsize, sleep_ok);
+	if (p->level == SCHED_CLASS_LEVEL_CL_RL && rc != 0) {
+		/*
+		 * Unknown state at this point, see parameters in tc for what
+		 * was attempted.
+		 */
+		tc->flags |= TX_CLRL_ERROR;
+	}
+done:
+	end_synchronized_op(sc, sleep_ok ? 0 : LOCK_HELD);
+
+	return (rc);
+}
+
+static void
+update_tx_sched(void *context, int pending)
+{
+	int i, j, mode, rateunit, ratemode, maxrate, pktsize, rc;
+	struct port_info *pi;
+	struct tx_cl_rl_params *tc;
+	struct adapter *sc = context;
+	const int n = sc->chip_params->nsched_cls;
+
+	mtx_lock(&sc->tc_lock);
+	for_each_port(sc, i) {
+		pi = sc->port[i];
+		tc = &pi->sched_params->cl_rl[0];
+		for (j = 0; j < n; j++, tc++) {
+			MPASS(mtx_owned(&sc->tc_lock));
+			if ((tc->flags & TX_CLRL_REFRESH) == 0)
+				continue;
+
+			mode = tc->mode;
+			rateunit = tc->rateunit;
+			ratemode = tc->ratemode;
+			maxrate = tc->maxrate;
+			pktsize = tc->pktsize;
+			mtx_unlock(&sc->tc_lock);
+
+			if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+			    "t4utxs") != 0) {
+				mtx_lock(&sc->tc_lock);
+				continue;
+			}
+			rc = t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
+			    FW_SCHED_PARAMS_LEVEL_CL_RL, mode, rateunit,
+			    ratemode, pi->tx_chan, j, 0, maxrate, 0, pktsize,
+			    1);
+			end_synchronized_op(sc, 0);
+
+			mtx_lock(&sc->tc_lock);
+			if (rc != 0) {
+				tc->flags |= TX_CLRL_ERROR;
+			} else if (tc->mode == mode &&
+			    tc->rateunit == rateunit &&
+			    tc->maxrate == maxrate &&
+			    tc->pktsize == pktsize) {
+				tc->flags &= ~(TX_CLRL_REFRESH | TX_CLRL_ERROR);
+			}
+		}
+	}
+	mtx_unlock(&sc->tc_lock);
+}
+
+int
+t4_set_sched_class(struct adapter *sc, struct t4_sched_params *p)
+{
+
+	if (p->type != SCHED_CLASS_TYPE_PACKET)
+		return (EINVAL);
+
+	if (p->subcmd == SCHED_CLASS_SUBCMD_CONFIG)
+		return (set_sched_class_config(sc, p->u.config.minmax));
+
+	if (p->subcmd == SCHED_CLASS_SUBCMD_PARAMS)
+		return (set_sched_class_params(sc, &p->u.params, 1));
+
+	return (EINVAL);
+}
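+
+/*
+ * A sketch of the parameter block a caller might hand to the CL_RL path
+ * above (illustrative values; a negative field means "unset" here):
+ *
+ *	struct t4_sched_class_params p = {
+ *		.level    = SCHED_CLASS_LEVEL_CL_RL,
+ *		.mode     = SCHED_CLASS_MODE_FLOW,
+ *		.rateunit = SCHED_CLASS_RATEUNIT_BITS,
+ *		.ratemode = SCHED_CLASS_RATEMODE_ABS,
+ *		.channel  = 0,
+ *		.cl       = 4,
+ *		.minrate  = -1,		unset, treated as 0
+ *		.maxrate  = 500000,	in Kbps; required for CL_RL
+ *		.weight   = -1,		unset; only used for CL_WRR
+ *		.pktsize  = 1500,	required for CL_RL
+ *	};
+ */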
+
+int
+t4_set_sched_queue(struct adapter *sc, struct t4_sched_queue *p)
+{
+	struct port_info *pi = NULL;
+	struct vi_info *vi;
+	struct sge_txq *txq;
+	uint32_t fw_mnem, fw_queue, fw_class;
+	int i, rc;
+
+	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setsq");
+	if (rc)
+		return (rc);
+
+	if (p->port >= sc->params.nports) {
+		rc = EINVAL;
+		goto done;
+	}
+
+	/* XXX: Only supported for the main VI. */
+	pi = sc->port[p->port];
+	vi = &pi->vi[0];
+	if (!(vi->flags & VI_INIT_DONE)) {
+		/* tx queues not set up yet */
+		rc = EAGAIN;
+		goto done;
+	}
+
+	if (!in_range(p->queue, 0, vi->ntxq - 1) ||
+	    !in_range(p->cl, 0, sc->chip_params->nsched_cls - 1)) {
+		rc = EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Create a template for the FW_PARAMS_CMD mnemonic and value (TX
+	 * Scheduling Class in this case).
+	 */
+	fw_mnem = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
+	fw_class = p->cl < 0 ? 0xffffffff : p->cl;
+
+	/*
+	 * If p->queue is non-negative, then we're only changing the scheduling
+	 * on a single specified TX queue.
+	 */
+	if (p->queue >= 0) {
+		txq = &sc->sge.txq[vi->first_txq + p->queue];
+		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
+		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
+		    &fw_class);
+		goto done;
+	}
+
+	/*
+	 * Change the scheduling on all the TX queues for the
+	 * interface.
+	 */
+	for_each_txq(vi, i, txq) {
+		fw_queue = (fw_mnem | V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id));
+		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue,
+		    &fw_class);
+		if (rc)
+			goto done;
+	}
+
+	rc = 0;
+done:
+	end_synchronized_op(sc, 0);
+	return (rc);
+}
+
+int
+t4_init_tx_sched(struct adapter *sc)
+{
+	int i, j;
+	const int n = sc->chip_params->nsched_cls;
+	struct port_info *pi;
+	struct tx_cl_rl_params *tc;
+	static const uint32_t init_kbps[] = {
+		100 * 1000,
+		200 * 1000,
+		400 * 1000,
+		500 * 1000,
+		800 * 1000,
+		1000 * 1000,
+		1200 * 1000,
+		1500 * 1000,
+		1800 * 1000,
+		2000 * 1000,
+		2500 * 1000,
+		3000 * 1000,
+		3500 * 1000,
+		4000 * 1000,
+		5000 * 1000,
+		10000 * 1000
+	};
+
+	mtx_init(&sc->tc_lock, "tx_sched lock", NULL, MTX_DEF);
+	TASK_INIT(&sc->tc_task, 0, update_tx_sched, sc);
+	for_each_port(sc, i) {
+		pi = sc->port[i];
+		pi->sched_params = malloc(sizeof(*pi->sched_params) +
+		    n * sizeof(*tc), M_CXGBE, M_ZERO | M_WAITOK);
+		tc = &pi->sched_params->cl_rl[0];
+		for (j = 0; j < n; j++, tc++) {
+			tc->refcount = 0;
+			tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
+			tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
+			tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
+			tc->maxrate = init_kbps[min(j, nitems(init_kbps) - 1)];
+			tc->pktsize = ETHERMTU;	/* XXX */
+
+			if (t4_sched_params_cl_rl_kbps(sc, pi->tx_chan, j,
+			    tc->mode, tc->maxrate, tc->pktsize, 1) == 0)
+				tc->flags = 0;
+			else
+				tc->flags = TX_CLRL_ERROR;
+		}
+	}
+
+	return (0);
+}
+
+int
+t4_free_tx_sched(struct adapter *sc)
+{
+	int i;
+
+	taskqueue_drain(taskqueue_thread, &sc->tc_task);
+
+	for_each_port(sc, i)
+	    free(sc->port[i]->sched_params, M_CXGBE);
+
+	if (mtx_initialized(&sc->tc_lock))
+		mtx_destroy(&sc->tc_lock);
+
+	return (0);
+}
+
+void
+t4_update_tx_sched(struct adapter *sc)
+{
+
+	taskqueue_enqueue(taskqueue_thread, &sc->tc_task);
+}
+
+int
+t4_reserve_cl_rl_kbps(struct adapter *sc, int port_id, u_int maxrate,
+    int *tc_idx)
+{
+	int rc = 0, fa = -1, i;
+	struct tx_cl_rl_params *tc;
+
+	MPASS(port_id >= 0 && port_id < sc->params.nports);
+
+	tc = &sc->port[port_id]->sched_params->cl_rl[0];
+	mtx_lock(&sc->tc_lock);
+	for (i = 0; i < sc->chip_params->nsched_cls; i++, tc++) {
+		if (fa < 0 && tc->refcount == 0)
+			fa = i;
+
+		if (tc->ratemode == FW_SCHED_PARAMS_RATE_ABS &&
+		    tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE &&
+		    tc->mode == FW_SCHED_PARAMS_MODE_FLOW &&
+		    tc->maxrate == maxrate) {
+			tc->refcount++;
+			*tc_idx = i;
+			goto done;
+		}
+	}
+	/* Not found */
+	MPASS(i == sc->chip_params->nsched_cls);
+	if (fa != -1) {
+		tc = &sc->port[port_id]->sched_params->cl_rl[fa];
+		tc->flags = TX_CLRL_REFRESH;
+		tc->refcount = 1;
+		tc->ratemode = FW_SCHED_PARAMS_RATE_ABS;
+		tc->rateunit = FW_SCHED_PARAMS_UNIT_BITRATE;
+		tc->mode = FW_SCHED_PARAMS_MODE_FLOW;
+		tc->maxrate = maxrate;
+		tc->pktsize = ETHERMTU;	/* XXX */
+		*tc_idx = fa;
+		t4_update_tx_sched(sc);
+	} else {
+		*tc_idx = -1;
+		rc = ENOSPC;
+	}
+done:
+	mtx_unlock(&sc->tc_lock);
+	return (rc);
+}
+
+void
+t4_release_cl_rl_kbps(struct adapter *sc, int port_id, int tc_idx)
+{
+	struct tx_cl_rl_params *tc;
+
+	MPASS(port_id >= 0 && port_id < sc->params.nports);
+	MPASS(tc_idx >= 0 && tc_idx < sc->chip_params->nsched_cls);
+
+	mtx_lock(&sc->tc_lock);
+	tc = &sc->port[port_id]->sched_params->cl_rl[tc_idx];
+	MPASS(tc->refcount > 0);
+	MPASS(tc->ratemode == FW_SCHED_PARAMS_RATE_ABS);
+	MPASS(tc->rateunit == FW_SCHED_PARAMS_UNIT_BITRATE);
+	MPASS(tc->mode == FW_SCHED_PARAMS_MODE_FLOW);
+	tc->refcount--;
+	mtx_unlock(&sc->tc_lock);
+}


Property changes on: trunk/sys/dev/cxgbe/t4_sched.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
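
The last two functions above form the public reserve/release pair for
rate-limit classes; a minimal usage sketch (illustrative only; the port number
and rate are made up and error handling is elided):

    int tc_idx, rc;

    /* Ask for a 1 Gbps (1000000 Kbps) flow-mode rate-limit class on port 0. */
    rc = t4_reserve_cl_rl_kbps(sc, 0, 1000000, &tc_idx);
    if (rc == 0) {
        /* ... bind tx queue(s) to tc_idx and transmit ... */
        t4_release_cl_rl_kbps(sc, 0, tc_idx);
    }
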
Modified: trunk/sys/dev/cxgbe/t4_sge.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_sge.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/t4_sge.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2011 Chelsio Communications, Inc.
  * All rights reserved.
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/t4_sge.c 248078 2013-03-09 00:39:54Z marius $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_sge.c 318855 2017-05-25 02:00:37Z np $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -35,12 +36,15 @@
 #include <sys/mbuf.h>
 #include <sys/socket.h>
 #include <sys/kernel.h>
-#include <sys/kdb.h>
 #include <sys/malloc.h>
 #include <sys/queue.h>
+#include <sys/sbuf.h>
 #include <sys/taskqueue.h>
+#include <sys/time.h>
+#include <sys/sglist.h>
 #include <sys/sysctl.h>
 #include <sys/smp.h>
+#include <sys/counter.h>
 #include <net/bpf.h>
 #include <net/ethernet.h>
 #include <net/if.h>
@@ -49,25 +53,31 @@
 #include <netinet/ip.h>
 #include <netinet/ip6.h>
 #include <netinet/tcp.h>
+#include <machine/in_cksum.h>
+#include <machine/md_var.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#ifdef DEV_NETMAP
+#include <machine/bus.h>
+#include <sys/selinfo.h>
+#include <net/if_var.h>
+#include <net/netmap.h>
+#include <dev/netmap/netmap_kern.h>
+#endif
 
 #include "common/common.h"
 #include "common/t4_regs.h"
 #include "common/t4_regs_values.h"
 #include "common/t4_msg.h"
+#include "t4_l2t.h"
+#include "t4_mp_ring.h"
 
-struct fl_buf_info {
-	int size;
-	int type;
-	uma_zone_t zone;
-};
+#ifdef T4_PKT_TIMESTAMP
+#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
+#else
+#define RX_COPY_THRESHOLD MINCLSIZE
+#endif
 
-/* Filled up by t4_sge_modload */
-static struct fl_buf_info fl_buf_info[FL_BUF_SIZES];
-
-#define FL_BUF_SIZE(x)	(fl_buf_info[x].size)
-#define FL_BUF_TYPE(x)	(fl_buf_info[x].type)
-#define FL_BUF_ZONE(x)	(fl_buf_info[x].zone)
-
 /*
  * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
  * 0-7 are valid values.
@@ -78,9 +88,10 @@
 /*
  * Pad ethernet payload up to this boundary.
  * -1: driver should figure out a good value.
- *  Any power of 2, from 32 to 4096 (both inclusive) is a valid value.
+ *  0: disable padding.
+ *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
  */
-static int fl_pad = -1;
+int fl_pad = -1;
 TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);
 
 /*
@@ -100,123 +111,266 @@
 static int cong_drop = 0;
 TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop);
 
-/* Used to track coalesced tx work request */
+/*
+ * Deliver multiple frames in the same free list buffer if they fit.
+ * -1: let the driver decide whether to enable buffer packing or not.
+ *  0: disable buffer packing.
+ *  1: enable buffer packing.
+ */
+static int buffer_packing = -1;
+TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing);
+
+/*
+ * Start next frame in a packed buffer at this boundary.
+ * -1: driver should figure out a good value.
+ * T4: driver will ignore this and use the same value as fl_pad above.
+ * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
+ */
+static int fl_pack = -1;
+TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack);
+
+/*
+ * Allow the driver to create mbuf(s) in a cluster allocated for rx.
+ * 0: never; always allocate mbufs from the zone_mbuf UMA zone.
+ * 1: ok to create mbuf(s) within a cluster if there is room.
+ */
+static int allow_mbufs_in_cluster = 1;
+TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster);
+
+/*
+ * Largest rx cluster size that the driver is allowed to allocate.
+ */
+static int largest_rx_cluster = MJUM16BYTES;
+TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster);
+
+/*
+ * Size of cluster allocation that's most likely to succeed.  The driver will
+ * fall back to this size if it fails to allocate clusters larger than this.
+ */
+static int safest_rx_cluster = PAGE_SIZE;
+TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster);
+
+/*
+ * The interrupt holdoff timers are multiplied by this value on T6+.
+ * 1 and 3-17 (both inclusive) are legal values.
+ */
+static int tscale = 1;
+TUNABLE_INT("hw.cxgbe.tscale", &tscale);
+
 struct txpkts {
-	uint64_t *flitp;	/* ptr to flit where next pkt should start */
-	uint8_t npkt;		/* # of packets in this work request */
-	uint8_t nflits;		/* # of flits used by this work request */
-	uint16_t plen;		/* total payload (sum of all packets) */
+	u_int wr_type;		/* type 0 or type 1 */
+	u_int npkt;		/* # of packets in this work request */
+	u_int plen;		/* total payload (sum of all packets) */
+	u_int len16;		/* # of 16B pieces used by this work request */
 };
 
 /* A packet's SGL.  This + m_pkthdr has all info needed for tx */
 struct sgl {
-	int nsegs;		/* # of segments in the SGL, 0 means imm. tx */
-	int nflits;		/* # of flits needed for the SGL */
-	bus_dma_segment_t seg[TX_SGL_SEGS];
+	struct sglist sg;
+	struct sglist_seg seg[TX_SGL_SEGS];
 };
 
 static int service_iq(struct sge_iq *, int);
-static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t,
-    int *);
+static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
 static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
-static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int,
-    int);
-static inline void init_fl(struct sge_fl *, int, int, char *);
-static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t,
-    char *);
+static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
+static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
+static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
+    uint16_t, char *);
 static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
     bus_addr_t *, void **);
 static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
     void *);
-static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
+static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
     int, int);
-static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
+static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
+static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
+    struct sysctl_oid *, struct sge_fl *);
 static int alloc_fwq(struct adapter *);
 static int free_fwq(struct adapter *);
 static int alloc_mgmtq(struct adapter *);
 static int free_mgmtq(struct adapter *);
-static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int,
+static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
     struct sysctl_oid *);
-static int free_rxq(struct port_info *, struct sge_rxq *);
+static int free_rxq(struct vi_info *, struct sge_rxq *);
 #ifdef TCP_OFFLOAD
-static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
+static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
     struct sysctl_oid *);
-static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
+static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
 #endif
+#ifdef DEV_NETMAP
+static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
+    struct sysctl_oid *);
+static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
+static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
+    struct sysctl_oid *);
+static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
+#endif
 static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
 #ifdef TCP_OFFLOAD
-static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
 #endif
-static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *);
+static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
 static int free_eq(struct adapter *, struct sge_eq *);
-static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *,
+static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
     struct sysctl_oid *);
 static int free_wrq(struct adapter *, struct sge_wrq *);
-static int alloc_txq(struct port_info *, struct sge_txq *, int,
+static int alloc_txq(struct vi_info *, struct sge_txq *, int,
     struct sysctl_oid *);
-static int free_txq(struct port_info *, struct sge_txq *);
+static int free_txq(struct vi_info *, struct sge_txq *);
 static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
-static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **);
-static inline void iq_next(struct sge_iq *);
 static inline void ring_fl_db(struct adapter *, struct sge_fl *);
 static int refill_fl(struct adapter *, struct sge_fl *, int);
 static void refill_sfl(void *);
 static int alloc_fl_sdesc(struct sge_fl *);
-static void free_fl_sdesc(struct sge_fl *);
-static void set_fl_tag_idx(struct sge_fl *, int);
+static void free_fl_sdesc(struct adapter *, struct sge_fl *);
+static void find_best_refill_source(struct adapter *, struct sge_fl *, int);
+static void find_safe_refill_source(struct adapter *, struct sge_fl *);
 static void add_fl_to_sfl(struct adapter *, struct sge_fl *);
 
-static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int);
-static int free_pkt_sgl(struct sge_txq *, struct sgl *);
-static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *,
-    struct sgl *);
-static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *,
-    struct mbuf *, struct sgl *);
-static void write_txpkts_wr(struct sge_txq *, struct txpkts *);
-static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *,
-    struct txpkts *, struct mbuf *, struct sgl *);
-static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *);
+static inline void get_pkt_gl(struct mbuf *, struct sglist *);
+static inline u_int txpkt_len16(u_int, u_int);
+static inline u_int txpkt_vm_len16(u_int, u_int);
+static inline u_int txpkts0_len16(u_int);
+static inline u_int txpkts1_len16(void);
+static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *,
+    struct mbuf *, u_int);
+static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
+    struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int);
+static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int);
+static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int);
+static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *,
+    struct mbuf *, const struct txpkts *, u_int);
+static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
 static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
-static inline void ring_eq_db(struct adapter *, struct sge_eq *);
-static inline int reclaimable(struct sge_eq *);
-static int reclaim_tx_descs(struct sge_txq *, int, int);
-static void write_eqflush_wr(struct sge_eq *);
-static __be64 get_flit(bus_dma_segment_t *, int, int);
+static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
+static inline uint16_t read_hw_cidx(struct sge_eq *);
+static inline u_int reclaimable_tx_desc(struct sge_eq *);
+static inline u_int total_available_tx_desc(struct sge_eq *);
+static u_int reclaim_tx_descs(struct sge_txq *, u_int);
+static void tx_reclaim(void *, int);
+static __be64 get_flit(struct sglist_seg *, int, int);
 static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
     struct mbuf *);
 static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
     struct mbuf *);
+static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
+static void wrq_tx_drain(void *, int);
+static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);
 
 static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
+static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
+static int sysctl_tc(SYSCTL_HANDLER_ARGS);
 
-#if defined(__i386__) || defined(__amd64__)
-extern u_int cpu_clflush_line_size;
+static counter_u64_t extfree_refs;
+static counter_u64_t extfree_rels;
+
+an_handler_t t4_an_handler;
+fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
+cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
+
+static int
+an_not_handled(struct sge_iq *iq, const struct rsp_ctrl *ctrl)
+{
+
+#ifdef INVARIANTS
+	panic("%s: async notification on iq %p (ctrl %p)", __func__, iq, ctrl);
+#else
+	log(LOG_ERR, "%s: async notification on iq %p (ctrl %p)\n",
+	    __func__, iq, ctrl);
 #endif
+	return (EDOOFUS);
+}
 
+int
+t4_register_an_handler(an_handler_t h)
+{
+	uintptr_t *loc, new;
+
+	new = h ? (uintptr_t)h : (uintptr_t)an_not_handled;
+	loc = (uintptr_t *) &t4_an_handler;
+	atomic_store_rel_ptr(loc, new);
+
+	return (0);
+}
+
+static int
+fw_msg_not_handled(struct adapter *sc, const __be64 *rpl)
+{
+	const struct cpl_fw6_msg *cpl =
+	    __containerof(rpl, struct cpl_fw6_msg, data[0]);
+
+#ifdef INVARIANTS
+	panic("%s: fw_msg type %d", __func__, cpl->type);
+#else
+	log(LOG_ERR, "%s: fw_msg type %d\n", __func__, cpl->type);
+#endif
+	return (EDOOFUS);
+}
+
+int
+t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
+{
+	uintptr_t *loc, new;
+
+	if (type >= nitems(t4_fw_msg_handler))
+		return (EINVAL);
+
+	/*
+	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
+	 * handler dispatch table.  Reject any attempt to install a handler for
+	 * this subtype.
+	 */
+	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
+		return (EINVAL);
+
+	new = h ? (uintptr_t)h : (uintptr_t)fw_msg_not_handled;
+	loc = (uintptr_t *) &t4_fw_msg_handler[type];
+	atomic_store_rel_ptr(loc, new);
+
+	return (0);
+}
+
+static int
+cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+
+#ifdef INVARIANTS
+	panic("%s: opcode 0x%02x on iq %p with payload %p",
+	    __func__, rss->opcode, iq, m);
+#else
+	log(LOG_ERR, "%s: opcode 0x%02x on iq %p with payload %p\n",
+	    __func__, rss->opcode, iq, m);
+	m_freem(m);
+#endif
+	return (EDOOFUS);
+}
+
+int
+t4_register_cpl_handler(int opcode, cpl_handler_t h)
+{
+	uintptr_t *loc, new;
+
+	if (opcode >= nitems(t4_cpl_handler))
+		return (EINVAL);
+
+	new = h ? (uintptr_t)h : (uintptr_t)cpl_not_handled;
+	loc = (uintptr_t *) &t4_cpl_handler[opcode];
+	atomic_store_rel_ptr(loc, new);
+
+	return (0);
+}
+
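A dependent module (the TOE or iWARP code, for instance) installs its
handlers through the functions above.  A minimal sketch of the pattern
follows; the handler body is hypothetical and only the cpl_handler_t
signature and the registration calls come from the code above.  Passing
NULL re-installs the default not-handled handler:

	static int
	do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
	    struct mbuf *m)
	{
		/* Decode the reply, update driver state, consume the mbuf. */
		m_freem(m);
		return (0);
	}

	/* On load. */
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	/* On unload: NULL restores cpl_not_handled. */
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
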
 /*
- * Called on MOD_LOAD.  Fills up fl_buf_info[] and validates/calculates the SGE
- * tunables.
+ * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
  */
 void
 t4_sge_modload(void)
 {
 	int i;
-	int bufsize[FL_BUF_SIZES] = {
-		MCLBYTES,
-#if MJUMPAGESIZE != MCLBYTES
-		MJUMPAGESIZE,
-#endif
-		MJUM9BYTES,
-		MJUM16BYTES
-	};
 
-	for (i = 0; i < FL_BUF_SIZES; i++) {
-		FL_BUF_SIZE(i) = bufsize[i];
-		FL_BUF_TYPE(i) = m_gettype(bufsize[i]);
-		FL_BUF_ZONE(i) = m_getzone(bufsize[i]);
-	}
-
 	if (fl_pktshift < 0 || fl_pktshift > 7) {
 		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
 		    " using 2 instead.\n", fl_pktshift);
@@ -223,23 +377,6 @@
 		fl_pktshift = 2;
 	}
 
-	if (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad)) {
-		int pad;
-
-#if defined(__i386__) || defined(__amd64__)
-		pad = max(cpu_clflush_line_size, 32);
-#else
-		pad = max(CACHE_LINE_SIZE, 32);
-#endif
-		pad = min(pad, 4096);
-
-		if (fl_pad != -1) {
-			printf("Invalid hw.cxgbe.fl_pad value (%d),"
-			    " using %d instead.\n", fl_pad, pad);
-		}
-		fl_pad = pad;
-	}
-
 	if (spg_len != 64 && spg_len != 128) {
 		int len;
 
@@ -260,31 +397,157 @@
 		    " using 0 instead.\n", cong_drop);
 		cong_drop = 0;
 	}
+
+	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
+		printf("Invalid hw.cxgbe.tscale value (%d),"
+		    " using 1 instead.\n", tscale);
+		tscale = 1;
+	}
+
+	extfree_refs = counter_u64_alloc(M_WAITOK);
+	extfree_rels = counter_u64_alloc(M_WAITOK);
+	counter_u64_zero(extfree_refs);
+	counter_u64_zero(extfree_rels);
+
+	t4_an_handler = an_not_handled;
+	for (i = 0; i < nitems(t4_fw_msg_handler); i++)
+		t4_fw_msg_handler[i] = fw_msg_not_handled;
+	for (i = 0; i < nitems(t4_cpl_handler); i++)
+		t4_cpl_handler[i] = cpl_not_handled;
+
+	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
+	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
+	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
+	t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx);
+	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
+	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
 }
 
-/**
- *	t4_sge_init - initialize SGE
- *	@sc: the adapter
- *
- *	Performs SGE initialization needed every time after a chip reset.
- *	We do not initialize any of the queues here, instead the driver
- *	top-level must request them individually.
+void
+t4_sge_modunload(void)
+{
+
+	counter_u64_free(extfree_refs);
+	counter_u64_free(extfree_rels);
+}
+
+uint64_t
+t4_sge_extfree_refs(void)
+{
+	uint64_t refs, rels;
+
+	rels = counter_u64_fetch(extfree_rels);
+	refs = counter_u64_fetch(extfree_refs);
+
+	return (refs - rels);
+}
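
extfree_refs counts clusters handed to the stack with an external free
routine and extfree_rels counts clusters given back via rxb_free(), so the
difference returned by t4_sge_extfree_refs() is the number of rx clusters
still out in the system.  A caller can use it to gate teardown; a sketch
(the EBUSY check is illustrative, not code from this file):

	/* Refuse to tear down while rx clusters are still outstanding. */
	if (t4_sge_extfree_refs() != 0)
		return (EBUSY);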
+
+static inline void
+setup_pad_and_pack_boundaries(struct adapter *sc)
+{
+	uint32_t v, m;
+	int pad, pack, pad_shift;
+
+	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
+	    X_INGPADBOUNDARY_SHIFT;
+	pad = fl_pad;
+	if (fl_pad < (1 << pad_shift) ||
+	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
+	    !powerof2(fl_pad)) {
+		/*
+		 * If there is any chance that we might use buffer packing and
+		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
+		 * it to the minimum allowed in all other cases.
+		 */
+		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;
+
+		/*
+		 * For fl_pad = 0 we'll still write a reasonable value to the
+		 * register but all the freelists will opt out of padding.
+		 * We'll complain here only if the user tried to set it to a
+		 * value greater than 0 that was invalid.
+		 */
+		if (fl_pad > 0) {
+			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
+			    " (%d), using %d instead.\n", fl_pad, pad);
+		}
+	}
+	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
+	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
+	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
+
+	if (is_t4(sc)) {
+		if (fl_pack != -1 && fl_pack != pad) {
+			/* Complain but carry on. */
+			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
+			    " using %d instead.\n", fl_pack, pad);
+		}
+		return;
+	}
+
+	pack = fl_pack;
+	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
+	    !powerof2(fl_pack)) {
+		pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
+		MPASS(powerof2(pack));
+		if (pack < 16)
+			pack = 16;
+		if (pack == 32)
+			pack = 64;
+		if (pack > 4096)
+			pack = 4096;
+		if (fl_pack != -1) {
+			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
+			    " (%d), using %d instead.\n", fl_pack, pack);
+		}
+	}
+	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
+	if (pack == 16)
+		v = V_INGPACKBOUNDARY(0);
+	else
+		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);
+
+	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
+	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
+}
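
Both boundaries are programmed as log2 encodings.  On a T4 (pad_shift == 5)
with buffer packing desired, pad = 64 is written as
V_INGPADBOUNDARY(ilog2(64) - 5) = V_INGPADBOUNDARY(1); on a T5 a 64-byte
pack boundary becomes V_INGPACKBOUNDARY(ilog2(64) - 5) =
V_INGPACKBOUNDARY(1), with encoding 0 reserved for the special 16-byte
boundary handled explicitly above.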
+
+/*
+ * adap->params.vpd.cclk must be set up before this is called.
  */
-int
-t4_sge_init(struct adapter *sc)
+void
+t4_tweak_chip_settings(struct adapter *sc)
 {
-	struct sge *s = &sc->sge;
-	int i, rc = 0;
-	uint32_t ctrl_mask, ctrl_val, hpsize, v;
+	int i;
+	uint32_t v, m;
+	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
+	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
+	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
+	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
+	static int sge_flbuf_sizes[] = {
+		MCLBYTES,
+#if MJUMPAGESIZE != MCLBYTES
+		MJUMPAGESIZE,
+		MJUMPAGESIZE - CL_METADATA_SIZE,
+		MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE,
+#endif
+		MJUM9BYTES,
+		MJUM16BYTES,
+		MCLBYTES - MSIZE - CL_METADATA_SIZE,
+		MJUM9BYTES - CL_METADATA_SIZE,
+		MJUM16BYTES - CL_METADATA_SIZE,
+	};
 
-	ctrl_mask = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE |
-	    V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
-	    F_EGRSTATUSPAGESIZE;
-	ctrl_val = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
-	    V_INGPADBOUNDARY(ilog2(fl_pad) - 5) |
+	KASSERT(sc->flags & MASTER_PF,
+	    ("%s: trying to change chip settings when not master.", __func__));
+
+	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
+	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
 	    V_EGRSTATUSPAGESIZE(spg_len == 128);
+	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);
 
-	hpsize = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
+	setup_pad_and_pack_boundaries(sc);
+
+	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
 	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
 	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
 	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
@@ -292,88 +555,262 @@
 	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
 	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
 	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
+	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);
 
-	if (sc->flags & MASTER_PF) {
-		int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
-		int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
+	KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES,
+	    ("%s: hw buffer size table too big", __func__));
+	for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) {
+		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
+		    sge_flbuf_sizes[i]);
+	}
 
-		t4_set_reg_field(sc, A_SGE_CONTROL, ctrl_mask, ctrl_val);
-		t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, hpsize);
-		for (i = 0; i < FL_BUF_SIZES; i++) {
-			t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
-			    FL_BUF_SIZE(i));
+	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
+	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
+	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);
+
+	KASSERT(intr_timer[0] <= timer_max,
+	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
+	    timer_max));
+	for (i = 1; i < nitems(intr_timer); i++) {
+		KASSERT(intr_timer[i] >= intr_timer[i - 1],
+		    ("%s: timers not listed in increasing order (%d)",
+		    __func__, i));
+
+		while (intr_timer[i] > timer_max) {
+			if (i == nitems(intr_timer) - 1) {
+				intr_timer[i] = timer_max;
+				break;
+			}
+			intr_timer[i] += intr_timer[i - 1];
+			intr_timer[i] /= 2;
 		}
+	}
 
-		t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD,
-		    V_THRESHOLD_0(intr_pktcount[0]) |
-		    V_THRESHOLD_1(intr_pktcount[1]) |
-		    V_THRESHOLD_2(intr_pktcount[2]) |
-		    V_THRESHOLD_3(intr_pktcount[3]));
+	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
+	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
+	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
+	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
+	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
+	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
+	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
+	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
+	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);
 
-		t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1,
-		    V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
-		    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])));
-		t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3,
-		    V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
-		    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])));
-		t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5,
-		    V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
-		    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])));
+	if (chip_id(sc) >= CHELSIO_T6) {
+		m = V_TSCALE(M_TSCALE);
+		if (tscale == 1)
+			v = 0;
+		else
+			v = V_TSCALE(tscale - 2);
+		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);
+	}
 
-		if (cong_drop == 0) {
-			t4_set_reg_field(sc, A_TP_PARA_REG3, F_TUNNELCNGDROP0 |
-			    F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
-			    F_TUNNELCNGDROP3, 0);
-		}
+	/* 4K, 16K, 64K, 256K DDP "page sizes" */
+	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
+	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);
+
+	m = v = F_TDDPTAGTCB;
+	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);
+
+	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
+	    F_RESETDDPOFFSET;
+	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
+	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
+}
+
+/*
+ * SGE wants the buffer to be at least 64B and then a multiple of 16.  If
+ * padding is in use, the buffer's start and end need to be aligned to the pad
+ * boundary as well.  We'll just make sure that the size is a multiple of the
+ * boundary here; it is up to the buffer allocation code to make sure the start
+ * of the buffer is aligned as well.
+ */
+static inline int
+hwsz_ok(struct adapter *sc, int hwsz)
+{
+	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;
+
+	return (hwsz >= 64 && (hwsz & mask) == 0);
+}
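
For example, with padding enabled and a 64-byte pad boundary the mask is
63: hwsz_ok() accepts 2048 and 4096 but rejects 2080 (2080 & 63 == 32).
With padding disabled the mask is 15, so any multiple of 16 that is at
least 64 bytes passes.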
+
+/*
+ * XXX: driver really should be able to deal with unexpected settings.
+ */
+int
+t4_read_chip_settings(struct adapter *sc)
+{
+	struct sge *s = &sc->sge;
+	struct sge_params *sp = &sc->params.sge;
+	int i, j, n, rc = 0;
+	uint32_t m, v, r;
+	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
+	static int sw_buf_sizes[] = {	/* Sorted by size */
+		MCLBYTES,
+#if MJUMPAGESIZE != MCLBYTES
+		MJUMPAGESIZE,
+#endif
+		MJUM9BYTES,
+		MJUM16BYTES
+	};
+	struct sw_zone_info *swz, *safe_swz;
+	struct hw_buf_info *hwb;
+
+	m = F_RXPKTCPLMODE;
+	v = F_RXPKTCPLMODE;
+	r = sc->params.sge.sge_control;
+	if ((r & m) != v) {
+		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
+		rc = EINVAL;
 	}
 
-	v = t4_read_reg(sc, A_SGE_CONTROL);
-	if ((v & ctrl_mask) != ctrl_val) {
-		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", v);
+	/*
+	 * If this changes then every single use of PAGE_SHIFT in the driver
+	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
+	 */
+	if (sp->page_shift != PAGE_SHIFT) {
+		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
 		rc = EINVAL;
 	}
 
-	v = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE);
-	if (v != hpsize) {
-		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", v);
+	/* Filter out unusable hw buffer sizes entirely (mark with -2). */
+	hwb = &s->hw_buf_info[0];
+	for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) {
+		r = sc->params.sge.sge_fl_buffer_size[i];
+		hwb->size = r;
+		hwb->zidx = hwsz_ok(sc, r) ? -1 : -2;
+		hwb->next = -1;
+	}
+
+	/*
+	 * Create a sorted list in decreasing order of hw buffer sizes (and so
+	 * increasing order of spare area) for each software zone.
+	 *
+	 * If padding is enabled then the start and end of the buffer must align
+	 * to the pad boundary; if packing is enabled then they must align with
+	 * the pack boundary as well.  Allocations from the cluster zones are
+	 * aligned to min(size, 4K), so the buffer starts at that alignment and
+	 * ends at hwb->size alignment.  If mbuf inlining is allowed the
+	 * starting alignment will be reduced to MSIZE and the driver will
+	 * exercise appropriate caution when deciding on the best buffer layout
+	 * to use.
+	 */
+	n = 0;	/* no usable buffer size to begin with */
+	swz = &s->sw_zone_info[0];
+	safe_swz = NULL;
+	for (i = 0; i < SW_ZONE_SIZES; i++, swz++) {
+		int8_t head = -1, tail = -1;
+
+		swz->size = sw_buf_sizes[i];
+		swz->zone = m_getzone(swz->size);
+		swz->type = m_gettype(swz->size);
+
+		if (swz->size < PAGE_SIZE) {
+			MPASS(powerof2(swz->size));
+			if (fl_pad && (swz->size % sp->pad_boundary != 0))
+				continue;
+		}
+
+		if (swz->size == safest_rx_cluster)
+			safe_swz = swz;
+
+		hwb = &s->hw_buf_info[0];
+		for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) {
+			if (hwb->zidx != -1 || hwb->size > swz->size)
+				continue;
+#ifdef INVARIANTS
+			if (fl_pad)
+				MPASS(hwb->size % sp->pad_boundary == 0);
+#endif
+			hwb->zidx = i;
+			if (head == -1)
+				head = tail = j;
+			else if (hwb->size < s->hw_buf_info[tail].size) {
+				s->hw_buf_info[tail].next = j;
+				tail = j;
+			} else {
+				int8_t *cur;
+				struct hw_buf_info *t;
+
+				for (cur = &head; *cur != -1; cur = &t->next) {
+					t = &s->hw_buf_info[*cur];
+					if (hwb->size == t->size) {
+						hwb->zidx = -2;
+						break;
+					}
+					if (hwb->size > t->size) {
+						hwb->next = *cur;
+						*cur = j;
+						break;
+					}
+				}
+			}
+		}
+		swz->head_hwidx = head;
+		swz->tail_hwidx = tail;
+
+		if (tail != -1) {
+			n++;
+			if (swz->size - s->hw_buf_info[tail].size >=
+			    CL_METADATA_SIZE)
+				sc->flags |= BUF_PACKING_OK;
+		}
+	}
+	if (n == 0) {
+		device_printf(sc->dev, "no usable SGE FL buffer size.\n");
 		rc = EINVAL;
 	}
 
-	for (i = 0; i < FL_BUF_SIZES; i++) {
-		v = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i));
-		if (v != FL_BUF_SIZE(i)) {
-			device_printf(sc->dev,
-			    "invalid SGE_FL_BUFFER_SIZE[%d](0x%x)\n", i, v);
-			rc = EINVAL;
+	s->safe_hwidx1 = -1;
+	s->safe_hwidx2 = -1;
+	if (safe_swz != NULL) {
+		s->safe_hwidx1 = safe_swz->head_hwidx;
+		for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) {
+			int spare;
+
+			hwb = &s->hw_buf_info[i];
+#ifdef INVARIANTS
+			if (fl_pad)
+				MPASS(hwb->size % sp->pad_boundary == 0);
+#endif
+			spare = safe_swz->size - hwb->size;
+			if (spare >= CL_METADATA_SIZE) {
+				s->safe_hwidx2 = i;
+				break;
+			}
 		}
 	}
 
-	v = t4_read_reg(sc, A_SGE_CONM_CTRL);
-	s->fl_starve_threshold = G_EGRTHRESHOLD(v) * 2 + 1;
+	if (sc->flags & IS_VF)
+		return (0);
 
-	v = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD);
-	sc->sge.counter_val[0] = G_THRESHOLD_0(v);
-	sc->sge.counter_val[1] = G_THRESHOLD_1(v);
-	sc->sge.counter_val[2] = G_THRESHOLD_2(v);
-	sc->sge.counter_val[3] = G_THRESHOLD_3(v);
+	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
+	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
+	if (r != v) {
+		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
+		rc = EINVAL;
+	}
 
-	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
-	sc->sge.timer_val[0] = G_TIMERVALUE0(v) / core_ticks_per_usec(sc);
-	sc->sge.timer_val[1] = G_TIMERVALUE1(v) / core_ticks_per_usec(sc);
-	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3);
-	sc->sge.timer_val[2] = G_TIMERVALUE2(v) / core_ticks_per_usec(sc);
-	sc->sge.timer_val[3] = G_TIMERVALUE3(v) / core_ticks_per_usec(sc);
-	v = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5);
-	sc->sge.timer_val[4] = G_TIMERVALUE4(v) / core_ticks_per_usec(sc);
-	sc->sge.timer_val[5] = G_TIMERVALUE5(v) / core_ticks_per_usec(sc);
+	m = v = F_TDDPTAGTCB;
+	r = t4_read_reg(sc, A_ULP_RX_CTL);
+	if ((r & m) != v) {
+		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
+		rc = EINVAL;
+	}
 
-	t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg);
-	t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg);
-	t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
-	t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
+	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
+	    F_RESETDDPOFFSET;
+	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
+	r = t4_read_reg(sc, A_TP_PARA_REG5);
+	if ((r & m) != v) {
+		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
+		rc = EINVAL;
+	}
 
-	t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
+	t4_init_tp_params(sc);
 
+	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
+	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);
+
 	return (rc);
 }
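
The list-building loop above leaves each software zone with a chain of
usable hardware buffer sizes in decreasing order, rooted at head_hwidx and
linked through hw_buf_info[].next.  A traversal sketch (the real consumers
are find_best_refill_source() and find_safe_refill_source()):

	for (i = swz->head_hwidx; i != -1; i = s->hw_buf_info[i].next) {
		hwb = &s->hw_buf_info[i];
		/* hwb->size shrinks, and spare room grows, along the chain. */
	}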
 
@@ -394,6 +831,32 @@
 	return (rc);
 }
 
+void
+t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
+    struct sysctl_oid_list *children)
+{
+	struct sge_params *sp = &sc->params.sge;
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
+	    CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A",
+	    "freelist buffer sizes");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
+	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
+	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
+	    NULL, sp->spg_len, "status page size (bytes)");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
+	    NULL, cong_drop, "congestion drop setting");
+
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
+	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
+}
+
 int
 t4_destroy_dma_tag(struct adapter *sc)
 {
@@ -430,7 +893,8 @@
 	 * Management queue.  This is just a control queue that uses the fwq as
 	 * its associated iq.
 	 */
-	rc = alloc_mgmtq(sc);
+	if (!(sc->flags & IS_VF))
+		rc = alloc_mgmtq(sc);
 
 	return (rc);
 }
@@ -457,105 +921,93 @@
 }
 
 static inline int
-first_vector(struct port_info *pi)
+first_vector(struct vi_info *vi)
 {
-	struct adapter *sc = pi->adapter;
-	int rc = T4_EXTRA_INTR, i;
+	struct adapter *sc = vi->pi->adapter;
 
 	if (sc->intr_count == 1)
 		return (0);
 
-	for_each_port(sc, i) {
-		struct port_info *p = sc->port[i];
-
-		if (i == pi->port_id)
-			break;
-
-#ifdef TCP_OFFLOAD
-		if (sc->flags & INTR_DIRECT)
-			rc += p->nrxq + p->nofldrxq;
-		else
-			rc += max(p->nrxq, p->nofldrxq);
-#else
-		/*
-		 * Not compiled with offload support and intr_count > 1.  Only
-		 * NIC queues exist and they'd better be taking direct
-		 * interrupts.
-		 */
-		KASSERT(sc->flags & INTR_DIRECT,
-		    ("%s: intr_count %d, !INTR_DIRECT", __func__,
-		    sc->intr_count));
-
-		rc += p->nrxq;
-#endif
-	}
-
-	return (rc);
+	return (vi->first_intr);
 }
 
 /*
  * Given an arbitrary "index," come up with an iq that can be used by other
- * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
+ * queues (of this VI) for interrupt forwarding, SGE egress updates, etc.
  * The iq returned is guaranteed to be something that takes direct interrupts.
  */
 static struct sge_iq *
-port_intr_iq(struct port_info *pi, int idx)
+vi_intr_iq(struct vi_info *vi, int idx)
 {
-	struct adapter *sc = pi->adapter;
+	struct adapter *sc = vi->pi->adapter;
 	struct sge *s = &sc->sge;
 	struct sge_iq *iq = NULL;
+	int nintr, i;
 
 	if (sc->intr_count == 1)
 		return (&sc->sge.fwq);
 
-#ifdef TCP_OFFLOAD
-	if (sc->flags & INTR_DIRECT) {
-		idx %= pi->nrxq + pi->nofldrxq;
-		
-		if (idx >= pi->nrxq) {
-			idx -= pi->nrxq;
-			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
-		} else
-			iq = &s->rxq[pi->first_rxq + idx].iq;
+	nintr = vi->nintr;
+#ifdef DEV_NETMAP
+	/* Do not consider any netmap-only interrupts */
+	if (vi->flags & INTR_RXQ && vi->nnmrxq > vi->nrxq)
+		nintr -= vi->nnmrxq - vi->nrxq;
+#endif
+	KASSERT(nintr != 0,
+	    ("%s: vi %p has no exclusive interrupts, total interrupts = %d",
+	    __func__, vi, sc->intr_count));
+	i = idx % nintr;
 
-	} else {
-		idx %= max(pi->nrxq, pi->nofldrxq);
-
-		if (pi->nrxq >= pi->nofldrxq)
-			iq = &s->rxq[pi->first_rxq + idx].iq;
-		else
-			iq = &s->ofld_rxq[pi->first_ofld_rxq + idx].iq;
+	if (vi->flags & INTR_RXQ) {
+		if (i < vi->nrxq) {
+			iq = &s->rxq[vi->first_rxq + i].iq;
+			goto done;
+		}
+		i -= vi->nrxq;
 	}
-#else
-	/*
-	 * Not compiled with offload support and intr_count > 1.  Only NIC
-	 * queues exist and they'd better be taking direct interrupts.
-	 */
-	KASSERT(sc->flags & INTR_DIRECT,
-	    ("%s: intr_count %d, !INTR_DIRECT", __func__, sc->intr_count));
-
-	idx %= pi->nrxq;
-	iq = &s->rxq[pi->first_rxq + idx].iq;
+#ifdef TCP_OFFLOAD
+	if (vi->flags & INTR_OFLD_RXQ) {
+		if (i < vi->nofldrxq) {
+			iq = &s->ofld_rxq[vi->first_ofld_rxq + i].iq;
+			goto done;
+		}
+		i -= vi->nofldrxq;
+	}
 #endif
-
-	KASSERT(iq->flags & IQ_INTR, ("%s: EDOOFUS", __func__));
+	panic("%s: vi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
+	    vi, vi->flags & INTR_ALL, idx, nintr);
+done:
+	MPASS(iq != NULL);
+	KASSERT(iq->flags & IQ_INTR,
+	    ("%s: iq %p (vi %p, intr_flags 0x%lx, idx %d)", __func__, iq, vi,
+	    vi->flags & INTR_ALL, idx));
 	return (iq);
 }
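
As a worked example: a VI with INTR_RXQ and INTR_OFLD_RXQ set, nrxq = 4,
nofldrxq = 2 and nintr = 6 maps idx 0-3 to the NIC rx iqs, idx 4-5 to the
offload rx iqs, and idx 6 back to rxq 0, courtesy of the idx % nintr step.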
 
+/* Maximum payload that can be delivered with a single iq descriptor */
 static inline int
-mtu_to_bufsize(int mtu)
+mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
 {
-	int bufsize;
+	int payload;
 
-	/* large enough for a frame even when VLAN extraction is disabled */
-	bufsize = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + mtu;
-	bufsize = roundup(bufsize + fl_pktshift, fl_pad);
+#ifdef TCP_OFFLOAD
+	if (toe) {
+		payload = sc->tt.rx_coalesce ?
+		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu;
+	} else {
+#endif
+		/* large enough even when hw VLAN extraction is disabled */
+		payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
+		    ETHER_VLAN_ENCAP_LEN + mtu;
+#ifdef TCP_OFFLOAD
+	}
+#endif
 
-	return (bufsize);
+	return (payload);
 }
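
For the NIC case this is plain arithmetic: with the default fl_pktshift of
2 and a 1500-byte MTU the maximum payload is 2 + 14 (Ethernet header) +
4 (VLAN tag) + 1500 = 1520 bytes, which is what init_fl() is given below
when the freelist buffer layout is chosen.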
 
 int
-t4_setup_port_queues(struct port_info *pi)
+t4_setup_vi_queues(struct vi_info *vi)
 {
 	int rc = 0, i, j, intr_idx, iqid;
 	struct sge_rxq *rxq;
@@ -564,69 +1016,107 @@
 #ifdef TCP_OFFLOAD
 	struct sge_ofld_rxq *ofld_rxq;
 	struct sge_wrq *ofld_txq;
-	struct sysctl_oid *oid2 = NULL;
 #endif
+#ifdef DEV_NETMAP
+	int saved_idx;
+	struct sge_nm_rxq *nm_rxq;
+	struct sge_nm_txq *nm_txq;
+#endif
 	char name[16];
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
-	struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev);
+	struct ifnet *ifp = vi->ifp;
+	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
-	int bufsize = mtu_to_bufsize(pi->ifp->if_mtu);
+	int maxp, mtu = ifp->if_mtu;
 
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", CTLFLAG_RD,
-	    NULL, "rx queues");
+	/* Interrupt vector to start from (when using multiple vectors) */
+	intr_idx = first_vector(vi);
 
-#ifdef TCP_OFFLOAD
-	if (is_offload(sc)) {
-		oid2 = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq",
-		    CTLFLAG_RD, NULL,
-		    "rx queues for offloaded TCP connections");
+#ifdef DEV_NETMAP
+	saved_idx = intr_idx;
+	if (ifp->if_capabilities & IFCAP_NETMAP) {
+
+		/* netmap is supported with direct interrupts only. */
+		MPASS(vi->flags & INTR_RXQ);
+
+		/*
+		 * We don't have buffers to back the netmap rx queues
+		 * right now so we create the queues in a way that
+		 * doesn't set off any congestion signal in the chip.
+		 */
+		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
+		    CTLFLAG_RD, NULL, "rx queues");
+		for_each_nm_rxq(vi, i, nm_rxq) {
+			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
+			if (rc != 0)
+				goto done;
+			intr_idx++;
+		}
+
+		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
+		    CTLFLAG_RD, NULL, "tx queues");
+		for_each_nm_txq(vi, i, nm_txq) {
+			iqid = vi->first_nm_rxq + (i % vi->nnmrxq);
+			rc = alloc_nm_txq(vi, nm_txq, iqid, i, oid);
+			if (rc != 0)
+				goto done;
+		}
 	}
+
+	/* Normal rx queues and netmap rx queues share the same interrupts. */
+	intr_idx = saved_idx;
 #endif
 
-	/* Interrupt vector to start from (when using multiple vectors) */
-	intr_idx = first_vector(pi);
-
 	/*
-	 * First pass over all rx queues (NIC and TOE):
+	 * First pass over all NIC and TOE rx queues:
 	 * a) initialize iq and fl
 	 * b) allocate queue iff it will take direct interrupts.
 	 */
-	for_each_rxq(pi, i, rxq) {
+	maxp = mtu_to_max_payload(sc, mtu, 0);
+	if (vi->flags & INTR_RXQ) {
+		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
+		    CTLFLAG_RD, NULL, "rx queues");
+	}
+	for_each_rxq(vi, i, rxq) {
 
-		init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq,
-		    RX_IQ_ESIZE);
+		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);
 
 		snprintf(name, sizeof(name), "%s rxq%d-fl",
-		    device_get_nameunit(pi->dev), i);
-		init_fl(&rxq->fl, pi->qsize_rxq / 8, bufsize, name);
+		    device_get_nameunit(vi->dev), i);
+		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);
 
-		if (sc->flags & INTR_DIRECT
-#ifdef TCP_OFFLOAD
-		    || (sc->intr_count > 1 && pi->nrxq >= pi->nofldrxq)
-#endif
-		   ) {
+		if (vi->flags & INTR_RXQ) {
 			rxq->iq.flags |= IQ_INTR;
-			rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
+			rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
 			if (rc != 0)
 				goto done;
 			intr_idx++;
 		}
 	}
-
+#ifdef DEV_NETMAP
+	if (ifp->if_capabilities & IFCAP_NETMAP)
+		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
+#endif
 #ifdef TCP_OFFLOAD
-	for_each_ofld_rxq(pi, i, ofld_rxq) {
+	maxp = mtu_to_max_payload(sc, mtu, 1);
+	if (vi->flags & INTR_OFLD_RXQ) {
+		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
+		    CTLFLAG_RD, NULL,
+		    "rx queues for offloaded TCP connections");
+	}
+	for_each_ofld_rxq(vi, i, ofld_rxq) {
 
-		init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx,
-		    pi->qsize_rxq, RX_IQ_ESIZE);
+		init_iq(&ofld_rxq->iq, sc, vi->tmr_idx, vi->pktc_idx,
+		    vi->qsize_rxq);
 
 		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
-		    device_get_nameunit(pi->dev), i);
-		init_fl(&ofld_rxq->fl, pi->qsize_rxq / 8, OFLD_BUF_SIZE, name);
+		    device_get_nameunit(vi->dev), i);
+		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);
 
-		if (sc->flags & INTR_DIRECT ||
-		    (sc->intr_count > 1 && pi->nofldrxq > pi->nrxq)) {
+		if (vi->flags & INTR_OFLD_RXQ) {
 			ofld_rxq->iq.flags |= IQ_INTR;
-			rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
+			rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
 			if (rc != 0)
 				goto done;
 			intr_idx++;
@@ -635,33 +1125,39 @@
 #endif
 
 	/*
-	 * Second pass over all rx queues (NIC and TOE).  The queues forwarding
+	 * Second pass over all NIC and TOE rx queues.  The queues forwarding
 	 * their interrupts are allocated now.
 	 */
 	j = 0;
-	for_each_rxq(pi, i, rxq) {
-		if (rxq->iq.flags & IQ_INTR)
-			continue;
+	if (!(vi->flags & INTR_RXQ)) {
+		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
+		    CTLFLAG_RD, NULL, "rx queues");
+		for_each_rxq(vi, i, rxq) {
+			MPASS(!(rxq->iq.flags & IQ_INTR));
 
-		intr_idx = port_intr_iq(pi, j)->abs_id;
+			intr_idx = vi_intr_iq(vi, j)->abs_id;
 
-		rc = alloc_rxq(pi, rxq, intr_idx, i, oid);
-		if (rc != 0)
-			goto done;
-		j++;
+			rc = alloc_rxq(vi, rxq, intr_idx, i, oid);
+			if (rc != 0)
+				goto done;
+			j++;
+		}
 	}
-
 #ifdef TCP_OFFLOAD
-	for_each_ofld_rxq(pi, i, ofld_rxq) {
-		if (ofld_rxq->iq.flags & IQ_INTR)
-			continue;
+	if (vi->nofldrxq != 0 && !(vi->flags & INTR_OFLD_RXQ)) {
+		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
+		    CTLFLAG_RD, NULL,
+		    "rx queues for offloaded TCP connections");
+		for_each_ofld_rxq(vi, i, ofld_rxq) {
+			MPASS(!(ofld_rxq->iq.flags & IQ_INTR));
 
-		intr_idx = port_intr_iq(pi, j)->abs_id;
+			intr_idx = vi_intr_iq(vi, j)->abs_id;
 
-		rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid2);
-		if (rc != 0)
-			goto done;
-		j++;
+			rc = alloc_ofld_rxq(vi, ofld_rxq, intr_idx, i, oid);
+			if (rc != 0)
+				goto done;
+			j++;
+		}
 	}
 #endif
 
@@ -668,43 +1164,38 @@
 	/*
 	 * Now the tx queues.  Only one pass needed.
 	 */
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
 	    NULL, "tx queues");
 	j = 0;
-	for_each_txq(pi, i, txq) {
-		uint16_t iqid;
-
-		iqid = port_intr_iq(pi, j)->cntxt_id;
-
+	for_each_txq(vi, i, txq) {
+		iqid = vi_intr_iq(vi, j)->cntxt_id;
 		snprintf(name, sizeof(name), "%s txq%d",
-		    device_get_nameunit(pi->dev), i);
-		init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid,
+		    device_get_nameunit(vi->dev), i);
+		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, iqid,
 		    name);
 
-		rc = alloc_txq(pi, txq, i, oid);
+		rc = alloc_txq(vi, txq, i, oid);
 		if (rc != 0)
 			goto done;
 		j++;
 	}
-
 #ifdef TCP_OFFLOAD
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq",
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
 	    CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections");
-	for_each_ofld_txq(pi, i, ofld_txq) {
-		uint16_t iqid;
+	for_each_ofld_txq(vi, i, ofld_txq) {
+		struct sysctl_oid *oid2;
 
-		iqid = port_intr_iq(pi, j)->cntxt_id;
-
+		iqid = vi_intr_iq(vi, j)->cntxt_id;
 		snprintf(name, sizeof(name), "%s ofld_txq%d",
-		    device_get_nameunit(pi->dev), i);
-		init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan,
+		    device_get_nameunit(vi->dev), i);
+		init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan,
 		    iqid, name);
 
 		snprintf(name, sizeof(name), "%d", i);
-		oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
 		    name, CTLFLAG_RD, NULL, "offload tx queue");
 
-		rc = alloc_wrq(sc, pi, ofld_txq, oid2);
+		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
 		if (rc != 0)
 			goto done;
 		j++;
@@ -714,17 +1205,20 @@
 	/*
 	 * Finally, the control queue.
 	 */
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
+	if (!IS_MAIN_VI(vi) || sc->flags & IS_VF)
+		goto done;
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD,
 	    NULL, "ctrl queue");
 	ctrlq = &sc->sge.ctrlq[pi->port_id];
-	iqid = port_intr_iq(pi, 0)->cntxt_id;
-	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev));
-	init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name);
-	rc = alloc_wrq(sc, pi, ctrlq, oid);
+	iqid = vi_intr_iq(vi, 0)->cntxt_id;
+	snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev));
+	init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid,
+	    name);
+	rc = alloc_wrq(sc, vi, ctrlq, oid);
 
 done:
 	if (rc)
-		t4_teardown_port_queues(pi);
+		t4_teardown_vi_queues(vi);
 
 	return (rc);
 }
@@ -733,9 +1227,10 @@
  * Idempotent
  */
 int
-t4_teardown_port_queues(struct port_info *pi)
+t4_teardown_vi_queues(struct vi_info *vi)
 {
 	int i;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
 	struct sge_rxq *rxq;
 	struct sge_txq *txq;
@@ -743,26 +1238,42 @@
 	struct sge_ofld_rxq *ofld_rxq;
 	struct sge_wrq *ofld_txq;
 #endif
+#ifdef DEV_NETMAP
+	struct sge_nm_rxq *nm_rxq;
+	struct sge_nm_txq *nm_txq;
+#endif
 
 	/* Do this before freeing the queues */
-	if (pi->flags & PORT_SYSCTL_CTX) {
-		sysctl_ctx_free(&pi->ctx);
-		pi->flags &= ~PORT_SYSCTL_CTX;
+	if (vi->flags & VI_SYSCTL_CTX) {
+		sysctl_ctx_free(&vi->ctx);
+		vi->flags &= ~VI_SYSCTL_CTX;
 	}
 
+#ifdef DEV_NETMAP
+	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
+		for_each_nm_txq(vi, i, nm_txq) {
+			free_nm_txq(vi, nm_txq);
+		}
+
+		for_each_nm_rxq(vi, i, nm_rxq) {
+			free_nm_rxq(vi, nm_rxq);
+		}
+	}
+#endif
+
 	/*
 	 * Take down all the tx queues first, as they reference the rx queues
 	 * (for egress updates, etc.).
 	 */
 
-	free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
+	if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF))
+		free_wrq(sc, &sc->sge.ctrlq[pi->port_id]);
 
-	for_each_txq(pi, i, txq) {
-		free_txq(pi, txq);
+	for_each_txq(vi, i, txq) {
+		free_txq(vi, txq);
 	}
-
 #ifdef TCP_OFFLOAD
-	for_each_ofld_txq(pi, i, ofld_txq) {
+	for_each_ofld_txq(vi, i, ofld_txq) {
 		free_wrq(sc, ofld_txq);
 	}
 #endif
@@ -772,15 +1283,14 @@
 	 * reference other rx queues.
 	 */
 
-	for_each_rxq(pi, i, rxq) {
+	for_each_rxq(vi, i, rxq) {
 		if ((rxq->iq.flags & IQ_INTR) == 0)
-			free_rxq(pi, rxq);
+			free_rxq(vi, rxq);
 	}
-
 #ifdef TCP_OFFLOAD
-	for_each_ofld_rxq(pi, i, ofld_rxq) {
+	for_each_ofld_rxq(vi, i, ofld_rxq) {
 		if ((ofld_rxq->iq.flags & IQ_INTR) == 0)
-			free_ofld_rxq(pi, ofld_rxq);
+			free_ofld_rxq(vi, ofld_rxq);
 	}
 #endif
 
@@ -788,15 +1298,14 @@
 	 * Then take down the rx queues that take direct interrupts.
 	 */
 
-	for_each_rxq(pi, i, rxq) {
+	for_each_rxq(vi, i, rxq) {
 		if (rxq->iq.flags & IQ_INTR)
-			free_rxq(pi, rxq);
+			free_rxq(vi, rxq);
 	}
-
 #ifdef TCP_OFFLOAD
-	for_each_ofld_rxq(pi, i, ofld_rxq) {
+	for_each_ofld_rxq(vi, i, ofld_rxq) {
 		if (ofld_rxq->iq.flags & IQ_INTR)
-			free_ofld_rxq(pi, ofld_rxq);
+			free_ofld_rxq(vi, ofld_rxq);
 	}
 #endif
 
@@ -852,6 +1361,21 @@
 	}
 }
 
+void
+t4_vi_intr(void *arg)
+{
+	struct irq *irq = arg;
+
+#ifdef DEV_NETMAP
+	if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) {
+		t4_nm_intr(irq->nm_rxq);
+		atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON);
+	}
+#endif
+	if (irq->rxq != NULL)
+		t4_intr(irq->rxq);
+}
+
 /*
  * Deals with anything and everything on the given ingress queue.
  */
@@ -860,33 +1384,44 @@
 {
 	struct sge_iq *q;
 	struct sge_rxq *rxq = iq_to_rxq(iq);	/* Use iff iq is part of rxq */
-	struct sge_fl *fl = &rxq->fl;		/* Use iff IQ_HAS_FL */
+	struct sge_fl *fl;			/* Use iff IQ_HAS_FL */
 	struct adapter *sc = iq->adapter;
-	struct rsp_ctrl *ctrl;
-	const struct rss_header *rss;
-	int ndescs = 0, limit, fl_bufs_used = 0;
-	int rsp_type;
+	struct iq_desc *d = &iq->desc[iq->cidx];
+	int ndescs = 0, limit;
+	int rsp_type, refill;
 	uint32_t lq;
+	uint16_t fl_hw_cidx;
 	struct mbuf *m0;
 	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
+#if defined(INET) || defined(INET6)
+	const struct timeval lro_timeout = {0, sc->lro_timeout};
+#endif
 
-	limit = budget ? budget : iq->qsize / 8;
-
 	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
 
+	limit = budget ? budget : iq->qsize / 16;
+
+	if (iq->flags & IQ_HAS_FL) {
+		fl = &rxq->fl;
+		fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
+	} else {
+		fl = NULL;
+		fl_hw_cidx = 0;			/* to silence gcc warning */
+	}
+
 	/*
 	 * We always come back and check the descriptor ring for new indirect
 	 * interrupts and other responses after running a single handler.
 	 */
 	for (;;) {
-		while (is_new_response(iq, &ctrl)) {
+		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
 
 			rmb();
 
+			refill = 0;
 			m0 = NULL;
-			rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
-			lq = be32toh(ctrl->pldbuflen_qid);
-			rss = (const void *)iq->cdesc;
+			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
+			lq = be32toh(d->rsp.pldbuflen_qid);
 
 			switch (rsp_type) {
 			case X_RSPD_TYPE_FLBUF:
@@ -895,7 +1430,10 @@
 				    ("%s: data for an iq (%p) with no freelist",
 				    __func__, iq));
 
-				m0 = get_fl_payload(sc, fl, lq, &fl_bufs_used);
+				m0 = get_fl_payload(sc, fl, lq);
+				if (__predict_false(m0 == NULL))
+					goto process_iql;
+				refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2;
 #ifdef T4_PKT_TIMESTAMP
 				/*
 				 * 60 bit timestamp for the payload is
@@ -914,10 +1452,10 @@
 				/* fall through */
 
 			case X_RSPD_TYPE_CPL:
-				KASSERT(rss->opcode < NUM_CPL_CMDS,
+				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
 				    ("%s: bad opcode %02x.", __func__,
-				    rss->opcode));
-				sc->cpl_handler[rss->opcode](iq, rss, m0);
+				    d->rss.opcode));
+				t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
 				break;
 
 			case X_RSPD_TYPE_INTR:
@@ -932,10 +1470,22 @@
 				    ("%s: budget %u, rsp_type %u", __func__,
 				    budget, rsp_type));
 
-				q = sc->sge.iqmap[lq - sc->sge.iq_start];
+				/*
+				 * There are 1K interrupt-capable queues (qids 0
+				 * through 1023).  A response type indicating a
+				 * forwarded interrupt with a qid >= 1K is an
+				 * iWARP async notification.
+				 */
+				if (lq >= 1024) {
+					t4_an_handler(iq, &d->rsp);
+					break;
+				}
+
+				q = sc->sge.iqmap[lq - sc->sge.iq_start -
+				    sc->sge.iq_base];
 				if (atomic_cmpset_int(&q->state, IQS_IDLE,
 				    IQS_BUSY)) {
-					if (service_iq(q, q->qsize / 8) == 0) {
+					if (service_iq(q, q->qsize / 16) == 0) {
 						atomic_cmpset_int(&q->state,
 						    IQS_BUSY, IQS_IDLE);
 					} else {
@@ -946,31 +1496,54 @@
 				break;
 
 			default:
-				sc->an_handler(iq, ctrl);
+				KASSERT(0,
+				    ("%s: illegal response type %d on iq %p",
+				    __func__, rsp_type, iq));
+				log(LOG_ERR,
+				    "%s: illegal response type %d on iq %p",
+				    device_get_nameunit(sc->dev), rsp_type, iq);
 				break;
 			}
 
-			iq_next(iq);
-			if (++ndescs == limit) {
-				t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+			d++;
+			if (__predict_false(++iq->cidx == iq->sidx)) {
+				iq->cidx = 0;
+				iq->gen ^= F_RSPD_GEN;
+				d = &iq->desc[0];
+			}
+			if (__predict_false(++ndescs == limit)) {
+				t4_write_reg(sc, sc->sge_gts_reg,
 				    V_CIDXINC(ndescs) |
 				    V_INGRESSQID(iq->cntxt_id) |
 				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
 				ndescs = 0;
 
-				if (fl_bufs_used > 0) {
-					FL_LOCK(fl);
-					fl->needed += fl_bufs_used;
-					refill_fl(sc, fl, fl->cap / 8);
-					FL_UNLOCK(fl);
-					fl_bufs_used = 0;
+#if defined(INET) || defined(INET6)
+				if (iq->flags & IQ_LRO_ENABLED &&
+				    sc->lro_timeout != 0) {
+					tcp_lro_flush_inactive(&rxq->lro,
+					    &lro_timeout);
 				}
+#endif
 
-				if (budget)
+				if (budget) {
+					if (iq->flags & IQ_HAS_FL) {
+						FL_LOCK(fl);
+						refill_fl(sc, fl, 32);
+						FL_UNLOCK(fl);
+					}
 					return (EINPROGRESS);
+				}
 			}
+			if (refill) {
+				FL_LOCK(fl);
+				refill_fl(sc, fl, 32);
+				FL_UNLOCK(fl);
+				fl_hw_cidx = fl->hw_cidx;
+			}
 		}
 
+process_iql:
 		if (STAILQ_EMPTY(&iql))
 			break;
 
@@ -999,7 +1572,7 @@
 	}
 #endif
 
-	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
+	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
 	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
 
 	if (iq->flags & IQ_HAS_FL) {
@@ -1006,8 +1579,7 @@
 		int starved;
 
 		FL_LOCK(fl);
-		fl->needed += fl_bufs_used;
-		starved = refill_fl(sc, fl, fl->cap / 4);
+		starved = refill_fl(sc, fl, 64);
 		FL_UNLOCK(fl);
 		if (__predict_false(starved != 0))
 			add_fl_to_sfl(sc, fl);
@@ -1016,91 +1588,220 @@
 	return (0);
 }
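
The descriptor loop above replaces the old is_new_response()/iq_next()
pair with an explicit generation bit: a descriptor belongs to software
only while its F_RSPD_GEN bit matches iq->gen, and the bit flips each time
cidx wraps.  Reduced to its core, the consumption pattern is:

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {
		rmb();	/* read the payload only after seeing the gen bit */
		/* ... dispatch on G_RSPD_TYPE(d->rsp.u.type_gen) ... */
		d++;
		if (__predict_false(++iq->cidx == iq->sidx)) {
			iq->cidx = 0;
			iq->gen ^= F_RSPD_GEN;
			d = &iq->desc[0];
		}
	}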
 
+static inline int
+cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll)
+{
+	int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0;
+
+	if (rc)
+		MPASS(cll->region3 >= CL_METADATA_SIZE);
+
+	return (rc);
+}
+
+static inline struct cluster_metadata *
+cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll,
+    caddr_t cl)
+{
+
+	if (cl_has_metadata(fl, cll)) {
+		struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];
+
+		return ((struct cluster_metadata *)(cl + swz->size) - 1);
+	}
+	return (NULL);
+}
+
+static int
+rxb_free(struct mbuf *m, void *arg1, void *arg2)
+{
+	uma_zone_t zone = arg1;
+	caddr_t cl = arg2;
+
+	uma_zfree(zone, cl);
+	counter_u64_add(extfree_rels, 1);
+
+	return (EXT_FREE_OK);
+}
+
+/*
+ * The mbuf returned by this function could be allocated from zone_mbuf or
+ * constructed in spare room in the cluster.
+ *
+ * The mbuf carries the payload in one of these ways:
+ * a) frame inside the mbuf (mbuf from zone_mbuf)
+ * b) m_cljset (for clusters without metadata) zone_mbuf
+ * c) m_extaddref (cluster with metadata) inline mbuf
+ * d) m_extaddref (cluster with metadata) zone_mbuf
+ */
 static struct mbuf *
-get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf,
-    int *fl_bufs_used)
+get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
+    int remaining)
 {
-	struct mbuf *m0, *m;
+	struct mbuf *m;
 	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
-	unsigned int nbuf, len;
+	struct cluster_layout *cll = &sd->cll;
+	struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx];
+	struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx];
+	struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl);
+	int len, blen;
+	caddr_t payload;
 
-	/*
-	 * No assertion for the fl lock because we don't need it.  This routine
-	 * is called only from the rx interrupt handler and it only updates
-	 * fl->cidx.  (Contrast that with fl->pidx/fl->needed which could be
-	 * updated in the rx interrupt handler or the starvation helper routine.
-	 * That's why code that manipulates fl->pidx/fl->needed needs the fl
-	 * lock but this routine does not).
-	 */
+	blen = hwb->size - fl->rx_offset;	/* max possible in this buf */
+	len = min(remaining, blen);
+	payload = sd->cl + cll->region1 + fl->rx_offset;
+	if (fl->flags & FL_BUF_PACKING) {
+		const u_int l = fr_offset + len;
+		const u_int pad = roundup2(l, fl->buf_boundary) - l;
 
-	if (__predict_false((len_newbuf & F_RSPD_NEWBUF) == 0))
-		panic("%s: cannot handle packed frames", __func__);
-	len = G_RSPD_LEN(len_newbuf);
+		if (fl->rx_offset + len + pad < hwb->size)
+			blen = len + pad;
+		MPASS(fl->rx_offset + blen <= hwb->size);
+	} else {
+		MPASS(fl->rx_offset == 0);	/* not packing */
+	}
 
-	m0 = sd->m;
-	sd->m = NULL;	/* consumed */
 
-	bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, BUS_DMASYNC_POSTREAD);
-	m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR);
+	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
+
+		/*
+		 * Copy payload into a freshly allocated mbuf.
+		 */
+
+		m = fr_offset == 0 ?
+		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
+		if (m == NULL)
+			return (NULL);
+		fl->mbuf_allocated++;
 #ifdef T4_PKT_TIMESTAMP
-	/* Leave room for a timestamp */
-	m0->m_data += 8;
+		/* Leave room for a timestamp */
+		m->m_data += 8;
 #endif
+		/* copy data to mbuf */
+		bcopy(payload, mtod(m, caddr_t), len);
 
-	if (len < RX_COPY_THRESHOLD) {
-		/* copy data to mbuf, buffer will be recycled */
-		bcopy(sd->cl, mtod(m0, caddr_t), len);
-		m0->m_len = len;
+	} else if (sd->nmbuf * MSIZE < cll->region1) {
+
+		/*
+		 * There's spare room in the cluster for an mbuf.  Create one
+		 * and associate it with the payload that's in the cluster.
+		 */
+
+		MPASS(clm != NULL);
+		m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE);
+		/* No bzero required */
+		if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA,
+		    fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE))
+			return (NULL);
+		fl->mbuf_inlined++;
+		m_extaddref(m, payload, blen, &clm->refcount, rxb_free,
+		    swz->zone, sd->cl);
+		if (sd->nmbuf++ == 0)
+			counter_u64_add(extfree_refs, 1);
+
 	} else {
-		bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
-		m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx));
-		sd->cl = NULL;	/* consumed */
-		m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
+
+		/*
+		 * Grab an mbuf from zone_mbuf and associate it with the
+		 * payload in the cluster.
+		 */
+
+		m = fr_offset == 0 ?
+		    m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA);
+		if (m == NULL)
+			return (NULL);
+		fl->mbuf_allocated++;
+		if (clm != NULL) {
+			m_extaddref(m, payload, blen, &clm->refcount,
+			    rxb_free, swz->zone, sd->cl);
+			if (sd->nmbuf++ == 0)
+				counter_u64_add(extfree_refs, 1);
+		} else {
+			m_cljset(m, sd->cl, swz->type);
+			sd->cl = NULL;	/* consumed, not a recycle candidate */
+		}
 	}
-	m0->m_pkthdr.len = len;
+	if (fr_offset == 0)
+		m->m_pkthdr.len = remaining;
+	m->m_len = len;
 
-	sd++;
-	if (__predict_false(++fl->cidx == fl->cap)) {
-		sd = fl->sdesc;
-		fl->cidx = 0;
+	if (fl->flags & FL_BUF_PACKING) {
+		fl->rx_offset += blen;
+		MPASS(fl->rx_offset <= hwb->size);
+		if (fl->rx_offset < hwb->size)
+			return (m);	/* without advancing the cidx */
 	}
 
-	m = m0;
-	len -= m->m_len;
-	nbuf = 1;	/* # of fl buffers used */
+	if (__predict_false(++fl->cidx % 8 == 0)) {
+		uint16_t cidx = fl->cidx / 8;
 
-	while (len > 0) {
-		m->m_next = sd->m;
-		sd->m = NULL;	/* consumed */
-		m = m->m_next;
+		if (__predict_false(cidx == fl->sidx))
+			fl->cidx = cidx = 0;
+		fl->hw_cidx = cidx;
+	}
+	fl->rx_offset = 0;
 
-		bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map,
-		    BUS_DMASYNC_POSTREAD);
+	return (m);
+}
 
-		m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0);
-		if (len <= MLEN) {
-			bcopy(sd->cl, mtod(m, caddr_t), len);
-			m->m_len = len;
-		} else {
-			bus_dmamap_unload(fl->tag[sd->tag_idx],
-			    sd->map);
-			m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx));
-			sd->cl = NULL;	/* consumed */
-			m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx));
+static struct mbuf *
+get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf)
+{
+	struct mbuf *m0, *m, **pnext;
+	u_int remaining;
+	const u_int total = G_RSPD_LEN(len_newbuf);
+
+	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
+		M_ASSERTPKTHDR(fl->m0);
+		MPASS(fl->m0->m_pkthdr.len == total);
+		MPASS(fl->remaining < total);
+
+		m0 = fl->m0;
+		pnext = fl->pnext;
+		remaining = fl->remaining;
+		fl->flags &= ~FL_BUF_RESUME;
+		goto get_segment;
+	}
+
+	if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
+		fl->rx_offset = 0;
+		if (__predict_false(++fl->cidx % 8 == 0)) {
+			uint16_t cidx = fl->cidx / 8;
+
+			if (__predict_false(cidx == fl->sidx))
+				fl->cidx = cidx = 0;
+			fl->hw_cidx = cidx;
 		}
+	}
 
-		sd++;
-		if (__predict_false(++fl->cidx == fl->cap)) {
-			sd = fl->sdesc;
-			fl->cidx = 0;
+	/*
+	 * Payload starts at rx_offset in the current hw buffer.  Its length is
+	 * 'total' and it may span multiple hw buffers.
+	 */
+
+	m0 = get_scatter_segment(sc, fl, 0, total);
+	if (m0 == NULL)
+		return (NULL);
+	remaining = total - m0->m_len;
+	pnext = &m0->m_next;
+	while (remaining > 0) {
+get_segment:
+		MPASS(fl->rx_offset == 0);
+		m = get_scatter_segment(sc, fl, total - remaining, remaining);
+		if (__predict_false(m == NULL)) {
+			fl->m0 = m0;
+			fl->pnext = pnext;
+			fl->remaining = remaining;
+			fl->flags |= FL_BUF_RESUME;
+			return (NULL);
 		}
-
-		len -= m->m_len;
-		nbuf++;
+		*pnext = m;
+		pnext = &m->m_next;
+		remaining -= m->m_len;
 	}
+	*pnext = NULL;
 
-	(*fl_bufs_used) += nbuf;
-
+	M_ASSERTPKTHDR(m0);
 	return (m0);
 }
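
The FL_BUF_RESUME handling above is what makes rx robust against transient
mbuf shortages: when get_scatter_segment() fails mid-frame the partial
chain is parked in fl->m0/fl->pnext/fl->remaining, and the next pass
through service_iq() re-enters at get_segment to finish it, so no freelist
descriptor is consumed twice and no partial frame is leaked.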
 
@@ -1109,6 +1810,7 @@
 {
 	struct sge_rxq *rxq = iq_to_rxq(iq);
 	struct ifnet *ifp = rxq->ifp;
+	struct adapter *sc = iq->adapter;
 	const struct cpl_rx_pkt *cpl = (const void *)(rss + 1);
 #if defined(INET) || defined(INET6)
 	struct lro_ctrl *lro = &rxq->lro;
@@ -1117,15 +1819,15 @@
 	KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__,
 	    rss->opcode));
 
-	m0->m_pkthdr.len -= fl_pktshift;
-	m0->m_len -= fl_pktshift;
-	m0->m_data += fl_pktshift;
+	m0->m_pkthdr.len -= sc->params.sge.fl_pktshift;
+	m0->m_len -= sc->params.sge.fl_pktshift;
+	m0->m_data += sc->params.sge.fl_pktshift;
 
 	m0->m_pkthdr.rcvif = ifp;
-	m0->m_flags |= M_FLOWID;
-	m0->m_pkthdr.flowid = rss->hash_val;
+	M_HASHTYPE_SET(m0, M_HASHTYPE_OPAQUE);
+	m0->m_pkthdr.flowid = be32toh(rss->hash_val);
 
-	if (cpl->csum_calc && !cpl->err_vec) {
+	if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) {
 		if (ifp->if_capenable & IFCAP_RXCSUM &&
 		    cpl->l2info & htobe32(F_RXF_IP)) {
 			m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
@@ -1151,8 +1853,7 @@
 	}
 
 #if defined(INET) || defined(INET6)
-	if (cpl->l2info & htobe32(F_RXF_LRO) &&
-	    iq->flags & IQ_LRO_ENABLED &&
+	if (iq->flags & IQ_LRO_ENABLED &&
 	    tcp_lro_rx(lro, m0, 0) == 0) {
 		/* queued for LRO */
 	} else
@@ -1163,317 +1864,691 @@
 }
 
 /*
+ * Must drain the wrq or make sure that someone else will.
+ */
+static void
+wrq_tx_drain(void *arg, int n)
+{
+	struct sge_wrq *wrq = arg;
+	struct sge_eq *eq = &wrq->eq;
+
+	EQ_LOCK(eq);
+	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
+		drain_wrq_wr_list(wrq->adapter, wrq);
+	EQ_UNLOCK(eq);
+}
+
+static void
+drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq)
+{
+	struct sge_eq *eq = &wrq->eq;
+	u_int available, dbdiff;	/* # of hardware descriptors */
+	u_int n;
+	struct wrqe *wr;
+	struct fw_eth_tx_pkt_wr *dst;	/* any fw WR struct will do */
+
+	EQ_LOCK_ASSERT_OWNED(eq);
+	MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
+	wr = STAILQ_FIRST(&wrq->wr_list);
+	MPASS(wr != NULL);	/* Must be called with something useful to do */
+	MPASS(eq->pidx == eq->dbidx);
+	dbdiff = 0;
+
+	do {
+		eq->cidx = read_hw_cidx(eq);
+		if (eq->pidx == eq->cidx)
+			available = eq->sidx - 1;
+		else
+			available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
+
+		MPASS(wr->wrq == wrq);
+		n = howmany(wr->wr_len, EQ_ESIZE);
+		if (available < n)
+			break;
+
+		dst = (void *)&eq->desc[eq->pidx];
+		if (__predict_true(eq->sidx - eq->pidx > n)) {
+			/* Won't wrap, won't end exactly at the status page. */
+			bcopy(&wr->wr[0], dst, wr->wr_len);
+			eq->pidx += n;
+		} else {
+			int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE;
+
+			bcopy(&wr->wr[0], dst, first_portion);
+			if (wr->wr_len > first_portion) {
+				bcopy(&wr->wr[first_portion], &eq->desc[0],
+				    wr->wr_len - first_portion);
+			}
+			eq->pidx = n - (eq->sidx - eq->pidx);
+		}
+		wrq->tx_wrs_copied++;
+
+		if (available < eq->sidx / 4 &&
+		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
+			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
+			    F_FW_WR_EQUEQ);
+			eq->equeqidx = eq->pidx;
+		} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
+			dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
+			eq->equeqidx = eq->pidx;
+		}
+
+		dbdiff += n;
+		if (dbdiff >= 16) {
+			ring_eq_db(sc, eq, dbdiff);
+			dbdiff = 0;
+		}
+
+		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
+		free_wrqe(wr);
+		MPASS(wrq->nwr_pending > 0);
+		wrq->nwr_pending--;
+		MPASS(wrq->ndesc_needed >= n);
+		wrq->ndesc_needed -= n;
+	} while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL);
+
+	if (dbdiff)
+		ring_eq_db(sc, eq, dbdiff);
+}
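
The descriptor accounting above leans on modular ring arithmetic:
IDXDIFF(a, b, size) is the forward distance from b to a on the ring, and
one slot is always held in reserve so that a full ring stays
distinguishable from an empty one.  The idiom, as a sketch equivalent in
spirit to the driver's own macro:

	/* Forward distance from b to a on a ring of s entries. */
	#define RING_IDXDIFF(a, b, s) \
	    ((a) >= (b) ? (a) - (b) : (s) - (b) + (a))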
+
+/*
  * Doesn't fail.  Holds on to work requests it can't send right away.
  */
 void
 t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr)
 {
+#ifdef INVARIANTS
 	struct sge_eq *eq = &wrq->eq;
-	int can_reclaim;
-	caddr_t dst;
+#endif
 
-	TXQ_LOCK_ASSERT_OWNED(wrq);
+	EQ_LOCK_ASSERT_OWNED(eq);
+	MPASS(wr != NULL);
+	MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN);
+	MPASS((wr->wr_len & 0x7) == 0);
+
+	STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
+	wrq->nwr_pending++;
+	wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE);
+
+	if (!TAILQ_EMPTY(&wrq->incomplete_wrs))
+		return;	/* commit_wrq_wr will drain wr_list as well. */
+
+	drain_wrq_wr_list(sc, wrq);
+
+	/* Doorbell must have caught up to the pidx. */
+	MPASS(eq->pidx == eq->dbidx);
+}
+
+void
+t4_update_fl_bufsize(struct ifnet *ifp)
+{
+	struct vi_info *vi = ifp->if_softc;
+	struct adapter *sc = vi->pi->adapter;
+	struct sge_rxq *rxq;
 #ifdef TCP_OFFLOAD
-	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD ||
-	    (eq->flags & EQ_TYPEMASK) == EQ_CTRL,
-	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
-#else
-	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL,
-	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
+	struct sge_ofld_rxq *ofld_rxq;
 #endif
+	struct sge_fl *fl;
+	int i, maxp, mtu = ifp->if_mtu;
 
-	if (__predict_true(wr != NULL))
-		STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link);
+	maxp = mtu_to_max_payload(sc, mtu, 0);
+	for_each_rxq(vi, i, rxq) {
+		fl = &rxq->fl;
 
-	can_reclaim = reclaimable(eq);
-	if (__predict_false(eq->flags & EQ_STALLED)) {
-		if (can_reclaim < tx_resume_threshold(eq))
-			return;
-		eq->flags &= ~EQ_STALLED;
-		eq->unstalled++;
+		FL_LOCK(fl);
+		find_best_refill_source(sc, fl, maxp);
+		FL_UNLOCK(fl);
 	}
-	eq->cidx += can_reclaim;
-	eq->avail += can_reclaim;
-	if (__predict_false(eq->cidx >= eq->cap))
-		eq->cidx -= eq->cap;
+#ifdef TCP_OFFLOAD
+	maxp = mtu_to_max_payload(sc, mtu, 1);
+	for_each_ofld_rxq(vi, i, ofld_rxq) {
+		fl = &ofld_rxq->fl;
 
-	while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) {
-		int ndesc;
+		FL_LOCK(fl);
+		find_best_refill_source(sc, fl, maxp);
+		FL_UNLOCK(fl);
+	}
+#endif
+}
 
-		if (__predict_false(wr->wr_len < 0 ||
-		    wr->wr_len > SGE_MAX_WR_LEN || (wr->wr_len & 0x7))) {
+static inline int
+mbuf_nsegs(struct mbuf *m)
+{
 
-#ifdef INVARIANTS
-			panic("%s: work request with length %d", __func__,
-			    wr->wr_len);
-#endif
-#ifdef KDB
-			kdb_backtrace();
-#endif
-			log(LOG_ERR, "%s: %s work request with length %d",
-			    device_get_nameunit(sc->dev), __func__, wr->wr_len);
-			STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
-			free_wrqe(wr);
-			continue;
-		}
+	M_ASSERTPKTHDR(m);
+	KASSERT(m->m_pkthdr.l5hlen > 0,
+	    ("%s: mbuf %p missing information on # of segments.", __func__, m));
 
-		ndesc = howmany(wr->wr_len, EQ_ESIZE);
-		if (eq->avail < ndesc) {
-			wrq->no_desc++;
-			break;
-		}
+	return (m->m_pkthdr.l5hlen);
+}
 
-		dst = (void *)&eq->desc[eq->pidx];
-		copy_to_txd(eq, wrtod(wr), &dst, wr->wr_len);
+static inline void
+set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs)
+{
 
-		eq->pidx += ndesc;
-		eq->avail -= ndesc;
-		if (__predict_false(eq->pidx >= eq->cap))
-			eq->pidx -= eq->cap;
+	M_ASSERTPKTHDR(m);
+	m->m_pkthdr.l5hlen = nsegs;
+}
 
-		eq->pending += ndesc;
-		if (eq->pending > 16)
-			ring_eq_db(sc, eq);
+static inline int
+mbuf_len16(struct mbuf *m)
+{
+	int n;
 
-		wrq->tx_wrs++;
-		STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
-		free_wrqe(wr);
+	M_ASSERTPKTHDR(m);
+	n = m->m_pkthdr.PH_loc.eigth[0];
+	MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16);
 
-		if (eq->avail < 8) {
-			can_reclaim = reclaimable(eq);
-			eq->cidx += can_reclaim;
-			eq->avail += can_reclaim;
-			if (__predict_false(eq->cidx >= eq->cap))
-				eq->cidx -= eq->cap;
-		}
-	}
+	return (n);
+}
 
-	if (eq->pending)
-		ring_eq_db(sc, eq);
+static inline void
+set_mbuf_len16(struct mbuf *m, uint8_t len16)
+{
 
-	if (wr != NULL) {
-		eq->flags |= EQ_STALLED;
-		if (callout_pending(&eq->tx_callout) == 0)
-			callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
-	}
+	M_ASSERTPKTHDR(m);
+	m->m_pkthdr.PH_loc.eigth[0] = len16;
 }
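
These accessors cache the results of packet analysis in pkthdr fields that
the tx path does not otherwise use (l5hlen holds the segment count, one
PH_loc byte holds the WR length in 16-byte units), so the hot path never
recomputes them.  A minimal sketch of the same caching pattern; the struct
and field names below are illustrative stand-ins, not the real mbuf layout:

    #include <assert.h>
    #include <stdint.h>

    struct fake_pkthdr {
    	uint8_t	l5hlen;		/* unused on tx; repurposed for nsegs */
    	uint8_t	scratch[8];	/* stand-in for the PH_loc storage */
    };

    static void set_nsegs(struct fake_pkthdr *h, uint8_t n) { h->l5hlen = n; }
    static int  get_nsegs(struct fake_pkthdr *h) { return (h->l5hlen); }
    static void set_len16(struct fake_pkthdr *h, uint8_t n) { h->scratch[0] = n; }
    static int  get_len16(struct fake_pkthdr *h) { return (h->scratch[0]); }

    int
    main(void)
    {
    	struct fake_pkthdr hdr = { 0, { 0 } };

    	set_nsegs(&hdr, 3);		/* computed once at parse time... */
    	set_len16(&hdr, 18);
    	assert(get_nsegs(&hdr) == 3);	/* ...read cheaply on every tx */
    	assert(get_len16(&hdr) == 18);
    	return (0);
    }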
 
-/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */
-#define TXPKTS_PKT_HDR ((\
-    sizeof(struct ulp_txpkt) + \
-    sizeof(struct ulptx_idata) + \
-    sizeof(struct cpl_tx_pkt_core) \
-    ) / 8)
+static inline int
+needs_tso(struct mbuf *m)
+{
 
-/* Header of a coalesced tx WR, before SGL of first packet (in flits) */
-#define TXPKTS_WR_HDR (\
-    sizeof(struct fw_eth_tx_pkts_wr) / 8 + \
-    TXPKTS_PKT_HDR)
+	M_ASSERTPKTHDR(m);
 
-/* Header of a tx WR, before SGL of first packet (in flits) */
-#define TXPKT_WR_HDR ((\
-    sizeof(struct fw_eth_tx_pkt_wr) + \
-    sizeof(struct cpl_tx_pkt_core) \
-    ) / 8 )
+	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+		KASSERT(m->m_pkthdr.tso_segsz > 0,
+		    ("%s: TSO requested in mbuf %p but MSS not provided",
+		    __func__, m));
+		return (1);
+	}
 
-/* Header of a tx LSO WR, before SGL of first packet (in flits) */
-#define TXPKT_LSO_WR_HDR ((\
-    sizeof(struct fw_eth_tx_pkt_wr) + \
-    sizeof(struct cpl_tx_pkt_lso_core) + \
-    sizeof(struct cpl_tx_pkt_core) \
-    ) / 8 )
+	return (0);
+}
 
-int
-t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m)
+static inline int
+needs_l3_csum(struct mbuf *m)
 {
-	struct port_info *pi = (void *)ifp->if_softc;
-	struct adapter *sc = pi->adapter;
-	struct sge_eq *eq = &txq->eq;
-	struct buf_ring *br = txq->br;
-	struct mbuf *next;
-	int rc, coalescing, can_reclaim;
-	struct txpkts txpkts;
-	struct sgl sgl;
 
-	TXQ_LOCK_ASSERT_OWNED(txq);
-	KASSERT(m, ("%s: called with nothing to do.", __func__));
-	KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH,
-	    ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK));
+	M_ASSERTPKTHDR(m);
 
-	prefetch(&eq->desc[eq->pidx]);
-	prefetch(&txq->sdesc[eq->pidx]);
+	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))
+		return (1);
+	return (0);
+}
 
-	txpkts.npkt = 0;/* indicates there's nothing in txpkts */
-	coalescing = 0;
+static inline int
+needs_l4_csum(struct mbuf *m)
+{
 
-	can_reclaim = reclaimable(eq);
-	if (__predict_false(eq->flags & EQ_STALLED)) {
-		if (can_reclaim < tx_resume_threshold(eq)) {
-			txq->m = m;
-			return (0);
-		}
-		eq->flags &= ~EQ_STALLED;
-		eq->unstalled++;
-	}
+	M_ASSERTPKTHDR(m);
 
-	if (__predict_false(eq->flags & EQ_DOOMED)) {
-		m_freem(m);
-		while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
-			m_freem(m);
-		return (ENETDOWN);
-	}
+	if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
+	    CSUM_TCP_IPV6 | CSUM_TSO))
+		return (1);
+	return (0);
+}
 
-	if (eq->avail < 8 && can_reclaim)
-		reclaim_tx_descs(txq, can_reclaim, 32);
+static inline int
+needs_vlan_insertion(struct mbuf *m)
+{
 
-	for (; m; m = next ? next : drbr_dequeue(ifp, br)) {
+	M_ASSERTPKTHDR(m);
 
-		if (eq->avail < 8)
-			break;
+	if (m->m_flags & M_VLANTAG) {
+		KASSERT(m->m_pkthdr.ether_vtag != 0,
+		    ("%s: HWVLAN requested in mbuf %p but tag not provided",
+		    __func__, m));
+		return (1);
+	}
+	return (0);
+}
 
-		next = m->m_nextpkt;
-		m->m_nextpkt = NULL;
+static void *
+m_advance(struct mbuf **pm, int *poffset, int len)
+{
+	struct mbuf *m = *pm;
+	int offset = *poffset;
+	uintptr_t p = 0;
 
-		if (next || buf_ring_peek(br))
-			coalescing = 1;
+	MPASS(len > 0);
 
-		rc = get_pkt_sgl(txq, &m, &sgl, coalescing);
-		if (rc != 0) {
-			if (rc == ENOMEM) {
+	for (;;) {
+		if (offset + len < m->m_len) {
+			offset += len;
+			p = mtod(m, uintptr_t) + offset;
+			break;
+		}
+		len -= m->m_len - offset;
+		m = m->m_next;
+		offset = 0;
+		MPASS(m != NULL);
+	}
+	*poffset = offset;
+	*pm = m;
+	return ((void *)p);
+}
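
m_advance() moves a (mbuf, offset) cursor forward by len bytes, hopping to
the next mbuf whenever the target lands at or past the end of the current
one, and returns a pointer to the new position.  A user-space rendition of
the same walk over a toy two-buffer chain (names and types simplified):

    #include <assert.h>
    #include <stddef.h>

    struct buf {
    	struct buf	*next;
    	int		 len;
    	char		 data[64];
    };

    static void *
    advance(struct buf **pb, int *poffset, int len)
    {
    	struct buf *b = *pb;
    	int offset = *poffset;

    	for (;;) {
    		if (offset + len < b->len) {
    			offset += len;
    			break;
    		}
    		len -= b->len - offset;	/* consume rest of this buffer */
    		b = b->next;
    		offset = 0;
    		assert(b != NULL);
    	}
    	*poffset = offset;
    	*pb = b;
    	return (b->data + offset);
    }

    int
    main(void)
    {
    	struct buf b2 = { NULL, 20, "L3L4 payload........" };
    	struct buf b1 = { &b2, 14, "ETHERNET HDR.." };
    	struct buf *cur = &b1;
    	int offset = 0;

    	/* Skip the 14-byte Ethernet header; lands at start of b2. */
    	char *l3 = advance(&cur, &offset, 14);
    	assert(cur == &b2 && offset == 0 && l3[0] == 'L');
    	return (0);
    }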
 
-				/* Short of resources, suspend tx */
+/*
+ * Can deal with empty mbufs in the chain that have m_len = 0, but the chain
+ * must have at least one mbuf that's not empty.
+ */
+static inline int
+count_mbuf_nsegs(struct mbuf *m)
+{
+	vm_paddr_t lastb, next;
+	vm_offset_t va;
+	int len, nsegs;
 
-				m->m_nextpkt = next;
-				break;
-			}
+	MPASS(m != NULL);
 
-			/*
-			 * Unrecoverable error for this packet, throw it away
-			 * and move on to the next.  get_pkt_sgl may already
-			 * have freed m (it will be NULL in that case and the
-			 * m_freem here is still safe).
-			 */
+	nsegs = 0;
+	lastb = 0;
+	for (; m; m = m->m_next) {
 
-			m_freem(m);
+		len = m->m_len;
+		if (__predict_false(len == 0))
 			continue;
-		}
+		va = mtod(m, vm_offset_t);
+		next = pmap_kextract(va);
+		nsegs += sglist_count(m->m_data, len);
+		if (lastb + 1 == next)
+			nsegs--;
+		lastb = pmap_kextract(va + len - 1);
+	}
 
-		if (coalescing &&
-		    add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) {
+	MPASS(nsegs > 0);
+	return (nsegs);
+}
 
-			/* Successfully absorbed into txpkts */
+/*
+ * Analyze the mbuf to determine its tx needs.  The mbuf passed in may change:
+ * a) caller can assume it's been freed if this function returns with an error.
+ * b) it may get defragged if the gather list is too long for the hardware.
+ */
+int
+parse_pkt(struct adapter *sc, struct mbuf **mp)
+{
+	struct mbuf *m0 = *mp, *m;
+	int rc, nsegs, defragged = 0, offset;
+	struct ether_header *eh;
+	void *l3hdr;
+#if defined(INET) || defined(INET6)
+	struct tcphdr *tcp;
+#endif
+	uint16_t eh_type;
 
-			write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl);
-			goto doorbell;
+	M_ASSERTPKTHDR(m0);
+	if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) {
+		rc = EINVAL;
+fail:
+		m_freem(m0);
+		*mp = NULL;
+		return (rc);
+	}
+restart:
+	/*
+	 * First count the number of gather list segments in the payload.
+	 * Defrag the mbuf if nsegs exceeds the hardware limit.
+	 */
+	M_ASSERTPKTHDR(m0);
+	MPASS(m0->m_pkthdr.len > 0);
+	nsegs = count_mbuf_nsegs(m0);
+	if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) {
+		if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) {
+			rc = EFBIG;
+			goto fail;
 		}
+		*mp = m0 = m;	/* update caller's copy after defrag */
+		goto restart;
+	}
 
-		/*
-		 * We weren't coalescing to begin with, or current frame could
-		 * not be coalesced (add_to_txpkts flushes txpkts if a frame
-		 * given to it can't be coalesced).  Either way there should be
-		 * nothing in txpkts.
-		 */
-		KASSERT(txpkts.npkt == 0,
-		    ("%s: txpkts not empty: %d", __func__, txpkts.npkt));
+	if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) {
+		m0 = m_pullup(m0, m0->m_pkthdr.len);
+		if (m0 == NULL) {
+			/* Should have left well enough alone. */
+			rc = EFBIG;
+			goto fail;
+		}
+		*mp = m0;	/* update caller's copy after pullup */
+		goto restart;
+	}
+	set_mbuf_nsegs(m0, nsegs);
+	if (sc->flags & IS_VF)
+		set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0)));
+	else
+		set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0)));
 
-		/* We're sending out individual packets now */
-		coalescing = 0;
+	if (!needs_tso(m0) &&
+	    !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0))))
+		return (0);
 
-		if (eq->avail < 8)
-			reclaim_tx_descs(txq, 0, 8);
-		rc = write_txpkt_wr(pi, txq, m, &sgl);
-		if (rc != 0) {
+	m = m0;
+	eh = mtod(m, struct ether_header *);
+	eh_type = ntohs(eh->ether_type);
+	if (eh_type == ETHERTYPE_VLAN) {
+		struct ether_vlan_header *evh = (void *)eh;
 
-			/* Short of hardware descriptors, suspend tx */
+		eh_type = ntohs(evh->evl_proto);
+		m0->m_pkthdr.l2hlen = sizeof(*evh);
+	} else
+		m0->m_pkthdr.l2hlen = sizeof(*eh);
 
-			/*
-			 * This is an unlikely but expensive failure.  We've
-			 * done all the hard work (DMA mappings etc.) and now we
-			 * can't send out the packet.  What's worse, we have to
-			 * spend even more time freeing up everything in sgl.
-			 */
-			txq->no_desc++;
-			free_pkt_sgl(txq, &sgl);
+	offset = 0;
+	l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen);
 
-			m->m_nextpkt = next;
-			break;
-		}
+	switch (eh_type) {
+#ifdef INET6
+	case ETHERTYPE_IPV6:
+	{
+		struct ip6_hdr *ip6 = l3hdr;
 
-		ETHER_BPF_MTAP(ifp, m);
-		if (sgl.nsegs == 0)
-			m_freem(m);
-doorbell:
-		if (eq->pending >= 64)
-		    ring_eq_db(sc, eq);
+		MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP);
 
-		can_reclaim = reclaimable(eq);
-		if (can_reclaim >= 32)
-			reclaim_tx_descs(txq, can_reclaim, 64);
+		m0->m_pkthdr.l3hlen = sizeof(*ip6);
+		break;
 	}
+#endif
+#ifdef INET
+	case ETHERTYPE_IP:
+	{
+		struct ip *ip = l3hdr;
 
-	if (txpkts.npkt > 0)
-		write_txpkts_wr(txq, &txpkts);
+		m0->m_pkthdr.l3hlen = ip->ip_hl * 4;
+		break;
+	}
+#endif
+	default:
+		panic("%s: ethertype 0x%04x unknown.  if_cxgbe must be compiled"
+		    " with the same INET/INET6 options as the kernel.",
+		    __func__, eh_type);
+	}
 
-	/*
-	 * m not NULL means there was an error but we haven't thrown it away.
-	 * This can happen when we're short of tx descriptors (no_desc) or maybe
-	 * even DMA maps (no_dmamap).  Either way, a credit flush and reclaim
-	 * will get things going again.
-	 */
-	if (m && !(eq->flags & EQ_CRFLUSHED)) {
-		struct tx_sdesc *txsd = &txq->sdesc[eq->pidx];
+#if defined(INET) || defined(INET6)
+	if (needs_tso(m0)) {
+		tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen);
+		m0->m_pkthdr.l4hlen = tcp->th_off * 4;
+	}
+#endif
+	MPASS(m0 == *mp);
+	return (0);
+}
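
The restart loop above is a bounded retry: count the gather-list segments,
and if the chain is too long for the hardware's SGL limit, linearize it once
with m_defrag() and count again; a second overflow fails with EFBIG.  The
control flow, sketched standalone with hypothetical helpers in place of the
mbuf machinery:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_SEGS	8	/* stand-in for TX_SGL_SEGS */

    static int count_segs(int chain_len) { return (chain_len); }
    static int defrag(int chain_len) { return (chain_len > 2 ? 2 : chain_len); }

    static int
    parse(int chain_len)
    {
    	int defragged = 0, nsegs;

    restart:
    	nsegs = count_segs(chain_len);
    	if (nsegs > MAX_SEGS) {
    		if (defragged++ > 0)
    			return (EFBIG);	/* already tried once; give up */
    		chain_len = defrag(chain_len);
    		goto restart;
    	}
    	return (0);
    }

    int
    main(void)
    {
    	printf("12-segment chain: %s\n", parse(12) ? "EFBIG" : "ok");
    	return (0);
    }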
 
-		/*
-		 * If EQ_CRFLUSHED is not set then we know we have at least one
-		 * available descriptor because any WR that reduces eq->avail to
-		 * 0 also sets EQ_CRFLUSHED.
-		 */
-		KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__));
+void *
+start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
+{
+	struct sge_eq *eq = &wrq->eq;
+	struct adapter *sc = wrq->adapter;
+	int ndesc, available;
+	struct wrqe *wr;
+	void *w;
 
-		txsd->desc_used = 1;
-		txsd->credits = 0;
-		write_eqflush_wr(eq);
+	MPASS(len16 > 0);
+	ndesc = howmany(len16, EQ_ESIZE / 16);
+	MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);
+
+	EQ_LOCK(eq);
+
+	if (!STAILQ_EMPTY(&wrq->wr_list))
+		drain_wrq_wr_list(sc, wrq);
+
+	if (!STAILQ_EMPTY(&wrq->wr_list)) {
+slowpath:
+		EQ_UNLOCK(eq);
+		wr = alloc_wrqe(len16 * 16, wrq);
+		if (__predict_false(wr == NULL))
+			return (NULL);
+		cookie->pidx = -1;
+		cookie->ndesc = ndesc;
+		return (&wr->wr);
 	}
-	txq->m = m;
 
-	if (eq->pending)
-		ring_eq_db(sc, eq);
+	eq->cidx = read_hw_cidx(eq);
+	if (eq->pidx == eq->cidx)
+		available = eq->sidx - 1;
+	else
+		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
+	if (available < ndesc)
+		goto slowpath;
 
-	reclaim_tx_descs(txq, 0, 128);
+	cookie->pidx = eq->pidx;
+	cookie->ndesc = ndesc;
+	TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link);
 
-	if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0)
-		callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq);
+	w = &eq->desc[eq->pidx];
+	IDXINCR(eq->pidx, ndesc, eq->sidx);
+	if (__predict_false(cookie->pidx + ndesc > eq->sidx)) {
+		w = &wrq->ss[0];
+		wrq->ss_pidx = cookie->pidx;
+		wrq->ss_len = len16 * 16;
+	}
 
-	return (0);
+	EQ_UNLOCK(eq);
+
+	return (w);
 }
 
 void
-t4_update_fl_bufsize(struct ifnet *ifp)
+commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
 {
-	struct port_info *pi = ifp->if_softc;
-	struct sge_rxq *rxq;
-	struct sge_fl *fl;
-	int i, bufsize = mtu_to_bufsize(ifp->if_mtu);
+	struct sge_eq *eq = &wrq->eq;
+	struct adapter *sc = wrq->adapter;
+	int ndesc, pidx;
+	struct wrq_cookie *prev, *next;
 
-	for_each_rxq(pi, i, rxq) {
-		fl = &rxq->fl;
+	if (cookie->pidx == -1) {
+		struct wrqe *wr = __containerof(w, struct wrqe, wr);
 
-		FL_LOCK(fl);
-		set_fl_tag_idx(fl, bufsize);
-		FL_UNLOCK(fl);
+		t4_wrq_tx(sc, wr);
+		return;
 	}
+
+	ndesc = cookie->ndesc;	/* Can be more than SGE_MAX_WR_NDESC here. */
+	pidx = cookie->pidx;
+	MPASS(pidx >= 0 && pidx < eq->sidx);
+	if (__predict_false(w == &wrq->ss[0])) {
+		int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;
+
+		MPASS(wrq->ss_len > n);	/* WR had better wrap around. */
+		bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n);
+		bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n);
+		wrq->tx_wrs_ss++;
+	} else
+		wrq->tx_wrs_direct++;
+
+	EQ_LOCK(eq);
+	prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link);
+	next = TAILQ_NEXT(cookie, link);
+	if (prev == NULL) {
+		MPASS(pidx == eq->dbidx);
+		if (next == NULL || ndesc >= 16)
+			ring_eq_db(wrq->adapter, eq, ndesc);
+		else {
+			MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
+			next->pidx = pidx;
+			next->ndesc += ndesc;
+		}
+	} else {
+		MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc);
+		prev->ndesc += ndesc;
+	}
+	TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link);
+
+	if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
+		drain_wrq_wr_list(sc, wrq);
+
+#ifdef INVARIANTS
+	if (TAILQ_EMPTY(&wrq->incomplete_wrs)) {
+		/* Doorbell must have caught up to the pidx. */
+		MPASS(wrq->eq.pidx == wrq->eq.dbidx);
+	}
+#endif
+	EQ_UNLOCK(eq);
 }
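
start_wrq_wr() and commit_wrq_wr() form a two-phase API: reserve descriptor
space (falling back to an allocated wrqe when the ring is backed up), let
the caller fill the work request in place, then commit, which settles the
doorbell bookkeeping even when WRs commit out of order.  A toy in-order
model of the two phases; the real commit additionally merges out-of-order
commits through the incomplete_wrs list:

    #include <assert.h>
    #include <stdio.h>

    #define SIDX	16	/* ring size in descriptors */

    struct ring {
    	unsigned pidx;		/* next free descriptor */
    	unsigned dbidx;		/* doorbell rung up to here */
    	char	 desc[SIDX][64];
    };

    struct cookie { unsigned pidx, ndesc; };

    /* Phase 1: reserve ndesc descriptors, hand back a buffer to fill. */
    static void *
    start_wr(struct ring *r, unsigned ndesc, struct cookie *c)
    {
    	c->pidx = r->pidx;
    	c->ndesc = ndesc;
    	r->pidx = (r->pidx + ndesc) % SIDX;
    	return (r->desc[c->pidx]);
    }

    /* Phase 2: the WR has been filled in; tell the hardware about it. */
    static void
    commit_wr(struct ring *r, struct cookie *c)
    {
    	assert(c->pidx == r->dbidx);	/* toy model: in-order only */
    	r->dbidx = (c->pidx + c->ndesc) % SIDX;
    	printf("doorbell: %u new descriptors\n", c->ndesc);
    }

    int
    main(void)
    {
    	struct ring r = { 0, 0, { { 0 } } };
    	struct cookie c;
    	char *w = start_wr(&r, 2, &c);

    	w[0] = 1;		/* caller fills the WR in place */
    	commit_wr(&r, &c);
    	return (0);
    }

Kernel callers follow the same shape: w = start_wrq_wr(wrq, howmany(len, 16),
&cookie), check for NULL, fill *w, then commit_wrq_wr(wrq, w, &cookie).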
 
-int
-can_resume_tx(struct sge_eq *eq)
+static u_int
+can_resume_eth_tx(struct mp_ring *r)
 {
-	return (reclaimable(eq) >= tx_resume_threshold(eq));
+	struct sge_eq *eq = r->cookie;
+
+	return (total_available_tx_desc(eq) > eq->sidx / 8);
 }
 
+static inline int
+cannot_use_txpkts(struct mbuf *m)
+{
+	/* maybe put a GL limit too, to avoid silliness? */
+
+	return (needs_tso(m));
+}
+
+static inline int
+discard_tx(struct sge_eq *eq)
+{
+
+	return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED);
+}
+
+/*
+ * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
+ * be consumed.  Return the actual number consumed.  0 indicates a stall.
+ */
+static u_int
+eth_tx(struct mp_ring *r, u_int cidx, u_int pidx)
+{
+	struct sge_txq *txq = r->cookie;
+	struct sge_eq *eq = &txq->eq;
+	struct ifnet *ifp = txq->ifp;
+	struct vi_info *vi = ifp->if_softc;
+	struct port_info *pi = vi->pi;
+	struct adapter *sc = pi->adapter;
+	u_int total, remaining;		/* # of packets */
+	u_int available, dbdiff;	/* # of hardware descriptors */
+	u_int n, next_cidx;
+	struct mbuf *m0, *tail;
+	struct txpkts txp;
+	struct fw_eth_tx_pkts_wr *wr;	/* any fw WR struct will do */
+
+	remaining = IDXDIFF(pidx, cidx, r->size);
+	MPASS(remaining > 0);	/* Must not be called without work to do. */
+	total = 0;
+
+	TXQ_LOCK(txq);
+	if (__predict_false(discard_tx(eq))) {
+		while (cidx != pidx) {
+			m0 = r->items[cidx];
+			m_freem(m0);
+			if (++cidx == r->size)
+				cidx = 0;
+		}
+		reclaim_tx_descs(txq, 2048);
+		total = remaining;
+		goto done;
+	}
+
+	/* How many hardware descriptors are readily available? */
+	if (eq->pidx == eq->cidx)
+		available = eq->sidx - 1;
+	else
+		available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1;
+	dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx);
+
+	while (remaining > 0) {
+
+		m0 = r->items[cidx];
+		M_ASSERTPKTHDR(m0);
+		MPASS(m0->m_nextpkt == NULL);
+
+		if (available < SGE_MAX_WR_NDESC) {
+			available += reclaim_tx_descs(txq, 64);
+			if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16))
+				break;	/* out of descriptors */
+		}
+
+		next_cidx = cidx + 1;
+		if (__predict_false(next_cidx == r->size))
+			next_cidx = 0;
+
+		wr = (void *)&eq->desc[eq->pidx];
+		if (sc->flags & IS_VF) {
+			total++;
+			remaining--;
+			ETHER_BPF_MTAP(ifp, m0);
+			n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0,
+			    available);
+		} else if (remaining > 1 &&
+		    try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) {
+
+			/* pkts at cidx, next_cidx should both be in txp. */
+			MPASS(txp.npkt == 2);
+			tail = r->items[next_cidx];
+			MPASS(tail->m_nextpkt == NULL);
+			ETHER_BPF_MTAP(ifp, m0);
+			ETHER_BPF_MTAP(ifp, tail);
+			m0->m_nextpkt = tail;
+
+			if (__predict_false(++next_cidx == r->size))
+				next_cidx = 0;
+
+			while (next_cidx != pidx) {
+				if (add_to_txpkts(r->items[next_cidx], &txp,
+				    available) != 0)
+					break;
+				tail->m_nextpkt = r->items[next_cidx];
+				tail = tail->m_nextpkt;
+				ETHER_BPF_MTAP(ifp, tail);
+				if (__predict_false(++next_cidx == r->size))
+					next_cidx = 0;
+			}
+
+			n = write_txpkts_wr(txq, wr, m0, &txp, available);
+			total += txp.npkt;
+			remaining -= txp.npkt;
+		} else {
+			total++;
+			remaining--;
+			ETHER_BPF_MTAP(ifp, m0);
+			n = write_txpkt_wr(txq, (void *)wr, m0, available);
+		}
+		MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC);
+
+		available -= n;
+		dbdiff += n;
+		IDXINCR(eq->pidx, n, eq->sidx);
+
+		if (total_available_tx_desc(eq) < eq->sidx / 4 &&
+		    atomic_cmpset_int(&eq->equiq, 0, 1)) {
+			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ |
+			    F_FW_WR_EQUEQ);
+			eq->equeqidx = eq->pidx;
+		} else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) {
+			wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
+			eq->equeqidx = eq->pidx;
+		}
+
+		if (dbdiff >= 16 && remaining >= 4) {
+			ring_eq_db(sc, eq, dbdiff);
+			available += reclaim_tx_descs(txq, 4 * dbdiff);
+			dbdiff = 0;
+		}
+
+		cidx = next_cidx;
+	}
+	if (dbdiff != 0) {
+		ring_eq_db(sc, eq, dbdiff);
+		reclaim_tx_descs(txq, 32);
+	}
+done:
+	TXQ_UNLOCK(txq);
+
+	return (total);
+}
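
eth_tx() is the consumer callback handed to mp_ring_alloc() in alloc_txq()
below: the ring invokes it with a [cidx, pidx) window of enqueued packets,
and the return value reports how many were consumed; anything short of the
window signals a stall, resumed later via can_resume_eth_tx().  A toy model
of that contract (user space, hypothetical names):

    #include <stdio.h>

    #define RING_SIZE	8

    /* Consume items in [cidx, pidx) on a circular buffer; return how
     * many were actually handled. */
    static unsigned
    consume(int *items, unsigned cidx, unsigned pidx, unsigned budget)
    {
    	unsigned done = 0;

    	while (cidx != pidx && done < budget) {
    		printf("tx item %d\n", items[cidx]);
    		if (++cidx == RING_SIZE)
    			cidx = 0;
    		done++;
    	}
    	return (done);	/* 0 would mean "stalled, call me back later" */
    }

    int
    main(void)
    {
    	int items[RING_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };

    	/* Window wraps: consumer at 6, producer at 2 -> items 6,7,0,1;
    	 * a budget of 3 mimics running low on hardware descriptors. */
    	printf("consumed %u of 4\n", consume(items, 6, 2, 3));
    	return (0);
    }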
+
 static inline void
 init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx,
-    int qsize, int esize)
+    int qsize)
 {
+
 	KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS,
 	    ("%s: bad tmr_idx %d", __func__, tmr_idx));
 	KASSERT(pktc_idx < SGE_NCOUNTERS,	/* -ve is ok, means don't use */
@@ -1487,33 +2562,36 @@
 		iq->intr_params |= F_QINTR_CNT_EN;
 		iq->intr_pktc_idx = pktc_idx;
 	}
-	iq->qsize = roundup(qsize, 16);		/* See FW_IQ_CMD/iqsize */
-	iq->esize = max(esize, 16);		/* See FW_IQ_CMD/iqesize */
+	iq->qsize = roundup2(qsize, 16);	/* See FW_IQ_CMD/iqsize */
+	iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE;
 }
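
The reworked init_* helpers no longer track a separate capacity; they derive
sidx, the index where the status page begins, by subtracting the descriptors
the status page occupies from the nominal queue size.  Worked numbers,
assuming a 64-byte status page (spg_len really comes from sc->params.sge at
attach time):

    #include <stdio.h>

    #define IQ_ESIZE	64	/* bytes per ingress queue entry */
    #define EQ_ESIZE	64	/* bytes per egress queue descriptor */

    int
    main(void)
    {
    	int spg_len = 64;	/* assumed; read from the chip in practice */
    	int iq_qsize = 1024, eq_qsize = 1024;

    	/* The tail of each ring is the status page, not usable slots. */
    	printf("iq sidx = %d\n", iq_qsize - spg_len / IQ_ESIZE);  /* 1023 */
    	printf("eq sidx = %d\n", eq_qsize - spg_len / EQ_ESIZE);  /* 1023 */
    	return (0);
    }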
 
 static inline void
-init_fl(struct sge_fl *fl, int qsize, int bufsize, char *name)
+init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name)
 {
+
 	fl->qsize = qsize;
+	fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
 	strlcpy(fl->lockname, name, sizeof(fl->lockname));
-	set_fl_tag_idx(fl, bufsize);
+	if (sc->flags & BUF_PACKING_OK &&
+	    ((!is_t4(sc) && buffer_packing) ||	/* T5+: enabled unless 0 */
+	    (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */
+		fl->flags |= FL_BUF_PACKING;
+	find_best_refill_source(sc, fl, maxp);
+	find_safe_refill_source(sc, fl);
 }
 
 static inline void
-init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan,
-    uint16_t iqid, char *name)
+init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
+    uint8_t tx_chan, uint16_t iqid, char *name)
 {
-	KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan));
 	KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype));
 
 	eq->flags = eqtype & EQ_TYPEMASK;
 	eq->tx_chan = tx_chan;
 	eq->iqid = iqid;
-	eq->qsize = qsize;
+	eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
 	strlcpy(eq->lockname, name, sizeof(eq->lockname));
-
-	TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq);
-	callout_init(&eq->tx_callout, CALLOUT_MPSAFE);
 }
 
 static int
@@ -1575,16 +2653,18 @@
  * the abs_id of the ingress queue to which its interrupts should be forwarded.
  */
 static int
-alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl,
+alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
     int intr_idx, int cong)
 {
 	int rc, i, cntxt_id;
 	size_t len;
 	struct fw_iq_cmd c;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = iq->adapter;
+	struct sge_params *sp = &sc->params.sge;
 	__be32 v = 0;
 
-	len = iq->qsize * iq->esize;
+	len = iq->qsize * IQ_ESIZE;
 	rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba,
 	    (void **)&iq->desc);
 	if (rc != 0)
@@ -1611,12 +2691,12 @@
 
 	c.type_to_iqandstindex = htobe32(v |
 	    V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
-	    V_FW_IQ_CMD_VIID(pi->viid) |
+	    V_FW_IQ_CMD_VIID(vi->viid) |
 	    V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
 	c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
 	    F_FW_IQ_CMD_IQGTSMODE |
 	    V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
-	    V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4));
+	    V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
 	c.iqsize = htobe16(iq->qsize);
 	c.iqaddr = htobe64(iq->ba);
 	if (cong >= 0)
@@ -1625,25 +2705,7 @@
 	if (fl) {
 		mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF);
 
-		for (i = 0; i < FL_BUF_SIZES; i++) {
-
-			/*
-			 * A freelist buffer must be 16 byte aligned as the SGE
-			 * uses the low 4 bits of the bus addr to figure out the
-			 * buffer size.
-			 */
-			rc = bus_dma_tag_create(sc->dmat, 16, 0,
-			    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
-			    FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW,
-			    NULL, NULL, &fl->tag[i]);
-			if (rc != 0) {
-				device_printf(sc->dev,
-				    "failed to create fl DMA tag[%d]: %d\n",
-				    i, rc);
-				return (rc);
-			}
-		}
-		len = fl->qsize * RX_FL_ESIZE;
+		len = fl->qsize * EQ_ESIZE;
 		rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map,
 		    &fl->ba, (void **)&fl->desc);
 		if (rc)
@@ -1650,10 +2712,7 @@
 			return (rc);
 
 		/* Allocate space for one software descriptor per buffer. */
-		fl->cap = (fl->qsize - spg_len / RX_FL_ESIZE) * 8;
-		FL_LOCK(fl);
 		rc = alloc_fl_sdesc(fl);
-		FL_UNLOCK(fl);
 		if (rc != 0) {
 			device_printf(sc->dev,
 			    "failed to setup fl software descriptors: %d\n",
@@ -1660,13 +2719,23 @@
 			    rc);
 			return (rc);
 		}
-		fl->needed = fl->cap;
-		fl->lowat = roundup(sc->sge.fl_starve_threshold, 8);
 
+		if (fl->flags & FL_BUF_PACKING) {
+			fl->lowat = roundup2(sp->fl_starve_threshold2, 8);
+			fl->buf_boundary = sp->pack_boundary;
+		} else {
+			fl->lowat = roundup2(sp->fl_starve_threshold, 8);
+			fl->buf_boundary = 16;
+		}
+		if (fl_pad && fl->buf_boundary < sp->pad_boundary)
+			fl->buf_boundary = sp->pad_boundary;
+
 		c.iqns_to_fl0congen |=
 		    htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
 			F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
-			F_FW_IQ_CMD_FL0PADEN);
+			(fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) |
+			(fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN :
+			    0));
 		if (cong >= 0) {
 			c.iqns_to_fl0congen |=
 				htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
@@ -1674,8 +2743,10 @@
 				    F_FW_IQ_CMD_FL0CONGEN);
 		}
 		c.fl0dcaen_to_fl0cidxfthresh =
-		    htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
-			V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B));
+		    htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ?
+			X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) |
+			V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ?
+			X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
 		c.fl0size = htobe16(fl->qsize);
 		c.fl0addr = htobe64(fl->ba);
 	}
@@ -1687,9 +2758,8 @@
 		return (rc);
 	}
 
-	iq->cdesc = iq->desc;
 	iq->cidx = 0;
-	iq->gen = 1;
+	iq->gen = F_RSPD_GEN;
 	iq->intr_next = iq->intr_params;
 	iq->cntxt_id = be16toh(c.iqid);
 	iq->abs_id = be16toh(c.physiqid);
@@ -1703,6 +2773,9 @@
 	sc->sge.iqmap[cntxt_id] = iq;
 
 	if (fl) {
+		u_int qid;
+
+		iq->flags |= IQ_HAS_FL;
 		fl->cntxt_id = be16toh(c.fl0id);
 		fl->pidx = fl->cidx = 0;
 
@@ -1713,17 +2786,57 @@
 		}
 		sc->sge.eqmap[cntxt_id] = (void *)fl;
 
+		qid = fl->cntxt_id;
+		if (isset(&sc->doorbells, DOORBELL_UDB)) {
+			uint32_t s_qpp = sc->params.sge.eq_s_qpp;
+			uint32_t mask = (1 << s_qpp) - 1;
+			volatile uint8_t *udb;
+
+			udb = sc->udbs_base + UDBS_DB_OFFSET;
+			udb += (qid >> s_qpp) << PAGE_SHIFT;
+			qid &= mask;
+			if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {
+				udb += qid << UDBS_SEG_SHIFT;
+				qid = 0;
+			}
+			fl->udb = (volatile void *)udb;
+		}
+		fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db;
+
 		FL_LOCK(fl);
 		/* Enough to make sure the SGE doesn't think it's starved */
 		refill_fl(sc, fl, fl->lowat);
 		FL_UNLOCK(fl);
+	}
 
-		iq->flags |= IQ_HAS_FL;
+	if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) {
+		uint32_t param, val;
+
+		param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+		    V_FW_PARAMS_PARAM_YZ(iq->cntxt_id);
+		if (cong == 0)
+			val = 1 << 19;
+		else {
+			val = 2 << 19;
+			for (i = 0; i < 4; i++) {
+				if (cong & (1 << i))
+					val |= 1 << (i << 2);
+			}
+		}
+
+		rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
+		if (rc != 0) {
+			/* report error but carry on */
+			device_printf(sc->dev,
+			    "failed to set congestion manager context for "
+			    "ingress queue %d: %d\n", iq->cntxt_id, rc);
+		}
 	}
 
 	/* Enable IQ interrupts */
 	atomic_store_rel_int(&iq->state, IQS_IDLE);
-	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
+	t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
 	    V_INGRESSQID(iq->cntxt_id));
 
 	return (0);
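
The congestion-manager value written above packs a mode into the high bits
(1 << 19 when cong == 0, 2 << 19 for per-channel mode) plus one flag per
congested channel at 4-bit strides.  A sketch of that packing as I read the
code; not quoted from firmware documentation:

    #include <stdio.h>

    static unsigned
    conm_val(int cong)
    {
    	unsigned val;

    	if (cong == 0)
    		return (1U << 19);	/* mode 1: no channels flagged */
    	val = 2U << 19;			/* mode 2: per-channel flags */
    	for (int i = 0; i < 4; i++) {
    		if (cong & (1 << i))
    			val |= 1U << (i << 2);	/* channel i -> bit 4*i */
    	}
    	return (val);
    }

    int
    main(void)
    {
    	/* Channels 0 and 2 congested: bits 0 and 8 set under mode 2. */
    	printf("val = 0x%x\n", conm_val(0x5));	/* 0x100101 */
    	return (0);
    }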
@@ -1730,9 +2843,9 @@
 }
 
 static int
-free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl)
+free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
 {
-	int i, rc;
+	int rc;
 	struct adapter *sc = iq->adapter;
 	device_t dev;
 
@@ -1739,7 +2852,7 @@
 	if (sc == NULL)
 		return (0);	/* nothing to do */
 
-	dev = pi ? pi->dev : sc->dev;
+	dev = vi ? vi->dev : sc->dev;
 
 	if (iq->flags & IQ_ALLOCATED) {
 		rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0,
@@ -1761,20 +2874,12 @@
 		free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba,
 		    fl->desc);
 
-		if (fl->sdesc) {
-			FL_LOCK(fl);
-			free_fl_sdesc(fl);
-			FL_UNLOCK(fl);
-		}
+		if (fl->sdesc)
+			free_fl_sdesc(sc, fl);
 
 		if (mtx_initialized(&fl->fl_lock))
 			mtx_destroy(&fl->fl_lock);
 
-		for (i = 0; i < FL_BUF_SIZES; i++) {
-			if (fl->tag[i])
-				bus_dma_tag_destroy(fl->tag[i]);
-		}
-
 		bzero(fl, sizeof(*fl));
 	}
 
@@ -1781,6 +2886,48 @@
 	return (0);
 }
 
+static void
+add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
+    struct sysctl_oid *oid, struct sge_fl *fl)
+{
+	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
+
+	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
+	    "freelist");
+	children = SYSCTL_CHILDREN(oid);
+
+	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
+	    &fl->ba, "bus address of descriptor ring");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
+	    fl->sidx * EQ_ESIZE + sc->params.sge.spg_len,
+	    "desc ring size in bytes");
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
+	    CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I",
+	    "SGE context id of the freelist");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL,
+	    fl_pad ? 1 : 0, "padding enabled");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL,
+	    fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx,
+	    0, "consumer index");
+	if (fl->flags & FL_BUF_PACKING) {
+		SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset",
+		    CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset");
+	}
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx,
+	    0, "producer index");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated",
+	    CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined",
+	    CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated",
+	    CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled",
+	    CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled",
+	    CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)");
+}
+
 static int
 alloc_fwq(struct adapter *sc)
 {
@@ -1789,10 +2936,16 @@
 	struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev);
 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
 
-	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE);
+	init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE);
 	fwq->flags |= IQ_INTR;	/* always */
-	intr_idx = sc->intr_count > 1 ? 1 : 0;
-	rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1);
+	if (sc->flags & IS_VF)
+		intr_idx = 0;
+	else {
+		intr_idx = sc->intr_count > 1 ? 1 : 0;
+		fwq->set_tcb_rpl = t4_filter_rpl;
+		fwq->l2t_write_rpl = do_l2t_write_rpl;
+	}
+	rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1);
 	if (rc != 0) {
 		device_printf(sc->dev,
 		    "failed to create firmware event queue: %d\n", rc);
@@ -1803,6 +2956,10 @@
 	    NULL, "firmware event queue");
 	children = SYSCTL_CHILDREN(oid);
 
+	SYSCTL_ADD_UAUTO(&sc->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
+	    &fwq->ba, "bus address of descriptor ring");
+	SYSCTL_ADD_INT(&sc->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
+	    fwq->qsize * IQ_ESIZE, "descriptor ring size in bytes");
 	SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id",
 	    CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I",
 	    "absolute id of the queue");
@@ -1835,7 +2992,7 @@
 	    NULL, "management queue");
 
 	snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev));
-	init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
+	init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan,
 	    sc->sge.fwq.cntxt_id, name);
 	rc = alloc_wrq(sc, NULL, mgmtq, oid);
 	if (rc != 0) {
@@ -1854,32 +3011,46 @@
 	return free_wrq(sc, &sc->sge.mgmtq);
 }
 
-static inline int
-tnl_cong(struct port_info *pi)
+int
+tnl_cong(struct port_info *pi, int drop)
 {
 
-	if (cong_drop == -1)
+	if (drop == -1)
 		return (-1);
-	else if (cong_drop == 1)
+	else if (drop == 1)
 		return (0);
 	else
-		return (1 << pi->tx_chan);
+		return (pi->rx_chan_map);
 }
 
 static int
-alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx,
+alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
     struct sysctl_oid *oid)
 {
 	int rc;
+	struct adapter *sc = vi->pi->adapter;
 	struct sysctl_oid_list *children;
 	char name[16];
 
-	rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi));
+	rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx,
+	    tnl_cong(vi->pi, cong_drop));
 	if (rc != 0)
 		return (rc);
 
+	if (idx == 0)
+		sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
+	else
+		KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
+		    ("iq_base mismatch"));
+	KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
+	    ("PF with non-zero iq_base"));
+
+	/*
+	 * The freelist is just barely above the starvation threshold right now;
+	 * fill it up a bit more.
+	 */
 	FL_LOCK(&rxq->fl);
-	refill_fl(pi->adapter, &rxq->fl, rxq->fl.needed / 8);
+	refill_fl(sc, &rxq->fl, 128);
 	FL_UNLOCK(&rxq->fl);
 
 #if defined(INET) || defined(INET6)
@@ -1886,59 +3057,52 @@
 	rc = tcp_lro_init(&rxq->lro);
 	if (rc != 0)
 		return (rc);
-	rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */
+	rxq->lro.ifp = vi->ifp; /* also indicates LRO init'ed */
 
-	if (pi->ifp->if_capenable & IFCAP_LRO)
+	if (vi->ifp->if_capenable & IFCAP_LRO)
 		rxq->iq.flags |= IQ_LRO_ENABLED;
 #endif
-	rxq->ifp = pi->ifp;
+	rxq->ifp = vi->ifp;
 
 	children = SYSCTL_CHILDREN(oid);
 
 	snprintf(name, sizeof(name), "%d", idx);
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
 	    NULL, "rx queue");
 	children = SYSCTL_CHILDREN(oid);
 
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
+	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
+	    &rxq->iq.ba, "bus address of descriptor ring");
+	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
+	    rxq->iq.qsize * IQ_ESIZE, "descriptor ring size in bytes");
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id",
 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I",
 	    "absolute id of the queue");
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id",
 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I",
 	    "SGE context id of the queue");
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
 	    CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I",
 	    "consumer index");
 #if defined(INET) || defined(INET6)
-	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
+	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD,
 	    &rxq->lro.lro_queued, 0, NULL);
-	SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
+	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD,
 	    &rxq->lro.lro_flushed, 0, NULL);
 #endif
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD,
 	    &rxq->rxcsum, "# of times hardware assisted with checksum");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction",
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction",
 	    CTLFLAG_RD, &rxq->vlan_extraction,
 	    "# of times hardware extracted 802.1Q tag");
 
-	children = SYSCTL_CHILDREN(oid);
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
-	    NULL, "freelist");
-	children = SYSCTL_CHILDREN(oid);
+	add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl);
 
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
-	    CTLTYPE_INT | CTLFLAG_RD, &rxq->fl.cntxt_id, 0, sysctl_uint16, "I",
-	    "SGE context id of the queue");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
-	    &rxq->fl.cidx, 0, "consumer index");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
-	    &rxq->fl.pidx, 0, "producer index");
-
 	return (rc);
 }
 
 static int
-free_rxq(struct port_info *pi, struct sge_rxq *rxq)
+free_rxq(struct vi_info *vi, struct sge_rxq *rxq)
 {
 	int rc;
 
@@ -1949,7 +3113,7 @@
 	}
 #endif
 
-	rc = free_iq_fl(pi, &rxq->iq, &rxq->fl);
+	rc = free_iq_fl(vi, &rxq->iq, &rxq->fl);
 	if (rc == 0)
 		bzero(rxq, sizeof(*rxq));
 
@@ -1958,15 +3122,16 @@
 
 #ifdef TCP_OFFLOAD
 static int
-alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq,
+alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq,
     int intr_idx, int idx, struct sysctl_oid *oid)
 {
+	struct port_info *pi = vi->pi;
 	int rc;
 	struct sysctl_oid_list *children;
 	char name[16];
 
-	rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
-	    1 << pi->tx_chan);
+	rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx,
+	    pi->rx_chan_map);
 	if (rc != 0)
 		return (rc);
 
@@ -1973,47 +3138,180 @@
 	children = SYSCTL_CHILDREN(oid);
 
 	snprintf(name, sizeof(name), "%d", idx);
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
 	    NULL, "rx queue");
 	children = SYSCTL_CHILDREN(oid);
 
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id",
+	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
+	    &ofld_rxq->iq.ba, "bus address of descriptor ring");
+	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
+	    ofld_rxq->iq.qsize * IQ_ESIZE, "descriptor ring size in bytes");
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "abs_id",
 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16,
 	    "I", "absolute id of the queue");
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cntxt_id",
 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16,
 	    "I", "SGE context id of the queue");
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
 	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I",
 	    "consumer index");
 
+	add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl);
+
+	return (rc);
+}
+
+static int
+free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq)
+{
+	int rc;
+
+	rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl);
+	if (rc == 0)
+		bzero(ofld_rxq, sizeof(*ofld_rxq));
+
+	return (rc);
+}
+#endif
+
+#ifdef DEV_NETMAP
+static int
+alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx,
+    int idx, struct sysctl_oid *oid)
+{
+	int rc;
+	struct sysctl_oid_list *children;
+	struct sysctl_ctx_list *ctx;
+	char name[16];
+	size_t len;
+	struct adapter *sc = vi->pi->adapter;
+	struct netmap_adapter *na = NA(vi->ifp);
+
+	MPASS(na != NULL);
+
+	len = vi->qsize_rxq * IQ_ESIZE;
+	rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map,
+	    &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc);
+	if (rc != 0)
+		return (rc);
+
+	len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len;
+	rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map,
+	    &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc);
+	if (rc != 0)
+		return (rc);
+
+	nm_rxq->vi = vi;
+	nm_rxq->nid = idx;
+	nm_rxq->iq_cidx = 0;
+	nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE;
+	nm_rxq->iq_gen = F_RSPD_GEN;
+	nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0;
+	nm_rxq->fl_sidx = na->num_rx_desc;
+	nm_rxq->intr_idx = intr_idx;
+
+	ctx = &vi->ctx;
 	children = SYSCTL_CHILDREN(oid);
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "fl", CTLFLAG_RD,
-	    NULL, "freelist");
+
+	snprintf(name, sizeof(name), "%d", idx);
+	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL,
+	    "rx queue");
 	children = SYSCTL_CHILDREN(oid);
 
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id",
-	    CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->fl.cntxt_id, 0, sysctl_uint16,
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id",
+	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16,
+	    "I", "absolute id of the queue");
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
+	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16,
 	    "I", "SGE context id of the queue");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
-	    &ofld_rxq->fl.cidx, 0, "consumer index");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
-	    &ofld_rxq->fl.pidx, 0, "producer index");
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
+	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I",
+	    "consumer index");
 
+	children = SYSCTL_CHILDREN(oid);
+	oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL,
+	    "freelist");
+	children = SYSCTL_CHILDREN(oid);
+
+	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id",
+	    CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16,
+	    "I", "SGE context id of the freelist");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD,
+	    &nm_rxq->fl_cidx, 0, "consumer index");
+	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD,
+	    &nm_rxq->fl_pidx, 0, "producer index");
+
 	return (rc);
 }
 
+
 static int
-free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq)
+free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
 {
+	struct adapter *sc = vi->pi->adapter;
+
+	free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba,
+	    nm_rxq->iq_desc);
+	free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba,
+	    nm_rxq->fl_desc);
+
+	return (0);
+}
+
+static int
+alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx,
+    struct sysctl_oid *oid)
+{
 	int rc;
+	size_t len;
+	struct port_info *pi = vi->pi;
+	struct adapter *sc = pi->adapter;
+	struct netmap_adapter *na = NA(vi->ifp);
+	char name[16];
+	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
 
-	rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl);
-	if (rc == 0)
-		bzero(ofld_rxq, sizeof(*ofld_rxq));
+	len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len;
+	rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map,
+	    &nm_txq->ba, (void **)&nm_txq->desc);
+	if (rc)
+		return (rc);
 
+	nm_txq->pidx = nm_txq->cidx = 0;
+	nm_txq->sidx = na->num_tx_desc;
+	nm_txq->nid = idx;
+	nm_txq->iqidx = iqidx;
+	nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
+	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
+	    V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
+	    V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
+
+	snprintf(name, sizeof(name), "%d", idx);
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	    NULL, "netmap tx queue");
+	children = SYSCTL_CHILDREN(oid);
+
+	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
+	    &nm_txq->cntxt_id, 0, "SGE context id of the queue");
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
+	    CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I",
+	    "consumer index");
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
+	    CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I",
+	    "producer index");
+
 	return (rc);
 }
+
+static int
+free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
+{
+	struct adapter *sc = vi->pi->adapter;
+
+	free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba,
+	    nm_txq->desc);
+
+	return (0);
+}
 #endif
 
 static int
@@ -2021,6 +3319,7 @@
 {
 	int rc, cntxt_id;
 	struct fw_eq_ctrl_cmd c;
+	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
 
 	bzero(&c, sizeof(c));
 
@@ -2029,7 +3328,7 @@
 	    V_FW_EQ_CTRL_CMD_VFN(0));
 	c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
 	    F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
-	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */
+	c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
 	c.physeqid_pkd = htobe32(0);
 	c.fetchszm_to_iqid =
 	    htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
@@ -2039,7 +3338,7 @@
 	    htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
 		V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
 		V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
-		V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize));
+		V_FW_EQ_CTRL_CMD_EQSIZE(qsize));
 	c.eqaddr = htobe64(eq->ba);
 
 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
@@ -2061,10 +3360,11 @@
 }
 
 static int
-eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
 {
 	int rc, cntxt_id;
 	struct fw_eq_eth_cmd c;
+	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
 
 	bzero(&c, sizeof(c));
 
@@ -2073,20 +3373,20 @@
 	    V_FW_EQ_ETH_CMD_VFN(0));
 	c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
 	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
-	c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
+	c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
+	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
 	c.fetchszm_to_iqid =
-	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
+	    htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
 		V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
 		V_FW_EQ_ETH_CMD_IQID(eq->iqid));
 	c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
-		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
-		      V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
-		      V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize));
+	    V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+	    V_FW_EQ_ETH_CMD_EQSIZE(qsize));
 	c.eqaddr = htobe64(eq->ba);
 
 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
 	if (rc != 0) {
-		device_printf(pi->dev,
+		device_printf(vi->dev,
 		    "failed to create Ethernet egress queue: %d\n", rc);
 		return (rc);
 	}
@@ -2093,6 +3393,7 @@
 	eq->flags |= EQ_ALLOCATED;
 
 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
+	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
 	if (cntxt_id >= sc->sge.neq)
 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
@@ -2104,10 +3405,11 @@
 
 #ifdef TCP_OFFLOAD
 static int
-ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
 {
 	int rc, cntxt_id;
 	struct fw_eq_ofld_cmd c;
+	int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
 
 	bzero(&c, sizeof(c));
 
@@ -2117,19 +3419,18 @@
 	c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
 	    F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
 	c.fetchszm_to_iqid =
-		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
+		htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
 		    V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
 		    F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
 	c.dcaen_to_eqsize =
 	    htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
 		V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
-		V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) |
-		V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize));
+		V_FW_EQ_OFLD_CMD_EQSIZE(qsize));
 	c.eqaddr = htobe64(eq->ba);
 
 	rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
 	if (rc != 0) {
-		device_printf(pi->dev,
+		device_printf(vi->dev,
 		    "failed to create egress queue for TCP offload: %d\n", rc);
 		return (rc);
 	}
@@ -2147,23 +3448,23 @@
 #endif
 
 static int
-alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq)
+alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
 {
-	int rc;
+	int rc, qsize;
 	size_t len;
 
 	mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF);
 
-	len = eq->qsize * EQ_ESIZE;
+	qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+	len = qsize * EQ_ESIZE;
 	rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map,
 	    &eq->ba, (void **)&eq->desc);
 	if (rc)
 		return (rc);
 
-	eq->cap = eq->qsize - spg_len / EQ_ESIZE;
-	eq->spg = (void *)&eq->desc[eq->cap];
-	eq->avail = eq->cap - 1;	/* one less to avoid cidx = pidx */
 	eq->pidx = eq->cidx = 0;
+	eq->equeqidx = eq->dbidx = 0;
+	eq->doorbells = sc->doorbells;
 
 	switch (eq->flags & EQ_TYPEMASK) {
 	case EQ_CTRL:
@@ -2171,12 +3472,12 @@
 		break;
 
 	case EQ_ETH:
-		rc = eth_eq_alloc(sc, pi, eq);
+		rc = eth_eq_alloc(sc, vi, eq);
 		break;
 
 #ifdef TCP_OFFLOAD
 	case EQ_OFLD:
-		rc = ofld_eq_alloc(sc, pi, eq);
+		rc = ofld_eq_alloc(sc, vi, eq);
 		break;
 #endif
 
@@ -2186,12 +3487,29 @@
 	}
 	if (rc != 0) {
 		device_printf(sc->dev,
-		    "failed to allocate egress queue(%d): %d",
+		    "failed to allocate egress queue(%d): %d\n",
 		    eq->flags & EQ_TYPEMASK, rc);
 	}
 
-	eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus;
+	if (isset(&eq->doorbells, DOORBELL_UDB) ||
+	    isset(&eq->doorbells, DOORBELL_UDBWC) ||
+	    isset(&eq->doorbells, DOORBELL_WCWR)) {
+		uint32_t s_qpp = sc->params.sge.eq_s_qpp;
+		uint32_t mask = (1 << s_qpp) - 1;
+		volatile uint8_t *udb;
 
+		udb = sc->udbs_base + UDBS_DB_OFFSET;
+		udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT;	/* pg offset */
+		eq->udb_qid = eq->cntxt_id & mask;		/* id in page */
+		if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE)
+			clrbit(&eq->doorbells, DOORBELL_WCWR);
+		else {
+			udb += eq->udb_qid << UDBS_SEG_SHIFT;	/* seg offset */
+			eq->udb_qid = 0;
+		}
+		eq->udb = (volatile void *)udb;
+	}
+
 	return (rc);
 }
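
The user-doorbell (UDB) address computed in alloc_eq() above splits the
queue's context id: the high bits select a doorbell page, the low s_qpp bits
select a queue within that page, and when the queue's 128-byte segment lies
inside the page the offset is folded into the address so later doorbell
writes can use qid 0.  Worked numbers, under assumed 4 KB pages and 128-byte
segments:

    #include <stdio.h>

    #define PAGE_SHIFT	12	/* 4 KB doorbell pages (assumption) */
    #define PAGE_SIZE	(1 << PAGE_SHIFT)
    #define UDBS_SEG_SHIFT	7	/* 128-byte segments (assumption) */
    #define UDBS_SEG_SIZE	(1 << UDBS_SEG_SHIFT)

    int
    main(void)
    {
    	unsigned s_qpp = 3;	/* 2^3 = 8 queues per page, for example */
    	unsigned mask = (1 << s_qpp) - 1;
    	unsigned cntxt_id = 21;
    	unsigned long udb = 0;	/* offset from udbs_base + UDBS_DB_OFFSET */
    	unsigned qid;

    	udb += (unsigned long)(cntxt_id >> s_qpp) << PAGE_SHIFT; /* page 2 */
    	qid = cntxt_id & mask;					 /* queue 5 */
    	if (qid < PAGE_SIZE / UDBS_SEG_SIZE) {	/* 5 < 32: fits in page */
    		udb += qid << UDBS_SEG_SHIFT;	/* fold in 5 * 128 bytes */
    		qid = 0;		/* doorbell writes omit the qid */
    	}
    	printf("udb offset 0x%lx, qid %u\n", udb, qid);	/* 0x2280, 0 */
    	return (0);
    }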
 
@@ -2242,20 +3560,29 @@
 }
 
 static int
-alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq,
+alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq,
     struct sysctl_oid *oid)
 {
 	int rc;
-	struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx;
+	struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx;
 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
 
-	rc = alloc_eq(sc, pi, &wrq->eq);
+	rc = alloc_eq(sc, vi, &wrq->eq);
 	if (rc)
 		return (rc);
 
 	wrq->adapter = sc;
+	TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq);
+	TAILQ_INIT(&wrq->incomplete_wrs);
 	STAILQ_INIT(&wrq->wr_list);
+	wrq->nwr_pending = 0;
+	wrq->ndesc_needed = 0;
 
+	SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
+	    &wrq->eq.ba, "bus address of descriptor ring");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
+	    wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len,
+	    "desc ring size in bytes");
 	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
 	    &wrq->eq.cntxt_id, 0, "SGE context id of the queue");
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx",
@@ -2264,15 +3591,15 @@
 	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx",
 	    CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I",
 	    "producer index");
-	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD,
-	    &wrq->tx_wrs, "# of work requests");
-	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
-	    &wrq->no_desc, 0,
-	    "# of times queue ran out of hardware descriptors");
-	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
-	    &wrq->eq.unstalled, 0, "# of times queue recovered after stall");
+	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
+	    wrq->eq.sidx, "status page index");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD,
+	    &wrq->tx_wrs_direct, "# of work requests (direct)");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD,
+	    &wrq->tx_wrs_copied, "# of work requests (copied)");
+	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD,
+	    &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)");
 
-
 	return (rc);
 }
 
@@ -2290,97 +3617,136 @@
 }
 
 static int
-alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx,
+alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
     struct sysctl_oid *oid)
 {
 	int rc;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
 	struct sge_eq *eq = &txq->eq;
 	char name[16];
 	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
 
-	rc = alloc_eq(sc, pi, eq);
-	if (rc)
-		return (rc);
-
-	txq->ifp = pi->ifp;
-
-	txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE,
-	    M_ZERO | M_WAITOK);
-	txq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock);
-
-	rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR,
-	    BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS,
-	    BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag);
+	rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx,
+	    M_CXGBE, M_WAITOK);
 	if (rc != 0) {
-		device_printf(sc->dev,
-		    "failed to create tx DMA tag: %d\n", rc);
+		device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc);
 		return (rc);
 	}
 
-	/*
-	 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE
-	 * limit for any WR).  txq->no_dmamap events shouldn't occur if maps is
-	 * sized for the worst case.
-	 */
-	rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8,
-	    M_WAITOK);
+	rc = alloc_eq(sc, vi, eq);
 	if (rc != 0) {
-		device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc);
+		mp_ring_free(txq->r);
+		txq->r = NULL;
 		return (rc);
 	}
 
+	/* Can't fail after this point. */
+
+	if (idx == 0)
+		sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
+	else
+		KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
+		    ("eq_base mismatch"));
+	KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
+	    ("PF with non-zero eq_base"));
+
+	TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
+	txq->ifp = vi->ifp;
+	txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
+	if (sc->flags & IS_VF)
+		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
+		    V_TXPKT_INTF(pi->tx_chan));
+	else
+		txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
+		    V_TXPKT_INTF(pi->tx_chan) |
+		    V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) |
+		    V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) |
+		    V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid)));
+	txq->tc_idx = -1;
+	txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
 	snprintf(name, sizeof(name), "%d", idx);
-	oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
+	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD,
 	    NULL, "tx queue");
 	children = SYSCTL_CHILDREN(oid);
 
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
+	SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD,
+	    &eq->ba, "bus address of descriptor ring");
+	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL,
+	    eq->sidx * EQ_ESIZE + sc->params.sge.spg_len,
+	    "desc ring size in bytes");
+	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
+	    &eq->abs_id, 0, "absolute id of the queue");
+	SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
 	    &eq->cntxt_id, 0, "SGE context id of the queue");
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx",
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
 	    CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I",
 	    "consumer index");
-	SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx",
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx",
 	    CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I",
 	    "producer index");
+	SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL,
+	    eq->sidx, "status page index");
 
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
+	SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc",
+	    CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I",
+	    "traffic class (-1 means none)");
+
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD,
 	    &txq->txcsum, "# of times hardware assisted with checksum");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion",
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion",
 	    CTLFLAG_RD, &txq->vlan_insertion,
 	    "# of times hardware inserted 802.1Q tag");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD,
 	    &txq->tso_wrs, "# of TSO work requests");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD,
 	    &txq->imm_wrs, "# of work requests with immediate data");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD,
 	    &txq->sgl_wrs, "# of work requests with direct SGL");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD,
 	    &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD,
-	    &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)");
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD,
-	    &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests");
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs",
+	    CTLFLAG_RD, &txq->txpkts0_wrs,
+	    "# of txpkts (type 0) work requests");
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs",
+	    CTLFLAG_RD, &txq->txpkts1_wrs,
+	    "# of txpkts (type 1) work requests");
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts",
+	    CTLFLAG_RD, &txq->txpkts0_pkts,
+	    "# of frames tx'd using type0 txpkts work requests");
+	SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts",
+	    CTLFLAG_RD, &txq->txpkts1_pkts,
+	    "# of frames tx'd using type1 txpkts work requests");
 
-	SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "br_drops", CTLFLAG_RD,
-	    &txq->br->br_drops, "# of drops in the buf_ring for this queue");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD,
-	    &txq->no_dmamap, 0, "# of times txq ran out of DMA maps");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD,
-	    &txq->no_desc, 0, "# of times txq ran out of hardware descriptors");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD,
-	    &eq->egr_update, 0, "egress update notifications from the SGE");
-	SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD,
-	    &eq->unstalled, 0, "# of times txq recovered after stall");
+	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues",
+	    CTLFLAG_RD, &txq->r->enqueues,
+	    "# of enqueues to the mp_ring for this queue");
+	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops",
+	    CTLFLAG_RD, &txq->r->drops,
+	    "# of drops in the mp_ring for this queue");
+	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts",
+	    CTLFLAG_RD, &txq->r->starts,
+	    "# of normal consumer starts in the mp_ring for this queue");
+	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls",
+	    CTLFLAG_RD, &txq->r->stalls,
+	    "# of consumer stalls in the mp_ring for this queue");
+	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts",
+	    CTLFLAG_RD, &txq->r->restarts,
+	    "# of consumer restarts in the mp_ring for this queue");
+	SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications",
+	    CTLFLAG_RD, &txq->r->abdications,
+	    "# of consumer abdications in the mp_ring for this queue");
 
-	return (rc);
+	return (0);
 }
 
 static int
-free_txq(struct port_info *pi, struct sge_txq *txq)
+free_txq(struct vi_info *vi, struct sge_txq *txq)
 {
 	int rc;
-	struct adapter *sc = pi->adapter;
+	struct adapter *sc = vi->pi->adapter;
 	struct sge_eq *eq = &txq->eq;
 
 	rc = free_eq(sc, eq);
@@ -2387,16 +3753,10 @@
 	if (rc)
 		return (rc);
 
+	sglist_free(txq->gl);
 	free(txq->sdesc, M_CXGBE);
+	mp_ring_free(txq->r);
 
-	if (txq->txmaps.maps)
-		t4_free_tx_maps(&txq->txmaps, txq->tx_tag);
-
-	buf_ring_free(txq->br, M_CXGBE);
-
-	if (txq->tx_tag)
-		bus_dma_tag_destroy(txq->tx_tag);
-
 	bzero(txq, sizeof(*txq));
 	return (0);
 }
@@ -2412,145 +3772,146 @@
 	*ba = error ? 0 : segs->ds_addr;
 }
 
-static inline bool
-is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl)
-{
-	*ctrl = (void *)((uintptr_t)iq->cdesc +
-	    (iq->esize - sizeof(struct rsp_ctrl)));
-
-	return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen);
-}
-
 static inline void
-iq_next(struct sge_iq *iq)
-{
-	iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize);
-	if (__predict_false(++iq->cidx == iq->qsize - 1)) {
-		iq->cidx = 0;
-		iq->gen ^= 1;
-		iq->cdesc = iq->desc;
-	}
-}
-
-#define FL_HW_IDX(x) ((x) >> 3)
-static inline void
 ring_fl_db(struct adapter *sc, struct sge_fl *fl)
 {
-	int ndesc = fl->pending / 8;
+	uint32_t n, v;
 
-	if (FL_HW_IDX(fl->pidx) == FL_HW_IDX(fl->cidx))
-		ndesc--;	/* hold back one credit */
+	n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx);
+	MPASS(n > 0);
 
-	if (ndesc <= 0)
-		return;		/* nothing to do */
-
 	wmb();
-
-	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO |
-	    V_QID(fl->cntxt_id) | V_PIDX(ndesc));
-	fl->pending -= ndesc * 8;
+	v = fl->dbval | V_PIDX(n);
+	if (fl->udb)
+		*fl->udb = htole32(v);
+	else
+		t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
+	IDXINCR(fl->dbidx, n, fl->sidx);
 }
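
The doorbell path above leans on the driver's circular-index helpers, IDXDIFF and IDXINCR. Their exact definitions live in adapter.h and are not part of this diff, so the macro bodies below are a user-space sketch of the arithmetic they need to perform, not the authoritative versions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ring distance from 'tail' to 'head' on a ring of size 'wrap' (sketch). */
#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

/* Advance 'idx' by 'incr', wrapping at 'wrap' (sketch). */
#define IDXINCR(idx, incr, wrap) do { \
	(idx) = (idx) + (incr) < (wrap) ? (idx) + (incr) : \
	    (idx) + (incr) - (wrap); \
} while (0)

int
main(void)
{
	uint16_t sidx = 512;	/* ring size in hardware descriptors */
	uint16_t dbidx = 510;	/* index last reported to the chip */
	uint16_t pidx = 3;	/* software producer has wrapped past 0 */

	assert(IDXDIFF(pidx, dbidx, sidx) == 5);	/* credits to post */
	IDXINCR(dbidx, 5, sidx);
	assert(dbidx == 3);	/* doorbell index caught up with pidx */
	printf("ok\n");
	return (0);
}
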
 
 /*
- * Fill up the freelist by upto nbufs and maybe ring its doorbell.
+ * Fills up the freelist by allocating up to 'n' buffers.  Buffers that are
+ * recycled do not count towards this allocation budget.
  *
- * Returns non-zero to indicate that it should be added to the list of starving
- * freelists.
+ * Returns non-zero to indicate that this freelist should be added to the list
+ * of starving freelists.
  */
 static int
-refill_fl(struct adapter *sc, struct sge_fl *fl, int nbufs)
+refill_fl(struct adapter *sc, struct sge_fl *fl, int n)
 {
-	__be64 *d = &fl->desc[fl->pidx];
-	struct fl_sdesc *sd = &fl->sdesc[fl->pidx];
-	bus_dma_tag_t tag;
-	bus_addr_t pa;
+	__be64 *d;
+	struct fl_sdesc *sd;
+	uintptr_t pa;
 	caddr_t cl;
-	int rc;
+	struct cluster_layout *cll;
+	struct sw_zone_info *swz;
+	struct cluster_metadata *clm;
+	uint16_t max_pidx;
+	uint16_t hw_cidx = fl->hw_cidx;		/* stable snapshot */
 
 	FL_LOCK_ASSERT_OWNED(fl);
 
-	if (nbufs > fl->needed)
-		nbufs = fl->needed;
+	/*
+	 * We always stop at the beginning of the hardware descriptor that's just
+	 * before the one with the hw cidx.  This is to avoid hw pidx = hw cidx,
+	 * which would mean an empty freelist to the chip.
+	 */
+	max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1;
+	if (fl->pidx == max_pidx * 8)
+		return (0);
 
-	while (nbufs--) {
+	d = &fl->desc[fl->pidx];
+	sd = &fl->sdesc[fl->pidx];
+	cll = &fl->cll_def;	/* default layout */
+	swz = &sc->sge.sw_zone_info[cll->zidx];
 
+	while (n > 0) {
+
 		if (sd->cl != NULL) {
 
+			if (sd->nmbuf == 0) {
+				/*
+				 * Fast recycle without involving any atomics on
+				 * the cluster's metadata (if the cluster has
+				 * metadata).  This happens when all frames
+				 * received in the cluster were small enough to
+				 * fit within a single mbuf each.
+				 */
+				fl->cl_fast_recycled++;
+#ifdef INVARIANTS
+				clm = cl_metadata(sc, fl, &sd->cll, sd->cl);
+				if (clm != NULL)
+					MPASS(clm->refcount == 1);
+#endif
+				goto recycled_fast;
+			}
+
 			/*
-			 * This happens when a frame small enough to fit
-			 * entirely in an mbuf was received in cl last time.
-			 * We'd held on to cl and can reuse it now.  Note that
-			 * we reuse a cluster of the old size if fl->tag_idx is
-			 * no longer the same as sd->tag_idx.
+			 * Cluster is guaranteed to have metadata.  Clusters
+			 * without metadata always take the fast recycle path
+			 * when they're recycled.
 			 */
+			clm = cl_metadata(sc, fl, &sd->cll, sd->cl);
+			MPASS(clm != NULL);
 
-			KASSERT(*d == sd->ba_tag,
-			    ("%s: recyling problem at pidx %d",
-			    __func__, fl->pidx));
-
-			d++;
-			goto recycled;
-		}
-
-
-		if (fl->tag_idx != sd->tag_idx) {
-			bus_dmamap_t map;
-			bus_dma_tag_t newtag = fl->tag[fl->tag_idx];
-			bus_dma_tag_t oldtag = fl->tag[sd->tag_idx];
-
-			/*
-			 * An MTU change can get us here.  Discard the old map
-			 * which was created with the old tag, but only if
-			 * we're able to get a new one.
-			 */
-			rc = bus_dmamap_create(newtag, 0, &map);
-			if (rc == 0) {
-				bus_dmamap_destroy(oldtag, sd->map);
-				sd->map = map;
-				sd->tag_idx = fl->tag_idx;
+			if (atomic_fetchadd_int(&clm->refcount, -1) == 1) {
+				fl->cl_recycled++;
+				counter_u64_add(extfree_rels, 1);
+				goto recycled;
 			}
+			sd->cl = NULL;	/* gave up my reference */
 		}
+		MPASS(sd->cl == NULL);
+alloc:
+		cl = uma_zalloc(swz->zone, M_NOWAIT);
+		if (__predict_false(cl == NULL)) {
+			if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 ||
+			    fl->cll_def.zidx == fl->cll_alt.zidx)
+				break;
 
-		tag = fl->tag[sd->tag_idx];
-
-		cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx));
-		if (cl == NULL)
-			break;
-
-		rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx),
-		    oneseg_dma_callback, &pa, 0);
-		if (rc != 0 || pa == 0) {
-			fl->dmamap_failed++;
-			uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl);
-			break;
+			/* fall back to the safe zone */
+			cll = &fl->cll_alt;
+			swz = &sc->sge.sw_zone_info[cll->zidx];
+			goto alloc;
 		}
+		fl->cl_allocated++;
+		n--;
 
+		pa = pmap_kextract((vm_offset_t)cl);
+		pa += cll->region1;
 		sd->cl = cl;
-		*d++ = htobe64(pa | sd->tag_idx);
-
+		sd->cll = *cll;
+		*d = htobe64(pa | cll->hwidx);
+		clm = cl_metadata(sc, fl, cll, cl);
+		if (clm != NULL) {
+recycled:
 #ifdef INVARIANTS
-		sd->ba_tag = htobe64(pa | sd->tag_idx);
+			clm->sd = sd;
 #endif
+			clm->refcount = 1;
+		}
+		sd->nmbuf = 0;
+recycled_fast:
+		d++;
+		sd++;
+		if (__predict_false(++fl->pidx % 8 == 0)) {
+			uint16_t pidx = fl->pidx / 8;
 
-recycled:
-		/* sd->m is never recycled, should always be NULL */
-		KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__));
+			if (__predict_false(pidx == fl->sidx)) {
+				fl->pidx = 0;
+				pidx = 0;
+				sd = fl->sdesc;
+				d = fl->desc;
+			}
+			if (pidx == max_pidx)
+				break;
 
-		sd->m = m_gethdr(M_NOWAIT, MT_NOINIT);
-		if (sd->m == NULL)
-			break;
-
-		fl->pending++;
-		fl->needed--;
-		sd++;
-		if (++fl->pidx == fl->cap) {
-			fl->pidx = 0;
-			sd = fl->sdesc;
-			d = fl->desc;
+			if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4)
+				ring_fl_db(sc, fl);
 		}
 	}
 
-	if (fl->pending >= 8)
+	if (fl->pidx / 8 != fl->dbidx)
 		ring_fl_db(sc, fl);
 
 	return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING));
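
The refill logic above tracks fl->pidx in buffers while the hardware thinks in 64-byte descriptors holding 8 buffer pointers each, hence the /8 and %8 arithmetic. Two of its decisions are easy to isolate: never let the hardware see pidx equal to cidx (stop one full descriptor short), and defer the doorbell until at least 4 descriptors' worth of buffers are posted. A sketch of just those two rules, with the ring geometry as the only input:

#include <stdio.h>
#include <stdint.h>

/* Stop at the descriptor just before the one the hardware is consuming,
 * so the chip never sees an apparently empty freelist (sketch). */
static unsigned
max_refill_pidx(uint16_t hw_cidx, uint16_t sidx)
{
	return (hw_cidx == 0 ? sidx - 1u : hw_cidx - 1u);
}

/* Batch doorbells: ring only once 4+ hardware descriptors are pending. */
static int
should_ring_db(uint16_t pidx, uint16_t dbidx, uint16_t sidx)
{
	unsigned ndesc = pidx >= dbidx ? pidx - dbidx : sidx - dbidx + pidx;

	return (ndesc >= 4);
}

int
main(void)
{
	printf("stop refilling at hw pidx %u\n", max_refill_pidx(0, 512));
	printf("ring doorbell now: %d\n", should_ring_db(6, 1, 512));
	return (0);
}
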
@@ -2565,7 +3926,7 @@
 	struct adapter *sc = arg;
 	struct sge_fl *fl, *fl_temp;
 
-	mtx_lock(&sc->sfl_lock);
+	mtx_assert(&sc->sfl_lock, MA_OWNED);
 	TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) {
 		FL_LOCK(fl);
 		refill_fl(sc, fl, 64);
@@ -2578,74 +3939,40 @@
 
 	if (!TAILQ_EMPTY(&sc->sfl))
 		callout_schedule(&sc->sfl_callout, hz / 5);
-	mtx_unlock(&sc->sfl_lock);
 }
 
 static int
 alloc_fl_sdesc(struct sge_fl *fl)
 {
-	struct fl_sdesc *sd;
-	bus_dma_tag_t tag;
-	int i, rc;
 
-	FL_LOCK_ASSERT_OWNED(fl);
-
-	fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE,
+	fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE,
 	    M_ZERO | M_WAITOK);
 
-	tag = fl->tag[fl->tag_idx];
-	sd = fl->sdesc;
-	for (i = 0; i < fl->cap; i++, sd++) {
-
-		sd->tag_idx = fl->tag_idx;
-		rc = bus_dmamap_create(tag, 0, &sd->map);
-		if (rc != 0)
-			goto failed;
-	}
-
 	return (0);
-failed:
-	while (--i >= 0) {
-		sd--;
-		bus_dmamap_destroy(tag, sd->map);
-		if (sd->m) {
-			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
-			m_free(sd->m);
-			sd->m = NULL;
-		}
-	}
-	KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__));
-
-	free(fl->sdesc, M_CXGBE);
-	fl->sdesc = NULL;
-
-	return (rc);
 }
 
 static void
-free_fl_sdesc(struct sge_fl *fl)
+free_fl_sdesc(struct adapter *sc, struct sge_fl *fl)
 {
 	struct fl_sdesc *sd;
+	struct cluster_metadata *clm;
+	struct cluster_layout *cll;
 	int i;
 
-	FL_LOCK_ASSERT_OWNED(fl);
-
 	sd = fl->sdesc;
-	for (i = 0; i < fl->cap; i++, sd++) {
+	for (i = 0; i < fl->sidx * 8; i++, sd++) {
+		if (sd->cl == NULL)
+			continue;
 
-		if (sd->m) {
-			m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0);
-			m_free(sd->m);
-			sd->m = NULL;
+		cll = &sd->cll;
+		clm = cl_metadata(sc, fl, cll, sd->cl);
+		if (sd->nmbuf == 0)
+			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
+		else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) {
+			uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl);
+			counter_u64_add(extfree_rels, 1);
 		}
-
-		if (sd->cl) {
-			bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map);
-			uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl);
-			sd->cl = NULL;
-		}
-
-		bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map);
+		sd->cl = NULL;
 	}
 
 	free(fl->sdesc, M_CXGBE);
@@ -2652,293 +3979,359 @@
 	fl->sdesc = NULL;
 }
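
refill_fl and free_fl_sdesc share one ownership rule: atomic_fetchadd_int(&clm->refcount, -1) returning 1 means the caller just dropped the last reference and may free or recycle the cluster itself. A user-space sketch of that rule, with C11 atomics standing in for the kernel primitive:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

/* Returns 1 when the caller dropped the last reference and now owns the
 * cluster again; mirrors the atomic_fetchadd_int(..., -1) == 1 test. */
static int
drop_ref_is_last(atomic_int *refcount)
{
	return (atomic_fetch_add(refcount, -1) == 1);
}

int
main(void)
{
	atomic_int refs = 2;	/* e.g. the fl's reference + one in-flight mbuf */

	assert(!drop_ref_is_last(&refs));	/* mbuf free: not last */
	assert(drop_ref_is_last(&refs));	/* fl's turn: recycle/free */
	printf("ok\n");
	return (0);
}
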
 
-int
-t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count,
-    int flags)
+static inline void
+get_pkt_gl(struct mbuf *m, struct sglist *gl)
 {
-	struct tx_map *txm;
-	int i, rc;
+	int rc;
 
-	txmaps->map_total = txmaps->map_avail = count;
-	txmaps->map_cidx = txmaps->map_pidx = 0;
+	M_ASSERTPKTHDR(m);
 
-	txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE,
-	    M_ZERO | flags);
-
-	txm = txmaps->maps;
-	for (i = 0; i < count; i++, txm++) {
-		rc = bus_dmamap_create(tx_tag, 0, &txm->map);
-		if (rc != 0)
-			goto failed;
+	sglist_reset(gl);
+	rc = sglist_append_mbuf(gl, m);
+	if (__predict_false(rc != 0)) {
+		panic("%s: mbuf %p (%d segs) was vetted earlier but now fails "
+		    "with %d.", __func__, m, mbuf_nsegs(m), rc);
 	}
 
-	return (0);
-failed:
-	while (--i >= 0) {
-		txm--;
-		bus_dmamap_destroy(tx_tag, txm->map);
-	}
-	KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__));
+	KASSERT(gl->sg_nseg == mbuf_nsegs(m),
+	    ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
+	    mbuf_nsegs(m), gl->sg_nseg));
+	KASSERT(gl->sg_nseg > 0 &&
+	    gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS),
+	    ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__,
+		gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS));
+}
 
-	free(txmaps->maps, M_CXGBE);
-	txmaps->maps = NULL;
+/*
+ * len16 for a txpkt WR with a GL.  Includes the firmware work request header.
+ */
+static inline u_int
+txpkt_len16(u_int nsegs, u_int tso)
+{
+	u_int n;
 
-	return (rc);
+	MPASS(nsegs > 0);
+
+	nsegs--; /* first segment is part of ulptx_sgl */
+	n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) +
+	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
+	if (tso)
+		n += sizeof(struct cpl_tx_pkt_lso_core);
+
+	return (howmany(n, 16));
 }
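
All the len16 helpers share one shape: the first SGL segment is carried inside the ulptx_sgl header itself, and every further pair of segments costs three 8-byte flits (one shared length flit plus two address flits). A worked instance; the 16-byte sizes for the WR, CPL, SGL, and LSO headers are assumptions inferred from the fields the code writes, not values stated in this diff:

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

/* Sketch of txpkt_len16 with the header sizes hard-coded as assumptions. */
static unsigned
txpkt_len16_model(unsigned nsegs, int tso)
{
	unsigned n;

	nsegs--;			/* segment 0 rides in the ulptx_sgl */
	n = 16 /* wr */ + 16 /* cpl */ + 16 /* sgl hdr */ +
	    8 * ((3 * nsegs) / 2 + (nsegs & 1));
	if (tso)
		n += 16;		/* cpl_tx_pkt_lso_core, assumed */
	return (howmany(n, 16));
}

int
main(void)
{
	/* 3 segments, no TSO: 48 + 8 * 3 = 72 bytes -> 5 16-byte units. */
	printf("len16 = %u\n", txpkt_len16_model(3, 0));
	return (0);
}
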
 
-void
-t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag)
+/*
+ * len16 for a txpkt_vm WR with a GL.  Includes the firmware work
+ * request header.
+ */
+static inline u_int
+txpkt_vm_len16(u_int nsegs, u_int tso)
 {
-	struct tx_map *txm;
-	int i;
+	u_int n;
 
-	txm = txmaps->maps;
-	for (i = 0; i < txmaps->map_total; i++, txm++) {
+	MPASS(nsegs > 0);
 
-		if (txm->m) {
-			bus_dmamap_unload(tx_tag, txm->map);
-			m_freem(txm->m);
-			txm->m = NULL;
-		}
+	nsegs--; /* first segment is part of ulptx_sgl */
+	n = sizeof(struct fw_eth_tx_pkt_vm_wr) +
+	    sizeof(struct cpl_tx_pkt_core) +
+	    sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
+	if (tso)
+		n += sizeof(struct cpl_tx_pkt_lso_core);
 
-		bus_dmamap_destroy(tx_tag, txm->map);
-	}
+	return (howmany(n, 16));
+}
 
-	free(txmaps->maps, M_CXGBE);
-	txmaps->maps = NULL;
+/*
+ * len16 for a txpkts type 0 WR with a GL.  Does not include the firmware work
+ * request header.
+ */
+static inline u_int
+txpkts0_len16(u_int nsegs)
+{
+	u_int n;
+
+	MPASS(nsegs > 0);
+
+	nsegs--; /* first segment is part of ulptx_sgl */
+	n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) +
+	    sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) +
+	    8 * ((3 * nsegs) / 2 + (nsegs & 1));
+
+	return (howmany(n, 16));
 }
 
 /*
- * We'll do immediate data tx for non-TSO, but only when not coalescing.  We're
- * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes
- * of immediate data.
+ * len16 for a txpkts type 1 WR with a GL.  Does not include the firmware work
+ * request header.
  */
-#define IMM_LEN ( \
-      2 * EQ_ESIZE \
-    - sizeof(struct fw_eth_tx_pkt_wr) \
-    - sizeof(struct cpl_tx_pkt_core))
+static inline u_int
+txpkts1_len16(void)
+{
+	u_int n;
 
+	n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl);
+
+	return (howmany(n, 16));
+}
+
+static inline u_int
+imm_payload(u_int ndesc)
+{
+	u_int n;
+
+	n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) -
+	    sizeof(struct cpl_tx_pkt_core);
+
+	return (n);
+}
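
imm_payload is the immediate-data budget for a WR spanning ndesc descriptors. With the 64-byte descriptor and the same assumed 16-byte headers, imm_payload(2) comes to 96 bytes, which agrees with the 96-byte IMM_LEN figure in the comment this diff removes:

#include <stdio.h>

#define EQ_ESIZE	64	/* hardware descriptor size (assumed) */

/* Sketch of imm_payload; header sizes assumed as in the len16 sketch. */
static unsigned
imm_payload_model(unsigned ndesc)
{
	return (ndesc * EQ_ESIZE - 16 /* wr */ - 16 /* cpl */);
}

int
main(void)
{
	/* write_txpkt_wr checks pktlen <= imm_payload(2): 128 - 32 = 96. */
	printf("immediate data budget = %u bytes\n", imm_payload_model(2));
	return (0);
}
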
+
 /*
- * Returns non-zero on failure, no need to cleanup anything in that case.
+ * Write a VM txpkt WR for this packet to the hardware descriptors, update the
+ * software descriptor, and advance the pidx.  It is guaranteed that enough
+ * descriptors are available.
  *
- * Note 1: We always try to defrag the mbuf if required and return EFBIG only
- * if the resulting chain still won't fit in a tx descriptor.
- *
- * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf
- * does not have the TCP header in it.
+ * The return value is the # of hardware descriptors used.
  */
-static int
-get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl,
-    int sgl_only)
+static u_int
+write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq,
+    struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available)
 {
-	struct mbuf *m = *fp;
-	struct tx_maps *txmaps;
-	struct tx_map *txm;
-	int rc, defragged = 0, n;
+	struct sge_eq *eq = &txq->eq;
+	struct tx_sdesc *txsd;
+	struct cpl_tx_pkt_core *cpl;
+	uint32_t ctrl;	/* used in many unrelated places */
+	uint64_t ctrl1;
+	int csum_type, len16, ndesc, pktlen, nsegs;
+	caddr_t dst;
 
 	TXQ_LOCK_ASSERT_OWNED(txq);
+	M_ASSERTPKTHDR(m0);
+	MPASS(available > 0 && available < eq->sidx);
 
-	if (m->m_pkthdr.tso_segsz)
-		sgl_only = 1;	/* Do not allow immediate data with LSO */
+	len16 = mbuf_len16(m0);
+	nsegs = mbuf_nsegs(m0);
+	pktlen = m0->m_pkthdr.len;
+	ctrl = sizeof(struct cpl_tx_pkt_core);
+	if (needs_tso(m0))
+		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
+	ndesc = howmany(len16, EQ_ESIZE / 16);
+	MPASS(ndesc <= available);
 
-start:	sgl->nsegs = 0;
+	/* Firmware work request header */
+	MPASS(wr == (void *)&eq->desc[eq->pidx]);
+	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
+	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
 
-	if (m->m_pkthdr.len <= IMM_LEN && !sgl_only)
-		return (0);	/* nsegs = 0 tells caller to use imm. tx */
+	ctrl = V_FW_WR_LEN16(len16);
+	wr->equiq_to_len16 = htobe32(ctrl);
+	wr->r3[0] = 0;
+	wr->r3[1] = 0;
+	
+	/*
+	 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci.
+	 * vlantci is ignored unless the ethtype is 0x8100, so it's
+	 * simpler to always copy it rather than making it
+	 * conditional.  Also, it seems that we do not have to set
+	 * vlantci or fake the ethtype when doing VLAN tag insertion.
+	 */
+	m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst);
 
-	txmaps = &txq->txmaps;
-	if (txmaps->map_avail == 0) {
-		txq->no_dmamap++;
-		return (ENOMEM);
-	}
-	txm = &txmaps->maps[txmaps->map_pidx];
+	csum_type = -1;
+	if (needs_tso(m0)) {
+		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
 
-	if (m->m_pkthdr.tso_segsz && m->m_len < 50) {
-		*fp = m_pullup(m, 50);
-		m = *fp;
-		if (m == NULL)
-			return (ENOBUFS);
-	}
+		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
+		    m0->m_pkthdr.l4hlen > 0,
+		    ("%s: mbuf %p needs TSO but missing header lengths",
+			__func__, m0));
 
-	rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
-	    &sgl->nsegs, BUS_DMA_NOWAIT);
-	if (rc == EFBIG && defragged == 0) {
-		m = m_defrag(m, M_NOWAIT);
-		if (m == NULL)
-			return (EFBIG);
+		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
+		    F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2)
+		    | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
+		if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header))
+			ctrl |= V_LSO_ETHHDR_LEN(1);
+		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
+			ctrl |= F_LSO_IPV6;
 
-		defragged = 1;
-		*fp = m;
-		goto start;
-	}
-	if (rc != 0)
-		return (rc);
+		lso->lso_ctrl = htobe32(ctrl);
+		lso->ipid_ofst = htobe16(0);
+		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
+		lso->seqno_offset = htobe32(0);
+		lso->len = htobe32(pktlen);
 
-	txm->m = m;
-	txmaps->map_avail--;
-	if (++txmaps->map_pidx == txmaps->map_total)
-		txmaps->map_pidx = 0;
+		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
+			csum_type = TX_CSUM_TCPIP6;
+		else
+			csum_type = TX_CSUM_TCPIP;
 
-	KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS,
-	    ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs));
+		cpl = (void *)(lso + 1);
 
-	/*
-	 * Store the # of flits required to hold this frame's SGL in nflits.  An
-	 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by
-	 * multiple (len0 + len1, addr0, addr1) tuples.  If addr1 is not used
-	 * then len1 must be set to 0.
-	 */
-	n = sgl->nsegs - 1;
-	sgl->nflits = (3 * n) / 2 + (n & 1) + 2;
+		txq->tso_wrs++;
+	} else {
+		if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP)
+			csum_type = TX_CSUM_TCPIP;
+		else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP)
+			csum_type = TX_CSUM_UDPIP;
+		else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP)
+			csum_type = TX_CSUM_TCPIP6;
+		else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP)
+			csum_type = TX_CSUM_UDPIP6;
+#if defined(INET)
+		else if (m0->m_pkthdr.csum_flags & CSUM_IP) {
+			/*
+			 * XXX: The firmware appears to stomp on the
+			 * fragment/flags field of the IP header when
+			 * using TX_CSUM_IP.  Fall back to doing
+			 * software checksums.
+			 */
+			u_short *sump;
+			struct mbuf *m;
+			int offset;
 
-	return (0);
-}
+			m = m0;
+			offset = 0;
+			sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen +
+			    offsetof(struct ip, ip_sum));
+			*sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen +
+			    m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen);
+			m0->m_pkthdr.csum_flags &= ~CSUM_IP;
+		}
+#endif
 
+		cpl = (void *)(wr + 1);
+	}
 
-/*
- * Releases all the txq resources used up in the specified sgl.
- */
-static int
-free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl)
-{
-	struct tx_maps *txmaps;
-	struct tx_map *txm;
+	/* Checksum offload */
+	ctrl1 = 0;
+	if (needs_l3_csum(m0) == 0)
+		ctrl1 |= F_TXPKT_IPCSUM_DIS;
+	if (csum_type >= 0) {
+		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0,
+	    ("%s: mbuf %p needs checksum offload but missing header lengths",
+			__func__, m0));
 
-	TXQ_LOCK_ASSERT_OWNED(txq);
+		if (chip_id(sc) <= CHELSIO_T5) {
+			ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen -
+			    ETHER_HDR_LEN);
+		} else {
+			ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen -
+			    ETHER_HDR_LEN);
+		}
+		ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen);
+		ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type);
+	} else
+		ctrl1 |= F_TXPKT_L4CSUM_DIS;
+	if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
+	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
+		txq->txcsum++;	/* some hardware assistance provided */
 
-	if (sgl->nsegs == 0)
-		return (0);	/* didn't use any map */
+	/* VLAN tag insertion */
+	if (needs_vlan_insertion(m0)) {
+		ctrl1 |= F_TXPKT_VLAN_VLD |
+		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
+		txq->vlan_insertion++;
+	}
 
-	txmaps = &txq->txmaps;
+	/* CPL header */
+	cpl->ctrl0 = txq->cpl_ctrl0;
+	cpl->pack = 0;
+	cpl->len = htobe16(pktlen);
+	cpl->ctrl1 = htobe64(ctrl1);
 
-	/* 1 pkt uses exactly 1 map, back it out */
+	/* SGL */
+	dst = (void *)(cpl + 1);
 
-	txmaps->map_avail++;
-	if (txmaps->map_pidx > 0)
-		txmaps->map_pidx--;
-	else
-		txmaps->map_pidx = txmaps->map_total - 1;
+	/*
+	 * A packet using TSO will use up an entire descriptor for the
+	 * firmware work request header, LSO CPL, and TX_PKT_XT CPL.
+	 * If this descriptor is the last descriptor in the ring, wrap
+	 * around to the front of the ring explicitly for the start of
+	 * the sgl.
+	 */
+	if (dst == (void *)&eq->desc[eq->sidx]) {
+		dst = (void *)&eq->desc[0];
+		write_gl_to_txd(txq, m0, &dst, 0);
+	} else
+		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
+	txq->sgl_wrs++;
 
-	txm = &txmaps->maps[txmaps->map_pidx];
-	bus_dmamap_unload(txq->tx_tag, txm->map);
-	txm->m = NULL;
+	txq->txpkt_wrs++;
 
-	return (0);
+	txsd = &txq->sdesc[eq->pidx];
+	txsd->m = m0;
+	txsd->desc_used = ndesc;
+
+	return (ndesc);
 }
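
The CSUM_IP fallback above rewrites the IP header checksum in software with in_cksum_skip, summing over the IP header while skipping the Ethernet header in front of it. As a hedged illustration of what that computation does (a sketch of the algorithm only; the kernel routine walks mbuf chains):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum over buf[skip..len), e.g. an IP header with
 * its checksum field zeroed; user-space stand-in for in_cksum_skip. */
static uint16_t
cksum_skip(const uint8_t *buf, size_t len, size_t skip)
{
	uint32_t sum = 0;
	size_t i;

	for (i = skip; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (i < len)
		sum += (uint32_t)buf[i] << 8;	/* odd trailing byte */
	while (sum > 0xffff)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return ((uint16_t)~sum);
}

int
main(void)
{
	/* Classic 20-byte IPv4 header with the checksum bytes zeroed. */
	uint8_t ip[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
	    0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63, 0xac, 0x10,
	    0x0a, 0x0c };

	printf("ip_sum = 0x%04x\n", cksum_skip(ip, sizeof(ip), 0)); /* b1e6 */
	return (0);
}
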
 
-static int
-write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m,
-    struct sgl *sgl)
+/*
+ * Write a txpkt WR for this packet to the hardware descriptors, update the
+ * software descriptor, and advance the pidx.  It is guaranteed that enough
+ * descriptors are available.
+ *
+ * The return value is the # of hardware descriptors used.
+ */
+static u_int
+write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr,
+    struct mbuf *m0, u_int available)
 {
 	struct sge_eq *eq = &txq->eq;
-	struct fw_eth_tx_pkt_wr *wr;
+	struct tx_sdesc *txsd;
 	struct cpl_tx_pkt_core *cpl;
 	uint32_t ctrl;	/* used in many unrelated places */
 	uint64_t ctrl1;
-	int nflits, ndesc, pktlen;
-	struct tx_sdesc *txsd;
+	int len16, ndesc, pktlen, nsegs;
 	caddr_t dst;
 
 	TXQ_LOCK_ASSERT_OWNED(txq);
+	M_ASSERTPKTHDR(m0);
+	MPASS(available > 0 && available < eq->sidx);
 
-	pktlen = m->m_pkthdr.len;
-
-	/*
-	 * Do we have enough flits to send this frame out?
-	 */
+	len16 = mbuf_len16(m0);
+	nsegs = mbuf_nsegs(m0);
+	pktlen = m0->m_pkthdr.len;
 	ctrl = sizeof(struct cpl_tx_pkt_core);
-	if (m->m_pkthdr.tso_segsz) {
-		nflits = TXPKT_LSO_WR_HDR;
+	if (needs_tso(m0))
 		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
-	} else
-		nflits = TXPKT_WR_HDR;
-	if (sgl->nsegs > 0)
-		nflits += sgl->nflits;
-	else {
-		nflits += howmany(pktlen, 8);
+	else if (pktlen <= imm_payload(2) && available >= 2) {
+		/* Immediate data.  Recalculate len16 and set nsegs to 0. */
 		ctrl += pktlen;
+		len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) +
+		    sizeof(struct cpl_tx_pkt_core) + pktlen, 16);
+		nsegs = 0;
 	}
-	ndesc = howmany(nflits, 8);
-	if (ndesc > eq->avail)
-		return (ENOMEM);
+	ndesc = howmany(len16, EQ_ESIZE / 16);
+	MPASS(ndesc <= available);
 
 	/* Firmware work request header */
-	wr = (void *)&eq->desc[eq->pidx];
+	MPASS(wr == (void *)&eq->desc[eq->pidx]);
 	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
 	    V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
-	ctrl = V_FW_WR_LEN16(howmany(nflits, 2));
-	if (eq->avail == ndesc) {
-		if (!(eq->flags & EQ_CRFLUSHED)) {
-			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
-			eq->flags |= EQ_CRFLUSHED;
-		}
-		eq->flags |= EQ_STALLED;
-	}
 
+	ctrl = V_FW_WR_LEN16(len16);
 	wr->equiq_to_len16 = htobe32(ctrl);
 	wr->r3 = 0;
 
-	if (m->m_pkthdr.tso_segsz) {
+	if (needs_tso(m0)) {
 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
-		struct ether_header *eh;
-		void *l3hdr;
-#if defined(INET) || defined(INET6)
-		struct tcphdr *tcp;
-#endif
-		uint16_t eh_type;
 
+		KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
+		    m0->m_pkthdr.l4hlen > 0,
+		    ("%s: mbuf %p needs TSO but missing header lengths",
+			__func__, m0));
+
 		ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE |
-		    F_LSO_LAST_SLICE;
-
-		eh = mtod(m, struct ether_header *);
-		eh_type = ntohs(eh->ether_type);
-		if (eh_type == ETHERTYPE_VLAN) {
-			struct ether_vlan_header *evh = (void *)eh;
-
+		    F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2)
+		    | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
+		if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header))
 			ctrl |= V_LSO_ETHHDR_LEN(1);
-			l3hdr = evh + 1;
-			eh_type = ntohs(evh->evl_proto);
-		} else
-			l3hdr = eh + 1;
-
-		switch (eh_type) {
-#ifdef INET6
-		case ETHERTYPE_IPV6:
-		{
-			struct ip6_hdr *ip6 = l3hdr;
-
-			/*
-			 * XXX-BZ For now we do not pretend to support
-			 * IPv6 extension headers.
-			 */
-			KASSERT(ip6->ip6_nxt == IPPROTO_TCP, ("%s: CSUM_TSO "
-			    "with ip6_nxt != TCP: %u", __func__, ip6->ip6_nxt));
-			tcp = (struct tcphdr *)(ip6 + 1);
+		if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
 			ctrl |= F_LSO_IPV6;
-			ctrl |= V_LSO_IPHDR_LEN(sizeof(*ip6) >> 2) |
-			    V_LSO_TCPHDR_LEN(tcp->th_off);
-			break;
-		}
-#endif
-#ifdef INET
-		case ETHERTYPE_IP:
-		{
-			struct ip *ip = l3hdr;
 
-			tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4);
-			ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) |
-			    V_LSO_TCPHDR_LEN(tcp->th_off);
-			break;
-		}
-#endif
-		default:
-			panic("%s: CSUM_TSO but no supported IP version "
-			    "(0x%04x)", __func__, eh_type);
-		}
-
 		lso->lso_ctrl = htobe32(ctrl);
 		lso->ipid_ofst = htobe16(0);
-		lso->mss = htobe16(m->m_pkthdr.tso_segsz);
+		lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
 		lso->seqno_offset = htobe32(0);
 		lso->len = htobe32(pktlen);
 
@@ -2950,48 +4343,36 @@
 
 	/* Checksum offload */
 	ctrl1 = 0;
-	if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)))
+	if (needs_l3_csum(m0) == 0)
 		ctrl1 |= F_TXPKT_IPCSUM_DIS;
-	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
-	    CSUM_TCP_IPV6 | CSUM_TSO)))
+	if (needs_l4_csum(m0) == 0)
 		ctrl1 |= F_TXPKT_L4CSUM_DIS;
-	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
+	if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
 	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
 		txq->txcsum++;	/* some hardware assistance provided */
 
 	/* VLAN tag insertion */
-	if (m->m_flags & M_VLANTAG) {
-		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+	if (needs_vlan_insertion(m0)) {
+		ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
 		txq->vlan_insertion++;
 	}
 
 	/* CPL header */
-	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
-	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
+	cpl->ctrl0 = txq->cpl_ctrl0;
 	cpl->pack = 0;
 	cpl->len = htobe16(pktlen);
 	cpl->ctrl1 = htobe64(ctrl1);
 
-	/* Software descriptor */
-	txsd = &txq->sdesc[eq->pidx];
-	txsd->desc_used = ndesc;
-
-	eq->pending += ndesc;
-	eq->avail -= ndesc;
-	eq->pidx += ndesc;
-	if (eq->pidx >= eq->cap)
-		eq->pidx -= eq->cap;
-
 	/* SGL */
 	dst = (void *)(cpl + 1);
-	if (sgl->nsegs > 0) {
-		txsd->credits = 1;
+	if (nsegs > 0) {
+
+		write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx);
 		txq->sgl_wrs++;
-		write_sgl_to_txd(eq, sgl, &dst);
 	} else {
-		txsd->credits = 0;
-		txq->imm_wrs++;
-		for (; m; m = m->m_next) {
+		struct mbuf *m;
+
+		for (m = m0; m != NULL; m = m->m_next) {
 			copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len);
 #ifdef INVARIANTS
 			pktlen -= m->m_len;
@@ -3000,245 +4381,225 @@
 #ifdef INVARIANTS
 		KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen));
 #endif
-
+		txq->imm_wrs++;
 	}
 
 	txq->txpkt_wrs++;
-	return (0);
+
+	txsd = &txq->sdesc[eq->pidx];
+	txsd->m = m0;
+	txsd->desc_used = ndesc;
+
+	return (ndesc);
 }
 
-/*
- * Returns 0 to indicate that m has been accepted into a coalesced tx work
- * request.  It has either been folded into txpkts or txpkts was flushed and m
- * has started a new coalesced work request (as the first frame in a fresh
- * txpkts).
- *
- * Returns non-zero to indicate a failure - caller is responsible for
- * transmitting m, if there was anything in txpkts it has been flushed.
- */
 static int
-add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts,
-    struct mbuf *m, struct sgl *sgl)
+try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available)
 {
-	struct sge_eq *eq = &txq->eq;
-	int can_coalesce;
-	struct tx_sdesc *txsd;
-	int flits;
+	u_int needed, nsegs1, nsegs2, l1, l2;
 
-	TXQ_LOCK_ASSERT_OWNED(txq);
+	if (cannot_use_txpkts(m) || cannot_use_txpkts(n))
+		return (1);
 
-	KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__));
+	nsegs1 = mbuf_nsegs(m);
+	nsegs2 = mbuf_nsegs(n);
+	if (nsegs1 + nsegs2 == 2) {
+		txp->wr_type = 1;
+		l1 = l2 = txpkts1_len16();
+	} else {
+		txp->wr_type = 0;
+		l1 = txpkts0_len16(nsegs1);
+		l2 = txpkts0_len16(nsegs2);
+	}
+	txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2;
+	needed = howmany(txp->len16, EQ_ESIZE / 16);
+	if (needed > SGE_MAX_WR_NDESC || needed > available)
+		return (1);
 
-	if (txpkts->npkt > 0) {
-		flits = TXPKTS_PKT_HDR + sgl->nflits;
-		can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
-		    txpkts->nflits + flits <= TX_WR_FLITS &&
-		    txpkts->nflits + flits <= eq->avail * 8 &&
-		    txpkts->plen + m->m_pkthdr.len < 65536;
+	txp->plen = m->m_pkthdr.len + n->m_pkthdr.len;
+	if (txp->plen > 65535)
+		return (1);
 
-		if (can_coalesce) {
-			txpkts->npkt++;
-			txpkts->nflits += flits;
-			txpkts->plen += m->m_pkthdr.len;
+	txp->npkt = 2;
+	set_mbuf_len16(m, l1);
+	set_mbuf_len16(n, l2);
 
-			txsd = &txq->sdesc[eq->pidx];
-			txsd->credits++;
+	return (0);
+}
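
try_txpkts only ever opens a coalesced WR with exactly two packets; add_to_txpkts then grows it one packet at a time. The wr_type choice is the interesting part and can be stated on its own: type 1 is the compact form and requires exactly one SGL segment per packet, so it is viable only when the pair maps to two segments total (the 65535-byte plen cap and the SGE_MAX_WR_NDESC/available checks are applied separately). A sketch:

#include <assert.h>
#include <stdio.h>

/* Pick the txpkts wr_type for a candidate pair of packets (sketch). */
static int
txpkts_wr_type(unsigned nsegs1, unsigned nsegs2)
{
	return (nsegs1 + nsegs2 == 2 ? 1 : 0);
}

int
main(void)
{
	assert(txpkts_wr_type(1, 1) == 1);	/* both single-segment */
	assert(txpkts_wr_type(1, 3) == 0);	/* any multi-segment packet */
	printf("ok\n");
	return (0);
}
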
 
-			return (0);
-		}
+static int
+add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available)
+{
+	u_int plen, len16, needed, nsegs;
 
-		/*
-		 * Couldn't coalesce m into txpkts.  The first order of business
-		 * is to send txpkts on its way.  Then we'll revisit m.
-		 */
-		write_txpkts_wr(txq, txpkts);
-	}
+	MPASS(txp->wr_type == 0 || txp->wr_type == 1);
 
-	/*
-	 * Check if we can start a new coalesced tx work request with m as
-	 * the first packet in it.
-	 */
+	nsegs = mbuf_nsegs(m);
+	if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1))
+		return (1);
 
-	KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__));
+	plen = txp->plen + m->m_pkthdr.len;
+	if (plen > 65535)
+		return (1);
 
-	flits = TXPKTS_WR_HDR + sgl->nflits;
-	can_coalesce = m->m_pkthdr.tso_segsz == 0 &&
-	    flits <= eq->avail * 8 && flits <= TX_WR_FLITS;
+	if (txp->wr_type == 0)
+		len16 = txpkts0_len16(nsegs);
+	else
+		len16 = txpkts1_len16();
+	needed = howmany(txp->len16 + len16, EQ_ESIZE / 16);
+	if (needed > SGE_MAX_WR_NDESC || needed > available)
+		return (1);
 
-	if (can_coalesce == 0)
-		return (EINVAL);
+	txp->npkt++;
+	txp->plen = plen;
+	txp->len16 += len16;
+	set_mbuf_len16(m, len16);
 
-	/*
-	 * Start a fresh coalesced tx WR with m as the first frame in it.
-	 */
-	txpkts->npkt = 1;
-	txpkts->nflits = flits;
-	txpkts->flitp = &eq->desc[eq->pidx].flit[2];
-	txpkts->plen = m->m_pkthdr.len;
-
-	txsd = &txq->sdesc[eq->pidx];
-	txsd->credits = 1;
-
 	return (0);
 }
 
 /*
- * Note that write_txpkts_wr can never run out of hardware descriptors (but
- * write_txpkt_wr can).  add_to_txpkts ensures that a frame is accepted for
- * coalescing only if sufficient hardware descriptors are available.
+ * Write a txpkts WR for the packets in txp to the hardware descriptors, update
+ * the software descriptor, and advance the pidx.  It is guaranteed that enough
+ * descriptors are available.
+ *
+ * The return value is the # of hardware descriptors used.
  */
-static void
-write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
+static u_int
+write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr,
+    struct mbuf *m0, const struct txpkts *txp, u_int available)
 {
 	struct sge_eq *eq = &txq->eq;
-	struct fw_eth_tx_pkts_wr *wr;
 	struct tx_sdesc *txsd;
+	struct cpl_tx_pkt_core *cpl;
 	uint32_t ctrl;
-	int ndesc;
+	uint64_t ctrl1;
+	int ndesc, checkwrap;
+	struct mbuf *m;
+	void *flitp;
 
 	TXQ_LOCK_ASSERT_OWNED(txq);
+	MPASS(txp->npkt > 0);
+	MPASS(txp->plen < 65536);
+	MPASS(m0 != NULL);
+	MPASS(m0->m_nextpkt != NULL);
+	MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16));
+	MPASS(available > 0 && available < eq->sidx);
 
-	ndesc = howmany(txpkts->nflits, 8);
+	ndesc = howmany(txp->len16, EQ_ESIZE / 16);
+	MPASS(ndesc <= available);
 
-	wr = (void *)&eq->desc[eq->pidx];
+	MPASS(wr == (void *)&eq->desc[eq->pidx]);
 	wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
-	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
-	if (eq->avail == ndesc) {
-		if (!(eq->flags & EQ_CRFLUSHED)) {
-			ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
-			eq->flags |= EQ_CRFLUSHED;
-		}
-		eq->flags |= EQ_STALLED;
-	}
+	ctrl = V_FW_WR_LEN16(txp->len16);
 	wr->equiq_to_len16 = htobe32(ctrl);
-	wr->plen = htobe16(txpkts->plen);
-	wr->npkt = txpkts->npkt;
-	wr->r3 = wr->type = 0;
+	wr->plen = htobe16(txp->plen);
+	wr->npkt = txp->npkt;
+	wr->r3 = 0;
+	wr->type = txp->wr_type;
+	flitp = wr + 1;
 
-	/* Everything else already written */
+	/*
+	 * At this point we are 16B into a hardware descriptor.  If checkwrap is
+	 * set then we know the WR is going to wrap around somewhere.  We'll
+	 * check for that at appropriate points.
+	 */
+	checkwrap = eq->sidx - ndesc < eq->pidx;
+	for (m = m0; m != NULL; m = m->m_nextpkt) {
+		if (txp->wr_type == 0) {
+			struct ulp_txpkt *ulpmc;
+			struct ulptx_idata *ulpsc;
 
-	txsd = &txq->sdesc[eq->pidx];
-	txsd->desc_used = ndesc;
+			/* ULP master command */
+			ulpmc = flitp;
+			ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+			    V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid));
+			ulpmc->len = htobe32(mbuf_len16(m));
 
-	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));
+			/* ULP subcommand */
+			ulpsc = (void *)(ulpmc + 1);
+			ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+			    F_ULP_TX_SC_MORE);
+			ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
 
-	eq->pending += ndesc;
-	eq->avail -= ndesc;
-	eq->pidx += ndesc;
-	if (eq->pidx >= eq->cap)
-		eq->pidx -= eq->cap;
+			cpl = (void *)(ulpsc + 1);
+			if (checkwrap &&
+			    (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx])
+				cpl = (void *)&eq->desc[0];
+			txq->txpkts0_pkts += txp->npkt;
+			txq->txpkts0_wrs++;
+		} else {
+			cpl = flitp;
+			txq->txpkts1_pkts += txp->npkt;
+			txq->txpkts1_wrs++;
+		}
 
-	txq->txpkts_pkts += txpkts->npkt;
-	txq->txpkts_wrs++;
-	txpkts->npkt = 0;	/* emptied */
-}
+		/* Checksum offload */
+		ctrl1 = 0;
+		if (needs_l3_csum(m) == 0)
+			ctrl1 |= F_TXPKT_IPCSUM_DIS;
+		if (needs_l4_csum(m) == 0)
+			ctrl1 |= F_TXPKT_L4CSUM_DIS;
+		if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
+		    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
+			txq->txcsum++;	/* some hardware assistance provided */
 
-static inline void
-write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
-    struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
-{
-	struct ulp_txpkt *ulpmc;
-	struct ulptx_idata *ulpsc;
-	struct cpl_tx_pkt_core *cpl;
-	struct sge_eq *eq = &txq->eq;
-	uintptr_t flitp, start, end;
-	uint64_t ctrl;
-	caddr_t dst;
+		/* VLAN tag insertion */
+		if (needs_vlan_insertion(m)) {
+			ctrl1 |= F_TXPKT_VLAN_VLD |
+			    V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+			txq->vlan_insertion++;
+		}
 
-	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));
+		/* CPL header */
+		cpl->ctrl0 = txq->cpl_ctrl0;
+		cpl->pack = 0;
+		cpl->len = htobe16(m->m_pkthdr.len);
+		cpl->ctrl1 = htobe64(ctrl1);
 
-	start = (uintptr_t)eq->desc;
-	end = (uintptr_t)eq->spg;
+		flitp = cpl + 1;
+		if (checkwrap &&
+		    (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx])
+			flitp = (void *)&eq->desc[0];
 
-	/* Checksum offload */
-	ctrl = 0;
-	if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)))
-		ctrl |= F_TXPKT_IPCSUM_DIS;
-	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 |
-	    CSUM_TCP_IPV6 | CSUM_TSO)))
-		ctrl |= F_TXPKT_L4CSUM_DIS;
-	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP |
-	    CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO))
-		txq->txcsum++;	/* some hardware assistance provided */
+		write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap);
 
-	/* VLAN tag insertion */
-	if (m->m_flags & M_VLANTAG) {
-		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
-		txq->vlan_insertion++;
 	}
 
-	/*
-	 * The previous packet's SGL must have ended at a 16 byte boundary (this
-	 * is required by the firmware/hardware).  It follows that flitp cannot
-	 * wrap around between the ULPTX master command and ULPTX subcommand (8
-	 * bytes each), and that it can not wrap around in the middle of the
-	 * cpl_tx_pkt_core either.
-	 */
-	flitp = (uintptr_t)txpkts->flitp;
-	KASSERT((flitp & 0xf) == 0,
-	    ("%s: last SGL did not end at 16 byte boundary: %p",
-	    __func__, txpkts->flitp));
+	txsd = &txq->sdesc[eq->pidx];
+	txsd->m = m0;
+	txsd->desc_used = ndesc;
 
-	/* ULP master command */
-	ulpmc = (void *)flitp;
-	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
-	    V_ULP_TXPKT_FID(eq->iqid));
-	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
-	    sizeof(*cpl) + 8 * sgl->nflits, 16));
-
-	/* ULP subcommand */
-	ulpsc = (void *)(ulpmc + 1);
-	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
-	    F_ULP_TX_SC_MORE);
-	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));
-
-	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
-	if (flitp == end)
-		flitp = start;
-
-	/* CPL_TX_PKT */
-	cpl = (void *)flitp;
-	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
-	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
-	cpl->pack = 0;
-	cpl->len = htobe16(m->m_pkthdr.len);
-	cpl->ctrl1 = htobe64(ctrl);
-
-	flitp += sizeof(*cpl);
-	if (flitp == end)
-		flitp = start;
-
-	/* SGL for this frame */
-	dst = (caddr_t)flitp;
-	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
-	txpkts->flitp = (void *)dst;
-
-	KASSERT(((uintptr_t)dst & 0xf) == 0,
-	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
+	return (ndesc);
 }
 
 /*
  * If the SGL ends on an address that is not 16 byte aligned, this function will
- * add a 0 filled flit at the end.  It returns 1 in that case.
+ * add a 0 filled flit at the end.
  */
-static int
-write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
+static void
+write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
 {
-	__be64 *flitp, *end;
+	struct sge_eq *eq = &txq->eq;
+	struct sglist *gl = txq->gl;
+	struct sglist_seg *seg;
+	__be64 *flitp, *wrap;
 	struct ulptx_sgl *usgl;
-	bus_dma_segment_t *seg;
-	int i, padded;
+	int i, nflits, nsegs;
 
-	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
-	    ("%s: bad SGL - nsegs=%d, nflits=%d",
-	    __func__, sgl->nsegs, sgl->nflits));
-
 	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
 	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
+	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
+	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
 
+	get_pkt_gl(m, gl);
+	nsegs = gl->sg_nseg;
+	MPASS(nsegs > 0);
+
+	nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
 	flitp = (__be64 *)(*to);
-	end = flitp + sgl->nflits;
-	seg = &sgl->seg[0];
+	wrap = (__be64 *)(&eq->desc[eq->sidx]);
+	seg = &gl->sg_segs[0];
 	usgl = (void *)flitp;
 
 	/*
@@ -3248,21 +4609,22 @@
 	 */
 
 	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
-	    V_ULPTX_NSGE(sgl->nsegs));
-	usgl->len0 = htobe32(seg->ds_len);
-	usgl->addr0 = htobe64(seg->ds_addr);
+	    V_ULPTX_NSGE(nsegs));
+	usgl->len0 = htobe32(seg->ss_len);
+	usgl->addr0 = htobe64(seg->ss_paddr);
 	seg++;
 
-	if ((uintptr_t)end <= (uintptr_t)eq->spg) {
+	if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) {
 
 		/* Won't wrap around at all */
 
-		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
-			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
-			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
+		for (i = 0; i < nsegs - 1; i++, seg++) {
+			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
+			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
 		}
 		if (i & 1)
 			usgl->sge[i / 2].len[1] = htobe32(0);
+		flitp += nflits;
 	} else {
 
 		/* Will wrap somewhere in the rest of the SGL */
@@ -3269,37 +4631,38 @@
 
 		/* 2 flits already written, write the rest flit by flit */
 		flitp = (void *)(usgl + 1);
-		for (i = 0; i < sgl->nflits - 2; i++) {
-			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
+		for (i = 0; i < nflits - 2; i++) {
+			if (flitp == wrap)
 				flitp = (void *)eq->desc;
-			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
+			*flitp++ = get_flit(seg, nsegs - 1, i);
 		}
-		end = flitp;
 	}
 
-	if ((uintptr_t)end & 0xf) {
-		*(uint64_t *)end = 0;
-		end++;
-		padded = 1;
-	} else
-		padded = 0;
+	if (nflits & 1) {
+		MPASS(((uintptr_t)flitp) & 0xf);
+		*flitp++ = 0;
+	}
 
-	if ((uintptr_t)end == (uintptr_t)eq->spg)
+	MPASS((((uintptr_t)flitp) & 0xf) == 0);
+	if (__predict_false(flitp == wrap))
 		*to = (void *)eq->desc;
 	else
-		*to = (void *)end;
-
-	return (padded);
+		*to = (void *)flitp;
 }
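
write_gl_to_txd computes nflits with the same pairing rule as the len16 helpers, and the firmware requires every SGL to end 16-byte aligned, hence the zero pad flit when the count comes out odd. The arithmetic, checked in isolation:

#include <assert.h>
#include <stdio.h>

/* Flit count for an SGL: 2 flits of ulptx_sgl header (which carries
 * segment 0), then 3 flits per remaining pair of segments and 2 flits
 * for an odd leftover segment. */
static unsigned
sgl_nflits(unsigned nsegs)
{
	unsigned n = nsegs - 1;

	return (3 * n / 2 + (n & 1) + 2);
}

int
main(void)
{
	assert(sgl_nflits(1) == 2);	/* header only */
	assert(sgl_nflits(2) == 4);	/* header + len/addr for segment 1 */
	assert(sgl_nflits(3) == 5);	/* header + one packed pair */
	/* 5 is odd, so write_gl_to_txd appends one zero flit to realign. */
	printf("ok\n");
	return (0);
}
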
 
 static inline void
 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
 {
-	if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)eq->spg)) {
+
+	MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
+	MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
+
+	if (__predict_true((uintptr_t)(*to) + len <=
+	    (uintptr_t)&eq->desc[eq->sidx])) {
 		bcopy(from, *to, len);
 		(*to) += len;
 	} else {
-		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);
+		int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
 
 		bcopy(from, *to, portion);
 		from += portion;
@@ -3310,51 +4673,109 @@
 }
 
 static inline void
-ring_eq_db(struct adapter *sc, struct sge_eq *eq)
+ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
 {
+	u_int db;
+
+	MPASS(n > 0);
+
+	db = eq->doorbells;
+	if (n > 1)
+		clrbit(&db, DOORBELL_WCWR);
 	wmb();
-	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
-	    V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
-	eq->pending = 0;
+
+	switch (ffs(db) - 1) {
+	case DOORBELL_UDB:
+		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
+		break;
+
+	case DOORBELL_WCWR: {
+		volatile uint64_t *dst, *src;
+		int i;
+
+		/*
+		 * Queues whose 128B doorbell segment fits in the page do not
+		 * use relative qid (udb_qid is always 0).  Only queues with
+		 * doorbell segments can do WCWR.
+		 */
+		KASSERT(eq->udb_qid == 0 && n == 1,
+		    ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p",
+		    __func__, eq->doorbells, n, eq->dbidx, eq));
+
+		dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET -
+		    UDBS_DB_OFFSET);
+		i = eq->dbidx;
+		src = (void *)&eq->desc[i];
+		while (src != (void *)&eq->desc[i + 1])
+			*dst++ = *src++;
+		wmb();
+		break;
+	}
+
+	case DOORBELL_UDBWC:
+		*eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n));
+		wmb();
+		break;
+
+	case DOORBELL_KDB:
+		t4_write_reg(sc, sc->sge_kdoorbell_reg,
+		    V_QID(eq->cntxt_id) | V_PIDX(n));
+		break;
+	}
+
+	IDXINCR(eq->dbidx, n, eq->sidx);
 }
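
ring_eq_db chooses among the doorbell mechanisms with ffs(): the lowest set bit in eq->doorbells wins, and the write-combined WCWR variant (which pushes an entire 64-byte descriptor through the user doorbell window) is masked off whenever more than one descriptor is pending, since it can only post a single descriptor. A sketch of the selection; the DOORBELL_* bit ordering here is an assumption based on the switch above:

#include <stdio.h>
#include <strings.h>

enum { DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB };

static int
pick_doorbell(int supported, unsigned n)
{
	if (n > 1)
		supported &= ~(1 << DOORBELL_WCWR);	/* WCWR: 1 desc only */
	return (ffs(supported) - 1);	/* lowest set bit has priority */
}

int
main(void)
{
	int db = 1 << DOORBELL_WCWR | 1 << DOORBELL_KDB;

	printf("n=1 -> %d (WCWR)\n", pick_doorbell(db, 1));
	printf("n=4 -> %d (KDB)\n", pick_doorbell(db, 4));
	return (0);
}
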
 
-static inline int
-reclaimable(struct sge_eq *eq)
+static inline u_int
+reclaimable_tx_desc(struct sge_eq *eq)
 {
-	unsigned int cidx;
+	uint16_t hw_cidx;
 
-	cidx = eq->spg->cidx;	/* stable snapshot */
-	cidx = be16toh(cidx);
+	hw_cidx = read_hw_cidx(eq);
+	return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx));
+}
 
-	if (cidx >= eq->cidx)
-		return (cidx - eq->cidx);
+static inline u_int
+total_available_tx_desc(struct sge_eq *eq)
+{
+	uint16_t hw_cidx, pidx;
+
+	hw_cidx = read_hw_cidx(eq);
+	pidx = eq->pidx;
+
+	if (pidx == hw_cidx)
+		return (eq->sidx - 1);
 	else
-		return (cidx + eq->cap - eq->cidx);
+		return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1);
 }
 
+static inline uint16_t
+read_hw_cidx(struct sge_eq *eq)
+{
+	struct sge_qstat *spg = (void *)&eq->desc[eq->sidx];
+	uint16_t cidx = spg->cidx;	/* stable snapshot */
+
+	return (be16toh(cidx));
+}
+
 /*
- * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
- * many as possible but stop when there are around "n" mbufs to free.
- *
- * The actual number reclaimed is provided as the return value.
+ * Reclaim 'n' descriptors approximately.
  */
-static int
-reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n)
+static u_int
+reclaim_tx_descs(struct sge_txq *txq, u_int n)
 {
 	struct tx_sdesc *txsd;
-	struct tx_maps *txmaps;
-	struct tx_map *txm;
-	unsigned int reclaimed, maps;
 	struct sge_eq *eq = &txq->eq;
+	u_int can_reclaim, reclaimed;
 
 	TXQ_LOCK_ASSERT_OWNED(txq);
+	MPASS(n > 0);
 
-	if (can_reclaim == 0)
-		can_reclaim = reclaimable(eq);
-
-	maps = reclaimed = 0;
-	while (can_reclaim && maps < n) {
+	reclaimed = 0;
+	can_reclaim = reclaimable_tx_desc(eq);
+	while (can_reclaim && reclaimed < n) {
 		int ndesc;
+		struct mbuf *m, *nextpkt;
 
 		txsd = &txq->sdesc[eq->cidx];
 		ndesc = txsd->desc_used;
@@ -3364,73 +4785,37 @@
 		    ("%s: unexpected number of credits: %d, %d",
 		    __func__, can_reclaim, ndesc));
 
-		maps += txsd->credits;
-
+		for (m = txsd->m; m != NULL; m = nextpkt) {
+			nextpkt = m->m_nextpkt;
+			m->m_nextpkt = NULL;
+			m_freem(m);
+		}
 		reclaimed += ndesc;
 		can_reclaim -= ndesc;
-
-		eq->cidx += ndesc;
-		if (__predict_false(eq->cidx >= eq->cap))
-			eq->cidx -= eq->cap;
+		IDXINCR(eq->cidx, ndesc, eq->sidx);
 	}
 
-	txmaps = &txq->txmaps;
-	txm = &txmaps->maps[txmaps->map_cidx];
-	if (maps)
-		prefetch(txm->m);
-
-	eq->avail += reclaimed;
-	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
-	    ("%s: too many descriptors available", __func__));
-
-	txmaps->map_avail += maps;
-	KASSERT(txmaps->map_avail <= txmaps->map_total,
-	    ("%s: too many maps available", __func__));
-
-	while (maps--) {
-		struct tx_map *next;
-
-		next = txm + 1;
-		if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total))
-			next = txmaps->maps;
-		prefetch(next->m);
-
-		bus_dmamap_unload(txq->tx_tag, txm->map);
-		m_freem(txm->m);
-		txm->m = NULL;
-
-		txm = next;
-		if (__predict_false(++txmaps->map_cidx == txmaps->map_total))
-			txmaps->map_cidx = 0;
-	}
-
 	return (reclaimed);
 }
 
 static void
-write_eqflush_wr(struct sge_eq *eq)
+tx_reclaim(void *arg, int n)
 {
-	struct fw_eq_flush_wr *wr;
+	struct sge_txq *txq = arg;
+	struct sge_eq *eq = &txq->eq;
 
-	EQ_LOCK_ASSERT_OWNED(eq);
-	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));
-	KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__));
-
-	wr = (void *)&eq->desc[eq->pidx];
-	bzero(wr, sizeof(*wr));
-	wr->opcode = FW_EQ_FLUSH_WR;
-	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
-	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);
-
-	eq->flags |= (EQ_CRFLUSHED | EQ_STALLED);
-	eq->pending++;
-	eq->avail--;
-	if (++eq->pidx == eq->cap)
-		eq->pidx = 0; 
+	do {
+		if (TXQ_TRYLOCK(txq) == 0)
+			break;
+		n = reclaim_tx_descs(txq, 32);
+		if (eq->cidx == eq->pidx)
+			eq->equeqidx = eq->pidx;
+		TXQ_UNLOCK(txq);
+	} while (n > 0);
 }
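
tx_reclaim runs from a taskqueue and deliberately uses TXQ_TRYLOCK rather than TXQ_LOCK so it never stalls a concurrent transmitter; it simply loops as long as descriptors keep coming back. The pattern in miniature, with a pthread mutex standing in for the kernel lock:

#include <pthread.h>
#include <stdio.h>

/* Opportunistic reclaim: back off if the tx path owns the queue, otherwise
 * reclaim a bounded batch and repeat while progress is being made (sketch). */
static void
reclaim_loop(pthread_mutex_t *qlock, unsigned (*reclaim_batch)(void))
{
	unsigned n;

	do {
		if (pthread_mutex_trylock(qlock) != 0)
			break;		/* tx path holds the lock; retry later */
		n = reclaim_batch();	/* cf. reclaim_tx_descs(txq, 32) */
		pthread_mutex_unlock(qlock);
	} while (n > 0);
}

static unsigned budget = 5;	/* fake work for the demo */

static unsigned
fake_batch(void)
{
	return (budget > 0 ? budget-- : 0);
}

int
main(void)
{
	pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;

	reclaim_loop(&lk, fake_batch);
	printf("remaining = %u\n", budget);
	return (0);
}
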
 
 static __be64
-get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
+get_flit(struct sglist_seg *segs, int nsegs, int idx)
 {
 	int i = (idx / 3) * 2;
 
@@ -3438,16 +4823,16 @@
 	case 0: {
 		__be64 rc;
 
-		rc = htobe32(sgl[i].ds_len);
+		rc = htobe32(segs[i].ss_len);
 		if (i + 1 < nsegs)
-			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;
+			rc |= (uint64_t)htobe32(segs[i + 1].ss_len) << 32;
 
 		return (rc);
 	}
 	case 1:
-		return htobe64(sgl[i].ds_addr);
+		return (htobe64(segs[i].ss_paddr));
 	case 2:
-		return htobe64(sgl[i + 1].ds_addr);
+		return (htobe64(segs[i + 1].ss_paddr));
 	}
 
 	return (0);
@@ -3454,19 +4839,181 @@
 }
 
 static void
-set_fl_tag_idx(struct sge_fl *fl, int bufsize)
+find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp)
 {
-	int i;
+	int8_t zidx, hwidx, idx;
+	uint16_t region1, region3;
+	int spare, spare_needed, n;
+	struct sw_zone_info *swz;
+	struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0];
 
-	for (i = 0; i < FL_BUF_SIZES - 1; i++) {
-		if (FL_BUF_SIZE(i) >= bufsize)
+	/*
+	 * Buffer Packing: Look for a PAGE_SIZE or larger zone which has a bufsize
+	 * large enough for the max payload and cluster metadata.  Otherwise
+	 * settle for the largest bufsize that leaves enough room in the cluster
+	 * for metadata.
+	 *
+	 * Without buffer packing: Look for the smallest zone which has a
+	 * bufsize large enough for the max payload.  Settle for the largest
+	 * bufsize available if there's nothing big enough for max payload.
+	 */
+	spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0;
+	swz = &sc->sge.sw_zone_info[0];
+	hwidx = -1;
+	for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) {
+		if (swz->size > largest_rx_cluster) {
+			if (__predict_true(hwidx != -1))
+				break;
+
+			/*
+			 * This is a misconfiguration.  largest_rx_cluster is
+			 * preventing us from finding a refill source.  See
+			 * dev.t5nex.<n>.buffer_sizes to figure out why.
+			 */
+			device_printf(sc->dev, "largest_rx_cluster=%u leaves no"
+			    " refill source for fl %p (dma %u).  Ignored.\n",
+			    largest_rx_cluster, fl, maxp);
+		}
+		for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) {
+			hwb = &hwb_list[idx];
+			spare = swz->size - hwb->size;
+			if (spare < spare_needed)
+				continue;
+
+			hwidx = idx;		/* best option so far */
+			if (hwb->size >= maxp) {
+
+				if ((fl->flags & FL_BUF_PACKING) == 0)
+					goto done; /* stop looking (not packing) */
+
+				if (swz->size >= safest_rx_cluster)
+					goto done; /* stop looking (packing) */
+			}
+			break;		/* keep looking, next zone */
+		}
+	}
+done:
+	/* A usable hwidx has been located. */
+	MPASS(hwidx != -1);
+	hwb = &hwb_list[hwidx];
+	zidx = hwb->zidx;
+	swz = &sc->sge.sw_zone_info[zidx];
+	region1 = 0;
+	region3 = swz->size - hwb->size;
+
+	/*
+	 * Stay within this zone and see if there is a better match when mbuf
+	 * inlining is allowed.  Remember that the hwidx's are sorted in
+	 * decreasing order of size (so in increasing order of spare area).
+	 */
+	for (idx = hwidx; idx != -1; idx = hwb->next) {
+		hwb = &hwb_list[idx];
+		spare = swz->size - hwb->size;
+
+		if (allow_mbufs_in_cluster == 0 || hwb->size < maxp)
 			break;
+
+		/*
+		 * Do not inline mbufs if doing so would violate the pad/pack
+		 * boundary alignment requirement.
+		 */
+		if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0)
+			continue;
+		if (fl->flags & FL_BUF_PACKING &&
+		    (MSIZE % sc->params.sge.pack_boundary) != 0)
+			continue;
+
+		if (spare < CL_METADATA_SIZE + MSIZE)
+			continue;
+		n = (spare - CL_METADATA_SIZE) / MSIZE;
+		if (n > howmany(hwb->size, maxp))
+			break;
+
+		hwidx = idx;
+		if (fl->flags & FL_BUF_PACKING) {
+			region1 = n * MSIZE;
+			region3 = spare - region1;
+		} else {
+			region1 = MSIZE;
+			region3 = spare - region1;
+			break;
+		}
 	}
 
-	fl->tag_idx = i;
+	KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES,
+	    ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp));
+	KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES,
+	    ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp));
+	KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 ==
+	    sc->sge.sw_zone_info[zidx].size,
+	    ("%s: bad buffer layout for fl %p, maxp %d. "
+		"cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
+		sc->sge.sw_zone_info[zidx].size, region1,
+		sc->sge.hw_buf_info[hwidx].size, region3));
+	if (fl->flags & FL_BUF_PACKING || region1 > 0) {
+		KASSERT(region3 >= CL_METADATA_SIZE,
+		    ("%s: no room for metadata.  fl %p, maxp %d; "
+		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
+		    sc->sge.sw_zone_info[zidx].size, region1,
+		    sc->sge.hw_buf_info[hwidx].size, region3));
+		KASSERT(region1 % MSIZE == 0,
+		    ("%s: bad mbuf region for fl %p, maxp %d. "
+		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
+		    sc->sge.sw_zone_info[zidx].size, region1,
+		    sc->sge.hw_buf_info[hwidx].size, region3));
+	}
+
+	fl->cll_def.zidx = zidx;
+	fl->cll_def.hwidx = hwidx;
+	fl->cll_def.region1 = region1;
+	fl->cll_def.region3 = region3;
 }
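
find_best_refill_source carves each cluster into up to three regions: region1 for inlined mbufs at the front, the buffer the hardware DMAs into, and a spare region3 at the tail that must still fit the cluster metadata. A worked example of the invariant in the KASSERTs above; all sizes here are hypothetical stand-ins, not the driver's actual zone or buffer sizes:

#include <assert.h>
#include <stdio.h>

#define MSIZE			256	/* mbuf size (assumed) */
#define CL_METADATA_SIZE	64	/* per-cluster metadata (assumed) */

int
main(void)
{
	unsigned zone = 4096, payload = 3584;		/* hypothetical */
	unsigned spare = zone - payload;			/* 512 */
	unsigned n = (spare - CL_METADATA_SIZE) / MSIZE;	/* 1 mbuf fits */
	unsigned region1 = n * MSIZE;				/* 256 */
	unsigned region3 = spare - region1;			/* 256 */

	/* The layout must cover the whole cluster... */
	assert(region1 + payload + region3 == zone);
	/* ...and region3 must still hold the metadata. */
	assert(region3 >= CL_METADATA_SIZE);
	printf("r1=%u payload=%u r3=%u\n", region1, payload, region3);
	return (0);
}
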
 
 static void
+find_safe_refill_source(struct adapter *sc, struct sge_fl *fl)
+{
+	struct sge *s = &sc->sge;
+	struct hw_buf_info *hwb;
+	struct sw_zone_info *swz;
+	int spare;
+	int8_t hwidx;
+
+	if (fl->flags & FL_BUF_PACKING)
+		hwidx = s->safe_hwidx2;	/* with room for metadata */
+	else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) {
+		hwidx = s->safe_hwidx2;
+		hwb = &s->hw_buf_info[hwidx];
+		swz = &s->sw_zone_info[hwb->zidx];
+		spare = swz->size - hwb->size;
+
+		/* no good if there isn't room for an mbuf as well */
+		if (spare < CL_METADATA_SIZE + MSIZE)
+			hwidx = s->safe_hwidx1;
+	} else
+		hwidx = s->safe_hwidx1;
+
+	if (hwidx == -1) {
+		/* No fallback source */
+		fl->cll_alt.hwidx = -1;
+		fl->cll_alt.zidx = -1;
+
+		return;
+	}
+
+	hwb = &s->hw_buf_info[hwidx];
+	swz = &s->sw_zone_info[hwb->zidx];
+	spare = swz->size - hwb->size;
+	fl->cll_alt.hwidx = hwidx;
+	fl->cll_alt.zidx = hwb->zidx;
+	if (allow_mbufs_in_cluster &&
+	    (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0))
+		fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE;
+	else
+		fl->cll_alt.region1 = 0;
+	fl->cll_alt.region3 = spare - fl->cll_alt.region1;
+}
+
+static void
 add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
 {
 	mtx_lock(&sc->sfl_lock);
@@ -3480,6 +5027,27 @@
 	mtx_unlock(&sc->sfl_lock);
 }
 
+static void
+handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
+{
+	struct sge_wrq *wrq = (void *)eq;
+
+	atomic_readandclear_int(&eq->equiq);
+	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
+}
+
+static void
+handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
+{
+	struct sge_txq *txq = (void *)eq;
+
+	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);
+
+	atomic_readandclear_int(&eq->equiq);
+	mp_ring_check_drainage(txq->r, 0);
+	taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
+}
+
 static int
 handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
     struct mbuf *m)
@@ -3489,23 +5057,16 @@
 	struct adapter *sc = iq->adapter;
 	struct sge *s = &sc->sge;
 	struct sge_eq *eq;
+	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
+		&handle_wrq_egr_update, &handle_eth_egr_update,
+		&handle_wrq_egr_update};
 
 	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
 	    rss->opcode));
 
-	eq = s->eqmap[qid - s->eq_start];
-	EQ_LOCK(eq);
-	KASSERT(eq->flags & EQ_CRFLUSHED,
-	    ("%s: unsolicited egress update", __func__));
-	eq->flags &= ~EQ_CRFLUSHED;
-	eq->egr_update++;
+	eq = s->eqmap[qid - s->eq_start - s->eq_base];
+	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);
 
-	if (__predict_false(eq->flags & EQ_DOOMED))
-		wakeup_one(eq);
-	else if (eq->flags & EQ_STALLED && can_resume_tx(eq))
-		taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task);
-	EQ_UNLOCK(eq);
-
 	return (0);
 }
 
@@ -3526,13 +5087,78 @@
 		const struct rss_header *rss2;
 
 		rss2 = (const struct rss_header *)&cpl->data[0];
-		return (sc->cpl_handler[rss2->opcode](iq, rss2, m));
+		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
 	}
 
-	return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0]));
+	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
 }
 
+/**
+ *	t4_handle_wrerr_rpl - process a FW work request error message
+ *	@adap: the adapter
+ *	@rpl: start of the FW message
+ */
 static int
+t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl)
+{
+	u8 opcode = *(const u8 *)rpl;
+	const struct fw_error_cmd *e = (const void *)rpl;
+	unsigned int i;
+
+	if (opcode != FW_ERROR_CMD) {
+		log(LOG_ERR,
+		    "%s: Received WRERR_RPL message with opcode %#x\n",
+		    device_get_nameunit(adap->dev), opcode);
+		return (EINVAL);
+	}
+	log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev),
+	    G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" :
+	    "non-fatal");
+	switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) {
+	case FW_ERROR_TYPE_EXCEPTION:
+		log(LOG_ERR, "exception info:\n");
+		for (i = 0; i < nitems(e->u.exception.info); i++)
+			log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ",
+			    be32toh(e->u.exception.info[i]));
+		log(LOG_ERR, "\n");
+		break;
+	case FW_ERROR_TYPE_HWMODULE:
+		log(LOG_ERR, "HW module regaddr %08x regval %08x\n",
+		    be32toh(e->u.hwmodule.regaddr),
+		    be32toh(e->u.hwmodule.regval));
+		break;
+	case FW_ERROR_TYPE_WR:
+		log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n",
+		    be16toh(e->u.wr.cidx),
+		    G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)),
+		    G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)),
+		    be32toh(e->u.wr.eqid));
+		for (i = 0; i < nitems(e->u.wr.wrhdr); i++)
+			log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ",
+			    e->u.wr.wrhdr[i]);
+		log(LOG_ERR, "\n");
+		break;
+	case FW_ERROR_TYPE_ACL:
+		log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s",
+		    be16toh(e->u.acl.cidx),
+		    G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)),
+		    G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)),
+		    be32toh(e->u.acl.eqid),
+		    G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? "vlanid" :
+		    "MAC");
+		for (i = 0; i < nitems(e->u.acl.val); i++)
+			log(LOG_ERR, " %02x", e->u.acl.val[i]);
+		log(LOG_ERR, "\n");
+		break;
+	default:
+		log(LOG_ERR, "type %#x\n",
+		    G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type)));
+		return (EINVAL);
+	}
+	return (0);
+}
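/*
 * The S_/M_/V_/G_/F_ accessors used throughout this file
 * (G_FW_ERROR_CMD_TYPE, V_FW_PARAMS_MNEM, F_TRCEN, ...) all follow the
 * cxgbe header convention sketched here with a made-up FOO field: S_
 * is the bit offset, M_ the right-justified mask, V_ places a value
 * into a word, G_ extracts it back out, and single-bit fields get an
 * F_ constant as well.
 */
#define S_FOO		16			/* bit offset */
#define M_FOO		0xffU			/* width mask at bit 0 */
#define V_FOO(x)	((x) << S_FOO)		/* insert */
#define G_FOO(x)	(((x) >> S_FOO) & M_FOO) /* extract */

#define S_BAR		3			/* a single-bit field */
#define V_BAR(x)	((x) << S_BAR)
#define F_BAR		V_BAR(1U)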
+
+static int
 sysctl_uint16(SYSCTL_HANDLER_ARGS)
 {
 	uint16_t *id = arg1;
@@ -3540,3 +5166,114 @@
 
 	return sysctl_handle_int(oidp, &i, 0, req);
 }
+
+static int
+sysctl_bufsizes(SYSCTL_HANDLER_ARGS)
+{
+	struct sge *s = arg1;
+	struct hw_buf_info *hwb = &s->hw_buf_info[0];
+	struct sw_zone_info *swz = &s->sw_zone_info[0];
+	int i, rc;
+	struct sbuf sb;
+	char c;
+
+	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
+	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
+		if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster)
+			c = '*';
+		else
+			c = '\0';
+
+		sbuf_printf(&sb, "%u%c ", hwb->size, c);
+	}
+	sbuf_trim(&sb);
+	sbuf_finish(&sb);
+	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
+	sbuf_delete(&sb);
+	return (rc);
+}
+
+static int
+sysctl_tc(SYSCTL_HANDLER_ARGS)
+{
+	struct vi_info *vi = arg1;
+	struct port_info *pi;
+	struct adapter *sc;
+	struct sge_txq *txq;
+	struct tx_cl_rl_params *tc;
+	int qidx = arg2, rc, tc_idx;
+	uint32_t fw_queue, fw_class;
+
+	MPASS(qidx >= 0 && qidx < vi->ntxq);
+	pi = vi->pi;
+	sc = pi->adapter;
+	txq = &sc->sge.txq[vi->first_txq + qidx];
+
+	tc_idx = txq->tc_idx;
+	rc = sysctl_handle_int(oidp, &tc_idx, 0, req);
+	if (rc != 0 || req->newptr == NULL)
+		return (rc);
+
+	if (sc->flags & IS_VF)
+		return (EPERM);
+
+	/* Note that -1 is legitimate input (it means unbind). */
+	if (tc_idx < -1 || tc_idx >= sc->chip_params->nsched_cls)
+		return (EINVAL);
+
+	mtx_lock(&sc->tc_lock);
+	if (tc_idx == txq->tc_idx) {
+		rc = 0;		/* No change, nothing to do. */
+		goto done;
+	}
+
+	fw_queue = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) |
+	    V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id);
+
+	if (tc_idx == -1)
+		fw_class = 0xffffffff;	/* Unbind. */
+	else {
+		/*
+		 * Bind to a different class.
+		 */
+		tc = &pi->sched_params->cl_rl[tc_idx];
+		if (tc->flags & TX_CLRL_ERROR) {
+			/* Previous attempt to set the cl-rl params failed. */
+			rc = EIO;
+			goto done;
+		} else {
+			/*
+			 * Ok to proceed.  Place a reference on the new class
+			 * while still holding on to the reference on the
+			 * previous class, if any.
+			 */
+			fw_class = tc_idx;
+			tc->refcount++;
+		}
+	}
+	mtx_unlock(&sc->tc_lock);
+
+	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4stc");
+	if (rc)
+		return (rc);
+	rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, &fw_class);
+	end_synchronized_op(sc, 0);
+
+	mtx_lock(&sc->tc_lock);
+	if (rc == 0) {
+		if (txq->tc_idx != -1) {
+			tc = &pi->sched_params->cl_rl[txq->tc_idx];
+			MPASS(tc->refcount > 0);
+			tc->refcount--;
+		}
+		txq->tc_idx = tc_idx;
+	} else if (tc_idx != -1) {
+		tc = &pi->sched_params->cl_rl[tc_idx];
+		MPASS(tc->refcount > 0);
+		tc->refcount--;
+	}
+done:
+	mtx_unlock(&sc->tc_lock);
+	return (rc);
+}
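/*
 * The reference handling in sysctl_tc() above follows a common
 * pattern; a minimal sketch with illustrative names (rebind() and
 * slow_op() are not driver functions): pin the new object while the
 * lock is held, drop the lock for the sleepable firmware call, then
 * reconcile the old and new references based on the outcome.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct obj {
	int refcount;
};

static int
rebind(struct mtx *lock, struct obj **cur, struct obj *new,
    int (*slow_op)(struct obj *))
{
	int rc;

	mtx_lock(lock);
	new->refcount++;		/* pin before dropping the lock */
	mtx_unlock(lock);

	rc = slow_op(new);		/* may sleep; no locks held */

	mtx_lock(lock);
	if (rc == 0) {
		if (*cur != NULL)
			(*cur)->refcount--;	/* release old binding */
		*cur = new;
	} else
		new->refcount--;		/* undo the pin */
	mtx_unlock(lock);
	return (rc);
}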

Added: trunk/sys/dev/cxgbe/t4_tracer.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_tracer.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/t4_tracer.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,520 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: Navdeep Parhar <np at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_tracer.c 309560 2016-12-05 20:43:25Z jhb $");
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/types.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sx.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_clone.h>
+#include <net/if_types.h>
+
+#include "common/common.h"
+#include "common/t4_msg.h"
+#include "common/t4_regs.h"
+#include "t4_ioctl.h"
+
+/*
+ * Locking notes
+ * =============
+ *
+ * An interface cloner is registered during mod_load and it can be used to
+ * create or destroy the tracing ifnet for an adapter at any time.  It is
+ * possible for the cloned interface to outlive the adapter (adapter disappears
+ * in t4_detach but the tracing ifnet may live till mod_unload when removal of
+ * the cloner finally destroys any remaining cloned interfaces).  When tracing
+ * filters are active, this ifnet is also receiving data.  There are potential
+ * bad races between ifnet create, ifnet destroy, ifnet rx, ifnet ioctl,
+ * cxgbe_detach/t4_detach, mod_unload.
+ *
+ * a) The driver selects an iq for tracing (sc->traceq) inside a synch op.  The
+ *    iq is destroyed inside a synch op too (and sc->traceq updated).
+ * b) The cloner looks for an adapter that matches the name of the ifnet it's
+ *    been asked to create, starts a synch op on that adapter, and proceeds only
+ *    if the adapter has a tracing iq.
+ * c) The cloned ifnet and the adapter are coupled to each other via
+ *    ifp->if_softc and sc->ifp.  These can be modified only with both the
+ *    global t4_trace_lock sx and the sc->ifp_lock mutex held.  Holding
+ *    either of these will prevent any change.
+ *
+ * The order in which all the locks involved should be acquired is:
+ * t4_list_lock
+ * adapter lock
+ * (begin synch op and let go of the above two)
+ * t4_trace_lock
+ * sc->ifp_lock
+ */
+
+static struct sx t4_trace_lock;
+static const char *t4_cloner_name = "tXnex";
+static struct if_clone *t4_cloner;
+
+/* tracer ifnet routines.  mostly no-ops. */
+static void tracer_init(void *);
+static int tracer_ioctl(struct ifnet *, unsigned long, caddr_t);
+static int tracer_transmit(struct ifnet *, struct mbuf *);
+static void tracer_qflush(struct ifnet *);
+static int tracer_media_change(struct ifnet *);
+static void tracer_media_status(struct ifnet *, struct ifmediareq *);
+
+/* match name (request/response) */
+struct match_rr {
+	const char *name;
+	int lock;	/* set to 1 to have the sc returned locked. */
+	struct adapter *sc;
+	int rc;
+};
+
+static void
+match_name(struct adapter *sc, void *arg)
+{
+	struct match_rr *mrr = arg;
+
+	if (strcmp(device_get_nameunit(sc->dev), mrr->name) != 0)
+		return;
+
+	KASSERT(mrr->sc == NULL, ("%s: multiple matches (%p, %p) for %s",
+	    __func__, mrr->sc, sc, mrr->name));
+
+	mrr->sc = sc;
+	if (mrr->lock)
+		mrr->rc = begin_synchronized_op(mrr->sc, NULL, 0, "t4clon");
+	else
+		mrr->rc = 0;
+}
+
+static int
+t4_cloner_match(struct if_clone *ifc, const char *name)
+{
+
+	if (strncmp(name, "t4nex", 5) != 0 &&
+	    strncmp(name, "t5nex", 5) != 0 &&
+	    strncmp(name, "t6nex", 5) != 0)
+		return (0);
+	if (name[5] < '0' || name[5] > '9')
+		return (0);
+	return (1);
+}
+
+static int
+t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
+{
+	struct match_rr mrr;
+	struct adapter *sc;
+	struct ifnet *ifp;
+	int rc, unit;
+	const uint8_t lla[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+	mrr.name = name;
+	mrr.lock = 1;
+	mrr.sc = NULL;
+	mrr.rc = ENOENT;
+	t4_iterate(match_name, &mrr);
+
+	if (mrr.rc != 0)
+		return (mrr.rc);
+	sc = mrr.sc;
+
+	KASSERT(sc != NULL, ("%s: name (%s) matched but softc is NULL",
+	    __func__, name));
+	ASSERT_SYNCHRONIZED_OP(sc);
+
+	sx_xlock(&t4_trace_lock);
+
+	if (sc->ifp != NULL) {
+		rc = EEXIST;
+		goto done;
+	}
+	if (sc->traceq < 0) {
+		rc = EAGAIN;
+		goto done;
+	}
+
+	unit = -1;
+	rc = ifc_alloc_unit(ifc, &unit);
+	if (rc != 0)
+		goto done;
+
+	ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		ifc_free_unit(ifc, unit);
+		rc = ENOMEM;
+		goto done;
+	}
+
+	/* Note that if_xname is not <if_dname><if_dunit>. */
+	strlcpy(ifp->if_xname, name, sizeof(ifp->if_xname));
+	ifp->if_dname = t4_cloner_name;
+	ifp->if_dunit = unit;
+	ifp->if_init = tracer_init;
+	ifp->if_flags = IFF_SIMPLEX | IFF_DRV_RUNNING;
+	ifp->if_ioctl = tracer_ioctl;
+	ifp->if_transmit = tracer_transmit;
+	ifp->if_qflush = tracer_qflush;
+	ifp->if_capabilities = IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
+	ifmedia_init(&sc->media, IFM_IMASK, tracer_media_change,
+	    tracer_media_status);
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE, 0, NULL);
+	ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | IFM_NONE);
+	ether_ifattach(ifp, lla);
+
+	mtx_lock(&sc->ifp_lock);
+	ifp->if_softc = sc;
+	sc->ifp = ifp;
+	mtx_unlock(&sc->ifp_lock);
+done:
+	sx_xunlock(&t4_trace_lock);
+	end_synchronized_op(sc, 0);
+	return (rc);
+}
+
+static int
+t4_cloner_destroy(struct if_clone *ifc, struct ifnet *ifp)
+{
+	struct adapter *sc;
+	int unit = ifp->if_dunit;
+
+	sx_xlock(&t4_trace_lock);
+	sc = ifp->if_softc;
+	if (sc != NULL) {
+		mtx_lock(&sc->ifp_lock);
+		sc->ifp = NULL;
+		ifp->if_softc = NULL;
+		mtx_unlock(&sc->ifp_lock);
+		ifmedia_removeall(&sc->media);
+	}
+	ether_ifdetach(ifp);
+	if_free(ifp);
+	ifc_free_unit(ifc, unit);
+	sx_xunlock(&t4_trace_lock);
+
+	return (0);
+}
+
+void
+t4_tracer_modload()
+{
+
+	sx_init(&t4_trace_lock, "T4/T5 tracer lock");
+	t4_cloner = if_clone_advanced(t4_cloner_name, 0, t4_cloner_match,
+	    t4_cloner_create, t4_cloner_destroy);
+}
+
+void
+t4_tracer_modunload()
+{
+
+	if (t4_cloner != NULL) {
+		/*
+		 * The module is being unloaded so the nexus drivers have
+		 * detached.  The tracing interfaces can not outlive the nexus
+		 * (ifp->if_softc is the nexus) and must have been destroyed
+		 * already.  XXX: but if_clone is opaque to us and we can't
+		 * assert LIST_EMPTY(&t4_cloner->ifc_iflist) at this time.
+		 */
+		if_clone_detach(t4_cloner);
+	}
+	sx_destroy(&t4_trace_lock);
+}
+
+void
+t4_tracer_port_detach(struct adapter *sc)
+{
+
+	sx_xlock(&t4_trace_lock);
+	if (sc->ifp != NULL) {
+		mtx_lock(&sc->ifp_lock);
+		sc->ifp->if_softc = NULL;
+		sc->ifp = NULL;
+		mtx_unlock(&sc->ifp_lock);
+	}
+	ifmedia_removeall(&sc->media);
+	sx_xunlock(&t4_trace_lock);
+}
+
+int
+t4_get_tracer(struct adapter *sc, struct t4_tracer *t)
+{
+	int rc, i, enabled;
+	struct trace_params tp;
+
+	if (t->idx >= NTRACE) {
+		t->idx = 0xff;
+		t->enabled = 0;
+		t->valid = 0;
+		return (0);
+	}
+
+	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4gett");
+	if (rc)
+		return (rc);
+
+	for (i = t->idx; i < NTRACE; i++) {
+		if (isset(&sc->tracer_valid, i)) {
+			t4_get_trace_filter(sc, &tp, i, &enabled);
+			t->idx = i;
+			t->enabled = enabled;
+			t->valid = 1;
+			memcpy(&t->tp.data[0], &tp.data[0], sizeof(t->tp.data));
+			memcpy(&t->tp.mask[0], &tp.mask[0], sizeof(t->tp.mask));
+			t->tp.snap_len = tp.snap_len;
+			t->tp.min_len = tp.min_len;
+			t->tp.skip_ofst = tp.skip_ofst;
+			t->tp.skip_len = tp.skip_len;
+			t->tp.invert = tp.invert;
+
+			/* convert channel to port iff 0 <= port < 8. */
+			if (tp.port < 4)
+				t->tp.port = sc->chan_map[tp.port];
+			else if (tp.port < 8)
+				t->tp.port = sc->chan_map[tp.port - 4] + 4;
+			else
+				t->tp.port = tp.port;
+
+			goto done;
+		}
+	}
+
+	t->idx = 0xff;
+	t->enabled = 0;
+	t->valid = 0;
+done:
+	end_synchronized_op(sc, LOCK_HELD);
+
+	return (rc);
+}
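/*
 * A sketch of the translation done above and mirrored in
 * t4_set_tracer() below: tracer "port" values 0-3 name one direction
 * and 4-7 the other, so the +4 offset is preserved across the lookup.
 * map[] stands in for the driver's chan_map[] / tx_chan tables.
 */
static int
translate_tracer_port(int v, const int map[4])
{
	if (v < 4)
		return (map[v]);		/* direct lookup */
	else if (v < 8)
		return (map[v - 4] + 4);	/* same lookup, keep +4 */
	return (v);				/* out of range: unchanged */
}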
+
+int
+t4_set_tracer(struct adapter *sc, struct t4_tracer *t)
+{
+	int rc;
+	struct trace_params tp, *tpp;
+
+	if (t->idx >= NTRACE)
+		return (EINVAL);
+
+	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4sett");
+	if (rc)
+		return (rc);
+
+	/*
+	 * If no tracing filter is specified this time then check if the filter
+	 * at the index is valid anyway because it was set previously.  If so
+	 * then this is a legitimate enable/disable operation.
+	 */
+	if (t->valid == 0) {
+		if (isset(&sc->tracer_valid, t->idx))
+			tpp = NULL;
+		else
+			rc = EINVAL;
+		goto done;
+	}
+
+	if (t->tp.port > 19 || t->tp.snap_len > 9600 ||
+	    t->tp.min_len > M_TFMINPKTSIZE || t->tp.skip_len > M_TFLENGTH ||
+	    t->tp.skip_ofst > M_TFOFFSET) {
+		rc = EINVAL;
+		goto done;
+	}
+
+	memcpy(&tp.data[0], &t->tp.data[0], sizeof(tp.data));
+	memcpy(&tp.mask[0], &t->tp.mask[0], sizeof(tp.mask));
+	tp.snap_len = t->tp.snap_len;
+	tp.min_len = t->tp.min_len;
+	tp.skip_ofst = t->tp.skip_ofst;
+	tp.skip_len = t->tp.skip_len;
+	tp.invert = !!t->tp.invert;
+
+	/* convert port to channel iff 0 <= port < 8. */
+	if (t->tp.port < 4) {
+		if (sc->port[t->tp.port] == NULL) {
+			rc = EINVAL;
+			goto done;
+		}
+		tp.port = sc->port[t->tp.port]->tx_chan;
+	} else if (t->tp.port < 8) {
+		if (sc->port[t->tp.port - 4] == NULL) {
+			rc = EINVAL;
+			goto done;
+		}
+		tp.port = sc->port[t->tp.port - 4]->tx_chan + 4;
+	}
+	tpp = &tp;
+done:
+	if (rc == 0) {
+		rc = -t4_set_trace_filter(sc, tpp, t->idx, t->enabled);
+		if (rc == 0) {
+			if (t->enabled) {
+				setbit(&sc->tracer_valid, t->idx);
+				if (sc->tracer_enabled == 0) {
+					t4_set_reg_field(sc, A_MPS_TRC_CFG,
+					    F_TRCEN, F_TRCEN);
+				}
+				setbit(&sc->tracer_enabled, t->idx);
+			} else {
+				clrbit(&sc->tracer_enabled, t->idx);
+				if (sc->tracer_enabled == 0) {
+					t4_set_reg_field(sc, A_MPS_TRC_CFG,
+					    F_TRCEN, 0);
+				}
+			}
+		}
+	}
+	end_synchronized_op(sc, LOCK_HELD);
+
+	return (rc);
+}
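/*
 * A condensed sketch of the enable bookkeeping at the end of
 * t4_set_tracer(): one bit per tracer, with the global hardware enable
 * asserted by the first user and deasserted by the last.
 * hw_set_global_enable() is a hypothetical stand-in for the
 * A_MPS_TRC_CFG F_TRCEN register write.
 */
static unsigned int tracers_on;		/* one bit per tracer index */

static void hw_set_global_enable(int on);

static void
tracer_set_enabled(int idx, int enable)
{
	if (enable) {
		if (tracers_on == 0)
			hw_set_global_enable(1);	/* first user */
		tracers_on |= 1u << idx;
	} else {
		tracers_on &= ~(1u << idx);
		if (tracers_on == 0)
			hw_set_global_enable(0);	/* last user gone */
	}
}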
+
+int
+t4_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+	struct adapter *sc = iq->adapter;
+	struct ifnet *ifp;
+
+	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
+	    rss->opcode));
+
+	mtx_lock(&sc->ifp_lock);
+	ifp = sc->ifp;
+	if (ifp != NULL) {
+		m_adj(m, sizeof(struct cpl_trace_pkt));
+		m->m_pkthdr.rcvif = ifp;
+		ETHER_BPF_MTAP(ifp, m);
+	}
+	mtx_unlock(&sc->ifp_lock);
+	m_freem(m);
+
+	return (0);
+}
+
+int
+t5_trace_pkt(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+	struct adapter *sc = iq->adapter;
+	struct ifnet *ifp;
+
+	KASSERT(m != NULL, ("%s: no payload with opcode %02x", __func__,
+	    rss->opcode));
+
+	mtx_lock(&sc->ifp_lock);
+	ifp = sc->ifp;
+	if (ifp != NULL) {
+		m_adj(m, sizeof(struct cpl_t5_trace_pkt));
+		m->m_pkthdr.rcvif = ifp;
+		ETHER_BPF_MTAP(ifp, m);
+	}
+	mtx_unlock(&sc->ifp_lock);
+	m_freem(m);
+
+	return (0);
+}
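/*
 * t4_trace_pkt() and t5_trace_pkt() above differ only in the size of
 * the CPL header they strip; a sketch of the shared pattern (hdr_len
 * is that chip-specific size, and the caller still frees the mbuf):
 */
static void
tap_traced_packet(struct ifnet *ifp, struct mbuf *m, int hdr_len)
{
	m_adj(m, hdr_len);		/* strip the trace encapsulation */
	m->m_pkthdr.rcvif = ifp;	/* attribute it to the tracer ifnet */
	ETHER_BPF_MTAP(ifp, m);		/* hand a copy to BPF listeners */
}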
+
+static void
+tracer_init(void *arg)
+{
+
+	return;
+}
+
+static int
+tracer_ioctl(struct ifnet *ifp, unsigned long cmd, caddr_t data)
+{
+	int rc = 0;
+	struct adapter *sc;
+	struct ifreq *ifr = (struct ifreq *)data;
+
+	switch (cmd) {
+	case SIOCSIFMTU:
+	case SIOCSIFFLAGS:
+	case SIOCADDMULTI:
+	case SIOCDELMULTI:
+	case SIOCSIFCAP:
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+	case SIOCGIFXMEDIA:
+		sx_xlock(&t4_trace_lock);
+		sc = ifp->if_softc;
+		if (sc == NULL)
+			rc = EIO;
+		else
+			rc = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
+		sx_xunlock(&t4_trace_lock);
+		break;
+	default:
+		rc = ether_ioctl(ifp, cmd, data);
+	}
+
+	return (rc);
+}
+
+static int
+tracer_transmit(struct ifnet *ifp, struct mbuf *m)
+{
+
+	m_freem(m);
+	return (0);
+}
+
+static void
+tracer_qflush(struct ifnet *ifp)
+{
+
+	return;
+}
+
+static int
+tracer_media_change(struct ifnet *ifp)
+{
+
+	return (EOPNOTSUPP);
+}
+
+static void
+tracer_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+
+	ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+
+	return;
+}


Property changes on: trunk/sys/dev/cxgbe/t4_tracer.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/cxgbe/t4_vf.c
===================================================================
--- trunk/sys/dev/cxgbe/t4_vf.c	                        (rev 0)
+++ trunk/sys/dev/cxgbe/t4_vf.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -0,0 +1,1016 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ * All rights reserved.
+ * Written by: John Baldwin <jhb at FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_vf.c 318851 2017-05-25 01:43:28Z np $");
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/counter.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/priv.h>
+#include <dev/pci/pcivar.h>
+#if defined(__i386__) || defined(__amd64__)
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#endif
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "t4_ioctl.h"
+#include "t4_mp_ring.h"
+
+/*
+ * Some notes:
+ *
+ * The Virtual Interfaces are connected to an internal switch on the chip
+ * which allows VIs attached to the same port to talk to each other even when
+ * the port link is down.  As a result, we might want to always report a
+ * VF's link as being "up".
+ *
+ * XXX: Add a TUNABLE and possible per-device sysctl for this?
+ */
+
+struct intrs_and_queues {
+	uint16_t intr_type;	/* MSI, or MSI-X */
+	uint16_t nirq;		/* Total # of vectors */
+	uint16_t intr_flags_10g;/* Interrupt flags for each 10G port */
+	uint16_t intr_flags_1g;	/* Interrupt flags for each 1G port */
+	uint16_t ntxq10g;	/* # of NIC txq's for each 10G port */
+	uint16_t nrxq10g;	/* # of NIC rxq's for each 10G port */
+	uint16_t ntxq1g;	/* # of NIC txq's for each 1G port */
+	uint16_t nrxq1g;	/* # of NIC rxq's for each 1G port */
+};
+
+struct {
+	uint16_t device;
+	char *desc;
+} t4vf_pciids[] = {
+	{0x4800, "Chelsio T440-dbg VF"},
+	{0x4801, "Chelsio T420-CR VF"},
+	{0x4802, "Chelsio T422-CR VF"},
+	{0x4803, "Chelsio T440-CR VF"},
+	{0x4804, "Chelsio T420-BCH VF"},
+	{0x4805, "Chelsio T440-BCH VF"},
+	{0x4806, "Chelsio T440-CH VF"},
+	{0x4807, "Chelsio T420-SO VF"},
+	{0x4808, "Chelsio T420-CX VF"},
+	{0x4809, "Chelsio T420-BT VF"},
+	{0x480a, "Chelsio T404-BT VF"},
+	{0x480e, "Chelsio T440-LP-CR VF"},
+}, t5vf_pciids[] = {
+	{0x5800, "Chelsio T580-dbg VF"},
+	{0x5801,  "Chelsio T520-CR VF"},	/* 2 x 10G */
+	{0x5802,  "Chelsio T522-CR VF"},	/* 2 x 10G, 2 X 1G */
+	{0x5803,  "Chelsio T540-CR VF"},	/* 4 x 10G */
+	{0x5807,  "Chelsio T520-SO VF"},	/* 2 x 10G, nomem */
+	{0x5809,  "Chelsio T520-BT VF"},	/* 2 x 10GBaseT */
+	{0x580a,  "Chelsio T504-BT VF"},	/* 4 x 1G */
+	{0x580d,  "Chelsio T580-CR VF"},	/* 2 x 40G */
+	{0x580e,  "Chelsio T540-LP-CR VF"},	/* 4 x 10G */
+	{0x5810,  "Chelsio T580-LP-CR VF"},	/* 2 x 40G */
+	{0x5811,  "Chelsio T520-LL-CR VF"},	/* 2 x 10G */
+	{0x5812,  "Chelsio T560-CR VF"},	/* 1 x 40G, 2 x 10G */
+	{0x5814,  "Chelsio T580-LP-SO-CR VF"},	/* 2 x 40G, nomem */
+	{0x5815,  "Chelsio T502-BT VF"},	/* 2 x 1G */
+#ifdef notyet
+	{0x5804,  "Chelsio T520-BCH VF"},
+	{0x5805,  "Chelsio T540-BCH VF"},
+	{0x5806,  "Chelsio T540-CH VF"},
+	{0x5808,  "Chelsio T520-CX VF"},
+	{0x580b,  "Chelsio B520-SR VF"},
+	{0x580c,  "Chelsio B504-BT VF"},
+	{0x580f,  "Chelsio Amsterdam VF"},
+	{0x5813,  "Chelsio T580-CHR VF"},
+#endif
+}, t6vf_pciids[] = {
+	{0x6800, "Chelsio T6-DBG-25 VF"},	/* 2 x 10/25G, debug */
+	{0x6801, "Chelsio T6225-CR VF"},	/* 2 x 10/25G */
+	{0x6802, "Chelsio T6225-SO-CR VF"},	/* 2 x 10/25G, nomem */
+	{0x6803, "Chelsio T6425-CR VF"},	/* 4 x 10/25G */
+	{0x6804, "Chelsio T6425-SO-CR VF"},	/* 4 x 10/25G, nomem */
+	{0x6805, "Chelsio T6225-OCP-SO VF"},	/* 2 x 10/25G, nomem */
+	{0x6806, "Chelsio T62100-OCP-SO VF"},	/* 2 x 40/50/100G, nomem */
+	{0x6807, "Chelsio T62100-LP-CR VF"},	/* 2 x 40/50/100G */
+	{0x6808, "Chelsio T62100-SO-CR VF"},	/* 2 x 40/50/100G, nomem */
+	{0x6809, "Chelsio T6210-BT VF"},	/* 2 x 10GBASE-T */
+	{0x680d, "Chelsio T62100-CR VF"},	/* 2 x 40/50/100G */
+	{0x6810, "Chelsio T6-DBG-100 VF"},	/* 2 x 40/50/100G, debug */
+	{0x6811, "Chelsio T6225-LL-CR VF"},	/* 2 x 10/25G */
+	{0x6814, "Chelsio T61100-OCP-SO VF"},	/* 1 x 40/50/100G, nomem */
+	{0x6815, "Chelsio T6201-BT VF"},	/* 2 x 1000BASE-T */
+
+	/* Custom */
+	{0x6880, "Chelsio T6225 80 VF"},
+	{0x6881, "Chelsio T62100 81 VF"},
+};
+
+static d_ioctl_t t4vf_ioctl;
+
+static struct cdevsw t4vf_cdevsw = {
+       .d_version = D_VERSION,
+       .d_ioctl = t4vf_ioctl,
+       .d_name = "t4vf",
+};
+
+static int
+t4vf_probe(device_t dev)
+{
+	uint16_t d;
+	size_t i;
+
+	d = pci_get_device(dev);
+	for (i = 0; i < nitems(t4vf_pciids); i++) {
+		if (d == t4vf_pciids[i].device) {
+			device_set_desc(dev, t4vf_pciids[i].desc);
+			return (BUS_PROBE_DEFAULT);
+		}
+	}
+	return (ENXIO);
+}
+
+static int
+t5vf_probe(device_t dev)
+{
+	uint16_t d;
+	size_t i;
+
+	d = pci_get_device(dev);
+	for (i = 0; i < nitems(t5vf_pciids); i++) {
+		if (d == t5vf_pciids[i].device) {
+			device_set_desc(dev, t5vf_pciids[i].desc);
+			return (BUS_PROBE_DEFAULT);
+		}
+	}
+	return (ENXIO);
+}
+
+static int
+t6vf_probe(device_t dev)
+{
+	uint16_t d;
+	size_t i;
+
+	d = pci_get_device(dev);
+	for (i = 0; i < nitems(t6vf_pciids); i++) {
+		if (d == t6vf_pciids[i].device) {
+			device_set_desc(dev, t6vf_pciids[i].desc);
+			return (BUS_PROBE_DEFAULT);
+		}
+	}
+	return (ENXIO);
+}
+
+#define FW_PARAM_DEV(param) \
+	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+#define FW_PARAM_PFVF(param) \
+	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
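/*
 * For reference, a query below such as FW_PARAM_DEV(FWREV) expands per
 * the macros above to
 *
 *	V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV)
 *
 * i.e. the mnemonic picks the parameter namespace (device-wide vs.
 * per-PF/VF) and PARAM_X picks the parameter within it.
 */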
+
+static int
+get_params__pre_init(struct adapter *sc)
+{
+	int rc;
+	uint32_t param[3], val[3];
+
+	param[0] = FW_PARAM_DEV(FWREV);
+	param[1] = FW_PARAM_DEV(TPREV);
+	param[2] = FW_PARAM_DEV(CCLK);
+	rc = -t4vf_query_params(sc, nitems(param), param, val);
+	if (rc != 0) {
+		device_printf(sc->dev,
+		    "failed to query parameters (pre_init): %d.\n", rc);
+		return (rc);
+	}
+
+	sc->params.fw_vers = val[0];
+	sc->params.tp_vers = val[1];
+	sc->params.vpd.cclk = val[2];
+
+	snprintf(sc->fw_version, sizeof(sc->fw_version), "%u.%u.%u.%u",
+	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
+	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
+	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
+	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
+
+	snprintf(sc->tp_version, sizeof(sc->tp_version), "%u.%u.%u.%u",
+	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
+	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
+	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
+	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
+
+	return (0);
+}
+
+static int
+get_params__post_init(struct adapter *sc)
+{
+	int rc;
+
+	rc = -t4vf_get_sge_params(sc);
+	if (rc != 0) {
+		device_printf(sc->dev,
+		    "unable to retrieve adapter SGE parameters: %d\n", rc);
+		return (rc);
+	}
+
+	rc = -t4vf_get_rss_glb_config(sc);
+	if (rc != 0) {
+		device_printf(sc->dev,
+		    "unable to retrieve adapter RSS parameters: %d\n", rc);
+		return (rc);
+	}
+	if (sc->params.rss.mode != FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		device_printf(sc->dev,
+		    "unable to operate with global RSS mode %d\n",
+		    sc->params.rss.mode);
+		return (EINVAL);
+	}
+
+	rc = t4_read_chip_settings(sc);
+	if (rc != 0)
+		return (rc);
+
+	/*
+	 * Grab our Virtual Interface resource allocation, extract the
+	 * features that we're interested in and do a bit of sanity testing on
+	 * what we discover.
+	 */
+	rc = -t4vf_get_vfres(sc);
+	if (rc != 0) {
+		device_printf(sc->dev,
+		    "unable to get virtual interface resources: %d\n", rc);
+		return (rc);
+	}
+
+	/*
+	 * Check for various parameter sanity issues.
+	 */
+	if (sc->params.vfres.pmask == 0) {
+		device_printf(sc->dev, "no port access configured/usable!\n");
+		return (EINVAL);
+	}
+	if (sc->params.vfres.nvi == 0) {
+		device_printf(sc->dev,
+		    "no virtual interfaces configured/usable!\n");
+		return (EINVAL);
+	}
+	sc->params.portvec = sc->params.vfres.pmask;
+
+	return (0);
+}
+
+static int
+set_params__post_init(struct adapter *sc)
+{
+	uint32_t param, val;
+
+	/* ask for encapsulated CPLs */
+	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+	val = 1;
+	(void)t4vf_set_params(sc, 1, &param, &val);
+
+	return (0);
+}
+
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+static int
+cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
+    struct intrs_and_queues *iaq)
+{
+	struct vf_resources *vfres;
+	int nrxq10g, nrxq1g, nrxq;
+	int ntxq10g, ntxq1g, ntxq;
+	int itype, iq_avail, navail, rc;
+
+	/*
+	 * Figure out the layout of queues across our VIs and ensure
+	 * we can allocate enough interrupts for our layout.
+	 */
+	vfres = &sc->params.vfres;
+	bzero(iaq, sizeof(*iaq));
+
+	for (itype = INTR_MSIX; itype != 0; itype >>= 1) {
+		if (itype == INTR_INTX)
+			continue;
+
+		if (itype == INTR_MSIX)
+			navail = pci_msix_count(sc->dev);
+		else
+			navail = pci_msi_count(sc->dev);
+
+		if (navail == 0)
+			continue;
+
+		iaq->intr_type = itype;
+		iaq->intr_flags_10g = 0;
+		iaq->intr_flags_1g = 0;
+
+		/*
+		 * XXX: The Linux driver reserves an Ingress Queue for
+		 * forwarded interrupts when using MSI (but not MSI-X).
+		 * It seems it just always asks for 2 interrupts and
+		 * forwards all rxqs to the forwarded interrupt.
+		 *
+		 * We must reserve one IRQ for the firmware event
+		 * queue.
+		 *
+		 * Every rxq requires an ingress queue with a free
+		 * list and interrupts and an egress queue.  Every txq
+		 * requires an ETH egress queue.
+		 */
+		iaq->nirq = T4VF_EXTRA_INTR;
+
+		/*
+		 * First, determine how many queues we can allocate.
+		 * Start by finding the upper bound on rxqs from the
+		 * limit on ingress queues.
+		 */
+		iq_avail = vfres->niqflint - iaq->nirq;
+		if (iq_avail < n10g + n1g) {
+			device_printf(sc->dev,
+			    "Not enough ingress queues (%d) for %d ports\n",
+			    vfres->niqflint, n10g + n1g);
+			return (ENXIO);
+		}
+
+		/*
+		 * Try to honor the cap on interrupts.  If there aren't
+		 * enough interrupts for at least one interrupt per
+		 * port, then don't bother, we will just forward all
+		 * interrupts to one interrupt in that case.
+		 */
+		if (iaq->nirq + n10g + n1g <= navail) {
+			if (iq_avail > navail - iaq->nirq)
+				iq_avail = navail - iaq->nirq;
+		}
+
+		nrxq10g = t4_nrxq10g;
+		nrxq1g = t4_nrxq1g;
+		nrxq = n10g * nrxq10g + n1g * nrxq1g;
+		if (nrxq > iq_avail && nrxq1g > 1) {
+			/* Too many ingress queues.  Try just 1 for 1G. */
+			nrxq1g = 1;
+			nrxq = n10g * nrxq10g + n1g * nrxq1g;
+		}
+		if (nrxq > iq_avail) {
+			/*
+			 * Still too many ingress queues.  Use what we
+			 * can for each 10G port.
+			 */
+			nrxq10g = (iq_avail - n1g) / n10g;
+			nrxq = n10g * nrxq10g + n1g * nrxq1g;
+		}
+		KASSERT(nrxq <= iq_avail, ("too many ingress queues"));
+
+		/*
+		 * Next, determine the upper bound on txqs from the limit
+		 * on ETH queues.
+		 */
+		if (vfres->nethctrl < n10g + n1g) {
+			device_printf(sc->dev,
+			    "Not enough ETH queues (%d) for %d ports\n",
+			    vfres->nethctrl, n10g + n1g);
+			return (ENXIO);
+		}
+
+		ntxq10g = t4_ntxq10g;
+		ntxq1g = t4_ntxq1g;
+		ntxq = n10g * ntxq10g + n1g * ntxq1g;
+		if (ntxq > vfres->nethctrl) {
+			/* Too many ETH queues.  Try just 1 for 1G. */
+			ntxq1g = 1;
+			ntxq = n10g * ntxq10g + n1g * ntxq1g;
+		}
+		if (ntxq > vfres->nethctrl) {
+			/*
+			 * Still too many ETH queues.  Use what we
+			 * can for each 10G port.
+			 */
+			ntxq10g = (vfres->nethctrl - n1g) / n10g;
+			ntxq = n10g * ntxq10g + n1g * ntxq1g;
+		}
+		KASSERT(ntxq <= vfres->nethctrl, ("too many ETH queues"));
+
+		/*
+		 * Finally, ensure we have enough egress queues.
+		 */
+		if (vfres->neq < (n10g + n1g) * 2) {
+			device_printf(sc->dev,
+			    "Not enough egress queues (%d) for %d ports\n",
+			    vfres->neq, n10g + n1g);
+			return (ENXIO);
+		}
+		if (nrxq + ntxq > vfres->neq) {
+			/* Just punt and use 1 for everything. */
+			nrxq1g = ntxq1g = nrxq10g = ntxq10g = 1;
+			nrxq = n10g * nrxq10g + n1g * nrxq1g;
+			ntxq = n10g * ntxq10g + n1g * ntxq1g;
+		}
+		KASSERT(nrxq <= iq_avail, ("too many ingress queues"));
+		KASSERT(ntxq <= vfres->nethctrl, ("too many ETH queues"));
+		KASSERT(nrxq + ntxq <= vfres->neq, ("too many egress queues"));
+
+		/*
+		 * Do we have enough interrupts?  For MSI the interrupts
+		 * have to be a power of 2 as well.
+		 */
+		iaq->nirq += nrxq;
+		iaq->ntxq10g = ntxq10g;
+		iaq->ntxq1g = ntxq1g;
+		iaq->nrxq10g = nrxq10g;
+		iaq->nrxq1g = nrxq1g;
+		if (iaq->nirq <= navail &&
+		    (itype != INTR_MSI || powerof2(iaq->nirq))) {
+			navail = iaq->nirq;
+			if (itype == INTR_MSIX)
+				rc = pci_alloc_msix(sc->dev, &navail);
+			else
+				rc = pci_alloc_msi(sc->dev, &navail);
+			if (rc != 0) {
+				device_printf(sc->dev,
+		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
+				    itype, rc, iaq->nirq, navail);
+				return (rc);
+			}
+			if (navail == iaq->nirq) {
+				iaq->intr_flags_10g = INTR_RXQ;
+				iaq->intr_flags_1g = INTR_RXQ;
+				return (0);
+			}
+			pci_release_msi(sc->dev);
+		}
+
+		/* Fall back to a single interrupt. */
+		iaq->nirq = 1;
+		navail = iaq->nirq;
+		if (itype == INTR_MSIX)
+			rc = pci_alloc_msix(sc->dev, &navail);
+		else
+			rc = pci_alloc_msi(sc->dev, &navail);
+		if (rc != 0)
+			device_printf(sc->dev,
+		    "failed to allocate vectors:%d, type=%d, req=%d, rcvd=%d\n",
+			    itype, rc, iaq->nirq, navail);
+		iaq->intr_flags_10g = 0;
+		iaq->intr_flags_1g = 0;
+		return (rc);
+	}
+
+	device_printf(sc->dev,
+	    "failed to find a usable interrupt type.  "
+	    "allowed=%d, msi-x=%d, msi=%d, intx=1", t4_intr_types,
+	    pci_msix_count(sc->dev), pci_msi_count(sc->dev));
+
+	return (ENXIO);
+}
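/*
 * A condensed sketch of the allocation strategy in
 * cfg_itype_and_nqueues() above: walk interrupt types from most to
 * least capable, try the full vector count, then fall back to a single
 * vector before giving up on the type.  alloc_vectors() is a
 * hypothetical stand-in for pci_alloc_msix()/pci_alloc_msi(); as in
 * the driver, a partial grant must be released before retrying.
 */
enum itype { ITYPE_MSIX, ITYPE_MSI, ITYPE_NONE };

/* Returns 0 with the granted count in *n on success. */
static int alloc_vectors(enum itype t, int *n);
static void release_vectors(void);

static int
pick_interrupts(int wanted, enum itype *type, int *nirq)
{
	enum itype t;
	int n;

	for (t = ITYPE_MSIX; t != ITYPE_NONE; t++) {
		n = wanted;
		if (alloc_vectors(t, &n) == 0) {
			if (n == wanted) {
				*type = t;
				*nirq = n;
				return (0);
			}
			release_vectors();	/* partial grant */
		}
		n = 1;				/* single-vector fallback */
		if (alloc_vectors(t, &n) == 0) {
			*type = t;
			*nirq = 1;
			return (0);
		}
	}
	return (-1);	/* no usable interrupt type */
}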
+
+static int
+t4vf_attach(device_t dev)
+{
+	struct adapter *sc;
+	int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
+	struct make_dev_args mda;
+	struct intrs_and_queues iaq;
+	struct sge *s;
+
+	sc = device_get_softc(dev);
+	sc->dev = dev;
+	pci_enable_busmaster(dev);
+	pci_set_max_read_req(dev, 4096);
+	sc->params.pci.mps = pci_get_max_payload(dev);
+
+	sc->flags |= IS_VF;
+
+	sc->sge_gts_reg = VF_SGE_REG(A_SGE_VF_GTS);
+	sc->sge_kdoorbell_reg = VF_SGE_REG(A_SGE_VF_KDOORBELL);
+	snprintf(sc->lockname, sizeof(sc->lockname), "%s",
+	    device_get_nameunit(dev));
+	mtx_init(&sc->sc_lock, sc->lockname, 0, MTX_DEF);
+	t4_add_adapter(sc);
+
+	mtx_init(&sc->sfl_lock, "starving freelists", 0, MTX_DEF);
+	TAILQ_INIT(&sc->sfl);
+	callout_init_mtx(&sc->sfl_callout, &sc->sfl_lock, 0);
+
+	mtx_init(&sc->reg_lock, "indirect register access", 0, MTX_DEF);
+
+	rc = t4_map_bars_0_and_4(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
+	rc = -t4vf_prep_adapter(sc);
+	if (rc != 0)
+		goto done;
+
+	t4_init_devnames(sc);
+	if (sc->names == NULL) {
+		rc = ENOTSUP;
+		goto done; /* error message displayed already */
+	}
+
+	/*
+	 * Leave the 'pf' and 'mbox' values as zero.  This ensures
+	 * that various firmware messages do not set the fields, which
+	 * is the correct thing to do for a VF.
+	 */
+
+	memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+
+	make_dev_args_init(&mda);
+	mda.mda_devsw = &t4vf_cdevsw;
+	mda.mda_uid = UID_ROOT;
+	mda.mda_gid = GID_WHEEL;
+	mda.mda_mode = 0600;
+	mda.mda_si_drv1 = sc;
+	rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
+	if (rc != 0)
+		device_printf(dev, "failed to create nexus char device: %d.\n",
+		    rc);
+
+#if defined(__i386__)
+	if ((cpu_feature & CPUID_CX8) == 0) {
+		device_printf(dev, "64 bit atomics not available.\n");
+		rc = ENOTSUP;
+		goto done;
+	}
+#endif
+
+	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	rc = -t4vf_fw_reset(sc);
+	if (rc != 0) {
+		device_printf(dev, "FW reset failed: %d\n", rc);
+		goto done;
+	}
+	sc->flags |= FW_OK;
+
+	/*
+	 * Grab basic operational parameters.  These will predominantly have
+	 * been set up by the Physical Function Driver or will be hard coded
+	 * into the adapter.  We just have to live with them ...  Note that
+	 * we _must_ get our VPD parameters before our SGE parameters because
+	 * we need to know the adapter's core clock from the VPD in order to
+	 * properly decode the SGE Timer Values.
+	 */
+	rc = get_params__pre_init(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+	rc = get_params__post_init(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
+	rc = set_params__post_init(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
+	rc = t4_map_bar_2(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
+	rc = t4_create_dma_tag(sc);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
+	/*
+	 * The number of "ports" which we support is equal to the number of
+	 * Virtual Interfaces with which we've been provisioned.
+	 */
+	sc->params.nports = imin(sc->params.vfres.nvi, MAX_NPORTS);
+
+	/*
+	 * We may have been provisioned with more VIs than the number of
+	 * ports we're allowed to access (our Port Access Rights Mask).
+	 * Just use a single VI for each port.
+	 */
+	sc->params.nports = imin(sc->params.nports,
+	    bitcount32(sc->params.vfres.pmask));
+
+#ifdef notyet
+	/*
+	 * XXX: The Linux VF driver will lower nports if it thinks there
+	 * are too few resources in vfres (niqflint, nethctrl, neq).
+	 */
+#endif
+
+	/*
+	 * First pass over all the ports - allocate VIs and initialize some
+	 * basic parameters like mac address, port type, etc.  We also figure
+	 * out whether a port is 10G or 1G and use that information when
+	 * calculating how many interrupts to attempt to allocate.
+	 */
+	n10g = n1g = 0;
+	for_each_port(sc, i) {
+		struct port_info *pi;
+
+		pi = malloc(sizeof(*pi), M_CXGBE, M_ZERO | M_WAITOK);
+		sc->port[i] = pi;
+
+		/* These must be set before t4_port_init */
+		pi->adapter = sc;
+		pi->port_id = i;
+		pi->nvi = 1;
+		pi->vi = malloc(sizeof(struct vi_info) * pi->nvi, M_CXGBE,
+		    M_ZERO | M_WAITOK);
+
+		/*
+		 * Allocate the "main" VI and initialize parameters
+		 * like mac addr.
+		 */
+		rc = -t4_port_init(sc, sc->mbox, sc->pf, 0, i);
+		if (rc != 0) {
+			device_printf(dev, "unable to initialize port %d: %d\n",
+			    i, rc);
+			free(pi->vi, M_CXGBE);
+			free(pi, M_CXGBE);
+			sc->port[i] = NULL;
+			goto done;
+		}
+
+		/* No t4_link_start. */
+
+		snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
+		    device_get_nameunit(dev), i);
+		mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
+		sc->chan_map[pi->tx_chan] = i;
+
+		if (port_top_speed(pi) >= 10) {
+			n10g++;
+		} else {
+			n1g++;
+		}
+
+		pi->dev = device_add_child(dev, sc->names->vf_ifnet_name, -1);
+		if (pi->dev == NULL) {
+			device_printf(dev,
+			    "failed to add device for port %d.\n", i);
+			rc = ENXIO;
+			goto done;
+		}
+		pi->vi[0].dev = pi->dev;
+		device_set_softc(pi->dev, pi);
+	}
+
+	/*
+	 * Interrupt type, # of interrupts, # of rx/tx queues, etc.
+	 */
+	rc = cfg_itype_and_nqueues(sc, n10g, n1g, &iaq);
+	if (rc != 0)
+		goto done; /* error message displayed already */
+
+	sc->intr_type = iaq.intr_type;
+	sc->intr_count = iaq.nirq;
+
+	s = &sc->sge;
+	s->nrxq = n10g * iaq.nrxq10g + n1g * iaq.nrxq1g;
+	s->ntxq = n10g * iaq.ntxq10g + n1g * iaq.ntxq1g;
+	s->neq = s->ntxq + s->nrxq;	/* the free list in an rxq is an eq */
+	s->neq += sc->params.nports + 1;/* ctrl queues: 1 per port + 1 mgmt */
+	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
+
+	s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
+	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
+	    M_ZERO | M_WAITOK);
+
+	/*
+	 * Second pass over the ports.  This time we know the number of rx and
+	 * tx queues that each port should get.
+	 */
+	rqidx = tqidx = 0;
+	for_each_port(sc, i) {
+		struct port_info *pi = sc->port[i];
+		struct vi_info *vi;
+
+		if (pi == NULL)
+			continue;
+
+		for_each_vi(pi, j, vi) {
+			vi->pi = pi;
+			vi->qsize_rxq = t4_qsize_rxq;
+			vi->qsize_txq = t4_qsize_txq;
+
+			vi->first_rxq = rqidx;
+			vi->first_txq = tqidx;
+			if (port_top_speed(pi) >= 10) {
+				vi->tmr_idx = t4_tmr_idx_10g;
+				vi->pktc_idx = t4_pktc_idx_10g;
+				vi->flags |= iaq.intr_flags_10g & INTR_RXQ;
+				vi->nrxq = j == 0 ? iaq.nrxq10g : 1;
+				vi->ntxq = j == 0 ? iaq.ntxq10g : 1;
+			} else {
+				vi->tmr_idx = t4_tmr_idx_1g;
+				vi->pktc_idx = t4_pktc_idx_1g;
+				vi->flags |= iaq.intr_flags_1g & INTR_RXQ;
+				vi->nrxq = j == 0 ? iaq.nrxq1g : 1;
+				vi->ntxq = j == 0 ? iaq.ntxq1g : 1;
+			}
+			rqidx += vi->nrxq;
+			tqidx += vi->ntxq;
+
+			vi->rsrv_noflowq = 0;
+		}
+	}
+
+	rc = t4_setup_intr_handlers(sc);
+	if (rc != 0) {
+		device_printf(dev,
+		    "failed to setup interrupt handlers: %d\n", rc);
+		goto done;
+	}
+
+	rc = bus_generic_attach(dev);
+	if (rc != 0) {
+		device_printf(dev,
+		    "failed to attach all child ports: %d\n", rc);
+		goto done;
+	}
+
+	device_printf(dev,
+	    "%d ports, %d %s interrupt%s, %d eq, %d iq\n",
+	    sc->params.nports, sc->intr_count, sc->intr_type == INTR_MSIX ?
+	    "MSI-X" : "MSI", sc->intr_count > 1 ? "s" : "", sc->sge.neq,
+	    sc->sge.niq);
+
+done:
+	if (rc != 0)
+		t4_detach_common(dev);
+	else
+		t4_sysctls(sc);
+
+	return (rc);
+}
+
+static void
+get_regs(struct adapter *sc, struct t4_regdump *regs, uint8_t *buf)
+{
+
+	/* 0x3f is used as the revision for VFs. */
+	regs->version = chip_id(sc) | (0x3f << 10);
+	t4_get_regs(sc, buf, regs->len);
+}
+
+static void
+t4_clr_vi_stats(struct adapter *sc)
+{
+	int reg;
+
+	for (reg = A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L;
+	     reg <= A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H; reg += 4)
+		t4_write_reg(sc, VF_MPS_REG(reg), 0);
+}
+
+static int
+t4vf_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
+    struct thread *td)
+{
+	int rc;
+	struct adapter *sc = dev->si_drv1;
+
+	rc = priv_check(td, PRIV_DRIVER);
+	if (rc != 0)
+		return (rc);
+
+	switch (cmd) {
+	case CHELSIO_T4_GETREG: {
+		struct t4_reg *edata = (struct t4_reg *)data;
+
+		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
+			return (EFAULT);
+
+		if (edata->size == 4)
+			edata->val = t4_read_reg(sc, edata->addr);
+		else if (edata->size == 8)
+			edata->val = t4_read_reg64(sc, edata->addr);
+		else
+			return (EINVAL);
+
+		break;
+	}
+	case CHELSIO_T4_SETREG: {
+		struct t4_reg *edata = (struct t4_reg *)data;
+
+		if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len)
+			return (EFAULT);
+
+		if (edata->size == 4) {
+			if (edata->val & 0xffffffff00000000)
+				return (EINVAL);
+			t4_write_reg(sc, edata->addr, (uint32_t) edata->val);
+		} else if (edata->size == 8)
+			t4_write_reg64(sc, edata->addr, edata->val);
+		else
+			return (EINVAL);
+		break;
+	}
+	case CHELSIO_T4_REGDUMP: {
+		struct t4_regdump *regs = (struct t4_regdump *)data;
+		int reglen = t4_get_regs_len(sc);
+		uint8_t *buf;
+
+		if (regs->len < reglen) {
+			regs->len = reglen; /* hint to the caller */
+			return (ENOBUFS);
+		}
+
+		regs->len = reglen;
+		buf = malloc(reglen, M_CXGBE, M_WAITOK | M_ZERO);
+		get_regs(sc, regs, buf);
+		rc = copyout(buf, regs->data, reglen);
+		free(buf, M_CXGBE);
+		break;
+	}
+	case CHELSIO_T4_CLEAR_STATS: {
+		int i, v;
+		u_int port_id = *(uint32_t *)data;
+		struct port_info *pi;
+		struct vi_info *vi;
+
+		if (port_id >= sc->params.nports)
+			return (EINVAL);
+		pi = sc->port[port_id];
+
+		/* MAC stats */
+		pi->tx_parse_error = 0;
+		t4_clr_vi_stats(sc);
+
+		/*
+		 * Since this command accepts a port, clear stats for
+		 * all VIs on this port.
+		 */
+		for_each_vi(pi, v, vi) {
+			if (vi->flags & VI_INIT_DONE) {
+				struct sge_rxq *rxq;
+				struct sge_txq *txq;
+
+				for_each_rxq(vi, i, rxq) {
+#if defined(INET) || defined(INET6)
+					rxq->lro.lro_queued = 0;
+					rxq->lro.lro_flushed = 0;
+#endif
+					rxq->rxcsum = 0;
+					rxq->vlan_extraction = 0;
+				}
+
+				for_each_txq(vi, i, txq) {
+					txq->txcsum = 0;
+					txq->tso_wrs = 0;
+					txq->vlan_insertion = 0;
+					txq->imm_wrs = 0;
+					txq->sgl_wrs = 0;
+					txq->txpkt_wrs = 0;
+					txq->txpkts0_wrs = 0;
+					txq->txpkts1_wrs = 0;
+					txq->txpkts0_pkts = 0;
+					txq->txpkts1_pkts = 0;
+					mp_ring_reset_stats(txq->r);
+				}
+			}
+		}
+		break;
+	}
+	case CHELSIO_T4_SCHED_CLASS:
+		rc = t4_set_sched_class(sc, (struct t4_sched_params *)data);
+		break;
+	case CHELSIO_T4_SCHED_QUEUE:
+		rc = t4_set_sched_queue(sc, (struct t4_sched_queue *)data);
+		break;
+	default:
+		rc = ENOTTY;
+	}
+
+	return (rc);
+}
+
+static device_method_t t4vf_methods[] = {
+	DEVMETHOD(device_probe,		t4vf_probe),
+	DEVMETHOD(device_attach,	t4vf_attach),
+	DEVMETHOD(device_detach,	t4_detach_common),
+
+	DEVMETHOD_END
+};
+
+static driver_t t4vf_driver = {
+	"t4vf",
+	t4vf_methods,
+	sizeof(struct adapter)
+};
+
+static device_method_t t5vf_methods[] = {
+	DEVMETHOD(device_probe,		t5vf_probe),
+	DEVMETHOD(device_attach,	t4vf_attach),
+	DEVMETHOD(device_detach,	t4_detach_common),
+
+	DEVMETHOD_END
+};
+
+static driver_t t5vf_driver = {
+	"t5vf",
+	t5vf_methods,
+	sizeof(struct adapter)
+};
+
+static device_method_t t6vf_methods[] = {
+	DEVMETHOD(device_probe,		t6vf_probe),
+	DEVMETHOD(device_attach,	t4vf_attach),
+	DEVMETHOD(device_detach,	t4_detach_common),
+
+	DEVMETHOD_END
+};
+
+static driver_t t6vf_driver = {
+	"t6vf",
+	t6vf_methods,
+	sizeof(struct adapter)
+};
+
+static driver_t cxgbev_driver = {
+	"cxgbev",
+	cxgbe_methods,
+	sizeof(struct port_info)
+};
+
+static driver_t cxlv_driver = {
+	"cxlv",
+	cxgbe_methods,
+	sizeof(struct port_info)
+};
+
+static driver_t ccv_driver = {
+	"ccv",
+	cxgbe_methods,
+	sizeof(struct port_info)
+};
+
+static devclass_t t4vf_devclass, t5vf_devclass, t6vf_devclass;
+static devclass_t cxgbev_devclass, cxlv_devclass, ccv_devclass;
+
+DRIVER_MODULE(t4vf, pci, t4vf_driver, t4vf_devclass, 0, 0);
+MODULE_VERSION(t4vf, 1);
+MODULE_DEPEND(t4vf, t4nex, 1, 1, 1);
+
+DRIVER_MODULE(t5vf, pci, t5vf_driver, t5vf_devclass, 0, 0);
+MODULE_VERSION(t5vf, 1);
+MODULE_DEPEND(t5vf, t5nex, 1, 1, 1);
+
+DRIVER_MODULE(t6vf, pci, t6vf_driver, t6vf_devclass, 0, 0);
+MODULE_VERSION(t6vf, 1);
+MODULE_DEPEND(t6vf, t6nex, 1, 1, 1);
+
+DRIVER_MODULE(cxgbev, t4vf, cxgbev_driver, cxgbev_devclass, 0, 0);
+MODULE_VERSION(cxgbev, 1);
+
+DRIVER_MODULE(cxlv, t5vf, cxlv_driver, cxlv_devclass, 0, 0);
+MODULE_VERSION(cxlv, 1);
+
+DRIVER_MODULE(ccv, t6vf, ccv_driver, ccv_devclass, 0, 0);
+MODULE_VERSION(ccv, 1);


Property changes on: trunk/sys/dev/cxgbe/t4_vf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Modified: trunk/sys/dev/cxgbe/tom/t4_connect.c
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_connect.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_connect.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_connect.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_connect.c 318804 2017-05-24 20:01:12Z np $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -57,6 +58,7 @@
 #include "common/common.h"
 #include "common/t4_msg.h"
 #include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
 #include "tom/t4_tom_l2t.h"
 #include "tom/t4_tom.h"
 
@@ -106,7 +108,7 @@
 }
 
 /*
- * Active open failed.
+ * Active open succeeded.
  */
 static int
 do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
@@ -114,8 +116,8 @@
 {
 	struct adapter *sc = iq->adapter;
 	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
-	unsigned int tid = GET_TID(cpl);
-	unsigned int atid = G_TID_TID(ntohl(cpl->tos_atid));
+	u_int tid = GET_TID(cpl);
+	u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
 	struct toepcb *toep = lookup_atid(sc, atid);
 	struct inpcb *inp = toep->inp;
 
@@ -125,9 +127,10 @@
 	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
 	free_atid(sc, atid);
 
+	CURVNET_SET(toep->vnet);
 	INP_WLOCK(inp);
 	toep->tid = tid;
-	insert_tid(sc, tid, toep);
+	insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
 	if (inp->inp_flags & INP_DROPPED) {
 
 		/* socket closed by the kernel before hw told us it connected */
@@ -140,19 +143,10 @@
 	make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);
 done:
 	INP_WUNLOCK(inp);
+	CURVNET_RESTORE();
 	return (0);
 }
 
-static inline int
-act_open_has_tid(unsigned int status)
-{
-
-	return (status != CPL_ERR_TCAM_FULL &&
-	    status != CPL_ERR_TCAM_PARITY &&
-	    status != CPL_ERR_CONN_EXIST &&
-	    status != CPL_ERR_ARP_MISS);
-}
-
 /*
  * Convert an ACT_OPEN_RPL status to an errno.
  */
@@ -177,6 +171,30 @@
 	}
 }
 
+void
+act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
+{
+	struct toepcb *toep = lookup_atid(sc, atid);
+	struct inpcb *inp = toep->inp;
+	struct toedev *tod = &toep->td->tod;
+
+	free_atid(sc, atid);
+	toep->tid = -1;
+
+	CURVNET_SET(toep->vnet);
+	if (status != EAGAIN)
+		INP_INFO_RLOCK(&V_tcbinfo);
+	INP_WLOCK(inp);
+	toe_connect_failed(tod, inp, status);
+	final_cpl_received(toep);	/* unlocks inp */
+	if (status != EAGAIN)
+		INP_INFO_RUNLOCK(&V_tcbinfo);
+	CURVNET_RESTORE();
+}
+
+/*
+ * Active open failed.
+ */
 static int
 do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
     struct mbuf *m)
@@ -183,11 +201,9 @@
 {
 	struct adapter *sc = iq->adapter;
 	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
-	unsigned int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
-	unsigned int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
+	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
+	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
 	struct toepcb *toep = lookup_atid(sc, atid);
-	struct inpcb *inp = toep->inp;
-	struct toedev *tod = &toep->td->tod;
 	int rc;
 
 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
@@ -199,20 +215,11 @@
 	if (negative_advice(status))
 		return (0);
 
-	free_atid(sc, atid);
-	toep->tid = -1;
-
 	if (status && act_open_has_tid(status))
 		release_tid(sc, GET_TID(cpl), toep->ctrlq);
 
 	rc = act_open_rpl_status_to_errno(status);
-	if (rc != EAGAIN)
-		INP_INFO_WLOCK(&V_tcbinfo);
-	INP_WLOCK(inp);
-	toe_connect_failed(tod, inp, rc);
-	final_cpl_received(toep);	/* unlocks inp */
-	if (rc != EAGAIN)
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+	act_open_failure_cleanup(sc, atid, rc);
 
 	return (0);
 }
@@ -224,10 +231,13 @@
 calc_opt2a(struct socket *so, struct toepcb *toep)
 {
 	struct tcpcb *tp = so_sototcpcb(so);
-	struct port_info *pi = toep->port;
+	struct port_info *pi = toep->vi->pi;
 	struct adapter *sc = pi->adapter;
-	uint32_t opt2 = 0;
+	uint32_t opt2;
 
+	opt2 = V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]) |
+	    F_RSS_QUEUE_VALID | V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);
+
 	if (tp->t_flags & TF_SACK_PERMIT)
 		opt2 |= F_SACK_EN;
 
@@ -240,9 +250,15 @@
 	if (V_tcp_do_ecn)
 		opt2 |= F_CCTRL_ECN;
 
-	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
-	opt2 |= F_RX_COALESCE_VALID | V_RX_COALESCE(M_RX_COALESCE);
-	opt2 |= F_RSS_QUEUE_VALID | V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);
+	/* RX_COALESCE is always a valid value (M_RX_COALESCE). */
+	if (is_t4(sc))
+		opt2 |= F_RX_COALESCE_VALID;
+	else {
+		opt2 |= F_T5_OPT_2_VALID;
+		opt2 |= F_T5_ISS;
+	}
+	if (sc->tt.rx_coalesce)
+		opt2 |= V_RX_COALESCE(M_RX_COALESCE);
 
 #ifdef USE_DDP_RX_FLOW_CONTROL
 	if (toep->ulp_mode == ULP_MODE_TCPDDP)
@@ -253,13 +269,21 @@
 }
 
 void
-t4_init_connect_cpl_handlers(struct adapter *sc)
+t4_init_connect_cpl_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH, do_act_establish);
-	t4_register_cpl_handler(sc, CPL_ACT_OPEN_RPL, do_act_open_rpl);
+	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
+	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
 }
 
+void
+t4_uninit_connect_cpl_handlers(void)
+{
+
+	t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
+	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, NULL);
+}
+
 #define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \
 	reason = __LINE__; \
 	rc = (x); \
@@ -266,6 +290,31 @@
 	goto failed; \
 } while (0)
 
+static inline int
+act_open_cpl_size(struct adapter *sc, int isipv6)
+{
+	int idx;
+	static const int sz_table[3][2] = {
+		{
+			sizeof (struct cpl_act_open_req),
+			sizeof (struct cpl_act_open_req6)
+		},
+		{
+			sizeof (struct cpl_t5_act_open_req),
+			sizeof (struct cpl_t5_act_open_req6)
+		},
+		{
+			sizeof (struct cpl_t6_act_open_req),
+			sizeof (struct cpl_t6_act_open_req6)
+		},
+	};
+
+	MPASS(chip_id(sc) >= CHELSIO_T4);
+	idx = min(chip_id(sc) - CHELSIO_T4, 2);
+
+	return (sz_table[idx][!!isipv6]);
+}
+
 /*
  * active open (soconnect).
  *
@@ -285,7 +334,7 @@
 	struct toepcb *toep = NULL;
 	struct wrqe *wr = NULL;
 	struct ifnet *rt_ifp = rt->rt_ifp;
-	struct port_info *pi;
+	struct vi_info *vi;
 	int mtu_idx, rscale, qid_atid, rc, isipv6;
 	struct inpcb *inp = sotoinpcb(so);
 	struct tcpcb *tp = intotcpcb(inp);
@@ -296,17 +345,17 @@
 	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));
 
 	if (rt_ifp->if_type == IFT_ETHER)
-		pi = rt_ifp->if_softc;
+		vi = rt_ifp->if_softc;
 	else if (rt_ifp->if_type == IFT_L2VLAN) {
 		struct ifnet *ifp = VLAN_COOKIE(rt_ifp);
 
-		pi = ifp->if_softc;
+		vi = ifp->if_softc;
 	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
 	else
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
 
-	toep = alloc_toepcb(pi, -1, -1, M_NOWAIT);
+	toep = alloc_toepcb(vi, -1, -1, M_NOWAIT | M_ZERO);
 	if (toep == NULL)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
@@ -314,17 +363,17 @@
 	if (toep->tid < 0)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
-	toep->l2te = t4_l2t_get(pi, rt_ifp,
+	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
 	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
 	if (toep->l2te == NULL)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
 	isipv6 = nam->sa_family == AF_INET6;
-	wr = alloc_wrqe(isipv6 ? sizeof(struct cpl_act_open_req6) :
-	    sizeof(struct cpl_act_open_req), toep->ctrlq);
+	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
 	if (wr == NULL)
 		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);
 
+	toep->vnet = so->so_vnet;
 	if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0)
 		set_tcpddp_ulp_mode(toep);
 	else
@@ -348,23 +397,35 @@
 
 	if (isipv6) {
 		struct cpl_act_open_req6 *cpl = wrtod(wr);
+		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
+		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
 
-		if ((inp->inp_vflag & INP_IPV6) == 0) {
-			/* XXX think about this a bit more */
-			log(LOG_ERR,
-			    "%s: time to think about AF_INET6 + vflag 0x%x.\n",
-			    __func__, inp->inp_vflag);
+		if ((inp->inp_vflag & INP_IPV6) == 0)
 			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
-		}
 
-		toep->ce = hold_lip(td, &inp->in6p_laddr);
+		toep->ce = hold_lip(td, &inp->in6p_laddr, NULL);
 		if (toep->ce == NULL)
 			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);
 
-		INIT_TP_WR(cpl, 0);
+		switch (chip_id(sc)) {
+		case CHELSIO_T4:
+			INIT_TP_WR(cpl, 0);
+			cpl->params = select_ntuple(vi, toep->l2te);
+			break;
+		case CHELSIO_T5:
+			INIT_TP_WR(cpl5, 0);
+			cpl5->iss = htobe32(tp->iss);
+			cpl5->params = select_ntuple(vi, toep->l2te);
+			break;
+		case CHELSIO_T6:
+		default:
+			INIT_TP_WR(cpl6, 0);
+			cpl6->iss = htobe32(tp->iss);
+			cpl6->params = select_ntuple(vi, toep->l2te);
+			break;
+		}
 		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
 		    qid_atid));
-
 		cpl->local_port = inp->inp_lport;
 		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
 		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
@@ -371,21 +432,37 @@
 		cpl->peer_port = inp->inp_fport;
 		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
 		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
-		cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
+		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
 		    toep->rx_credits, toep->ulp_mode);
-		cpl->params = select_ntuple(pi, toep->l2te, sc->filter_mode);
 		cpl->opt2 = calc_opt2a(so, toep);
 	} else {
 		struct cpl_act_open_req *cpl = wrtod(wr);
+		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
+		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
 
-		INIT_TP_WR(cpl, 0);
+		switch (chip_id(sc)) {
+		case CHELSIO_T4:
+			INIT_TP_WR(cpl, 0);
+			cpl->params = select_ntuple(vi, toep->l2te);
+			break;
+		case CHELSIO_T5:
+			INIT_TP_WR(cpl5, 0);
+			cpl5->iss = htobe32(tp->iss);
+			cpl5->params = select_ntuple(vi, toep->l2te);
+			break;
+		case CHELSIO_T6:
+		default:
+			INIT_TP_WR(cpl6, 0);
+			cpl6->iss = htobe32(tp->iss);
+			cpl6->params = select_ntuple(vi, toep->l2te);
+			break;
+		}
 		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
 		    qid_atid));
 		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
 		    &cpl->peer_ip, &cpl->peer_port);
-		cpl->opt0 = calc_opt0(so, pi, toep->l2te, mtu_idx, rscale,
+		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
 		    toep->rx_credits, toep->ulp_mode);
-		cpl->params = select_ntuple(pi, toep->l2te, sc->filter_mode);
 		cpl->opt2 = calc_opt2a(so, toep);
 	}
 

Modified: trunk/sys/dev/cxgbe/tom/t4_cpl_io.c
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_cpl_io.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_cpl_io.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2012 Chelsio Communications, Inc.
+ * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
  * All rights reserved.
  * Written by: Navdeep Parhar <np at FreeBSD.org>
  *
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_cpl_io.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_cpl_io.c 313178 2017-02-03 23:33:06Z jhb $");
 
 #include "opt_inet.h"
 
@@ -44,6 +45,7 @@
 #include <netinet/in.h>
 #include <netinet/in_pcb.h>
 #include <netinet/ip.h>
+#include <netinet/ip6.h>
 #include <netinet/tcp_var.h>
 #define TCPSTATES
 #include <netinet/tcp_fsm.h>
@@ -70,6 +72,40 @@
 VNET_DECLARE(int, tcp_autorcvbuf_max);
 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
 
+static inline struct mbuf *
+mbufq_dequeue(struct mbufq *q)
+{
+	struct mbuf *m;
+
+	m = q->head;
+	if (m) {
+		if (q->tail == m)
+			q->tail = NULL;
+		q->head = m->m_nextpkt;
+		m->m_nextpkt = NULL;
+	}
+	return (m);
+}
+
+static inline void
+mbufq_enqueue(struct mbufq *q, struct mbuf *m)
+{
+
+	m->m_nextpkt = NULL;
+	if (q->tail)
+		q->tail->m_nextpkt = m;
+	else
+		q->head = m;
+	q->tail = m;
+}
+
+static inline struct mbuf *
+mbufq_first(const struct mbufq *q)
+{
+
+	return (q->head);
+}
+
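These mbufq helpers are a private copy of the classic head/tail
singly-linked packet queue, seemingly because this branch lacks the later
generic mbufq(9) API.  The invariant is that the tail pointer is NULL
exactly when the head is.  A self-contained sketch over a hypothetical
node type:

#include <stddef.h>

struct pkt {
	struct pkt *next;
};

struct pktq {
	struct pkt *head;
	struct pkt *tail;
};

static void
pktq_enqueue(struct pktq *q, struct pkt *p)
{

	p->next = NULL;
	if (q->tail != NULL)
		q->tail->next = p;	/* append after the current tail */
	else
		q->head = p;		/* queue was empty */
	q->tail = p;
}

static struct pkt *
pktq_dequeue(struct pktq *q)
{
	struct pkt *p = q->head;

	if (p != NULL) {
		if (q->tail == p)
			q->tail = NULL;	/* removed the last element */
		q->head = p->next;
		p->next = NULL;
	}
	return (p);
}
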
 void
 send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
 {
@@ -76,19 +112,18 @@
 	struct wrqe *wr;
 	struct fw_flowc_wr *flowc;
 	unsigned int nparams = ftxp ? 8 : 6, flowclen;
-	struct port_info *pi = toep->port;
+	struct vi_info *vi = toep->vi;
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
-	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
+	unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
 	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
 
 	KASSERT(!(toep->flags & TPF_FLOWC_WR_SENT),
 	    ("%s: flowc for tid %u sent already", __func__, toep->tid));
 
-	CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
-
 	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
 
-	wr = alloc_wrqe(roundup(flowclen, 16), toep->ofld_txq);
+	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
 	if (wr == NULL) {
 		/* XXX */
 		panic("%s: allocation failure.", __func__);
@@ -120,11 +155,18 @@
 		flowc->mnemval[6].val = htobe32(sndbuf);
 		flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
 		flowc->mnemval[7].val = htobe32(ftxp->mss);
+
+		CTR6(KTR_CXGBE,
+		    "%s: tid %u, mss %u, sndbuf %u, snd_nxt 0x%x, rcv_nxt 0x%x",
+		    __func__, toep->tid, ftxp->mss, sndbuf, ftxp->snd_nxt,
+		    ftxp->rcv_nxt);
 	} else {
 		flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
 		flowc->mnemval[4].val = htobe32(512);
 		flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_MSS;
 		flowc->mnemval[5].val = htobe32(512);
+
+		CTR2(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
 	}
 
 	txsd->tx_credits = howmany(flowclen, 16);
@@ -204,12 +246,21 @@
 assign_rxopt(struct tcpcb *tp, unsigned int opt)
 {
 	struct toepcb *toep = tp->t_toe;
+	struct inpcb *inp = tp->t_inpcb;
 	struct adapter *sc = td_adapter(toep->td);
+	int n;
 
-	INP_LOCK_ASSERT(tp->t_inpcb);
+	INP_LOCK_ASSERT(inp);
 
-	tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - 40;
+	if (inp->inp_inc.inc_flags & INC_ISIPV6)
+		n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
+	else
+		n = sizeof(struct ip) + sizeof(struct tcphdr);
+	tp->t_maxseg = tp->t_maxopd = sc->params.mtus[G_TCPOPT_MSS(opt)] - n;
 
+	CTR4(KTR_CXGBE, "%s: tid %d, mtu_idx %u (%u)", __func__, toep->tid,
+	    G_TCPOPT_MSS(opt), sc->params.mtus[G_TCPOPT_MSS(opt)]);
+
 	if (G_TCPOPT_TSTAMP(opt)) {
 		tp->t_flags |= TF_RCVD_TSTMP;	/* timestamps ok */
 		tp->ts_recent = 0;		/* hmmm */
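
assign_rxopt used to subtract a flat 40 bytes (IPv4 plus TCP headers) from
the MTU selected by the firmware; it now subtracts the real per-family
overhead, since an IPv6 header is 40 bytes by itself.  The arithmetic, as
a sketch:

#define IP4_HDR	20	/* sizeof(struct ip) */
#define IP6_HDR	40	/* sizeof(struct ip6_hdr) */
#define TCP_HDR	20	/* sizeof(struct tcphdr), no options */

static int
mss_from_mtu(int mtu, int isipv6)
{

	return (mtu - (isipv6 ? IP6_HDR : IP4_HDR) - TCP_HDR);
}
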
@@ -343,19 +394,18 @@
 	    __func__, sb, sb->sb_cc, toep->sb_cc));
 	toep->rx_credits += toep->sb_cc - sb->sb_cc;
 	toep->sb_cc = sb->sb_cc;
-	credits = toep->rx_credits;
-	SOCKBUF_UNLOCK(sb);
 
-	if (credits > 0 &&
-	    (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {
+	if (toep->rx_credits > 0 &&
+	    (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 ||
+	    (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
+	    toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) {
 
-		credits = send_rx_credits(sc, toep, credits);
-		SOCKBUF_LOCK(sb);
+		credits = send_rx_credits(sc, toep, toep->rx_credits);
 		toep->rx_credits -= credits;
-		SOCKBUF_UNLOCK(sb);
 		tp->rcv_wnd += credits;
 		tp->rcv_adv += credits;
 	}
+	SOCKBUF_UNLOCK(sb);
 }
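
The rewritten credit-return path above keeps the whole decision under the
sockbuf lock and returns rx credits to the hardware under any of four
conditions: the window is nearly closed, a large backlog of credits has
accumulated, a medium backlog coincides with a modest window, or the
window plus buffered data has fallen below the socket's low-water mark.
A sketch of the predicate (byte quantities, hypothetical names):

static int
should_return_credits(unsigned credits, unsigned rcv_wnd,
    unsigned buffered, unsigned lowat)
{

	if (credits == 0)
		return (0);
	return (rcv_wnd <= 32 * 1024 ||		/* window nearly closed */
	    credits >= 64 * 1024 ||		/* large backlog */
	    (credits >= 16 * 1024 && rcv_wnd <= 128 * 1024) ||
	    buffered + rcv_wnd < lowat);	/* reader about to stall */
}
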
 
 /*
@@ -444,29 +494,30 @@
 
 static inline void
 write_tx_wr(void *dst, struct toepcb *toep, unsigned int immdlen,
-    unsigned int plen, uint8_t credits, int more_to_come)
+    unsigned int plen, uint8_t credits, int shove, int ulp_submode, int txalign)
 {
 	struct fw_ofld_tx_data_wr *txwr = dst;
-	int shove = !more_to_come;
-	int compl = 1;
 
-	/*
-	 * We always request completion notifications from the firmware.  The
-	 * only exception is when we know we'll get more data to send shortly
-	 * and that we'll have some tx credits remaining to transmit that data.
-	 */
-	if (more_to_come && toep->tx_credits - credits >= MIN_OFLD_TX_CREDITS)
-		compl = 0;
-
 	txwr->op_to_immdlen = htobe32(V_WR_OP(FW_OFLD_TX_DATA_WR) |
-	    V_FW_WR_COMPL(compl) | V_FW_WR_IMMDLEN(immdlen));
+	    V_FW_WR_IMMDLEN(immdlen));
 	txwr->flowid_len16 = htobe32(V_FW_WR_FLOWID(toep->tid) |
 	    V_FW_WR_LEN16(credits));
-	txwr->tunnel_to_proxy =
-	    htobe32(V_FW_OFLD_TX_DATA_WR_ULPMODE(toep->ulp_mode) |
-		V_FW_OFLD_TX_DATA_WR_URGENT(0) |	/* XXX */
-		V_FW_OFLD_TX_DATA_WR_SHOVE(shove));
+	txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(toep->ulp_mode) |
+	    V_TX_ULP_SUBMODE(ulp_submode) | V_TX_URG(0) | V_TX_SHOVE(shove));
 	txwr->plen = htobe32(plen);
+
+	if (txalign > 0) {
+		struct tcpcb *tp = intotcpcb(toep->inp);
+
+		if (plen < 2 * tp->t_maxseg || is_10G_port(toep->vi->pi))
+			txwr->lsodisable_to_flags |=
+			    htobe32(F_FW_OFLD_TX_DATA_WR_LSODISABLE);
+		else
+			txwr->lsodisable_to_flags |=
+			    htobe32(F_FW_OFLD_TX_DATA_WR_ALIGNPLD |
+				(tp->t_flags & TF_NODELAY ? 0 :
+				F_FW_OFLD_TX_DATA_WR_ALIGNPLDSHOVE));
+	}
 }
 
 /*
@@ -529,19 +580,26 @@
  * The socket's so_snd buffer consists of a stream of data starting with sb_mb
  * and linked together with m_next.  sb_sndptr, if set, is the last mbuf that
  * was transmitted.
+ *
+ * drop indicates the number of bytes that should be dropped from the head of
+ * the send buffer.  It is an optimization that lets do_fw4_ack avoid creating
+ * contention on the send buffer lock (before this change it used to do
+ * sowwakeup and then t4_push_frames right after that when recovering from tx
+ * stalls).  When drop is set this function MUST drop the bytes and wake up any
+ * writers.
  */
-static void
-t4_push_frames(struct adapter *sc, struct toepcb *toep)
+void
+t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
 {
 	struct mbuf *sndptr, *m, *sb_sndptr;
 	struct fw_ofld_tx_data_wr *txwr;
 	struct wrqe *wr;
-	unsigned int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
+	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
 	struct inpcb *inp = toep->inp;
 	struct tcpcb *tp = intotcpcb(inp);
 	struct socket *so = inp->inp_socket;
 	struct sockbuf *sb = &so->so_snd;
-	int tx_credits;
+	int tx_credits, shove, compl, sowwakeup;
 	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
 
 	INP_WLOCK_ASSERT(inp);
@@ -548,16 +606,23 @@
 	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
 	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
 
-	if (__predict_false(toep->ulp_mode != ULP_MODE_NONE &&
-	    toep->ulp_mode != ULP_MODE_TCPDDP))
-		CXGBE_UNIMPLEMENTED("ulp_mode");
+	KASSERT(toep->ulp_mode == ULP_MODE_NONE ||
+	    toep->ulp_mode == ULP_MODE_TCPDDP ||
+	    toep->ulp_mode == ULP_MODE_RDMA,
+	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
 
+	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
+		return;
+
 	/*
 	 * This function doesn't resume by itself.  Someone else must clear the
 	 * flag and call this function.
 	 */
-	if (__predict_false(toep->flags & TPF_TX_SUSPENDED))
+	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
+		KASSERT(drop == 0,
+		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
 		return;
+	}
 
 	do {
 		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
@@ -565,6 +630,11 @@
 		max_nsegs = max_dsgl_nsegs(tx_credits);
 
 		SOCKBUF_LOCK(sb);
+		sowwakeup = drop;
+		if (drop) {
+			sbdrop_locked(sb, drop);
+			drop = 0;
+		}
 		sb_sndptr = sb->sb_sndptr;
 		sndptr = sb_sndptr ? sb_sndptr->m_next : sb->sb_mb;
 		plen = 0;
@@ -583,7 +653,11 @@
 				if (plen == 0) {
 					/* Too few credits */
 					toep->flags |= TPF_TX_SUSPENDED;
-					SOCKBUF_UNLOCK(sb);
+					if (sowwakeup)
+						sowwakeup_locked(so);
+					else
+						SOCKBUF_UNLOCK(sb);
+					SOCKBUF_UNLOCK_ASSERT(sb);
 					return;
 				}
 				break;
@@ -600,23 +674,29 @@
 			}
 		}
 
+		if (sb->sb_cc > sb->sb_hiwat * 5 / 8 &&
+		    toep->plen_nocompl + plen >= sb->sb_hiwat / 4)
+			compl = 1;
+		else
+			compl = 0;
+
 		if (sb->sb_flags & SB_AUTOSIZE &&
 		    V_tcp_do_autosndbuf &&
 		    sb->sb_hiwat < V_tcp_autosndbuf_max &&
-		    sbspace(sb) < sb->sb_hiwat / 8 * 7) {
+		    sb->sb_cc >= sb->sb_hiwat * 7 / 8) {
 			int newsize = min(sb->sb_hiwat + V_tcp_autosndbuf_inc,
 			    V_tcp_autosndbuf_max);
 
 			if (!sbreserve_locked(sb, newsize, so, NULL))
 				sb->sb_flags &= ~SB_AUTOSIZE;
-			else {
-				sowwakeup_locked(so);	/* room available */
-				SOCKBUF_UNLOCK_ASSERT(sb);
-				goto unlocked;
-			}
+			else
+				sowwakeup = 1;	/* room available */
 		}
-		SOCKBUF_UNLOCK(sb);
-unlocked:
+		if (sowwakeup)
+			sowwakeup_locked(so);
+		else
+			SOCKBUF_UNLOCK(sb);
+		SOCKBUF_UNLOCK_ASSERT(sb);
 
 		/* nothing to send */
 		if (plen == 0) {
@@ -628,11 +708,12 @@
 		if (__predict_false(toep->flags & TPF_FIN_SENT))
 			panic("%s: excess tx.", __func__);
 
+		shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
 		if (plen <= max_imm) {
 
 			/* Immediate data tx */
 
-			wr = alloc_wrqe(roundup(sizeof(*txwr) + plen, 16),
+			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
 					toep->ofld_txq);
 			if (wr == NULL) {
 				/* XXX: how will we recover from this? */
@@ -641,9 +722,10 @@
 			}
 			txwr = wrtod(wr);
 			credits = howmany(wr->wr_len, 16);
-			write_tx_wr(txwr, toep, plen, plen, credits,
-			    tp->t_flags & TF_MORETOCOME);
+			write_tx_wr(txwr, toep, plen, plen, credits, shove, 0,
+			    sc->tt.tx_align);
 			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
+			nsegs = 0;
 		} else {
 			int wr_len;
 
@@ -651,7 +733,7 @@
 
 			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
 			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
-			wr = alloc_wrqe(roundup(wr_len, 16), toep->ofld_txq);
+			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
 			if (wr == NULL) {
 				/* XXX: how will we recover from this? */
 				toep->flags |= TPF_TX_SUSPENDED;
@@ -659,8 +741,8 @@
 			}
 			txwr = wrtod(wr);
 			credits = howmany(wr_len, 16);
-			write_tx_wr(txwr, toep, 0, plen, credits,
-			    tp->t_flags & TF_MORETOCOME);
+			write_tx_wr(txwr, toep, 0, plen, credits, shove, 0,
+			    sc->tt.tx_align);
 			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
 			    max_nsegs_1mbuf);
 			if (wr_len & 0xf) {
@@ -674,7 +756,18 @@
 			("%s: not enough credits", __func__));
 
 		toep->tx_credits -= credits;
+		toep->tx_nocompl += credits;
+		toep->plen_nocompl += plen;
+		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
+		    toep->tx_nocompl >= toep->tx_total / 4)
+			compl = 1;
 
+		if (compl || toep->ulp_mode == ULP_MODE_RDMA) {
+			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
+			toep->tx_nocompl = 0;
+			toep->plen_nocompl = 0;
+		}
+
 		tp->snd_nxt += plen;
 		tp->snd_max += plen;
 
@@ -684,6 +777,8 @@
 		SOCKBUF_UNLOCK(sb);
 
 		toep->flags |= TPF_TX_DATA_SENT;
+		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
+			toep->flags |= TPF_TX_SUSPENDED;
 
 		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
 		txsd->plen = plen;
@@ -703,6 +798,198 @@
 		close_conn(sc, toep);
 }
 
+static inline void
+rqdrop_locked(struct mbufq *q, int plen)
+{
+	struct mbuf *m;
+
+	while (plen > 0) {
+		m = mbufq_dequeue(q);
+
+		/* Too many credits. */
+		MPASS(m != NULL);
+		M_ASSERTPKTHDR(m);
+
+		/* Partial credits. */
+		MPASS(plen >= m->m_pkthdr.len);
+
+		plen -= m->m_pkthdr.len;
+		m_freem(m);
+	}
+}
+
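rqdrop_locked retires fully acknowledged PDUs from the reclaim queue.
Because the firmware acknowledges whole work requests, the byte count is
expected to be an exact sum of leading PDU lengths, which the two MPASS
checks enforce.  A standalone sketch of the same loop:

#include <assert.h>
#include <stddef.h>
#include <stdlib.h>

struct pdu {
	struct pdu *next;
	size_t len;
};

/* Free exactly 'plen' bytes worth of whole PDUs from the list head. */
static void
pduq_drop(struct pdu **head, size_t plen)
{
	struct pdu *p;

	while (plen > 0) {
		p = *head;
		assert(p != NULL);	/* acked more than was queued? */
		assert(plen >= p->len);	/* never a partial PDU */
		*head = p->next;
		plen -= p->len;
		free(p);
	}
}
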
+void
+t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
+{
+	struct mbuf *sndptr, *m;
+	struct fw_ofld_tx_data_wr *txwr;
+	struct wrqe *wr;
+	u_int plen, nsegs, credits, max_imm, max_nsegs, max_nsegs_1mbuf;
+	u_int adjusted_plen, ulp_submode;
+	struct inpcb *inp = toep->inp;
+	struct tcpcb *tp = intotcpcb(inp);
+	int tx_credits, shove;
+	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
+	struct mbufq *pduq = &toep->ulp_pduq;
+	static const u_int ulp_extra_len[] = {0, 4, 4, 8};
+
+	INP_WLOCK_ASSERT(inp);
+	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
+	    ("%s: flowc_wr not sent for tid %u.", __func__, toep->tid));
+	KASSERT(toep->ulp_mode == ULP_MODE_ISCSI,
+	    ("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
+
+	if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
+		return;
+
+	/*
+	 * This function doesn't resume by itself.  Someone else must clear the
+	 * flag and call this function.
+	 */
+	if (__predict_false(toep->flags & TPF_TX_SUSPENDED)) {
+		KASSERT(drop == 0,
+		    ("%s: drop (%d) != 0 but tx is suspended", __func__, drop));
+		return;
+	}
+
+	if (drop)
+		rqdrop_locked(&toep->ulp_pdu_reclaimq, drop);
+
+	while ((sndptr = mbufq_first(pduq)) != NULL) {
+		M_ASSERTPKTHDR(sndptr);
+
+		tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
+		max_imm = max_imm_payload(tx_credits);
+		max_nsegs = max_dsgl_nsegs(tx_credits);
+
+		plen = 0;
+		nsegs = 0;
+		max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
+		for (m = sndptr; m != NULL; m = m->m_next) {
+			int n = sglist_count(mtod(m, void *), m->m_len);
+
+			nsegs += n;
+			plen += m->m_len;
+
+			/*
+			 * This mbuf would send us _over_ the nsegs limit.
+			 * Suspend tx because the PDU can't be sent out.
+			 */
+			if (plen > max_imm && nsegs > max_nsegs) {
+				toep->flags |= TPF_TX_SUSPENDED;
+				return;
+			}
+
+			if (max_nsegs_1mbuf < n)
+				max_nsegs_1mbuf = n;
+		}
+
+		if (__predict_false(toep->flags & TPF_FIN_SENT))
+			panic("%s: excess tx.", __func__);
+
+		/*
+		 * We have a PDU to send.  All of it goes out in one WR so 'm'
+		 * is NULL.  A PDU's length is always a multiple of 4.
+		 */
+		MPASS(m == NULL);
+		MPASS((plen & 3) == 0);
+		MPASS(sndptr->m_pkthdr.len == plen);
+
+		shove = !(tp->t_flags & TF_MORETOCOME);
+		ulp_submode = mbuf_ulp_submode(sndptr);
+		MPASS(ulp_submode < nitems(ulp_extra_len));
+
+		/*
+		 * plen doesn't include header and data digests, which are
+		 * generated and inserted in the right places by the TOE, but
+		 * they do occupy TCP sequence space and need to be accounted
+		 * for.
+		 */
+		adjusted_plen = plen + ulp_extra_len[ulp_submode];
+		if (plen <= max_imm) {
+
+			/* Immediate data tx */
+
+			wr = alloc_wrqe(roundup2(sizeof(*txwr) + plen, 16),
+					toep->ofld_txq);
+			if (wr == NULL) {
+				/* XXX: how will we recover from this? */
+				toep->flags |= TPF_TX_SUSPENDED;
+				return;
+			}
+			txwr = wrtod(wr);
+			credits = howmany(wr->wr_len, 16);
+			write_tx_wr(txwr, toep, plen, adjusted_plen, credits,
+			    shove, ulp_submode, sc->tt.tx_align);
+			m_copydata(sndptr, 0, plen, (void *)(txwr + 1));
+			nsegs = 0;
+		} else {
+			int wr_len;
+
+			/* DSGL tx */
+			wr_len = sizeof(*txwr) + sizeof(struct ulptx_sgl) +
+			    ((3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1)) * 8;
+			wr = alloc_wrqe(roundup2(wr_len, 16), toep->ofld_txq);
+			if (wr == NULL) {
+				/* XXX: how will we recover from this? */
+				toep->flags |= TPF_TX_SUSPENDED;
+				return;
+			}
+			txwr = wrtod(wr);
+			credits = howmany(wr_len, 16);
+			write_tx_wr(txwr, toep, 0, adjusted_plen, credits,
+			    shove, ulp_submode, sc->tt.tx_align);
+			write_tx_sgl(txwr + 1, sndptr, m, nsegs,
+			    max_nsegs_1mbuf);
+			if (wr_len & 0xf) {
+				uint64_t *pad = (uint64_t *)
+				    ((uintptr_t)txwr + wr_len);
+				*pad = 0;
+			}
+		}
+
+		KASSERT(toep->tx_credits >= credits,
+			("%s: not enough credits", __func__));
+
+		m = mbufq_dequeue(pduq);
+		MPASS(m == sndptr);
+		mbufq_enqueue(&toep->ulp_pdu_reclaimq, m);
+
+		toep->tx_credits -= credits;
+		toep->tx_nocompl += credits;
+		toep->plen_nocompl += plen;
+		if (toep->tx_credits <= toep->tx_total * 3 / 8 &&
+		    toep->tx_nocompl >= toep->tx_total / 4) {
+			txwr->op_to_immdlen |= htobe32(F_FW_WR_COMPL);
+			toep->tx_nocompl = 0;
+			toep->plen_nocompl = 0;
+		}
+
+		tp->snd_nxt += adjusted_plen;
+		tp->snd_max += adjusted_plen;
+
+		toep->flags |= TPF_TX_DATA_SENT;
+		if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
+			toep->flags |= TPF_TX_SUSPENDED;
+
+		KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+		txsd->plen = plen;
+		txsd->tx_credits = credits;
+		txsd++;
+		if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) {
+			toep->txsd_pidx = 0;
+			txsd = &toep->txsd[0];
+		}
+		toep->txsd_avail--;
+
+		t4_l2t_send(sc, wr, toep->l2te);
+	}
+
+	/* Send a FIN if requested, but only if there are no more PDUs to send */
+	if (mbufq_first(pduq) == NULL && toep->flags & TPF_SEND_FIN)
+		close_conn(sc, toep);
+}
+
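Like t4_push_frames, t4_push_pdus no longer requests a firmware
completion for every work request; F_FW_WR_COMPL is set only once enough
uncompleted credits pile up, bounding how stale the credit picture can
get without paying for an ack message per WR.  The test, as a sketch:

/* tx_total is the connection's full tx credit allotment. */
static int
want_completion(unsigned tx_credits, unsigned tx_nocompl,
    unsigned tx_total)
{

	return (tx_credits <= tx_total * 3 / 8 &&
	    tx_nocompl >= tx_total / 4);
}

When it fires, the tx_nocompl and plen_nocompl counters reset, so the
next completion is requested only after another quarter of the credits
has been consumed.
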
 int
 t4_tod_output(struct toedev *tod, struct tcpcb *tp)
 {
@@ -717,7 +1004,10 @@
 	    ("%s: inp %p dropped.", __func__, inp));
 	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
 
-	t4_push_frames(sc, toep);
+	if (toep->ulp_mode == ULP_MODE_ISCSI)
+		t4_push_pdus(sc, toep, 0);
+	else
+		t4_push_frames(sc, toep, 0);
 
 	return (0);
 }
@@ -737,7 +1027,12 @@
 	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
 
 	toep->flags |= TPF_SEND_FIN;
-	t4_push_frames(sc, toep);
+	if (tp->t_state >= TCPS_ESTABLISHED) {
+		if (toep->ulp_mode == ULP_MODE_ISCSI)
+			t4_push_pdus(sc, toep, 0);
+		else
+			t4_push_frames(sc, toep, 0);
+	}
 
 	return (0);
 }
@@ -811,7 +1106,8 @@
 
 	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
 
-	INP_INFO_WLOCK(&V_tcbinfo);
+	CURVNET_SET(toep->vnet);
+	INP_INFO_RLOCK(&V_tcbinfo);
 	INP_WLOCK(inp);
 	tp = intotcpcb(inp);
 
@@ -827,32 +1123,15 @@
 	sb = &so->so_rcv;
 	SOCKBUF_LOCK(sb);
 	if (__predict_false(toep->ddp_flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE))) {
-		m = m_get(M_NOWAIT, MT_DATA);
-		if (m == NULL)
-			CXGBE_UNIMPLEMENTED("mbuf alloc failure");
-
-		m->m_len = be32toh(cpl->rcv_nxt) - tp->rcv_nxt;
-		m->m_flags |= M_DDP;	/* Data is already where it should be */
-		m->m_data = "nothing to see here";
-		tp->rcv_nxt = be32toh(cpl->rcv_nxt);
-
-		toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
-
-		KASSERT(toep->sb_cc >= sb->sb_cc,
-		    ("%s: sb %p has more data (%d) than last time (%d).",
-		    __func__, sb, sb->sb_cc, toep->sb_cc));
-		toep->rx_credits += toep->sb_cc - sb->sb_cc;
-#ifdef USE_DDP_RX_FLOW_CONTROL
-		toep->rx_credits -= m->m_len;	/* adjust for F_RX_FC_DDP */
-#endif
-		sbappendstream_locked(sb, m);
-		toep->sb_cc = sb->sb_cc;
+		handle_ddp_close(toep, tp, sb, cpl->rcv_nxt);
 	}
 	socantrcvmore_locked(so);	/* unlocks the sockbuf */
 
-	KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
-	    ("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
-	    be32toh(cpl->rcv_nxt)));
+	if (toep->ulp_mode != ULP_MODE_RDMA) {
+		KASSERT(tp->rcv_nxt == be32toh(cpl->rcv_nxt),
+	    		("%s: rcv_nxt mismatch: %u %u", __func__, tp->rcv_nxt,
+	    		be32toh(cpl->rcv_nxt)));
+	}
 
 	switch (tp->t_state) {
 	case TCPS_SYN_RECEIVED:
@@ -870,7 +1149,8 @@
 	case TCPS_FIN_WAIT_2:
 		tcp_twstart(tp);
 		INP_UNLOCK_ASSERT(inp);	 /* safe, we have a ref on the inp */
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+		INP_INFO_RUNLOCK(&V_tcbinfo);
+		CURVNET_RESTORE();
 
 		INP_WLOCK(inp);
 		final_cpl_received(toep);
@@ -882,7 +1162,8 @@
 	}
 done:
 	INP_WUNLOCK(inp);
-	INP_INFO_WUNLOCK(&V_tcbinfo);
+	INP_INFO_RUNLOCK(&V_tcbinfo);
+	CURVNET_RESTORE();
 	return (0);
 }
 
@@ -909,7 +1190,8 @@
 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
 	KASSERT(toep->tid == tid, ("%s: toep tid mismatch", __func__));
 
-	INP_INFO_WLOCK(&V_tcbinfo);
+	CURVNET_SET(toep->vnet);
+	INP_INFO_RLOCK(&V_tcbinfo);
 	INP_WLOCK(inp);
 	tp = intotcpcb(inp);
 
@@ -927,7 +1209,8 @@
 		tcp_twstart(tp);
 release:
 		INP_UNLOCK_ASSERT(inp);	/* safe, we have a ref on the  inp */
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+		INP_INFO_RUNLOCK(&V_tcbinfo);
+		CURVNET_RESTORE();
 
 		INP_WLOCK(inp);
 		final_cpl_received(toep);	/* no more CPLs expected */
@@ -951,7 +1234,8 @@
 	}
 done:
 	INP_WUNLOCK(inp);
-	INP_INFO_WUNLOCK(&V_tcbinfo);
+	INP_INFO_RUNLOCK(&V_tcbinfo);
+	CURVNET_RESTORE();
 	return (0);
 }
 
@@ -1025,7 +1309,8 @@
 	}
 
 	inp = toep->inp;
-	INP_INFO_WLOCK(&V_tcbinfo);	/* for tcp_close */
+	CURVNET_SET(toep->vnet);
+	INP_INFO_RLOCK(&V_tcbinfo);	/* for tcp_close */
 	INP_WLOCK(inp);
 
 	tp = intotcpcb(inp);
@@ -1059,7 +1344,8 @@
 
 	final_cpl_received(toep);
 done:
-	INP_INFO_WUNLOCK(&V_tcbinfo);
+	INP_INFO_RUNLOCK(&V_tcbinfo);
+	CURVNET_RESTORE();
 	send_abort_rpl(sc, ofld_txq, tid, CPL_ABORT_NO_RST);
 	return (0);
 }
@@ -1158,7 +1444,11 @@
 		ddp_placed = be32toh(cpl->seq) - tp->rcv_nxt;
 
 	tp->rcv_nxt += len;
-	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
+	if (tp->rcv_wnd < len) {
+		KASSERT(toep->ulp_mode == ULP_MODE_RDMA,
+				("%s: negative window size", __func__));
+	}
+
 	tp->rcv_wnd -= len;
 	tp->t_rcvtime = ticks;
 
@@ -1173,17 +1463,21 @@
 		SOCKBUF_UNLOCK(sb);
 		INP_WUNLOCK(inp);
 
-		INP_INFO_WLOCK(&V_tcbinfo);
+		CURVNET_SET(toep->vnet);
+		INP_INFO_RLOCK(&V_tcbinfo);
 		INP_WLOCK(inp);
 		tp = tcp_drop(tp, ECONNRESET);
 		if (tp)
 			INP_WUNLOCK(inp);
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+		INP_INFO_RUNLOCK(&V_tcbinfo);
+		CURVNET_RESTORE();
 
 		return (0);
 	}
 
 	/* receive buffer autosize */
+	MPASS(toep->vnet == so->so_vnet);
+	CURVNET_SET(toep->vnet);
 	if (sb->sb_flags & SB_AUTOSIZE &&
 	    V_tcp_do_autorcvbuf &&
 	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
@@ -1260,10 +1554,19 @@
 	toep->rx_credits += toep->sb_cc - sb->sb_cc;
 	sbappendstream_locked(sb, m);
 	toep->sb_cc = sb->sb_cc;
+	if (toep->rx_credits > 0 && toep->sb_cc + tp->rcv_wnd < sb->sb_lowat) {
+		int credits;
+
+		credits = send_rx_credits(sc, toep, toep->rx_credits);
+		toep->rx_credits -= credits;
+		tp->rcv_wnd += credits;
+		tp->rcv_adv += credits;
+	}
 	sorwakeup_locked(so);
 	SOCKBUF_UNLOCK_ASSERT(sb);
 
 	INP_WUNLOCK(inp);
+	CURVNET_RESTORE();
 	return (0);
 }
 
@@ -1272,18 +1575,18 @@
 #define V_CPL_FW4_ACK_OPCODE(x) ((x) << S_CPL_FW4_ACK_OPCODE)
 #define G_CPL_FW4_ACK_OPCODE(x) \
     (((x) >> S_CPL_FW4_ACK_OPCODE) & M_CPL_FW4_ACK_OPCODE)
- 
+
 #define S_CPL_FW4_ACK_FLOWID    0
 #define M_CPL_FW4_ACK_FLOWID    0xffffff
 #define V_CPL_FW4_ACK_FLOWID(x) ((x) << S_CPL_FW4_ACK_FLOWID)
 #define G_CPL_FW4_ACK_FLOWID(x) \
     (((x) >> S_CPL_FW4_ACK_FLOWID) & M_CPL_FW4_ACK_FLOWID)
- 
+
 #define S_CPL_FW4_ACK_CR        24
 #define M_CPL_FW4_ACK_CR        0xff
 #define V_CPL_FW4_ACK_CR(x)     ((x) << S_CPL_FW4_ACK_CR)
 #define G_CPL_FW4_ACK_CR(x)     (((x) >> S_CPL_FW4_ACK_CR) & M_CPL_FW4_ACK_CR)
- 
+
 #define S_CPL_FW4_ACK_SEQVAL    0
 #define M_CPL_FW4_ACK_SEQVAL    0x1
 #define V_CPL_FW4_ACK_SEQVAL(x) ((x) << S_CPL_FW4_ACK_SEQVAL)
@@ -1373,32 +1676,58 @@
 		}
 	}
 
-	if (plen > 0) {
+	if (toep->tx_credits == toep->tx_total) {
+		toep->tx_nocompl = 0;
+		toep->plen_nocompl = 0;
+	}
+
+	if (toep->flags & TPF_TX_SUSPENDED &&
+	    toep->tx_credits >= toep->tx_total / 4) {
+		toep->flags &= ~TPF_TX_SUSPENDED;
+		CURVNET_SET(toep->vnet);
+		if (toep->ulp_mode == ULP_MODE_ISCSI)
+			t4_push_pdus(sc, toep, plen);
+		else
+			t4_push_frames(sc, toep, plen);
+		CURVNET_RESTORE();
+	} else if (plen > 0) {
 		struct sockbuf *sb = &so->so_snd;
+		int sbu;
 
 		SOCKBUF_LOCK(sb);
-		sbdrop_locked(sb, plen);
-		sowwakeup_locked(so);
+		sbu = sb->sb_cc;
+		if (toep->ulp_mode == ULP_MODE_ISCSI) {
+
+			if (__predict_false(sbu > 0)) {
+				/*
+				 * The data transmitted before the tid's ULP mode
+				 * changed to ISCSI is still in so_snd.
+				 * Incoming credits should account for so_snd
+				 * first.
+				 */
+				sbdrop_locked(sb, min(sbu, plen));
+				plen -= min(sbu, plen);
+			}
+			sowwakeup_locked(so);	/* unlocks so_snd */
+			rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
+		} else {
+			sbdrop_locked(sb, plen);
+			sowwakeup_locked(so);	/* unlocks so_snd */
+		}
 		SOCKBUF_UNLOCK_ASSERT(sb);
 	}
 
-	/* XXX */
-	if ((toep->flags & TPF_TX_SUSPENDED &&
-	    toep->tx_credits >= MIN_OFLD_TX_CREDITS) ||
-	    toep->tx_credits == toep->txsd_total *
-	    howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16)) {
-		toep->flags &= ~TPF_TX_SUSPENDED;
-		t4_push_frames(sc, toep);
-	}
 	INP_WUNLOCK(inp);
 
 	return (0);
 }
 
-static int
+int
 do_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 {
+#ifdef INVARIANTS
 	struct adapter *sc = iq->adapter;
+#endif
 	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
 	unsigned int tid = GET_TID(cpl);
 #ifdef INVARIANTS
@@ -1408,22 +1737,35 @@
 	KASSERT(opcode == CPL_SET_TCB_RPL,
 	    ("%s: unexpected opcode 0x%x", __func__, opcode));
 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
+	MPASS(iq != &sc->sge.fwq);
 
-	if (tid >= sc->tids.ftid_base &&
-	    tid < sc->tids.ftid_base + sc->tids.nftids)
-		return (t4_filter_rpl(iq, rss, m)); /* TCB is a filter */
+	/*
+	 * TOM and/or other ULPs don't request replies for CPL_SET_TCB or
+	 * CPL_SET_TCB_FIELD requests.  This can easily change and when it does
+	 * the dispatch code will go here.
+	 */
+#ifdef INVARIANTS
+	panic("%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p", __func__,
+	    tid, iq);
+#else
+	log(LOG_ERR, "%s: Unexpected CPL_SET_TCB_RPL for tid %u on iq %p\n",
+	    __func__, tid, iq);
+#endif
 
-	CXGBE_UNIMPLEMENTED(__func__);
+	return (0);
 }
 
 void
-t4_set_tcb_field(struct adapter *sc, struct toepcb *toep, uint16_t word,
-    uint64_t mask, uint64_t val)
+t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, int tid,
+    uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie, int iqid)
 {
 	struct wrqe *wr;
 	struct cpl_set_tcb_field *req;
 
-	wr = alloc_wrqe(sizeof(*req), toep->ctrlq);
+	MPASS((cookie & ~M_COOKIE) == 0);
+	MPASS((iqid & ~M_QUEUENO) == 0);
+
+	wr = alloc_wrqe(sizeof(*req), wrq);
 	if (wr == NULL) {
 		/* XXX */
 		panic("%s: allocation failure.", __func__);
@@ -1430,10 +1772,11 @@
 	}
 	req = wrtod(wr);
 
-	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
-	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
-	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
-	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+	INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
+	req->reply_ctrl = htobe16(V_QUEUENO(iqid));
+	if (reply == 0)
+		req->reply_ctrl |= htobe16(F_NO_REPLY);
+	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
 	req->mask = htobe64(mask);
 	req->val = htobe64(val);
 
@@ -1441,22 +1784,26 @@
 }
 
 void
-t4_init_cpl_io_handlers(struct adapter *sc)
+t4_init_cpl_io_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_PEER_CLOSE, do_peer_close);
-	t4_register_cpl_handler(sc, CPL_CLOSE_CON_RPL, do_close_con_rpl);
-	t4_register_cpl_handler(sc, CPL_ABORT_REQ_RSS, do_abort_req);
-	t4_register_cpl_handler(sc, CPL_ABORT_RPL_RSS, do_abort_rpl);
-	t4_register_cpl_handler(sc, CPL_RX_DATA, do_rx_data);
-	t4_register_cpl_handler(sc, CPL_FW4_ACK, do_fw4_ack);
-	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, do_set_tcb_rpl);
+	t4_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
+	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
+	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
+	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
+	t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
+	t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
 }
 
 void
-t4_uninit_cpl_io_handlers(struct adapter *sc)
+t4_uninit_cpl_io_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, t4_filter_rpl);
+	t4_register_cpl_handler(CPL_PEER_CLOSE, NULL);
+	t4_register_cpl_handler(CPL_CLOSE_CON_RPL, NULL);
+	t4_register_cpl_handler(CPL_ABORT_REQ_RSS, NULL);
+	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, NULL);
+	t4_register_cpl_handler(CPL_RX_DATA, NULL);
+	t4_register_cpl_handler(CPL_FW4_ACK, NULL);
 }
 #endif
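
The registration API above loses its struct adapter argument: CPL
handlers now live in one driver-wide table indexed by opcode (see the
extern t4_cpl_handler[] used in t4_ddp.c below), and registering NULL at
unload restores the default.  A sketch of that dispatch shape (names and
sizes hypothetical):

#include <stddef.h>

typedef int (*cpl_handler_t)(const void *msg);

static cpl_handler_t handlers[256];	/* one slot per CPL opcode */

static int
unexpected_cpl(const void *msg)
{

	return (-1);	/* a real driver would log or panic here */
}

static void
register_handler(unsigned op, cpl_handler_t h)
{

	handlers[op] = h;	/* NULL means "not handled" */
}

static int
dispatch(unsigned op, const void *msg)
{
	cpl_handler_t h = handlers[op];

	return (h != NULL ? h(msg) : unexpected_cpl(msg));
}
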

Modified: trunk/sys/dev/cxgbe/tom/t4_ddp.c
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_ddp.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_ddp.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_ddp.c 248078 2013-03-09 00:39:54Z marius $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_ddp.c 312337 2017-01-17 07:43:37Z np $");
 
 #include "opt_inet.h"
 
@@ -65,121 +66,40 @@
 #include "common/t4_tcb.h"
 #include "tom/t4_tom.h"
 
-#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
-#define PPOD_SIZE	(PPOD_SZ(1))
+VNET_DECLARE(int, tcp_do_autorcvbuf);
+#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
+VNET_DECLARE(int, tcp_autorcvbuf_inc);
+#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
+VNET_DECLARE(int, tcp_autorcvbuf_max);
+#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
 
-/* XXX: must match A_ULP_RX_TDDP_PSZ */ 
-static int t4_ddp_pgsz[] = {4096, 4096 << 2, 4096 << 4, 4096 << 6};
+static struct mbuf *get_ddp_mbuf(int len);
 
-#if 0
-static void
-t4_dump_tcb(struct adapter *sc, int tid)
-{
-	uint32_t tcb_base, off, i, j;
+#define MAX_DDP_BUFFER_SIZE		(M_TCB_RX_DDP_BUF0_LEN)
 
-	/* Dump TCB for the tid */
-	tcb_base = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
-	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2),
-	    tcb_base + tid * TCB_SIZE);
-	t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2));
-	off = 0;
-	printf("\n");
-	for (i = 0; i < 4; i++) {
-		uint32_t buf[8];
-		for (j = 0; j < 8; j++, off += 4)
-			buf[j] = htonl(t4_read_reg(sc, MEMWIN2_BASE + off));
-
-		printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
-		    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
-		    buf[7]);
-	}
-}
-#endif
-
-#define MAX_DDP_BUFFER_SIZE		(M_TCB_RX_DDP_BUF0_LEN)
-static int
-alloc_ppods(struct tom_data *td, int n, struct ppod_region *pr)
+static struct ddp_buffer *
+alloc_ddp_buffer(vm_page_t *pages, int npages, int offset, int len)
 {
-	int ppod;
+	struct ddp_buffer *db;
 
-	KASSERT(n > 0, ("%s: nonsense allocation (%d)", __func__, n));
-
-	mtx_lock(&td->ppod_lock);
-	if (n > td->nppods_free) {
-		mtx_unlock(&td->ppod_lock);
-		return (-1);
+	db = malloc(sizeof(*db), M_CXGBE, M_NOWAIT | M_ZERO);
+	if (db == NULL) {
+		CTR1(KTR_CXGBE, "%s: malloc failed.", __func__);
+		return (NULL);
 	}
 
-	if (td->nppods_free_head >= n) {
-		td->nppods_free_head -= n;
-		ppod = td->nppods_free_head;
-		TAILQ_INSERT_HEAD(&td->ppods, pr, link);
-	} else {
-		struct ppod_region *p;
+	db->npages = npages;
+	db->pages = pages;
+	db->offset = offset;
+	db->len = len;
 
-		ppod = td->nppods_free_head;
-		TAILQ_FOREACH(p, &td->ppods, link) {
-			ppod += p->used + p->free;
-			if (n <= p->free) {
-				ppod -= n;
-				p->free -= n;
-				TAILQ_INSERT_AFTER(&td->ppods, p, pr, link);
-				goto allocated;
-			}
-		}
-
-		if (__predict_false(ppod != td->nppods)) {
-			panic("%s: ppods TAILQ (%p) corrupt."
-			    "  At %d instead of %d at the end of the queue.",
-			    __func__, &td->ppods, ppod, td->nppods);
-		}
-
-		mtx_unlock(&td->ppod_lock);
-		return (-1);
-	}
-
-allocated:
-	pr->used = n;
-	pr->free = 0;
-	td->nppods_free -= n;
-	mtx_unlock(&td->ppod_lock);
-
-	return (ppod);
+	return (db);
 }
 
 static void
-free_ppods(struct tom_data *td, struct ppod_region *pr)
+free_ddp_buffer(struct ddp_buffer *db)
 {
-	struct ppod_region *p;
 
-	KASSERT(pr->used > 0, ("%s: nonsense free (%d)", __func__, pr->used));
-
-	mtx_lock(&td->ppod_lock);
-	p = TAILQ_PREV(pr, ppod_head, link);
-	if (p != NULL)
-		p->free += pr->used + pr->free;
-	else
-		td->nppods_free_head += pr->used + pr->free;
-	td->nppods_free += pr->used;
-	KASSERT(td->nppods_free <= td->nppods,
-	    ("%s: nppods_free (%d) > nppods (%d).  %d freed this time.",
-	    __func__, td->nppods_free, td->nppods, pr->used));
-	TAILQ_REMOVE(&td->ppods, pr, link);
-	mtx_unlock(&td->ppod_lock);
-}
-
-static inline int
-pages_to_nppods(int npages, int ddp_pgsz)
-{
-	int nsegs = npages * PAGE_SIZE / ddp_pgsz;
-
-	return (howmany(nsegs, PPOD_PAGES));
-}
-
-static void
-free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
-{
-
 	if (db == NULL)
 		return;
 
@@ -186,8 +106,8 @@
 	if (db->pages)
 		free(db->pages, M_CXGBE);
 
-	if (db->nppods > 0)
-		free_ppods(td, &db->ppod_region);
+	if (db->prsv.prsv_nppods > 0)
+		t4_free_page_pods(&db->prsv);
 
 	free(db, M_CXGBE);
 }
@@ -199,7 +119,7 @@
 
 	for (i = 0; i < nitems(toep->db); i++) {
 		if (toep->db[i] != NULL) {
-			free_ddp_buffer(toep->td, toep->db[i]);
+			free_ddp_buffer(toep->db[i]);
 			toep->db[i] = NULL;
 		}
 	}
@@ -217,13 +137,7 @@
 	INP_WLOCK_ASSERT(inp);
 	SOCKBUF_LOCK_ASSERT(sb);
 
-	m = m_get(M_NOWAIT, MT_DATA);
-	if (m == NULL)
-		CXGBE_UNIMPLEMENTED("mbuf alloc failure");
-	m->m_len = n;
-	m->m_flags |= M_DDP;	/* Data is already where it should be */
-	m->m_data = "nothing to see here";
-
+	m = get_ddp_mbuf(n);
 	tp->rcv_nxt += n;
 #ifndef USE_DDP_RX_FLOW_CONTROL
 	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
@@ -358,8 +272,8 @@
 	 * The ULPTX master commands that follow must all end at 16B boundaries
 	 * too so we round up the size to 16.
 	 */
-	len = sizeof(*wrh) + 3 * roundup(LEN__SET_TCB_FIELD_ULP, 16) +
-	    roundup(LEN__RX_DATA_ACK_ULP, 16);
+	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
+	    roundup2(LEN__RX_DATA_ACK_ULP, 16);
 
 	wr = alloc_wrqe(len, toep->ctrlq);
 	if (wr == NULL)
@@ -372,7 +286,7 @@
 	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
 	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
 	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
-	    V_TCB_RX_DDP_BUF0_TAG(db->tag));
+	    V_TCB_RX_DDP_BUF0_TAG(db->prsv.prsv_tag));
 
 	/* Update the current offset in the DDP buffer and its total length */
 	if (db_idx == 0)
@@ -450,6 +364,19 @@
 	}
 
 	tp = intotcpcb(inp);
+
+	/*
+	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
+	 * sequence number of the next byte to receive.  The length of
+	 * the data received for this message must be computed by
+	 * comparing the new and old values of rcv_nxt.
+	 *
+	 * For RX_DATA_DDP, len might be non-zero, but it is only the
+	 * length of the most recent DMA.  It does not include the
+	 * total length of the data received since the previous update
+	 * for this DDP buffer.  rcv_nxt is the sequence number of the
+	 * first received byte from the most recent DMA.
+	 */
 	len += be32toh(rcv_nxt) - tp->rcv_nxt;
 	tp->rcv_nxt += len;
 	tp->t_rcvtime = ticks;
@@ -457,14 +384,8 @@
 	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
 	tp->rcv_wnd -= len;
 #endif
+	m = get_ddp_mbuf(len);
 
-	m = m_get(M_NOWAIT, MT_DATA);
-	if (m == NULL)
-		CXGBE_UNIMPLEMENTED("mbuf alloc failure");
-	m->m_len = len;
-	m->m_flags |= M_DDP;	/* Data is already where it should be */
-	m->m_data = "nothing to see here";
-
 	SOCKBUF_LOCK(sb);
 	if (report & F_DDP_BUF_COMPLETE)
 		toep->ddp_score = DDP_HIGH_SCORE;
@@ -471,6 +392,24 @@
 	else
 		discourage_ddp(toep);
 
+	/* receive buffer autosize */
+	MPASS(toep->vnet == so->so_vnet);
+	CURVNET_SET(toep->vnet);
+	if (sb->sb_flags & SB_AUTOSIZE &&
+	    V_tcp_do_autorcvbuf &&
+	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
+	    len > (sbspace(sb) / 8 * 7)) {
+		unsigned int hiwat = sb->sb_hiwat;
+		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
+		    V_tcp_autorcvbuf_max);
+
+		if (!sbreserve_locked(sb, newsize, so, NULL))
+			sb->sb_flags &= ~SB_AUTOSIZE;
+		else
+			toep->rx_credits += newsize - hiwat;
+	}
+	CURVNET_RESTORE();
+
 	KASSERT(toep->sb_cc >= sb->sb_cc,
 	    ("%s: sb %p has more data (%d) than last time (%d).",
 	    __func__, sb, sb->sb_cc, toep->sb_cc));
@@ -492,11 +431,44 @@
 	return (0);
 }
 
+void
+handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, struct sockbuf *sb,
+    __be32 rcv_nxt)
+{
+	struct mbuf *m;
+	int len;
+
+	SOCKBUF_LOCK_ASSERT(sb);
+	INP_WLOCK_ASSERT(toep->inp);
+	len = be32toh(rcv_nxt) - tp->rcv_nxt;
+
+	/* Signal handle_ddp() to break out of its sleep loop. */
+	toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
+	if (len == 0)
+		return;
+
+	tp->rcv_nxt += len;
+	KASSERT(toep->sb_cc >= sb->sb_cc,
+	    ("%s: sb %p has more data (%d) than last time (%d).",
+	    __func__, sb, sb->sb_cc, toep->sb_cc));
+	toep->rx_credits += toep->sb_cc - sb->sb_cc;
+#ifdef USE_DDP_RX_FLOW_CONTROL
+	toep->rx_credits -= len;	/* adjust for F_RX_FC_DDP */
+#endif
+
+	m = get_ddp_mbuf(len);
+
+	sbappendstream_locked(sb, m);
+	toep->sb_cc = sb->sb_cc;
+}
+
 #define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
 	 F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
 	 F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
 	 F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)
 
+extern cpl_handler_t t4_cpl_handler[];
+
 static int
 do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
 {
@@ -517,6 +489,11 @@
 		    __func__, vld, tid, toep);
 	}
 
+	if (toep->ulp_mode == ULP_MODE_ISCSI) {
+		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
+		return (0);
+	}
+
 	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));
 
 	return (0);
@@ -553,13 +530,14 @@
 	    __func__, toep->tid, time_uptime);
 
 	toep->ddp_flags |= DDP_SC_REQ;
-	t4_set_tcb_field(sc, toep, W_TCB_RX_DDP_FLAGS,
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS,
 	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
 	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
 	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
-	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1));
-	t4_set_tcb_field(sc, toep, W_TCB_T_FLAGS,
-	    V_TF_RCV_COALESCE_ENABLE(1), 0);
+	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0,
+	    toep->ofld_rxq->iq.abs_id);
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
+	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0, toep->ofld_rxq->iq.abs_id);
 }
 
 static inline void
@@ -574,10 +552,11 @@
 	    __func__, toep->tid, time_uptime);
 
 	toep->ddp_flags |= DDP_SC_REQ;
-	t4_set_tcb_field(sc, toep, W_TCB_T_FLAGS,
-	    V_TF_RCV_COALESCE_ENABLE(1), V_TF_RCV_COALESCE_ENABLE(1));
-	t4_set_tcb_field(sc, toep, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1),
-	    V_TF_DDP_OFF(1));
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
+	    V_TF_RCV_COALESCE_ENABLE(1), V_TF_RCV_COALESCE_ENABLE(1), 0, 0,
+	    toep->ofld_rxq->iq.abs_id);
+	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_RX_DDP_FLAGS,
+	    V_TF_DDP_OFF(1), V_TF_DDP_OFF(1), 0, 0, toep->ofld_rxq->iq.abs_id);
 }
 
 static int
@@ -659,14 +638,54 @@
 	return (b);
 }
 
-static struct ddp_buffer *
-alloc_ddp_buffer(struct tom_data *td, vm_page_t *pages, int npages, int offset,
-    int len)
+static inline int
+pages_to_nppods(int npages, int ddp_page_shift)
 {
-	int i, hcf, seglen, idx, ppod, nppods;
-	struct ddp_buffer *db;
 
+	MPASS(ddp_page_shift >= PAGE_SHIFT);
+
+	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
+}
+
+static int
+alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
+    struct ppod_reservation *prsv)
+{
+	vmem_addr_t addr;       /* relative to start of region */
+
+	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
+	    &addr) != 0)
+		return (ENOMEM);
+
+	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
+	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
+	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);
+
 	/*
+	 * The hardware tagmask includes an extra invalid bit but the arena was
+	 * seeded with valid values only.  An allocation out of this arena will
+	 * fit inside the tagmask but won't have the invalid bit set.
+	 */
+	MPASS((addr & pr->pr_tag_mask) == addr);
+	MPASS((addr & pr->pr_invalid_bit) == 0);
+
+	prsv->prsv_pr = pr;
+	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
+	prsv->prsv_nppods = nppods;
+
+	return (0);
+}
+
+int
+t4_alloc_page_pods_for_db(struct ppod_region *pr, struct ddp_buffer *db)
+{
+	int i, hcf, seglen, idx, nppods;
+	struct ppod_reservation *prsv = &db->prsv;
+
+	KASSERT(prsv->prsv_nppods == 0,
+	    ("%s: page pods already allocated", __func__));
+
+	/*
 	 * The DDP page size is unrelated to the VM page size.  We combine
 	 * contiguous physical pages into larger segments to get the best DDP
 	 * page size possible.  This is the largest of the four sizes in
@@ -674,97 +693,157 @@
 	 * the page list.
 	 */
 	hcf = 0;
-	for (i = 0; i < npages; i++) {
+	for (i = 0; i < db->npages; i++) {
 		seglen = PAGE_SIZE;
-		while (i < npages - 1 &&
-		    pages[i]->phys_addr + PAGE_SIZE == pages[i + 1]->phys_addr) {
+		while (i < db->npages - 1 &&
+		    db->pages[i]->phys_addr + PAGE_SIZE ==
+		    db->pages[i + 1]->phys_addr) {
 			seglen += PAGE_SIZE;
 			i++;
 		}
 
 		hcf = calculate_hcf(hcf, seglen);
-		if (hcf < t4_ddp_pgsz[1]) {
+		if (hcf < (1 << pr->pr_page_shift[1])) {
 			idx = 0;
 			goto have_pgsz;	/* give up, short circuit */
 		}
 	}
 
-	if (hcf % t4_ddp_pgsz[0] != 0) {
-		/* hmmm.  This could only happen when PAGE_SIZE < 4K */
-		KASSERT(PAGE_SIZE < 4096,
-		    ("%s: PAGE_SIZE %d, hcf %d", __func__, PAGE_SIZE, hcf));
-		CTR3(KTR_CXGBE, "%s: PAGE_SIZE %d, hcf %d",
-		    __func__, PAGE_SIZE, hcf);
-		return (NULL);
+#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
+	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
+	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
+		if ((hcf & PR_PAGE_MASK(idx)) == 0)
+			break;
 	}
+#undef PR_PAGE_MASK
 
-	for (idx = nitems(t4_ddp_pgsz) - 1; idx > 0; idx--) {
-		if (hcf % t4_ddp_pgsz[idx] == 0)
-			break;
-	}
 have_pgsz:
+	MPASS(idx <= M_PPOD_PGSZ);
 
-	db = malloc(sizeof(*db), M_CXGBE, M_NOWAIT);
-	if (db == NULL) {
-		CTR1(KTR_CXGBE, "%s: malloc failed.", __func__);
-		return (NULL);
+	nppods = pages_to_nppods(db->npages, pr->pr_page_shift[idx]);
+	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
+		return (0);
+	MPASS(prsv->prsv_nppods > 0);
+
+	return (1);
+}
+
+int
+t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
+    struct ppod_reservation *prsv)
+{
+	int hcf, seglen, idx, npages, nppods;
+	uintptr_t start_pva, end_pva, pva, p1;
+
+	MPASS(buf > 0);
+	MPASS(len > 0);
+
+	/*
+	 * The DDP page size is unrelated to the VM page size.  We combine
+	 * contiguous physical pages into larger segments to get the best DDP
+	 * page size possible.  This is the largest of the four sizes in
+	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
+	 * in the page list.
+	 */
+	hcf = 0;
+	start_pva = trunc_page(buf);
+	end_pva = trunc_page(buf + len - 1);
+	pva = start_pva;
+	while (pva <= end_pva) {
+		seglen = PAGE_SIZE;
+		p1 = pmap_kextract(pva);
+		pva += PAGE_SIZE;
+		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
+			seglen += PAGE_SIZE;
+			pva += PAGE_SIZE;
+		}
+
+		hcf = calculate_hcf(hcf, seglen);
+		if (hcf < (1 << pr->pr_page_shift[1])) {
+			idx = 0;
+			goto have_pgsz;	/* give up, short circuit */
+		}
 	}
 
-	nppods = pages_to_nppods(npages, t4_ddp_pgsz[idx]);
-	ppod = alloc_ppods(td, nppods, &db->ppod_region);
-	if (ppod < 0) {
-		free(db, M_CXGBE);
-		CTR4(KTR_CXGBE, "%s: no pods, nppods %d, resid %d, pgsz %d",
-		    __func__, nppods, len, t4_ddp_pgsz[idx]);
-		return (NULL);
+#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
+	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
+	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
+		if ((hcf & PR_PAGE_MASK(idx)) == 0)
+			break;
 	}
+#undef PR_PAGE_MASK
 
-	KASSERT(idx <= M_PPOD_PGSZ && ppod <= M_PPOD_TAG,
-	    ("%s: DDP pgsz_idx = %d, ppod = %d", __func__, idx, ppod));
+have_pgsz:
+	MPASS(idx <= M_PPOD_PGSZ);
 
-	db->tag = V_PPOD_PGSZ(idx) | V_PPOD_TAG(ppod);
-	db->nppods = nppods;
-	db->npages = npages;
-	db->pages = pages;
-	db->offset = offset;
-	db->len = len;
+	npages = 1;
+	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
+	nppods = howmany(npages, PPOD_PAGES);
+	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
+		return (ENOMEM);
+	MPASS(prsv->prsv_nppods > 0);
 
-	CTR6(KTR_CXGBE, "New DDP buffer.  "
-	    "ddp_pgsz %d, ppod 0x%x, npages %d, nppods %d, offset %d, len %d",
-	    t4_ddp_pgsz[idx], ppod, db->npages, db->nppods, db->offset,
-	    db->len);
+	return (0);
+}
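
Both page-pod allocators pick the DDP page size the same way: fold every
physically contiguous run of pages into a greatest common factor (the
code calls it hcf) and take the largest of the four hardware page sizes
that divides it, short-circuiting to the smallest size as soon as the
factor drops below the second entry.  A standalone sketch:

static unsigned
gcd(unsigned a, unsigned b)
{

	while (b != 0) {
		unsigned t = a % b;

		a = b;
		b = t;
	}
	return (a);
}

/*
 * Pick the largest page shift (shifts[] ascending, e.g. 12/16/20/24)
 * that divides every segment length.  Illustrative stand-in only.
 */
static int
pick_pgsz_idx(const unsigned *seglen, int nsegs, const int shifts[4])
{
	unsigned hcf = 0;
	int i, idx;

	for (i = 0; i < nsegs; i++) {
		hcf = gcd(hcf, seglen[i]);
		if (hcf < (1u << shifts[1]))
			return (0);	/* give up, short circuit */
	}
	for (idx = 3; idx > 0; idx--) {
		if ((hcf & ((1u << shifts[idx]) - 1)) == 0)
			break;
	}
	return (idx);
}
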
 
-	return (db);
+void
+t4_free_page_pods(struct ppod_reservation *prsv)
+{
+	struct ppod_region *pr = prsv->prsv_pr;
+	vmem_addr_t addr;
+
+	MPASS(prsv != NULL);
+	MPASS(prsv->prsv_nppods != 0);
+
+	addr = prsv->prsv_tag & pr->pr_tag_mask;
+	MPASS((addr & pr->pr_invalid_bit) == 0);
+
+	CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
+	    pr->pr_arena, addr, prsv->prsv_nppods);
+
+	vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
+	prsv->prsv_nppods = 0;
 }
 
 #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
 
-static int
-write_page_pods(struct adapter *sc, struct toepcb *toep, struct ddp_buffer *db)
+int
+t4_write_page_pods_for_db(struct adapter *sc, struct sge_wrq *wrq, int tid,
+    struct ddp_buffer *db)
 {
 	struct wrqe *wr;
 	struct ulp_mem_io *ulpmc;
 	struct ulptx_idata *ulpsc;
 	struct pagepod *ppod;
-	int i, j, k, n, chunk, len, ddp_pgsz, idx, ppod_addr;
+	int i, j, k, n, chunk, len, ddp_pgsz, idx;
+	u_int ppod_addr;
+	uint32_t cmd;
+	struct ppod_reservation *prsv = &db->prsv;
+	struct ppod_region *pr = prsv->prsv_pr;
 
-	ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(db->tag)];
-	ppod_addr = sc->vres.ddp.start + G_PPOD_TAG(db->tag) * PPOD_SIZE;
-	for (i = 0; i < db->nppods; ppod_addr += chunk) {
+	MPASS(prsv->prsv_nppods > 0);
 
+	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
+	if (is_t4(sc))
+		cmd |= htobe32(F_ULP_MEMIO_ORDER);
+	else
+		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
+	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
+	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
+	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
+
 		/* How many page pods are we writing in this cycle */
-		n = min(db->nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
+		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
 		chunk = PPOD_SZ(n);
-		len = roundup(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
+		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
 
-		wr = alloc_wrqe(len, toep->ctrlq);
+		wr = alloc_wrqe(len, wrq);
 		if (wr == NULL)
 			return (ENOMEM);	/* ok to just bail out */
 		ulpmc = wrtod(wr);
 
 		INIT_ULPTX_WR(ulpmc, len, 0, 0);
-		ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
-		    F_ULP_MEMIO_ORDER);
+		ulpmc->cmd = cmd;
 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -776,7 +855,7 @@
 		ppod = (struct pagepod *)(ulpsc + 1);
 		for (j = 0; j < n; i++, j++, ppod++) {
 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
-			    V_PPOD_TID(toep->tid) | db->tag);
+			    V_PPOD_TID(tid) | prsv->prsv_tag);
 			ppod->len_offset = htobe64(V_PPOD_LEN(db->len) |
 			    V_PPOD_OFST(db->offset));
 			ppod->rsvd = 0;
@@ -804,6 +883,94 @@
 	return (0);
 }
 
+int
+t4_write_page_pods_for_buf(struct adapter *sc, struct sge_wrq *wrq, int tid,
+    struct ppod_reservation *prsv, vm_offset_t buf, int buflen)
+{
+	struct wrqe *wr;
+	struct ulp_mem_io *ulpmc;
+	struct ulptx_idata *ulpsc;
+	struct pagepod *ppod;
+	int i, j, k, n, chunk, len, ddp_pgsz;
+	u_int ppod_addr, offset;
+	uint32_t cmd;
+	struct ppod_region *pr = prsv->prsv_pr;
+	uintptr_t end_pva, pva, pa;
+
+	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
+	if (is_t4(sc))
+		cmd |= htobe32(F_ULP_MEMIO_ORDER);
+	else
+		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
+	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
+	offset = buf & PAGE_MASK;
+	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
+	pva = trunc_page(buf);
+	end_pva = trunc_page(buf + buflen - 1);
+	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
+
+		/* How many page pods are we writing in this cycle */
+		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
+		MPASS(n > 0);
+		chunk = PPOD_SZ(n);
+		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
+
+		wr = alloc_wrqe(len, wrq);
+		if (wr == NULL)
+			return (ENOMEM);	/* ok to just bail out */
+		ulpmc = wrtod(wr);
+
+		INIT_ULPTX_WR(ulpmc, len, 0, 0);
+		ulpmc->cmd = cmd;
+		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
+		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
+
+		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+		ulpsc->len = htobe32(chunk);
+
+		ppod = (struct pagepod *)(ulpsc + 1);
+		for (j = 0; j < n; i++, j++, ppod++) {
+			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
+			    V_PPOD_TID(tid) |
+			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
+			ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
+			    V_PPOD_OFST(offset));
+			ppod->rsvd = 0;
+
+			for (k = 0; k < nitems(ppod->addr); k++) {
+				if (pva > end_pva)
+					ppod->addr[k] = 0;
+				else {
+					pa = pmap_kextract(pva);
+					ppod->addr[k] = htobe64(pa);
+					pva += ddp_pgsz;
+				}
+#if 0
+				CTR5(KTR_CXGBE,
+				    "%s: tid %d ppod[%d]->addr[%d] = %p",
+				    __func__, tid, i, k,
+				    htobe64(ppod->addr[k]));
+#endif
+			}
+
+			/*
+			 * Walk back 1 segment so that the first address in the
+			 * next pod is the same as the last one in the current
+			 * pod.
+			 */
+			pva -= ddp_pgsz;
+		}
+
+		t4_wrq_tx(sc, wr);
+	}
+
+	MPASS(pva <= end_pva);
+
+	return (0);
+}
+
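The "walk back 1 segment" step above is the page pod overlap rule: each
pod carries one more address slot than the pages it covers, and the last
address of pod N must repeat as the first address of pod N+1.  A sketch
of the fill loop under that assumption (four pages per pod, five slots):

#include <stdint.h>

#define PPOD_NPAGES	4	/* DDP pages per pod, assumed value */

static void
fill_pods(uint64_t (*pods)[PPOD_NPAGES + 1], int npods,
    const uint64_t *pa, int npages)
{
	int i, k, idx = 0;

	for (i = 0; i < npods; i++) {
		for (k = 0; k < PPOD_NPAGES + 1; k++)
			pods[i][k] = (idx + k < npages) ? pa[idx + k] : 0;
		idx += PPOD_NPAGES;	/* slot 4 of pod N is slot 0 of N+1 */
	}
}
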
 /*
  * Reuse, or allocate (and program the page pods for) a new DDP buffer.  The
  * "pages" array is handed over to this function and should not be used in any
@@ -827,27 +994,32 @@
 	}
 
 	/* Allocate new buffer, write its page pods. */
-	db = alloc_ddp_buffer(td, pages, npages, db_off, db_len);
+	db = alloc_ddp_buffer(pages, npages, db_off, db_len);
 	if (db == NULL) {
 		vm_page_unhold_pages(pages, npages);
 		free(pages, M_CXGBE);
 		return (-1);
 	}
-	if (write_page_pods(sc, toep, db) != 0) {
+	if (t4_alloc_page_pods_for_db(&td->pr, db)) {
 		vm_page_unhold_pages(pages, npages);
-		free_ddp_buffer(td, db);
+		free_ddp_buffer(db);
 		return (-1);
 	}
+	if (t4_write_page_pods_for_db(sc, toep->ctrlq, toep->tid, db) != 0) {
+		vm_page_unhold_pages(pages, npages);
+		free_ddp_buffer(db);
+		return (-1);
+	}
 
 	i = empty_slot;
 	if (i < 0) {
 		i = arc4random() % nitems(toep->db);
-		free_ddp_buffer(td, toep->db[i]);
+		free_ddp_buffer(toep->db[i]);
 	}
 	toep->db[i] = db;
 
 	CTR5(KTR_CXGBE, "%s: tid %d, DDP buffer[%d] = %p (tag 0x%x)",
-	    __func__, toep->tid, i, db, db->tag);
+	    __func__, toep->tid, i, db, db->prsv.prsv_tag);
 
 	return (i);
 }
@@ -979,31 +1151,52 @@
 	return (0);
 }
 
-void
-t4_init_ddp(struct adapter *sc, struct tom_data *td)
+int
+t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
+    const char *name)
 {
-	int nppods = sc->vres.ddp.size / PPOD_SIZE;
+	int i;
 
-	td->nppods = nppods;
-	td->nppods_free = nppods;
-	td->nppods_free_head = nppods;
-	TAILQ_INIT(&td->ppods);
-	mtx_init(&td->ppod_lock, "page pods", NULL, MTX_DEF);
+	MPASS(pr != NULL);
+	MPASS(r->size > 0);
 
-	t4_register_cpl_handler(sc, CPL_RX_DATA_DDP, do_rx_data_ddp);
-	t4_register_cpl_handler(sc, CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
+	pr->pr_start = r->start;
+	pr->pr_len = r->size;
+	pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
+	pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
+	pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
+	pr->pr_page_shift[3] = 12 + G_HPZ3(psz);
+
+	/* The SGL -> page pod algorithm requires the sizes to be in order. */
+	for (i = 1; i < nitems(pr->pr_page_shift); i++) {
+		if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
+			return (ENXIO);
+	}
+
+	pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
+	pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
+	if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
+		return (ENXIO);
+	pr->pr_alias_shift = fls(pr->pr_tag_mask);
+	pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);
+
+	pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
+	    M_FIRSTFIT | M_NOWAIT);
+	if (pr->pr_arena == NULL)
+		return (ENOMEM);
+
+	return (0);
 }
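
The mask arithmetic in t4_init_ppod_region sizes the tag namespace from
the region: pr_tag_mask is the smallest all-ones value covering the
region (clipped to the PPOD_TAG field), the field bits above it become
the alias mask, and the top bit of the tag mask is reserved as the
invalid marker checked in alloc_page_pods.  A userland rendering of that
derivation, with fls(9) written out inline:

/* fls: 1-based index of the highest set bit, 0 for 0. */
static int
fls_sketch(unsigned x)
{
	int b = 0;

	while (x != 0) {
		b++;
		x >>= 1;
	}
	return (b);
}

static unsigned
tag_mask_for(unsigned region_size, unsigned field_mask)
{

	return (((1u << fls_sketch(region_size)) - 1) & field_mask);
}
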
 
 void
-t4_uninit_ddp(struct adapter *sc __unused, struct tom_data *td)
+t4_free_ppod_region(struct ppod_region *pr)
 {
 
-	KASSERT(td->nppods == td->nppods_free,
-	    ("%s: page pods still in use, nppods = %d, free = %d",
-	    __func__, td->nppods, td->nppods_free));
+	MPASS(pr != NULL);
 
-	if (mtx_initialized(&td->ppod_lock))
-		mtx_destroy(&td->ppod_lock);
+	if (pr->pr_arena)
+		vmem_destroy(pr->pr_arena);
+	bzero(pr, sizeof(*pr));
 }
 
 #define	VNET_SO_ASSERT(so)						\
@@ -1017,6 +1210,29 @@
 	CXGBE_UNIMPLEMENTED(__func__);
 }
 
+static char ddp_magic_str[] = "nothing to see here";
+
+static struct mbuf *
+get_ddp_mbuf(int len)
+{
+	struct mbuf *m;
+
+	m = m_get(M_NOWAIT, MT_DATA);
+	if (m == NULL)
+		CXGBE_UNIMPLEMENTED("mbuf alloc failure");
+	m->m_len = len;
+	m->m_data = &ddp_magic_str[0];
+
+	return (m);
+}
+
+static inline int
+is_ddp_mbuf(struct mbuf *m)
+{
+
+	return (m->m_data == &ddp_magic_str[0]);
+}
+
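This replaces the old M_DDP mbuf flag: a DDP placeholder mbuf is now
recognized by its m_data pointing at a private sentinel string, so no
mbuf flag bit is consumed and the test is a single pointer compare.  The
idiom, in a standalone sketch:

#include <stdbool.h>

static const char sentinel[] = "nothing to see here";

struct buf {
	const char *data;
};

static void
mark_placeholder(struct buf *b)
{

	b->data = sentinel;	/* identity, not contents, is the marker */
}

static bool
is_placeholder(const struct buf *b)
{

	return (b->data == sentinel);	/* pointer compare, not strcmp */
}
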
 /*
  * Copy an mbuf chain into a uio limited by len if set.
  */
@@ -1035,7 +1251,7 @@
 	for (; m != NULL; m = m->m_next) {
 		length = min(m->m_len, total - progress);
 
-		if (m->m_flags & M_DDP) {
+		if (is_ddp_mbuf(m)) {
 			enum uio_seg segflag = uio->uio_segflg;
 
 			uio->uio_segflg	= UIO_NOCOPY;
@@ -1083,9 +1299,9 @@
 
 	/* Prevent other readers from entering the socket. */
 	error = sblock(sb, SBLOCKWAIT(flags));
+	SOCKBUF_LOCK(sb);
 	if (error)
 		goto out;
-	SOCKBUF_LOCK(sb);
 
 	/* Easy one, no space to copyout anything. */
 	if (uio->uio_resid == 0) {
@@ -1145,7 +1361,7 @@
 
 	/* Socket buffer got some data that we shall deliver now. */
 	if (sb->sb_cc > 0 && !(flags & MSG_WAITALL) &&
-	    ((sb->sb_flags & SS_NBIO) ||
+	    ((so->so_state & SS_NBIO) ||
 	     (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
 	     sb->sb_cc >= sb->sb_lowat ||
 	     sb->sb_cc >= uio->uio_resid ||
@@ -1266,4 +1482,20 @@
 	return (error);
 }
 
+int
+t4_ddp_mod_load(void)
+{
+
+	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
+	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
+	return (0);
+}
+
+void
+t4_ddp_mod_unload(void)
+{
+
+	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
+	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
+}
 #endif

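A note on the new page pod allocator in t4_ddp.c above: the old free-list
bookkeeping (nppods/nppods_free/ppod_lock) is gone, replaced by a vmem(9)
arena plus a split of the hardware tag field into low "tag" bits, which
address pods inside the region, and high "alias" bits, which are stamped onto
a tag each time it is reused so a stale tag can be told apart from its
current incarnation.  t4_init_ppod_region() fails with ENXIO if either half
of the split comes out empty.  A minimal userland sketch of the mask
arithmetic (the 24-bit field width is an assumption for illustration, not
the real M_PPOD_TAG, and region sizes are assumed well below 4 GB):

	#include <strings.h>			/* fls() */

	#define	FIELD_MASK	0xffffffu	/* assumed 24-bit tag field */

	static void
	split_tag_field(unsigned int region_size, unsigned int *tag_mask,
	    unsigned int *alias_mask)
	{
		/* Enough low bits to cover every byte of the region. */
		*tag_mask = ((1u << fls(region_size)) - 1) & FIELD_MASK;
		/* The leftover high bits distinguish reuses of a tag. */
		*alias_mask = FIELD_MASK & ~*tag_mask;
	}
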
Modified: trunk/sys/dev/cxgbe/tom/t4_listen.c
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_listen.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_listen.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_listen.c 248078 2013-03-09 00:39:54Z marius $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_listen.c 318804 2017-05-24 20:01:12Z np $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -72,7 +73,7 @@
 
 /* lctx services */
 static struct listen_ctx *alloc_lctx(struct adapter *, struct inpcb *,
-    struct port_info *);
+    struct vi_info *);
 static int free_lctx(struct adapter *, struct listen_ctx *);
 static void hold_lctx(struct listen_ctx *);
 static void listen_hash_add(struct adapter *, struct listen_ctx *);
@@ -80,7 +81,7 @@
 static struct listen_ctx *listen_hash_del(struct adapter *, struct inpcb *);
 static struct inpcb *release_lctx(struct adapter *, struct listen_ctx *);
 
-static inline void save_qids_in_mbuf(struct mbuf *, struct port_info *);
+static inline void save_qids_in_mbuf(struct mbuf *, struct vi_info *);
 static inline void get_qids_from_mbuf(struct mbuf *m, int *, int *);
 static void send_reset_synqe(struct toedev *, struct synq_entry *);
 
@@ -125,7 +126,7 @@
 		TAILQ_FOREACH(s, &t->stids, link) {
 			stid += s->used + s->free;
 			f = stid & mask;
-			if (n <= s->free - f) {
+			if (s->free >= n + f) {
 				stid -= n + f;
 				s->free -= n + f;
 				TAILQ_INSERT_AFTER(&t->stids, s, sr, link);
@@ -187,7 +188,7 @@
 }
 
 static struct listen_ctx *
-alloc_lctx(struct adapter *sc, struct inpcb *inp, struct port_info *pi)
+alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi)
 {
 	struct listen_ctx *lctx;
 
@@ -203,12 +204,24 @@
 		return (NULL);
 	}
 
-	lctx->ctrlq = &sc->sge.ctrlq[pi->port_id];
-	lctx->ofld_rxq = &sc->sge.ofld_rxq[pi->first_ofld_rxq];
+	if (inp->inp_vflag & INP_IPV6 &&
+	    !IN6_ARE_ADDR_EQUAL(&in6addr_any, &inp->in6p_laddr)) {
+		struct tom_data *td = sc->tom_softc;
+
+		lctx->ce = hold_lip(td, &inp->in6p_laddr, NULL);
+		if (lctx->ce == NULL) {
+			free(lctx, M_CXGBE);
+			return (NULL);
+		}
+	}
+
+	lctx->ctrlq = &sc->sge.ctrlq[vi->pi->port_id];
+	lctx->ofld_rxq = &sc->sge.ofld_rxq[vi->first_ofld_rxq];
 	refcount_init(&lctx->refcount, 1);
 	TAILQ_INIT(&lctx->synq);
 
 	lctx->inp = inp;
+	lctx->vnet = inp->inp_socket->so_vnet;
 	in_pcbref(inp);
 
 	return (lctx);
@@ -219,6 +232,7 @@
 free_lctx(struct adapter *sc, struct listen_ctx *lctx)
 {
 	struct inpcb *inp = lctx->inp;
+	struct tom_data *td = sc->tom_softc;
 
 	INP_WLOCK_ASSERT(inp);
 	KASSERT(lctx->refcount == 0,
@@ -230,6 +244,8 @@
 	CTR4(KTR_CXGBE, "%s: stid %u, lctx %p, inp %p",
 	    __func__, lctx->stid, lctx, lctx->inp);
 
+	if (lctx->ce)
+		release_lip(td, lctx->ce);
 	free_stid(sc, lctx);
 	free(lctx, M_CXGBE);
 
@@ -332,7 +348,8 @@
 	struct adapter *sc = tod->tod_softc;
 	struct mbuf *m = synqe->syn;
 	struct ifnet *ifp = m->m_pkthdr.rcvif;
-	struct port_info *pi = ifp->if_softc;
+	struct vi_info *vi = ifp->if_softc;
+	struct port_info *pi = vi->pi;
 	struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
 	struct wrqe *wr;
 	struct fw_flowc_wr *flowc;
@@ -341,7 +358,7 @@
 	struct sge_wrq *ofld_txq;
 	struct sge_ofld_rxq *ofld_rxq;
 	const int nparams = 6;
-	unsigned int pfvf = G_FW_VIID_PFN(pi->viid) << S_FW_VIID_PFN;
+	unsigned int pfvf = G_FW_VIID_PFN(vi->viid) << S_FW_VIID_PFN;
 
 	INP_WLOCK_ASSERT(synqe->lctx->inp);
 
@@ -360,13 +377,13 @@
 	/* The wrqe will have two WRs - a flowc followed by an abort_req */
 	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
 
-	wr = alloc_wrqe(roundup(flowclen, EQ_ESIZE) + sizeof(*req), ofld_txq);
+	wr = alloc_wrqe(roundup2(flowclen, EQ_ESIZE) + sizeof(*req), ofld_txq);
 	if (wr == NULL) {
 		/* XXX */
 		panic("%s: allocation failure.", __func__);
 	}
 	flowc = wrtod(wr);
-	req = (void *)((caddr_t)flowc + roundup(flowclen, EQ_ESIZE));
+	req = (void *)((caddr_t)flowc + roundup2(flowclen, EQ_ESIZE));
 
 	/* First the flowc ... */
 	memset(flowc, 0, wr->wr_len);
@@ -481,7 +498,7 @@
 /*
  * Start a listening server by sending a passive open request to HW.
  *
- * Can't take adapter lock here and access to sc->flags, sc->open_device_map,
+ * Can't take adapter lock here and access to sc->flags,
  * sc->offload_map, if_capenable are all race prone.
  */
 int
@@ -488,13 +505,20 @@
 t4_listen_start(struct toedev *tod, struct tcpcb *tp)
 {
 	struct adapter *sc = tod->tod_softc;
+	struct vi_info *vi;
 	struct port_info *pi;
 	struct inpcb *inp = tp->t_inpcb;
 	struct listen_ctx *lctx;
-	int i, rc;
+	int i, rc, v;
 
 	INP_WLOCK_ASSERT(inp);
 
+	/* Don't start a hardware listener for any loopback address. */
+	if (inp->inp_vflag & INP_IPV6 && IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr))
+		return (0);
+	if (!(inp->inp_vflag & INP_IPV6) &&
+	    IN_LOOPBACK(ntohl(inp->inp_laddr.s_addr)))
+		return (0);
 #if 0
 	ADAPTER_LOCK(sc);
 	if (IS_BUSY(sc)) {
@@ -503,16 +527,13 @@
 		goto done;
 	}
 
-	KASSERT(sc->flags & TOM_INIT_DONE,
+	KASSERT(uld_active(sc, ULD_TOM),
 	    ("%s: TOM not initialized", __func__));
 #endif
 
-	if ((sc->open_device_map & sc->offload_map) == 0)
-		goto done;	/* no port that's UP with IFCAP_TOE enabled */
-
 	/*
-	 * Find a running port with IFCAP_TOE (4 or 6).  We'll use the first
-	 * such port's queues to send the passive open and receive the reply to
+	 * Find an initialized VI with IFCAP_TOE (4 or 6).  We'll use the first
+	 * such VI's queues to send the passive open and receive the reply to
 	 * it.
 	 *
 	 * XXX: need a way to mark a port in use by offload.  if_cxgbe should
@@ -520,18 +541,20 @@
 	 * attempts to disable IFCAP_TOE on that port too?).
 	 */
 	for_each_port(sc, i) {
-		if (isset(&sc->open_device_map, i) &&
-		    sc->port[i]->ifp->if_capenable & IFCAP_TOE)
-				break;
+		pi = sc->port[i];
+		for_each_vi(pi, v, vi) {
+			if (vi->flags & VI_INIT_DONE &&
+			    vi->ifp->if_capenable & IFCAP_TOE)
+				goto found;
+		}
 	}
-	KASSERT(i < sc->params.nports,
-	    ("%s: no running port with TOE capability enabled.", __func__));
-	pi = sc->port[i];
+	goto done;	/* no port that's UP with IFCAP_TOE enabled */
+found:
 
 	if (listen_hash_find(sc, inp) != NULL)
 		goto done;	/* already setup */
 
-	lctx = alloc_lctx(sc, inp, pi);
+	lctx = alloc_lctx(sc, inp, vi);
 	if (lctx == NULL) {
 		log(LOG_ERR,
 		    "%s: listen request ignored, %s couldn't allocate lctx\n",
@@ -674,6 +697,12 @@
 	synqe->iss = be32toh(th->th_seq);
 	synqe->ts = to.to_tsval;
 
+	if (chip_id(sc) >= CHELSIO_T5) {
+		struct cpl_t5_pass_accept_rpl *rpl5 = wrtod(wr);
+
+		rpl5->iss = th->th_seq;
+	}
+
 	e = &sc->l2t->l2tab[synqe->l2e_idx];
 	t4_l2t_send(sc, wr, e);
 
@@ -796,17 +825,19 @@
 {
 	struct listen_ctx *lctx = synqe->lctx;
 	struct inpcb *inp = lctx->inp;
-	struct port_info *pi = synqe->syn->m_pkthdr.rcvif->if_softc;
+	struct vi_info *vi = synqe->syn->m_pkthdr.rcvif->if_softc;
 	struct l2t_entry *e = &sc->l2t->l2tab[synqe->l2e_idx];
+	int ntids;
 
 	INP_WLOCK_ASSERT(inp);
+	ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1;
 
 	TAILQ_REMOVE(&lctx->synq, synqe, link);
 	inp = release_lctx(sc, lctx);
 	if (inp)
 		INP_WUNLOCK(inp);
-	remove_tid(sc, synqe->tid);
-	release_tid(sc, synqe->tid, &sc->sge.ctrlq[pi->port_id]);
+	remove_tid(sc, synqe->tid, ntids);
+	release_tid(sc, synqe->tid, &sc->sge.ctrlq[vi->pi->port_id]);
 	t4_l2t_release(e);
 	release_synqe(synqe);	/* removed from synq list */
 }
@@ -904,7 +935,7 @@
 	struct cpl_pass_establish *cpl = mtod(synqe->syn, void *);
 	struct toepcb *toep = *(struct toepcb **)(cpl + 1);
 
-	INP_INFO_LOCK_ASSERT(&V_tcbinfo); /* prevents bad race with accept() */
+	INP_INFO_RLOCK_ASSERT(&V_tcbinfo); /* prevents bad race with accept() */
 	INP_WLOCK_ASSERT(inp);
 	KASSERT(synqe->flags & TPF_SYNQE,
 	    ("%s: %p not a synq_entry?", __func__, arg));
@@ -917,12 +948,12 @@
 }
 
 static inline void
-save_qids_in_mbuf(struct mbuf *m, struct port_info *pi)
+save_qids_in_mbuf(struct mbuf *m, struct vi_info *vi)
 {
 	uint32_t txqid, rxqid;
 
-	txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
-	rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
+	txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
+	rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
 
 	m->m_pkthdr.flowid = (txqid << 16) | (rxqid & 0xffff);
 }
@@ -944,7 +975,7 @@
 static struct synq_entry *
 mbuf_to_synqe(struct mbuf *m)
 {
-	int len = roundup(sizeof (struct synq_entry), 8);
+	int len = roundup2(sizeof (struct synq_entry), 8);
 	int tspace = M_TRAILINGSPACE(m);
 	struct synq_entry *synqe = NULL;
 
@@ -990,15 +1021,18 @@
 calc_opt2p(struct adapter *sc, struct port_info *pi, int rxqid,
     const struct tcp_options *tcpopt, struct tcphdr *th, int ulp_mode)
 {
-	uint32_t opt2 = 0;
 	struct sge_ofld_rxq *ofld_rxq = &sc->sge.ofld_rxq[rxqid];
+	uint32_t opt2;
 
+	opt2 = V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]) |
+	    F_RSS_QUEUE_VALID | V_RSS_QUEUE(ofld_rxq->iq.abs_id);
+
 	if (V_tcp_do_rfc1323) {
 		if (tcpopt->tstamp)
 			opt2 |= F_TSTAMPS_EN;
 		if (tcpopt->sack)
 			opt2 |= F_SACK_EN;
-		if (tcpopt->wsf > 0)
+		if (tcpopt->wsf <= 14)
 			opt2 |= F_WND_SCALE_EN;
 	}
 
@@ -1005,9 +1039,15 @@
 	if (V_tcp_do_ecn && th->th_flags & (TH_ECE | TH_CWR))
 		opt2 |= F_CCTRL_ECN;
 
-	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);
-	opt2 |= F_RX_COALESCE_VALID | V_RX_COALESCE(M_RX_COALESCE);
-	opt2 |= F_RSS_QUEUE_VALID | V_RSS_QUEUE(ofld_rxq->iq.abs_id);
+	/* RX_COALESCE is always a valid value (0 or M_RX_COALESCE). */
+	if (is_t4(sc))
+		opt2 |= F_RX_COALESCE_VALID;
+	else {
+		opt2 |= F_T5_OPT_2_VALID;
+		opt2 |= F_CONG_CNTRL_VALID; /* OPT_2_ISS really, for T5 */
+	}
+	if (sc->tt.rx_coalesce)
+		opt2 |= V_RX_COALESCE(M_RX_COALESCE);
 
 #ifdef USE_DDP_RX_FLOW_CONTROL
 	if (ulp_mode == ULP_MODE_TCPDDP)
@@ -1017,20 +1057,9 @@
 	return htobe32(opt2);
 }
 
-/* XXX: duplication. */
-static inline void
-tcp_fields_to_host(struct tcphdr *th)
-{
-
-	th->th_seq = ntohl(th->th_seq);
-	th->th_ack = ntohl(th->th_ack);
-	th->th_win = ntohs(th->th_win);
-	th->th_urp = ntohs(th->th_urp);
-}
-
 static void
-pass_accept_req_to_protohdrs(const struct mbuf *m, struct in_conninfo *inc,
-    struct tcphdr *th)
+pass_accept_req_to_protohdrs(struct adapter *sc, const struct mbuf *m,
+    struct in_conninfo *inc, struct tcphdr *th)
 {
 	const struct cpl_pass_accept_req *cpl = mtod(m, const void *);
 	const struct ether_header *eh;
@@ -1039,8 +1068,13 @@
 	const struct tcphdr *tcp;
 
 	eh = (const void *)(cpl + 1);
-	l3hdr = ((uintptr_t)eh + G_ETH_HDR_LEN(hlen));
-	tcp = (const void *)(l3hdr + G_IP_HDR_LEN(hlen));
+	if (chip_id(sc) >= CHELSIO_T6) {
+		l3hdr = ((uintptr_t)eh + G_T6_ETH_HDR_LEN(hlen));
+		tcp = (const void *)(l3hdr + G_T6_IP_HDR_LEN(hlen));
+	} else {
+		l3hdr = ((uintptr_t)eh + G_ETH_HDR_LEN(hlen));
+		tcp = (const void *)(l3hdr + G_IP_HDR_LEN(hlen));
+	}
 
 	if (inc) {
 		bzero(inc, sizeof(*inc));
@@ -1200,11 +1234,12 @@
 	struct tcphdr th;
 	struct tcpopt to;
 	struct port_info *pi;
+	struct vi_info *vi;
 	struct ifnet *hw_ifp, *ifp;
 	struct l2t_entry *e = NULL;
 	int rscale, mtu_idx, rx_credits, rxqid, ulp_mode;
 	struct synq_entry *synqe = NULL;
-	int reject_reason;
+	int reject_reason, v, ntids;
 	uint16_t vid;
 #ifdef INVARIANTS
 	unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
@@ -1217,11 +1252,32 @@
 	CTR4(KTR_CXGBE, "%s: stid %u, tid %u, lctx %p", __func__, stid, tid,
 	    lctx);
 
-	pass_accept_req_to_protohdrs(m, &inc, &th);
+	pass_accept_req_to_protohdrs(sc, m, &inc, &th);
 	t4opt_to_tcpopt(&cpl->tcpopt, &to);
 
 	pi = sc->port[G_SYN_INTF(be16toh(cpl->l2info))];
-	hw_ifp = pi->ifp;	/* the cxgbeX ifnet */
+
+	CURVNET_SET(lctx->vnet);
+
+	/*
+	 * Use the MAC index to look up the associated VI.  If this SYN
+	 * didn't match a perfect MAC filter, punt.
+	 */
+	if (!(be16toh(cpl->l2info) & F_SYN_XACT_MATCH)) {
+		m_freem(m);
+		m = NULL;
+		REJECT_PASS_ACCEPT();
+	}
+	for_each_vi(pi, v, vi) {
+		if (vi->xact_addr_filt == G_SYN_MAC_IDX(be16toh(cpl->l2info)))
+			goto found;
+	}
+	m_freem(m);
+	m = NULL;
+	REJECT_PASS_ACCEPT();
+
+found:
+	hw_ifp = vi->ifp;	/* the (v)cxgbeX ifnet */
 	m->m_pkthdr.rcvif = hw_ifp;
 	tod = TOEDEV(hw_ifp);
 
@@ -1259,6 +1315,8 @@
 		 */
 		if (!ifnet_has_ip6(ifp, &inc.inc6_laddr))
 			REJECT_PASS_ACCEPT();
+
+		ntids = 2;
 	} else {
 
 		/* Don't offload if the ifcap isn't enabled */
@@ -1271,8 +1329,17 @@
 		 */
 		if (!ifnet_has_ip(ifp, inc.inc_laddr))
 			REJECT_PASS_ACCEPT();
+
+		ntids = 1;
 	}
 
+	/*
+	 * Don't offload if the ifnet that the SYN came in on is not in the same
+	 * vnet as the listening socket.
+	 */
+	if (lctx->vnet != ifp->if_vnet)
+		REJECT_PASS_ACCEPT();
+
 	e = get_l2te_for_nexthop(pi, ifp, &inc);
 	if (e == NULL)
 		REJECT_PASS_ACCEPT();
@@ -1281,19 +1348,21 @@
 	if (synqe == NULL)
 		REJECT_PASS_ACCEPT();
 
-	wr = alloc_wrqe(sizeof(*rpl), &sc->sge.ctrlq[pi->port_id]);
+	wr = alloc_wrqe(is_t4(sc) ? sizeof(struct cpl_pass_accept_rpl) :
+	    sizeof(struct cpl_t5_pass_accept_rpl), &sc->sge.ctrlq[pi->port_id]);
 	if (wr == NULL)
 		REJECT_PASS_ACCEPT();
 	rpl = wrtod(wr);
 
-	INP_INFO_WLOCK(&V_tcbinfo);	/* for 4-tuple check, syncache_add */
+	INP_INFO_RLOCK(&V_tcbinfo);	/* for 4-tuple check */
 
 	/* Don't offload if the 4-tuple is already in use */
 	if (toe_4tuple_check(&inc, &th, ifp) != 0) {
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+		INP_INFO_RUNLOCK(&V_tcbinfo);
 		free(wr, M_CXGBE);
 		REJECT_PASS_ACCEPT();
 	}
+	INP_INFO_RUNLOCK(&V_tcbinfo);
 
 	inp = lctx->inp;		/* listening socket, not owned by TOE */
 	INP_WLOCK(inp);
@@ -1306,7 +1375,6 @@
 		 * resources tied to this listen context.
 		 */
 		INP_WUNLOCK(inp);
-		INP_INFO_WUNLOCK(&V_tcbinfo);
 		free(wr, M_CXGBE);
 		REJECT_PASS_ACCEPT();
 	}
@@ -1319,16 +1387,22 @@
 	rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
 	SOCKBUF_UNLOCK(&so->so_rcv);
 
-	save_qids_in_mbuf(m, pi);
+	save_qids_in_mbuf(m, vi);
 	get_qids_from_mbuf(m, NULL, &rxqid);
 
-	INIT_TP_WR_MIT_CPL(rpl, CPL_PASS_ACCEPT_RPL, tid);
+	if (is_t4(sc))
+		INIT_TP_WR_MIT_CPL(rpl, CPL_PASS_ACCEPT_RPL, tid);
+	else {
+		struct cpl_t5_pass_accept_rpl *rpl5 = (void *)rpl;
+
+		INIT_TP_WR_MIT_CPL(rpl5, CPL_PASS_ACCEPT_RPL, tid);
+	}
 	if (sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0) {
 		ulp_mode = ULP_MODE_TCPDDP;
 		synqe->flags |= TPF_SYNQE_TCPDDP;
 	} else
 		ulp_mode = ULP_MODE_NONE;
-	rpl->opt0 = calc_opt0(so, pi, e, mtu_idx, rscale, rx_credits, ulp_mode);
+	rpl->opt0 = calc_opt0(so, vi, e, mtu_idx, rscale, rx_credits, ulp_mode);
 	rpl->opt2 = calc_opt2p(sc, pi, rxqid, &cpl->tcpopt, &th, ulp_mode);
 
 	synqe->tid = tid;
@@ -1340,19 +1414,17 @@
 	synqe->rcv_bufsize = rx_credits;
 	atomic_store_rel_ptr(&synqe->wr, (uintptr_t)wr);
 
-	insert_tid(sc, tid, synqe);
+	insert_tid(sc, tid, synqe, ntids);
 	TAILQ_INSERT_TAIL(&lctx->synq, synqe, link);
 	hold_synqe(synqe);	/* hold for the duration it's in the synq */
 	hold_lctx(lctx);	/* A synqe on the list has a ref on its lctx */
 
-	/*
+	/*
 	 * If all goes well t4_syncache_respond will get called during
-	 * syncache_add.  Also note that syncache_add releases both pcbinfo and
-	 * pcb locks.
+	 * syncache_add.  Note that syncache_add releases the pcb lock.
 	 */
 	toe_syncache_add(&inc, &to, &th, inp, tod, synqe);
 	INP_UNLOCK_ASSERT(inp);	/* ok to assert, we have a ref on the inp */
-	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
 
 	/*
 	 * If we replied during syncache_add (synqe->wr has been consumed),
@@ -1370,7 +1442,7 @@
 		if (m)
 			m->m_pkthdr.rcvif = hw_ifp;
 
-		remove_tid(sc, synqe->tid);
+		remove_tid(sc, synqe->tid, ntids);
 		free(wr, M_CXGBE);
 
 		/* Yank the synqe out of the lctx synq. */
@@ -1402,15 +1474,18 @@
 		if (!(synqe->flags & TPF_SYNQE_EXPANDED))
 			send_reset_synqe(tod, synqe);
 		INP_WUNLOCK(inp);
+		CURVNET_RESTORE();
 
 		release_synqe(synqe);	/* extra hold */
 		return (__LINE__);
 	}
 	INP_WUNLOCK(inp);
+	CURVNET_RESTORE();
 
 	release_synqe(synqe);	/* extra hold */
 	return (0);
 reject:
+	CURVNET_RESTORE();
 	CTR4(KTR_CXGBE, "%s: stid %u, tid %u, REJECT (%d)", __func__, stid, tid,
 	    reject_reason);
 
@@ -1430,7 +1505,7 @@
 }
 
 static void
-synqe_to_protohdrs(struct synq_entry *synqe,
+synqe_to_protohdrs(struct adapter *sc, struct synq_entry *synqe,
     const struct cpl_pass_establish *cpl, struct in_conninfo *inc,
     struct tcphdr *th, struct tcpopt *to)
 {
@@ -1437,7 +1512,7 @@
 	uint16_t tcp_opt = be16toh(cpl->tcp_opt);
 
 	/* start off with the original SYN */
-	pass_accept_req_to_protohdrs(synqe->syn, inc, th);
+	pass_accept_req_to_protohdrs(sc, synqe->syn, inc, th);
 
 	/* modify parts to make it look like the ACK to our SYN|ACK */
 	th->th_flags = TH_ACK;
@@ -1455,7 +1530,7 @@
     struct mbuf *m)
 {
 	struct adapter *sc = iq->adapter;
-	struct port_info *pi;
+	struct vi_info *vi;
 	struct ifnet *ifp;
 	const struct cpl_pass_establish *cpl = (const void *)(rss + 1);
 #if defined(KTR) || defined(INVARIANTS)
@@ -1464,7 +1539,7 @@
 	unsigned int tid = GET_TID(cpl);
 	struct synq_entry *synqe = lookup_tid(sc, tid);
 	struct listen_ctx *lctx = synqe->lctx;
-	struct inpcb *inp = lctx->inp;
+	struct inpcb *inp = lctx->inp, *new_inp;
 	struct socket *so;
 	struct tcphdr th;
 	struct tcpopt to;
@@ -1482,7 +1557,8 @@
 	KASSERT(synqe->flags & TPF_SYNQE,
 	    ("%s: tid %u (ctx %p) not a synqe", __func__, tid, synqe));
 
-	INP_INFO_WLOCK(&V_tcbinfo);	/* for syncache_expand */
+	CURVNET_SET(lctx->vnet);
+	INP_INFO_RLOCK(&V_tcbinfo);	/* for syncache_expand */
 	INP_WLOCK(inp);
 
 	CTR6(KTR_CXGBE,
@@ -1498,14 +1574,15 @@
 		}
 
 		INP_WUNLOCK(inp);
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+		INP_INFO_RUNLOCK(&V_tcbinfo);
+		CURVNET_RESTORE();
 		return (0);
 	}
 
 	ifp = synqe->syn->m_pkthdr.rcvif;
-	pi = ifp->if_softc;
-	KASSERT(pi->adapter == sc,
-	    ("%s: pi %p, sc %p mismatch", __func__, pi, sc));
+	vi = ifp->if_softc;
+	KASSERT(vi->pi->adapter == sc,
+	    ("%s: vi %p, sc %p mismatch", __func__, vi, sc));
 
 	get_qids_from_mbuf(synqe->syn, &txqid, &rxqid);
 	KASSERT(rxqid == iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0],
@@ -1512,7 +1589,7 @@
 	    ("%s: CPL arrived on unexpected rxq.  %d %d", __func__, rxqid,
 	    (int)(iq_to_ofld_rxq(iq) - &sc->sge.ofld_rxq[0])));
 
-	toep = alloc_toepcb(pi, txqid, rxqid, M_NOWAIT);
+	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT);
 	if (toep == NULL) {
 reset:
 		/*
@@ -1523,7 +1600,8 @@
 		 */
 		send_reset_synqe(TOEDEV(ifp), synqe);
 		INP_WUNLOCK(inp);
-		INP_INFO_WUNLOCK(&V_tcbinfo);
+		INP_INFO_RUNLOCK(&V_tcbinfo);
+		CURVNET_RESTORE();
 		return (0);
 	}
 	toep->tid = tid;
@@ -1539,7 +1617,7 @@
 	KASSERT(so != NULL, ("%s: socket is NULL", __func__));
 
 	/* Come up with something that syncache_expand should be ok with. */
-	synqe_to_protohdrs(synqe, cpl, &inc, &th, &to);
+	synqe_to_protohdrs(sc, synqe, cpl, &inc, &th, &to);
 
 	/*
 	 * No more need for anything in the mbuf that carried the
@@ -1557,6 +1635,14 @@
 		goto reset;
 	}
 
+	/* New connection inpcb is already locked by syncache_expand(). */
+	new_inp = sotoinpcb(so);
+	INP_WLOCK_ASSERT(new_inp);
+	MPASS(so->so_vnet == lctx->vnet);
+	toep->vnet = lctx->vnet;
+	if (inc.inc_flags & INC_ISIPV6)
+		toep->ce = hold_lip(sc->tom_softc, &inc.inc6_laddr, lctx->ce);
+
 	/*
 	 * This is for the unlikely case where the syncache entry that we added
 	 * has been evicted from the syncache, but the syncache_expand above
@@ -1567,20 +1653,19 @@
 	 * this somewhat defeats the purpose of having a tod_offload_socket :-(
 	 */
 	if (__predict_false(!(synqe->flags & TPF_SYNQE_EXPANDED))) {
-		struct inpcb *new_inp = sotoinpcb(so);
-
-		INP_WLOCK(new_inp);
 		tcp_timer_activate(intotcpcb(new_inp), TT_KEEP, 0);
 		t4_offload_socket(TOEDEV(ifp), synqe, so);
-		INP_WUNLOCK(new_inp);
 	}
 
+	INP_WUNLOCK(new_inp);
+
 	/* Done with the synqe */
 	TAILQ_REMOVE(&lctx->synq, synqe, link);
 	inp = release_lctx(sc, lctx);
 	if (inp != NULL)
 		INP_WUNLOCK(inp);
-	INP_INFO_WUNLOCK(&V_tcbinfo);
+	INP_INFO_RUNLOCK(&V_tcbinfo);
+	CURVNET_RESTORE();
 	release_synqe(synqe);
 
 	return (0);
@@ -1587,12 +1672,22 @@
 }
 
 void
-t4_init_listen_cpl_handlers(struct adapter *sc)
+t4_init_listen_cpl_handlers(void)
 {
 
-	t4_register_cpl_handler(sc, CPL_PASS_OPEN_RPL, do_pass_open_rpl);
-	t4_register_cpl_handler(sc, CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl);
-	t4_register_cpl_handler(sc, CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
-	t4_register_cpl_handler(sc, CPL_PASS_ESTABLISH, do_pass_establish);
+	t4_register_cpl_handler(CPL_PASS_OPEN_RPL, do_pass_open_rpl);
+	t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl);
+	t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
+	t4_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
 }
+
+void
+t4_uninit_listen_cpl_handlers(void)
+{
+
+	t4_register_cpl_handler(CPL_PASS_OPEN_RPL, NULL);
+	t4_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, NULL);
+	t4_register_cpl_handler(CPL_PASS_ACCEPT_REQ, NULL);
+	t4_register_cpl_handler(CPL_PASS_ESTABLISH, NULL);
+}
 #endif

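Most of the t4_listen.c churn is the mechanical port_info -> vi_info
conversion, but the small change in the stid allocator deserves a comment:
"n <= s->free - f" is evaluated in unsigned arithmetic (the stid_region
counters are unsigned), so when f exceeds s->free the subtraction wraps to a
huge value and the test can pass for a region with no room.  The rewritten
"s->free >= n + f" checks the same bound without the subtraction.  A
self-contained demonstration of the failure mode:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int sfree = 2, f = 5, n = 1;

		/* Old test: 2 - 5 wraps to UINT_MAX - 2, so this prints 1. */
		printf("old: %d\n", n <= sfree - f);
		/* New test: 2 >= 6 is false, so this correctly prints 0. */
		printf("new: %d\n", sfree >= n + f);
		return (0);
	}
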
Modified: trunk/sys/dev/cxgbe/tom/t4_tom.c
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_tom.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_tom.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_tom.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_tom.c 330303 2018-03-03 00:54:12Z jhb $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -36,11 +37,13 @@
 #include <sys/systm.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
+#include <sys/limits.h>
 #include <sys/module.h>
 #include <sys/protosw.h>
 #include <sys/domain.h>
 #include <sys/socket.h>
 #include <sys/socketvar.h>
+#include <sys/taskqueue.h>
 #include <net/if.h>
 #include <netinet/in.h>
 #include <netinet/in_pcb.h>
@@ -51,6 +54,7 @@
 #include <netinet6/scope6_var.h>
 #define TCPSTATES
 #include <netinet/tcp_fsm.h>
+#include <netinet/tcp_timer.h>
 #include <netinet/toecore.h>
 
 #ifdef TCP_OFFLOAD
@@ -57,6 +61,8 @@
 #include "common/common.h"
 #include "common/t4_msg.h"
 #include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_tcb.h"
 #include "tom/t4_tom_l2t.h"
 #include "tom/t4_tom.h"
 
@@ -89,12 +95,54 @@
 static int delete_lip(struct adapter *, struct in6_addr *);
 static struct clip_entry *search_lip(struct tom_data *, struct in6_addr *);
 static void init_clip_table(struct adapter *, struct tom_data *);
+static void update_clip(struct adapter *, void *);
+static void t4_clip_task(void *, int);
+static void update_clip_table(struct adapter *, struct tom_data *);
 static void destroy_clip_table(struct adapter *, struct tom_data *);
 static void free_tom_data(struct adapter *, struct tom_data *);
+static void reclaim_wr_resources(void *, int);
 
+static int in6_ifaddr_gen;
+static eventhandler_tag ifaddr_evhandler;
+static struct timeout_task clip_task;
+
+static void
+mbufq_init(struct mbufq *q, int limit)
+{
+
+	q->head = q->tail = NULL;
+}
+
+static void
+mbufq_drain(struct mbufq *q)
+{
+	struct mbuf *m;
+
+	while ((m = q->head) != NULL) {
+		q->head = m->m_nextpkt;
+		m_freem(m);
+	}
+	q->tail = NULL;
+}
+
+#ifdef INVARIANTS
+static inline int
+mbufq_len(const struct mbufq *q)
+{
+	struct mbuf *m;
+	int len;
+
+	len = 0;
+	for (m = q->head; m != NULL; m = m->m_nextpkt)
+		len++;
+	return (len);
+}
+#endif
+
 struct toepcb *
-alloc_toepcb(struct port_info *pi, int txqid, int rxqid, int flags)
+alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
 {
+	struct port_info *pi = vi->pi;
 	struct adapter *sc = pi->adapter;
 	struct toepcb *toep;
 	int tx_credits, txsd_total, len;
@@ -113,21 +161,21 @@
 	 * units of 16 byte.  Calculate the maximum work requests possible.
 	 */
 	txsd_total = tx_credits /
-	    howmany((sizeof(struct fw_ofld_tx_data_wr) + 1), 16);
+	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);
 
 	if (txqid < 0)
-		txqid = (arc4random() % pi->nofldtxq) + pi->first_ofld_txq;
-	KASSERT(txqid >= pi->first_ofld_txq &&
-	    txqid < pi->first_ofld_txq + pi->nofldtxq,
-	    ("%s: txqid %d for port %p (first %d, n %d)", __func__, txqid, pi,
-		pi->first_ofld_txq, pi->nofldtxq));
+		txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq;
+	KASSERT(txqid >= vi->first_ofld_txq &&
+	    txqid < vi->first_ofld_txq + vi->nofldtxq,
+	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
+		vi->first_ofld_txq, vi->nofldtxq));
 
 	if (rxqid < 0)
-		rxqid = (arc4random() % pi->nofldrxq) + pi->first_ofld_rxq;
-	KASSERT(rxqid >= pi->first_ofld_rxq &&
-	    rxqid < pi->first_ofld_rxq + pi->nofldrxq,
-	    ("%s: rxqid %d for port %p (first %d, n %d)", __func__, rxqid, pi,
-		pi->first_ofld_rxq, pi->nofldrxq));
+		rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq;
+	KASSERT(rxqid >= vi->first_ofld_rxq &&
+	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
+	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
+		vi->first_ofld_rxq, vi->nofldrxq));
 
 	len = offsetof(struct toepcb, txsd) +
 	    txsd_total * sizeof(struct ofld_tx_sdesc);
@@ -137,11 +185,14 @@
 		return (NULL);
 
 	toep->td = sc->tom_softc;
-	toep->port = pi;
+	toep->vi = vi;
+	toep->tx_total = tx_credits;
 	toep->tx_credits = tx_credits;
 	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
 	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
 	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
+	mbufq_init(&toep->ulp_pduq, INT_MAX);
+	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
 	toep->txsd_total = txsd_total;
 	toep->txsd_avail = txsd_total;
 	toep->txsd_pidx = 0;
@@ -257,6 +308,14 @@
 	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
 	    __func__, toep, tid, toep->l2te, toep->ce);
 
+	/*
+	 * These queues should have been emptied at approximately the same time
+	 * that a normal connection's socket's so_snd would have been purged or
+	 * drained.  Do _not_ clean up here.
+	 */
+	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
+	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
+
 	if (toep->ulp_mode == ULP_MODE_TCPDDP)
 		release_ddp_resources(toep);
 
@@ -264,7 +323,7 @@
 		t4_l2t_release(toep->l2te);
 
 	if (tid >= 0) {
-		remove_tid(sc, tid);
+		remove_tid(sc, tid, toep->ce ? 2 : 1);
 		release_tid(sc, tid, toep->ctrlq);
 	}
 
@@ -322,6 +381,31 @@
 }
 
 /*
+ * setsockopt handler.
+ */
+static void
+t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
+{
+	struct adapter *sc = tod->tod_softc;
+	struct toepcb *toep = tp->t_toe;
+
+	if (dir == SOPT_GET)
+		return;
+
+	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);
+
+	switch (name) {
+	case TCP_NODELAY:
+		t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS,
+		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
+		    0, 0, toep->ofld_rxq->iq.abs_id);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
  * The TOE driver will not receive any more CPLs for the tid associated with the
  * toepcb; release the hold on the inpcb.
  */
@@ -340,6 +424,7 @@
 
 	toep->inp = NULL;
 	toep->flags &= ~TPF_CPL_PENDING;
+	mbufq_drain(&toep->ulp_pdu_reclaimq);
 
 	if (!(toep->flags & TPF_ATTACHED))
 		release_offload_resources(toep);
@@ -349,12 +434,12 @@
 }
 
 void
-insert_tid(struct adapter *sc, int tid, void *ctx)
+insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
 {
 	struct tid_info *t = &sc->tids;
 
 	t->tid_tab[tid] = ctx;
-	atomic_add_int(&t->tids_in_use, 1);
+	atomic_add_int(&t->tids_in_use, ntids);
 }
 
 void *
@@ -374,12 +459,12 @@
 }
 
 void
-remove_tid(struct adapter *sc, int tid)
+remove_tid(struct adapter *sc, int tid, int ntids)
 {
 	struct tid_info *t = &sc->tids;
 
 	t->tid_tab[tid] = NULL;
-	atomic_subtract_int(&t->tids_in_use, 1);
+	atomic_subtract_int(&t->tids_in_use, ntids);
 }
 
 void
@@ -466,14 +551,11 @@
 	return (wscale);
 }
 
-extern int always_keepalive;
-#define VIID_SMACIDX(v)	(((unsigned int)(v) & 0x7f) << 1)
-
 /*
  * socket so could be a listening socket too.
  */
 uint64_t
-calc_opt0(struct socket *so, struct port_info *pi, struct l2t_entry *e,
+calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
     int mtu_idx, int rscale, int rx_credits, int ulp_mode)
 {
 	uint64_t opt0;
@@ -487,7 +569,7 @@
 	if (so != NULL) {
 		struct inpcb *inp = sotoinpcb(so);
 		struct tcpcb *tp = intotcpcb(inp);
-		int keepalive = always_keepalive ||
+		int keepalive = tcp_always_keepalive ||
 		    so_options_get(so) & SO_KEEPALIVE;
 
 		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
@@ -497,45 +579,48 @@
 	if (e != NULL)
 		opt0 |= V_L2T_IDX(e->idx);
 
-	if (pi != NULL) {
-		opt0 |= V_SMAC_SEL(VIID_SMACIDX(pi->viid));
-		opt0 |= V_TX_CHAN(pi->tx_chan);
+	if (vi != NULL) {
+		opt0 |= V_SMAC_SEL(vi->smt_idx);
+		opt0 |= V_TX_CHAN(vi->pi->tx_chan);
 	}
 
 	return htobe64(opt0);
 }
 
-#define FILTER_SEL_WIDTH_P_FC (3 + 1)
-#define FILTER_SEL_WIDTH_VIN_P_FC (6 + 7 + FILTER_SEL_WIDTH_P_FC)
-#define FILTER_SEL_WIDTH_TAG_P_FC (3 + FILTER_SEL_WIDTH_VIN_P_FC)
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-
-uint32_t
-select_ntuple(struct port_info *pi, struct l2t_entry *e, uint32_t filter_mode)
+uint64_t
+select_ntuple(struct vi_info *vi, struct l2t_entry *e)
 {
-	uint16_t viid = pi->viid;
-	uint32_t ntuple = 0;
+	struct adapter *sc = vi->pi->adapter;
+	struct tp_params *tp = &sc->params.tp;
+	uint16_t viid = vi->viid;
+	uint64_t ntuple = 0;
 
-	if (filter_mode == HW_TPL_FR_MT_PR_IV_P_FC) {
-                if (e->vlan == VLAN_NONE)
-			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
-                else {
-                        ntuple |= e->vlan << FILTER_SEL_WIDTH_P_FC;
-                        ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-                }
-                ntuple |= e->lport << S_PORT;
-		ntuple |= IPPROTO_TCP << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-	} else if (filter_mode == HW_TPL_FR_MT_PR_OV_P_FC) {
-                ntuple |= G_FW_VIID_VIN(viid) << FILTER_SEL_WIDTH_P_FC;
-                ntuple |= G_FW_VIID_PFN(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
-                ntuple |= G_FW_VIID_VIVLD(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
-                ntuple |= e->lport << S_PORT;
-		ntuple |= IPPROTO_TCP << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
-        }
+	/*
+	 * Initialize each of the fields which we care about which are present
+	 * in the Compressed Filter Tuple.
+	 */
+	if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE)
+		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
 
-	return (htobe32(ntuple));
+	if (tp->port_shift >= 0)
+		ntuple |= (uint64_t)e->lport << tp->port_shift;
+
+	if (tp->protocol_shift >= 0)
+		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
+
+	if (tp->vnic_shift >= 0) {
+		uint32_t vf = G_FW_VIID_VIN(viid);
+		uint32_t pf = G_FW_VIID_PFN(viid);
+		uint32_t vld = G_FW_VIID_VIVLD(viid);
+
+		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) |
+		    V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+	}
+
+	if (is_t4(sc))
+		return (htobe32((uint32_t)ntuple));
+	else
+		return (htobe64(V_FILTER_TUPLE(ntuple)));
 }
 
 void
@@ -623,7 +708,7 @@
         c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
         c.ip_lo = *(uint64_t *)&lip->s6_addr[8];
 
-	return (t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
+	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
 }
 
 static int
@@ -641,7 +726,7 @@
         c.ip_hi = *(uint64_t *)&lip->s6_addr[0];
         c.ip_lo = *(uint64_t *)&lip->s6_addr[8];
 
-	return (t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
+	return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c));
 }
 
 static struct clip_entry *
@@ -660,12 +745,12 @@
 }
 
 struct clip_entry *
-hold_lip(struct tom_data *td, struct in6_addr *lip)
+hold_lip(struct tom_data *td, struct in6_addr *lip, struct clip_entry *ce)
 {
-	struct clip_entry *ce;
 
 	mtx_lock(&td->clip_table_lock);
-	ce = search_lip(td, lip);
+	if (ce == NULL)
+		ce = search_lip(td, lip);
 	if (ce != NULL)
 		ce->refcount++;
 	mtx_unlock(&td->clip_table_lock);
@@ -689,47 +774,161 @@
 static void
 init_clip_table(struct adapter *sc, struct tom_data *td)
 {
-	struct in6_ifaddr *ia;
-	struct in6_addr *lip, tlip;
-	struct clip_entry *ce;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
 	mtx_init(&td->clip_table_lock, "CLIP table lock", NULL, MTX_DEF);
 	TAILQ_INIT(&td->clip_table);
+	td->clip_gen = -1;
 
+	update_clip_table(sc, td);
+}
+
+static void
+update_clip(struct adapter *sc, void *arg __unused)
+{
+
+	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc"))
+		return;
+
+	if (uld_active(sc, ULD_TOM))
+		update_clip_table(sc, sc->tom_softc);
+
+	end_synchronized_op(sc, LOCK_HELD);
+}
+
+static void
+t4_clip_task(void *arg, int count)
+{
+
+	t4_iterate(update_clip, NULL);
+}
+
+static void
+update_clip_table(struct adapter *sc, struct tom_data *td)
+{
+	struct in6_ifaddr *ia;
+	struct in6_addr *lip, tlip;
+	struct clip_head stale;
+	struct clip_entry *ce, *ce_temp;
+	struct vi_info *vi;
+	int rc, gen, i, j;
+	uintptr_t last_vnet;
+
+	ASSERT_SYNCHRONIZED_OP(sc);
+
 	IN6_IFADDR_RLOCK();
-	TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
-		lip = &ia->ia_addr.sin6_addr;
+	mtx_lock(&td->clip_table_lock);
 
-		KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
-		    ("%s: mcast address in in6_ifaddr list", __func__));
+	gen = atomic_load_acq_int(&in6_ifaddr_gen);
+	if (gen == td->clip_gen)
+		goto done;
 
-		if (IN6_IS_ADDR_LOOPBACK(lip))
+	TAILQ_INIT(&stale);
+	TAILQ_CONCAT(&stale, &td->clip_table, link);
+
+	/*
+	 * last_vnet optimizes the common cases where all if_vnet = NULL (no
+	 * VIMAGE) or all if_vnet = vnet0.
+	 */
+	last_vnet = (uintptr_t)(-1);
+	for_each_port(sc, i)
+	for_each_vi(sc->port[i], j, vi) {
+		if (last_vnet == (uintptr_t)vi->ifp->if_vnet)
 			continue;
-		if (IN6_IS_SCOPE_EMBED(lip)) {
-			/* Remove the embedded scope */
-			tlip = *lip;
-			lip = &tlip;
-			in6_clearscope(lip);
-		}
-		/*
-		 * XXX: how to weed out the link local address for the loopback
-		 * interface?  It's fe80::1 usually (always?).
-		 */
 
-		mtx_lock(&td->clip_table_lock);
-		if (search_lip(td, lip) == NULL) {
+		/* XXX: races with if_vmove */
+		CURVNET_SET(vi->ifp->if_vnet);
+		TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) {
+			lip = &ia->ia_addr.sin6_addr;
+
+			KASSERT(!IN6_IS_ADDR_MULTICAST(lip),
+			    ("%s: mcast address in in6_ifaddr list", __func__));
+
+			if (IN6_IS_ADDR_LOOPBACK(lip))
+				continue;
+			if (IN6_IS_SCOPE_EMBED(lip)) {
+				/* Remove the embedded scope */
+				tlip = *lip;
+				lip = &tlip;
+				in6_clearscope(lip);
+			}
+			/*
+			 * XXX: how to weed out the link local address for the
+			 * loopback interface?  It's fe80::1 usually (always?).
+			 */
+
+			/*
+			 * If it's in the main list then we already know it's
+			 * not stale.
+			 */
+			TAILQ_FOREACH(ce, &td->clip_table, link) {
+				if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip))
+					goto next;
+			}
+
+			/*
+			 * If it's in the stale list we should move it to the
+			 * main list.
+			 */
+			TAILQ_FOREACH(ce, &stale, link) {
+				if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) {
+					TAILQ_REMOVE(&stale, ce, link);
+					TAILQ_INSERT_TAIL(&td->clip_table, ce,
+					    link);
+					goto next;
+				}
+			}
+
+			/* A new IP6 address; add it to the CLIP table */
 			ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT);
 			memcpy(&ce->lip, lip, sizeof(ce->lip));
 			ce->refcount = 0;
-			if (add_lip(sc, lip) == 0)
+			rc = add_lip(sc, lip);
+			if (rc == 0)
 				TAILQ_INSERT_TAIL(&td->clip_table, ce, link);
-			else
+			else {
+				char ip[INET6_ADDRSTRLEN];
+
+				inet_ntop(AF_INET6, &ce->lip, &ip[0],
+				    sizeof(ip));
+				log(LOG_ERR, "%s: could not add %s (%d)\n",
+				    __func__, ip, rc);
 				free(ce, M_CXGBE);
+			}
+next:
+			continue;
 		}
-		mtx_unlock(&td->clip_table_lock);
+		CURVNET_RESTORE();
+		last_vnet = (uintptr_t)vi->ifp->if_vnet;
 	}
+
+	/*
+	 * Remove stale addresses (those no longer in V_in6_ifaddrhead) that are
+	 * no longer referenced by the driver.
+	 */
+	TAILQ_FOREACH_SAFE(ce, &stale, link, ce_temp) {
+		if (ce->refcount == 0) {
+			rc = delete_lip(sc, &ce->lip);
+			if (rc == 0) {
+				TAILQ_REMOVE(&stale, ce, link);
+				free(ce, M_CXGBE);
+			} else {
+				char ip[INET6_ADDRSTRLEN];
+
+				inet_ntop(AF_INET6, &ce->lip, &ip[0],
+				    sizeof(ip));
+				log(LOG_ERR, "%s: could not delete %s (%d)\n",
+				    __func__, ip, rc);
+			}
+		}
+	}
+	/* The ones that are still referenced need to stay in the CLIP table */
+	TAILQ_CONCAT(&td->clip_table, &stale, link);
+
+	td->clip_gen = gen;
+done:
+	mtx_unlock(&td->clip_table_lock);
 	IN6_IFADDR_RUNLOCK();
 }
 
@@ -764,14 +963,14 @@
 	KASSERT(td->lctx_count == 0,
 	    ("%s: lctx hash table is not empty.", __func__));
 
-	t4_uninit_l2t_cpl_handlers(sc);
-	t4_uninit_cpl_io_handlers(sc);
-	t4_uninit_ddp(sc, td);
+	t4_free_ppod_region(&td->pr);
 	destroy_clip_table(sc, td);
 
 	if (td->listen_mask != 0)
 		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);
 
+	if (mtx_initialized(&td->unsent_wr_lock))
+		mtx_destroy(&td->unsent_wr_lock);
 	if (mtx_initialized(&td->lctx_hash_lock))
 		mtx_destroy(&td->lctx_hash_lock);
 	if (mtx_initialized(&td->toep_list_lock))
@@ -781,6 +980,44 @@
 	free(td, M_CXGBE);
 }
 
+static void
+reclaim_wr_resources(void *arg, int count)
+{
+	struct tom_data *td = arg;
+	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
+	struct cpl_act_open_req *cpl;
+	u_int opcode, atid;
+	struct wrqe *wr;
+	struct adapter *sc;
+
+	mtx_lock(&td->unsent_wr_lock);
+	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
+	mtx_unlock(&td->unsent_wr_lock);
+
+	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
+		STAILQ_REMOVE_HEAD(&twr_list, link);
+
+		cpl = wrtod(wr);
+		opcode = GET_OPCODE(cpl);
+
+		switch (opcode) {
+		case CPL_ACT_OPEN_REQ:
+		case CPL_ACT_OPEN_REQ6:
+			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
+			sc = td_adapter(td);
+
+			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
+			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
+			free(wr, M_CXGBE);
+			break;
+		default:
+			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
+			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
+			/* WR not freed here; go look at it with a debugger.  */
+		}
+	}
+}
+
 /*
  * Ground control to Major TOM
  * Commencing countdown, engines on
@@ -790,7 +1027,9 @@
 {
 	struct tom_data *td;
 	struct toedev *tod;
-	int i, rc;
+	struct vi_info *vi;
+	struct sge_ofld_rxq *ofld_rxq;
+	int i, j, rc, v;
 
 	ASSERT_SYNCHRONIZED_OP(sc);
 
@@ -808,23 +1047,26 @@
 	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
 	    &td->listen_mask, HASH_NOWAIT);
 
+	/* List of WRs for which L2 resolution failed */
+	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
+	STAILQ_INIT(&td->unsent_wr_list);
+	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);
+
 	/* TID tables */
 	rc = alloc_tid_tabs(&sc->tids);
 	if (rc != 0)
 		goto done;
 
-	/* DDP page pods and CPL handlers */
-	t4_init_ddp(sc, td);
+	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
+	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
+	if (rc != 0)
+		goto done;
+	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
+	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);
 
 	/* CLIP table for IPv6 offload */
 	init_clip_table(sc, td);
 
-	/* CPL handlers */
-	t4_init_connect_cpl_handlers(sc);
-	t4_init_l2t_cpl_handlers(sc);
-	t4_init_listen_cpl_handlers(sc);
-	t4_init_cpl_io_handlers(sc);
-
 	/* toedev ops */
 	tod = &td->tod;
 	init_toedev(tod);
@@ -842,12 +1084,19 @@
 	tod->tod_syncache_removed = t4_syncache_removed;
 	tod->tod_syncache_respond = t4_syncache_respond;
 	tod->tod_offload_socket = t4_offload_socket;
+	tod->tod_ctloutput = t4_ctloutput;
 
-	for_each_port(sc, i)
-		TOEDEV(sc->port[i]->ifp) = &td->tod;
+	for_each_port(sc, i) {
+		for_each_vi(sc->port[i], v, vi) {
+			TOEDEV(vi->ifp) = &td->tod;
+			for_each_ofld_rxq(vi, j, ofld_rxq) {
+				ofld_rxq->iq.set_tcb_rpl = do_set_tcb_rpl;
+				ofld_rxq->iq.l2t_write_rpl = do_l2t_write_rpl2;
+			}
+		}
+	}
 
 	sc->tom_softc = td;
-	sc->flags |= TOM_INIT_DONE;
 	register_toedev(sc->tom_softc);
 
 done:
@@ -870,6 +1119,9 @@
 	if (sc->offload_map != 0)
 		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */
 
+	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
+		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */
+
 	mtx_lock(&td->toep_list_lock);
 	if (!TAILQ_EMPTY(&td->toep_list))
 		rc = EBUSY;
@@ -880,16 +1132,29 @@
 		rc = EBUSY;
 	mtx_unlock(&td->lctx_hash_lock);
 
+	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
+	mtx_lock(&td->unsent_wr_lock);
+	if (!STAILQ_EMPTY(&td->unsent_wr_list))
+		rc = EBUSY;
+	mtx_unlock(&td->unsent_wr_lock);
+
 	if (rc == 0) {
 		unregister_toedev(sc->tom_softc);
 		free_tom_data(sc, td);
 		sc->tom_softc = NULL;
-		sc->flags &= ~TOM_INIT_DONE;
 	}
 
 	return (rc);
 }
 
+static void
+t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp)
+{
+
+	atomic_add_rel_int(&in6_ifaddr_gen, 1);
+	taskqueue_enqueue_timeout(taskqueue_thread, &clip_task, -hz / 4);
+}
+
 static int
 t4_tom_mod_load(void)
 {
@@ -896,6 +1161,15 @@
 	int rc;
 	struct protosw *tcp_protosw, *tcp6_protosw;
 
+	/* CPL handlers */
+	t4_init_connect_cpl_handlers();
+	t4_init_listen_cpl_handlers();
+	t4_init_cpl_io_handlers();
+
+	rc = t4_ddp_mod_load();
+	if (rc != 0)
+		return (rc);
+
 	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
 	if (tcp_protosw == NULL)
 		return (ENOPROTOOPT);
@@ -912,6 +1186,10 @@
 	ddp6_usrreqs.pru_soreceive = t4_soreceive_ddp;
 	ddp6_protosw.pr_usrreqs = &ddp6_usrreqs;
 
+	TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL);
+	ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event,
+	    t4_tom_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
+
 	rc = t4_register_uld(&tom_uld_info);
 	if (rc != 0)
 		t4_tom_mod_unload();
@@ -922,14 +1200,14 @@
 static void
 tom_uninit(struct adapter *sc, void *arg __unused)
 {
-	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomun"))
+	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
 		return;
 
 	/* Try to free resources (works only if no port has IFCAP_TOE) */
-	if (sc->flags & TOM_INIT_DONE)
+	if (uld_active(sc, ULD_TOM))
 		t4_deactivate_uld(sc, ULD_TOM);
 
-	end_synchronized_op(sc, LOCK_HELD);
+	end_synchronized_op(sc, 0);
 }
 
 static int
@@ -940,6 +1218,17 @@
 	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
 		return (EBUSY);
 
+	if (ifaddr_evhandler) {
+		EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_evhandler);
+		taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL);
+	}
+
+	t4_ddp_mod_unload();
+
+	t4_uninit_connect_cpl_handlers();
+	t4_uninit_listen_cpl_handlers();
+	t4_uninit_cpl_io_handlers();
+
 	return (0);
 }
 #endif	/* TCP_OFFLOAD */

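The CLIP table in t4_tom.c is now kept in sync with the kernel's IPv6
addresses by an ifaddr_event handler that bumps a global generation counter
with release semantics and schedules a deferred task; update_clip_table()
loads the counter with acquire semantics and returns early when it matches
the generation it last processed, so a burst of address events collapses into
a single rescan.  A stripped-down sketch of that generation gate, with
hypothetical names (table_gen, update_table):

	#include <sys/types.h>
	#include <machine/atomic.h>

	struct table {
		u_int cached_gen;	/* last generation fully processed */
	};

	static volatile u_int table_gen;	/* bumped by the event handler */

	static void
	addr_event_handler(void)
	{
		atomic_add_rel_int(&table_gen, 1);
		/* ... enqueue the (timeout) task that calls update_table ... */
	}

	static void
	update_table(struct table *t)
	{
		u_int gen = atomic_load_acq_int(&table_gen);

		if (gen == t->cached_gen)
			return;		/* no address change since last scan */
		/* ... rescan the per-vnet address lists, rebuild the table ... */
		t->cached_gen = gen;
	}
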
Modified: trunk/sys/dev/cxgbe/tom/t4_tom.h
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_tom.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_tom.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,5 +1,6 @@
+/* $MidnightBSD$ */
 /*-
- * Copyright (c) 2012 Chelsio Communications, Inc.
+ * Copyright (c) 2012, 2015 Chelsio Communications, Inc.
  * All rights reserved.
  * Written by: Navdeep Parhar <np at FreeBSD.org>
  *
@@ -24,14 +25,23 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_tom.h 247434 2013-02-28 00:44:54Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_tom.h 318804 2017-05-24 20:01:12Z np $
  *
  */
 
 #ifndef __T4_TOM_H__
 #define __T4_TOM_H__
+#include <sys/vmem.h>
 
-#define KTR_CXGBE	KTR_SPARE3
+/*
+ * Inline version of mbufq for use on 10.x.  Borrowed from
+ * sys/cam/ctl/ctl_ha.c.
+ */
+struct mbufq {
+	struct mbuf *head;
+	struct mbuf *tail;
+};
+
 #define LISTEN_HASH_SIZE 32
 
 /*
@@ -49,10 +59,11 @@
 #define	DDP_RSVD_WIN (16 * 1024U)
 #define	SB_DDP_INDICATE	SB_IN_TOE	/* soreceive must respond to indicate */
 
-#define	M_DDP	M_PROTO1
-
 #define USE_DDP_RX_FLOW_CONTROL
 
+#define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
+#define PPOD_SIZE	(PPOD_SZ(1))
+
 /* TOE PCB flags */
 enum {
 	TPF_ATTACHED	   = (1 << 0),	/* a tcpcb refers to this toepcb */
@@ -84,27 +95,37 @@
 };
 
 struct ppod_region {
-	TAILQ_ENTRY(ppod_region) link;
-	int used;	/* # of pods used by this region */
-	int free;	/* # of contiguous pods free right after this region */
+	u_int pr_start;
+	u_int pr_len;
+	u_int pr_page_shift[4];
+	uint32_t pr_tag_mask;		/* hardware tagmask for this region. */
+	uint32_t pr_invalid_bit;	/* OR with this to invalidate tag. */
+	uint32_t pr_alias_mask;		/* AND with tag to get alias bits. */
+	u_int pr_alias_shift;		/* shift this much for first alias bit. */
+	vmem_t *pr_arena;
 };
 
+struct ppod_reservation {
+	struct ppod_region *prsv_pr;
+	uint32_t prsv_tag;		/* Full tag: pgsz, alias, tag, color */
+	u_int prsv_nppods;
+};
+
 struct ddp_buffer {
-	uint32_t tag;	/* includes color, page pod addr, and DDP page size */
-	int nppods;
 	int offset;
 	int len;
-	struct ppod_region ppod_region;
 	int npages;
 	vm_page_t *pages;
+	struct ppod_reservation prsv;
 };
 
 struct toepcb {
 	TAILQ_ENTRY(toepcb) link; /* toep_list */
-	unsigned int flags;	/* miscellaneous flags */
+	u_int flags;		/* miscellaneous flags */
 	struct tom_data *td;
 	struct inpcb *inp;	/* backpointer to host stack's PCB */
-	struct port_info *port;	/* physical port */
+	struct vnet *vnet;
+	struct vi_info *vi;	/* virtual interface */
 	struct sge_wrq *ofld_txq;
 	struct sge_ofld_rxq *ofld_rxq;
 	struct sge_wrq *ctrlq;
@@ -111,13 +132,24 @@
 	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
 	struct clip_entry *ce;	/* CLIP table entry used by this tid */
 	int tid;		/* Connection identifier */
-	unsigned int tx_credits;/* tx WR credits (in 16 byte units) remaining */
-	unsigned int sb_cc;	/* last noted value of so_rcv->sb_cc */
+
+	/* tx credit handling */
+	u_int tx_total;		/* total tx WR credits (in 16B units) */
+	u_int tx_credits;	/* tx WR credits (in 16B units) available */
+	u_int tx_nocompl;	/* tx WR credits since last compl request */
+	u_int plen_nocompl;	/* payload since last compl request */
+
+	/* rx credit handling */
+	u_int sb_cc;		/* last noted value of so_rcv->sb_cc */
 	int rx_credits;		/* rx credits (in bytes) to be returned to hw */
 
-	unsigned int ulp_mode;	/* ULP mode */
+	u_int ulp_mode;	/* ULP mode */
+	void *ulpcb;
+	void *ulpcb2;
+	struct mbufq ulp_pduq;	/* PDUs waiting to be sent out. */
+	struct mbufq ulp_pdu_reclaimq;
 
-	unsigned int ddp_flags;
+	u_int ddp_flags;
 	struct ddp_buffer *db[2];
 	time_t ddp_disabled;
 	uint8_t ddp_score;
@@ -169,13 +201,13 @@
 	struct stid_region stid_region;
 	int flags;
 	struct inpcb *inp;		/* listening socket's inp */
+	struct vnet *vnet;
 	struct sge_wrq *ctrlq;
 	struct sge_ofld_rxq *ofld_rxq;
+	struct clip_entry *ce;
 	TAILQ_HEAD(, synq_entry) synq;
 };
 
-TAILQ_HEAD(ppod_head, ppod_region);
-
 struct clip_entry {
 	TAILQ_ENTRY(clip_entry) link;
 	struct in6_addr lip;	/* local IPv6 address */
@@ -182,6 +214,7 @@
 	u_int refcount;
 };
 
+TAILQ_HEAD(clip_head, clip_entry);
 struct tom_data {
 	struct toedev tod;
 
@@ -194,14 +227,16 @@
 	u_long listen_mask;
 	int lctx_count;		/* # of lctx in the hash table */
 
-	struct mtx ppod_lock;
-	int nppods;
-	int nppods_free;	/* # of available ppods */
-	int nppods_free_head;	/* # of available ppods at the begining */
-	struct ppod_head ppods;
+	struct ppod_region pr;
 
 	struct mtx clip_table_lock;
-	TAILQ_HEAD(, clip_entry) clip_table;
+	struct clip_head clip_table;
+	int clip_gen;
+
+	/* WRs that will not be sent to the chip because L2 resolution failed */
+	struct mtx unsent_wr_lock;
+	STAILQ_HEAD(, wrqe) unsent_wr_list;
+	struct task reclaim_wr_resources;
 };
 
 static inline struct tom_data *
@@ -218,35 +253,61 @@
 	return (td->tod.tod_softc);
 }
 
+/*
+ * XXX: Don't define these for the iWARP driver on 10 due to differences
+ * in LinuxKPI.
+ */
+#ifndef	_LINUX_TYPES_H_
+static inline void
+set_mbuf_ulp_submode(struct mbuf *m, uint8_t ulp_submode)
+{
+
+	M_ASSERTPKTHDR(m);
+	m->m_pkthdr.PH_per.eigth[0] = ulp_submode;
+}
+
+static inline uint8_t
+mbuf_ulp_submode(struct mbuf *m)
+{
+
+	M_ASSERTPKTHDR(m);
+	return (m->m_pkthdr.PH_per.eigth[0]);
+}
+#endif
+
 /* t4_tom.c */
-struct toepcb *alloc_toepcb(struct port_info *, int, int, int);
+struct toepcb *alloc_toepcb(struct vi_info *, int, int, int);
 void free_toepcb(struct toepcb *);
 void offload_socket(struct socket *, struct toepcb *);
 void undo_offload_socket(struct socket *);
 void final_cpl_received(struct toepcb *);
-void insert_tid(struct adapter *, int, void *);
+void insert_tid(struct adapter *, int, void *, int);
 void *lookup_tid(struct adapter *, int);
 void update_tid(struct adapter *, int, void *);
-void remove_tid(struct adapter *, int);
+void remove_tid(struct adapter *, int, int);
 void release_tid(struct adapter *, int, struct sge_wrq *);
 int find_best_mtu_idx(struct adapter *, struct in_conninfo *, int);
 u_long select_rcv_wnd(struct socket *);
 int select_rcv_wscale(void);
-uint64_t calc_opt0(struct socket *, struct port_info *, struct l2t_entry *,
+uint64_t calc_opt0(struct socket *, struct vi_info *, struct l2t_entry *,
     int, int, int, int);
-uint32_t select_ntuple(struct port_info *, struct l2t_entry *, uint32_t);
+uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
 void set_tcpddp_ulp_mode(struct toepcb *);
 int negative_advice(int);
-struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *);
+struct clip_entry *hold_lip(struct tom_data *, struct in6_addr *,
+    struct clip_entry *);
 void release_lip(struct tom_data *, struct clip_entry *);
 
 /* t4_connect.c */
-void t4_init_connect_cpl_handlers(struct adapter *);
+void t4_init_connect_cpl_handlers(void);
+void t4_uninit_connect_cpl_handlers(void);
 int t4_connect(struct toedev *, struct socket *, struct rtentry *,
     struct sockaddr *);
+void act_open_failure_cleanup(struct adapter *, u_int, u_int);
 
 /* t4_listen.c */
-void t4_init_listen_cpl_handlers(struct adapter *);
+void t4_init_listen_cpl_handlers(void);
+void t4_uninit_listen_cpl_handlers(void);
 int t4_listen_start(struct toedev *, struct tcpcb *);
 int t4_listen_stop(struct toedev *, struct tcpcb *);
 void t4_syncache_added(struct toedev *, void *);
@@ -259,8 +320,8 @@
 void t4_offload_socket(struct toedev *, void *, struct socket *);
 
 /* t4_cpl_io.c */
-void t4_init_cpl_io_handlers(struct adapter *);
-void t4_uninit_cpl_io_handlers(struct adapter *);
+void t4_init_cpl_io_handlers(void);
+void t4_uninit_cpl_io_handlers(void);
 void send_abort_rpl(struct adapter *, struct sge_wrq *, int , int);
 void send_flowc_wr(struct toepcb *, struct flowc_tx_params *);
 void send_reset(struct adapter *, struct toepcb *, uint32_t);
@@ -269,15 +330,32 @@
 int t4_tod_output(struct toedev *, struct tcpcb *);
 int t4_send_fin(struct toedev *, struct tcpcb *);
 int t4_send_rst(struct toedev *, struct tcpcb *);
-void t4_set_tcb_field(struct adapter *, struct toepcb *, uint16_t, uint64_t,
-    uint64_t);
+void t4_set_tcb_field(struct adapter *, struct sge_wrq *, int, uint16_t,
+    uint64_t, uint64_t, int, int, int);
+void t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop);
+void t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop);
+int do_set_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
 
 /* t4_ddp.c */
-void t4_init_ddp(struct adapter *, struct tom_data *);
-void t4_uninit_ddp(struct adapter *, struct tom_data *);
+int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
+    const char *);
+void t4_free_ppod_region(struct ppod_region *);
+int t4_alloc_page_pods_for_db(struct ppod_region *, struct ddp_buffer *);
+int t4_alloc_page_pods_for_buf(struct ppod_region *, vm_offset_t, int,
+    struct ppod_reservation *);
+int t4_write_page_pods_for_db(struct adapter *, struct sge_wrq *, int,
+    struct ddp_buffer *);
+int t4_write_page_pods_for_buf(struct adapter *, struct sge_wrq *, int tid,
+    struct ppod_reservation *, vm_offset_t, int);
+void t4_free_page_pods(struct ppod_reservation *);
 int t4_soreceive_ddp(struct socket *, struct sockaddr **, struct uio *,
     struct mbuf **, struct mbuf **, int *);
+int t4_ddp_mod_load(void);
+void t4_ddp_mod_unload(void);
 void enable_ddp(struct adapter *, struct toepcb *toep);
 void release_ddp_resources(struct toepcb *toep);
+void handle_ddp_close(struct toepcb *, struct tcpcb *, struct sockbuf *,
+    uint32_t);
 void insert_ddp_data(struct toepcb *, uint32_t);
+
 #endif

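The struct mbufq shim above only needs init, drain, and an INVARIANTS-only
length check in this commit; producers link packets through m_nextpkt.  For
reference, a hypothetical enqueue consistent with the head/tail invariants
(not part of this commit) would be:

	#include <sys/param.h>
	#include <sys/mbuf.h>

	static void
	mbufq_enqueue(struct mbufq *q, struct mbuf *m)
	{

		m->m_nextpkt = NULL;
		if (q->tail == NULL)
			q->head = m;		/* queue was empty */
		else
			q->tail->m_nextpkt = m;	/* append after old tail */
		q->tail = m;
	}
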
Modified: trunk/sys/dev/cxgbe/tom/t4_tom_l2t.c
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_tom_l2t.c	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_tom_l2t.c	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -24,7 +25,7 @@
  * SUCH DAMAGE.
  */
 #include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_tom_l2t.c 247434 2013-02-28 00:44:54Z np $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_tom_l2t.c 309442 2016-12-02 21:29:52Z jhb $");
 
 #include "opt_inet.h"
 #include "opt_inet6.h"
@@ -41,6 +42,7 @@
 #include <sys/rwlock.h>
 #include <sys/socket.h>
 #include <sys/sbuf.h>
+#include <sys/taskqueue.h>
 #include <net/if.h>
 #include <net/if_types.h>
 #include <net/ethernet.h>
@@ -161,25 +163,17 @@
 }
 
 static void
-resolution_failed_for_wr(struct wrqe *wr)
+resolution_failed(struct adapter *sc, struct l2t_entry *e)
 {
-	log(LOG_ERR, "%s: leaked work request %p, wr_len %d\n", __func__, wr,
-	    wr->wr_len);
+	struct tom_data *td = sc->tom_softc;
 
-	/* free(wr, M_CXGBE); */
-}
+	mtx_assert(&e->lock, MA_OWNED);
 
-static void
-resolution_failed(struct l2t_entry *e)
-{
-	struct wrqe *wr;
+	mtx_lock(&td->unsent_wr_lock);
+	STAILQ_CONCAT(&td->unsent_wr_list, &e->wr_list);
+	mtx_unlock(&td->unsent_wr_lock);
 
-	mtx_assert(&e->lock, MA_OWNED);
-
-	while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
-		STAILQ_REMOVE_HEAD(&e->wr_list, link);
-		resolution_failed_for_wr(wr);
-	}
+	taskqueue_enqueue(taskqueue_thread, &td->reclaim_wr_resources);
 }
 
 static void
@@ -203,7 +197,7 @@
 		 * need to wlock the table).
 		 */
 		e->state = L2T_STATE_FAILED;
-		resolution_failed(e);
+		resolution_failed(sc, e);
 		return;
 
 	} else if (lladdr == NULL) {
@@ -226,7 +220,7 @@
 
 			memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
 			e->vlan = vtag;
-			t4_write_l2e(sc, e, 1);
+			t4_write_l2e(e, 1);
 		}
 		e->state = L2T_STATE_VALID;
 	}
@@ -305,12 +299,11 @@
 		if (e->state == L2T_STATE_VALID && !STAILQ_EMPTY(&e->wr_list))
 			send_pending(sc, e);
 		if (e->state == L2T_STATE_FAILED)
-			resolution_failed(e);
+			resolution_failed(sc, e);
 		mtx_unlock(&e->lock);
 		break;
 
 	case L2T_STATE_FAILED:
-		resolution_failed_for_wr(wr);
 		return (EHOSTUNREACH);
 	}
 
@@ -317,19 +310,7 @@
 	return (0);
 }
 
-/*
- * Called when an L2T entry has no more users.  The entry is left in the hash
- * table since it is likely to be reused but we also bump nfree to indicate
- * that the entry can be reallocated for a different neighbor.  We also drop
- * the existing neighbor reference in case the neighbor is going away and is
- * waiting on our reference.
- *
- * Because entries can be reallocated to other neighbors once their ref count
- * drops to 0 we need to take the entry's lock to avoid races with a new
- * incarnation.
- */
-
-static int
+int
 do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss,
     struct mbuf *m)
 {
@@ -337,11 +318,13 @@
 	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
 	unsigned int tid = GET_TID(rpl);
 	unsigned int idx = tid % L2T_SIZE;
-	int rc;
 
-	rc = do_l2t_write_rpl(iq, rss, m);
-	if (rc != 0)
-		return (rc);
+	if (__predict_false(rpl->status != CPL_ERR_NONE)) {
+		log(LOG_ERR,
+		    "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n",
+		    rpl->status, idx);
+		return (EINVAL);
+	}
 
 	if (tid & F_SYNC_WR) {
 		struct l2t_entry *e = &sc->l2t->l2tab[idx - sc->vres.l2t.start];
@@ -357,20 +340,6 @@
 	return (0);
 }
 
-void
-t4_init_l2t_cpl_handlers(struct adapter *sc)
-{
-
-	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl2);
-}
-
-void
-t4_uninit_l2t_cpl_handlers(struct adapter *sc)
-{
-
-	t4_register_cpl_handler(sc, CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
-}
-
 /*
  * The TOE wants an L2 table entry that it can use to reach the next hop over
  * the specified port.  Produce such an entry - create one if needed.
@@ -382,7 +351,8 @@
 t4_l2t_get(struct port_info *pi, struct ifnet *ifp, struct sockaddr *sa)
 {
 	struct l2t_entry *e;
-	struct l2t_data *d = pi->adapter->l2t;
+	struct adapter *sc = pi->adapter;
+	struct l2t_data *d = sc->l2t;
 	u_int hash, smt_idx = pi->port_id;
 
 	KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
@@ -417,6 +387,8 @@
 		e->smt_idx = smt_idx;
 		e->hash = hash;
 		e->lport = pi->lport;
+		e->wrq = &sc->sge.ctrlq[pi->port_id];
+		e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
 		atomic_store_rel_int(&e->refcnt, 1);
 #ifdef VLAN_TAG
 		if (ifp->if_type == IFT_L2VLAN)

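The t4_tom_l2t.c change stops leaking work requests when L2 resolution fails:
instead of logging each WR and dropping it, resolution_failed() splices the
entry's pending list onto tom_data's unsent_wr_list under that list's own
lock and kicks a thread-taskqueue task, which can clean up in a context where
sleeping is allowed.  Condensed, with the CPL-specific cleanup elided and the
function names shortened, the handoff looks like:

	/* Producer: runs with the L2T entry lock held, must not sleep. */
	static void
	hand_off_failed_wrs(struct adapter *sc, struct l2t_entry *e)
	{
		struct tom_data *td = sc->tom_softc;

		mtx_lock(&td->unsent_wr_lock);
		STAILQ_CONCAT(&td->unsent_wr_list, &e->wr_list);
		mtx_unlock(&td->unsent_wr_lock);
		taskqueue_enqueue(taskqueue_thread, &td->reclaim_wr_resources);
	}

	/* Consumer: snapshot the list, then work without the lock held. */
	static void
	reclaim_task(void *arg, int count)
	{
		struct tom_data *td = arg;
		STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
		struct wrqe *wr;

		mtx_lock(&td->unsent_wr_lock);
		STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
		mtx_unlock(&td->unsent_wr_lock);

		while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
			STAILQ_REMOVE_HEAD(&twr_list, link);
			/* ... recover the atid / log, then free the WR ... */
		}
	}
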
Modified: trunk/sys/dev/cxgbe/tom/t4_tom_l2t.h
===================================================================
--- trunk/sys/dev/cxgbe/tom/t4_tom_l2t.h	2018-05-28 00:14:50 UTC (rev 10119)
+++ trunk/sys/dev/cxgbe/tom/t4_tom_l2t.h	2018-05-28 00:17:55 UTC (rev 10120)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /*-
  * Copyright (c) 2012 Chelsio Communications, Inc.
  * All rights reserved.
@@ -23,7 +24,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: stable/9/sys/dev/cxgbe/tom/t4_tom_l2t.h 237263 2012-06-19 07:34:13Z np $
+ * $FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_tom_l2t.h 309442 2016-12-02 21:29:52Z jhb $
  *
  */
 
@@ -37,8 +38,8 @@
     struct sockaddr *);
 void t4_l2_update(struct toedev *, struct ifnet *, struct sockaddr *,
     uint8_t *, uint16_t);
-void t4_init_l2t_cpl_handlers(struct adapter *);
-void t4_uninit_l2t_cpl_handlers(struct adapter *);
+int do_l2t_write_rpl2(struct sge_iq *, const struct rss_header *,
+    struct mbuf *);
 
 static inline int
 t4_l2t_send(struct adapter *sc, struct wrqe *wr, struct l2t_entry *e)

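Finally, note the registration model change that runs through all of these
files: CPL handlers used to be registered per adapter
(t4_register_cpl_handler(sc, opcode, handler)) and are now installed in one
global table at module load, with NULL restoring the default at unload;
do_l2t_write_rpl2 is exported here so t4_tom.c can also hang it directly off
each offload rx queue.  Schematically, with made-up names and a simplified
handler signature:

	typedef int (*cpl_handler_t)(void *iq, const void *rss, void *m);

	static int
	default_handler(void *iq, const void *rss, void *m)
	{
		/* A CPL arrived for which no module has claimed the opcode. */
		return (0);
	}

	static cpl_handler_t cpl_handlers[256];	/* indexed by 8-bit opcode */

	static void
	register_handler(int opcode, cpl_handler_t h)
	{
		/* NULL restores the default, as the *_mod_unload paths do. */
		cpl_handlers[opcode] = (h == NULL ? default_handler : h);
	}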

