[Midnightbsd-cvs] src [10060] trunk/sys/dev/ixl: add ixl

laffer1 at midnightbsd.org laffer1 at midnightbsd.org
Sun May 27 19:00:06 EDT 2018


Revision: 10060
          http://svnweb.midnightbsd.org/src/?rev=10060
Author:   laffer1
Date:     2018-05-27 19:00:05 -0400 (Sun, 27 May 2018)
Log Message:
-----------
add ixl

Added Paths:
-----------
    trunk/sys/dev/ixl/
    trunk/sys/dev/ixl/README
    trunk/sys/dev/ixl/i40e_adminq.c
    trunk/sys/dev/ixl/i40e_adminq.h
    trunk/sys/dev/ixl/i40e_adminq_cmd.h
    trunk/sys/dev/ixl/i40e_alloc.h
    trunk/sys/dev/ixl/i40e_common.c
    trunk/sys/dev/ixl/i40e_devids.h
    trunk/sys/dev/ixl/i40e_hmc.c
    trunk/sys/dev/ixl/i40e_hmc.h
    trunk/sys/dev/ixl/i40e_lan_hmc.c
    trunk/sys/dev/ixl/i40e_lan_hmc.h
    trunk/sys/dev/ixl/i40e_nvm.c
    trunk/sys/dev/ixl/i40e_osdep.c
    trunk/sys/dev/ixl/i40e_osdep.h
    trunk/sys/dev/ixl/i40e_prototype.h
    trunk/sys/dev/ixl/i40e_register.h
    trunk/sys/dev/ixl/i40e_status.h
    trunk/sys/dev/ixl/i40e_type.h
    trunk/sys/dev/ixl/i40e_virtchnl.h
    trunk/sys/dev/ixl/if_ixl.c
    trunk/sys/dev/ixl/if_ixlv.c
    trunk/sys/dev/ixl/ixl.h
    trunk/sys/dev/ixl/ixl_pf.h
    trunk/sys/dev/ixl/ixl_txrx.c
    trunk/sys/dev/ixl/ixlv.h
    trunk/sys/dev/ixl/ixlv_vc_mgr.h
    trunk/sys/dev/ixl/ixlvc.c

Added: trunk/sys/dev/ixl/README
===================================================================
--- trunk/sys/dev/ixl/README	                        (rev 0)
+++ trunk/sys/dev/ixl/README	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,411 @@
+	ixl FreeBSD* Base Driver and ixlv VF Driver for the
+	     Intel XL710 Ethernet Controller Family
+
+$MidnightBSD$
+/*$FreeBSD: stable/10/sys/dev/ixl/README 270919 2014-09-01 07:54:30Z jfv $*/
+================================================================
+
+August 26, 2014
+
+
+Contents
+========
+
+- Overview
+- Supported Adapters
+- The VF Driver
+- Tarball Building and Installation
+- Configuration and Tuning
+- Known Limitations
+
+
+Overview
+========
+
+This file describes the IXL FreeBSD* Base driver and the IXLV VF driver
+for the XL710 Ethernet Family of Adapters. The drivers have been developed
+for use with FreeBSD 10.0 or later, but should be compatible with any
+supported release.
+
+For questions related to hardware requirements, refer to the documentation
+supplied with your Intel XL710 adapter. All hardware requirements listed
+apply for use with FreeBSD.
+
+
+Supported Adapters
+==================
+
+The drivers in this release are compatible with XL710 and X710-based
+Intel Ethernet Network Connections.
+
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+SR Modules
+----------
+  Intel     DUAL RATE 1G/10G SFP+ SR (bailed)    FTLX8571D3BCV-IT
+  Intel     DUAL RATE 1G/10G SFP+ SR (bailed)    AFBR-703SDZ-IN2
+
+LR Modules
+----------
+  Intel     DUAL RATE 1G/10G SFP+ LR (bailed)    FTLX1471D3BCV-IT
+  Intel     DUAL RATE 1G/10G SFP+ LR (bailed)    AFCT-701SDZ-IN2
+
+QSFP+ Modules
+-------------
+  Intel     TRIPLE RATE 1G/10G/40G QSFP+ SR (bailed)    E40GQSFPSR
+  Intel     TRIPLE RATE 1G/10G/40G QSFP+ LR (bailed)    E40GQSFPLR
+    QSFP+ 1G speed is not supported on XL710 based devices.
+
+X710/XL710-based SFP+ adapters support all passive and active limiting direct
+attach cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+              
+The VF Driver
+=============
+The VF driver is normally used in a virtualized environment where a host
+driver manages SRIOV, and provides a VF device to the guest. With this
+first release the only host environment tested was using Linux QEMU/KVM.
+Support is planned for Xen and VMWare hosts at a later time.
+
+In the FreeBSD guest, the IXLV driver is loaded and functions using the
+VF device assigned to it by the host.
+
+The VF driver provides most of the same functionality as the CORE driver,
+but it is subordinate to the host: access to many controls is accomplished
+by sending a request to the host via what is called the "Admin queue".
+These are startup and initialization events, however; once in operation
+the device is self-contained and should achieve near-native performance.
+
+Some notable limitations of the VF environment: first, for security reasons
+the driver is never permitted to be promiscuous, so tcpdump will not behave
+the same way on the interface. Second, media info is not available from
+the PF, so it will always be reported as auto.
+
+Tarball Building and Installation
+=================================
+
+NOTE: You must have kernel sources installed to compile the driver tarball.
+
+These instructions assume a standalone driver tarball. Building the driver
+already in the kernel source is simply a matter of adding the device entry
+to the kernel config file, or of building in the ixl or ixlv module directory.
+
+In the instructions below, x.x.x is the driver version
+as indicated in the name of the driver tarball. The example is
+for ixl; the same procedure applies to ixlv.
+
+1. Move the base driver tar file to the directory of your choice.
+   For example, use /home/username/ixl or /usr/local/src/ixl.
+
+2. Untar/unzip the archive:
+     tar xfz ixl-x.x.x.tar.gz
+
+3. To install man page:
+     cd ixl-x.x.x
+     gzip -c ixl.4 > /usr/share/man/man4/ixl.4.gz
+
+4. To load the driver onto a running system:
+     cd ixl-x.x.x/src
+     make load
+
+5. To assign an IP address to the interface, enter the following:
+     ifconfig ixl<interface_num> <IP_address>
+
+6. Verify that the interface works. Enter the following, where <IP_address>
+   is the IP address for another machine on the same subnet as the interface
+   that is being tested:
+
+     ping <IP_address>
+
+7. If you want the driver to load automatically when the system is booted:
+
+     cd ixl-x.x.x/src
+     make
+     make install
+        
+    Edit /boot/loader.conf, and add the following line:
+     if_ixl_load="YES"
+
+    Edit /etc/rc.conf, and create the appropriate
+    ifconfig_ixl<interface_num> entry:
+
+     ifconfig_ixl<interface_num>="<ifconfig_settings>"
+
+     Example usage:
+
+     ifconfig_ixl0="inet 192.168.10.1 netmask 255.255.255.0"
+
+     NOTE: For assistance, see the ifconfig man page.
+
+
+
+Configuration and Tuning
+========================
+
+Both drivers support Transmit/Receive Checksum Offload for IPv4 and IPv6,
+TSO for IPv4 and IPv6, LRO, and Jumbo Frames on all 40 Gigabit adapters.
+
+  Jumbo Frames
+  ------------
+  To enable Jumbo Frames, use the ifconfig utility to increase
+  the MTU beyond 1500 bytes.
+
+       - The Jumbo Frames setting on the switch must be set to at least
+         22 bytes larger than that of the adapter.
+
+       - The maximum MTU setting for Jumbo Frames is 9706. This value
+         coincides with the maximum jumbo frames size of 9728.
+         To modify the setting, enter the following:
+
+        ifconfig ixl<interface_num> <hostname or IP address> mtu 9000
+
+       - To confirm an interface's MTU value, use the ifconfig command.
+         To confirm the MTU used between two specific devices, use:
+
+        route get <destination_IP_address>
+
+  VLANs
+  -----
+  To create a new VLAN pseudo-interface:
+
+        ifconfig <vlan_name> create
+
+  To associate the VLAN pseudo-interface with a physical interface
+  and assign a VLAN ID, IP address, and netmask:
+
+        ifconfig <vlan_name> <ip_address> netmask <subnet_mask> vlan
+           <vlan_id> vlandev <physical_interface>
+
+  Example:
+
+        ifconfig vlan10 10.0.0.1 netmask 255.255.255.0 vlan 10 vlandev ixl0
+
+  In this example, all packets will be marked on egress with
+  802.1Q VLAN tags, specifying a VLAN ID of 10.
+
+  To remove a VLAN pseudo-interface:
+
+        ifconfig <vlan_name> destroy
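+
+  Example:
+
+        ifconfig vlan10 destroy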
+
+
+  Checksum Offload
+  ----------------
+    
+  Checksum offloading supports IPv4 and IPv6 with TCP and UDP packets
+  and is supported for both transmit and receive. Checksum offloading
+  for transmit and receive is enabled by default for both IPv4 and IPv6.
+
+  Checksum offloading can be enabled or disabled using ifconfig.
+  Transmit and receive offloading for IPv4 and IPv6 are enabled
+  and disabled separately.
+
+  NOTE: TSO requires Tx checksum, so when Tx checksum
+        is disabled, TSO will also be disabled.
+
+  To enable Tx checksum offloading for IPv4:
+
+         ifconfig ixl<interface_num> txcsum4
+
+  To disable Tx checksum offloading for IPv4:
+
+         ifconfig ixl<interface_num> -txcsum4
+         (NOTE: This will also disable TSO4)
+
+  To enable Rx checksum offloading for IPv6:
+
+         ifconfig ixl<interface_num> rxcsum6
+
+  To disable Rx checksum offloading for IPv6:
+
+         ifconfig ixl<interface_num> -rxcsum6
+
+  Similarly, disabling Tx checksum offloading for IPv6 (-txcsum6)
+  will also disable TSO6.
+
+  
+  To confirm the current settings:
+
+         ifconfig ixl<interface_num>
+
+  
+  TSO
+  ---
+
+  TSO supports both IPv4 and IPv6 and is enabled by default. TSO can
+  be disabled and enabled using the ifconfig utility.
+
+  NOTE: TSO requires Tx checksum, so when Tx checksum is
+      disabled, TSO will also be disabled. 
+
+  To disable TSO IPv4:
+
+         ifconfig ixl<interface_num> -tso4
+         
+  To enable TSO IPv4:
+
+         ifconfig ixl<interface_num> tso4 
+
+  To disable TSO IPv6:
+
+         ifconfig ixl<interface_num> -tso6
+
+  To enable TSO IPv6:
+        
+         ifconfig ixl<interface_num> tso6
+
+  To disable BOTH TSO IPv4 and IPv6:
+
+         ifconfig ixl<interface_num> -tso
+
+  To enable BOTH TSO IPv4 and IPv6:
+  
+         ifconfig ixl<interface_num> tso
+
+
+  LRO
+  ---
+
+  Large Receive Offload is enabled by default. It can be enabled
+  or disabled by using the ifconfig utility.
+
+  NOTE: LRO should be disabled when forwarding packets.
+
+  To disable LRO:	
+
+         ifconfig ixl<interface_num> -lro 
+
+  To enable LRO:
+
+         ifconfig ixl<interface_num> lro 
+
+
+Flow Control (IXL only)
+-----------------------
+Flow control is disabled by default. To change flow control settings, use sysctl.
+
+To enable flow control for Rx pause frames:
+
+         sysctl dev.ixl.<interface_num>.fc=1
+
+To enable flow control for Tx pause frames:
+
+         sysctl dev.ixl.<interface_num>.fc=2
+
+To enable flow control for both Rx and Tx pause frames:
+
+         sysctl dev.ixl.<interface_num>.fc=3
+
+To disable flow control:
+
+         sysctl dev.ixl.<interface_num>.fc=0
+    
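+To make the setting persist across reboots, it can also be placed in
+/etc/sysctl.conf (illustrative entry; assumes the first port, ixl0):
+
+         dev.ixl.0.fc=3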
+
+NOTE: You must have a flow control capable link partner.
+
+NOTE: The VF driver does not have access to flow control; it must be
+	managed from the host side.
+
+   
+  Important system configuration changes:
+  =======================================
+ 
+-Change the file /etc/sysctl.conf, and add the line:
+
+         hw.intr_storm_threshold=0   (the default is 1000)
+
+-Best throughput results are seen with a large MTU; use 9706 if possible. 
+
+-The default number of descriptors per ring is 1024; increasing this may
+ improve performance, depending on the use case.
+
+-The VF driver uses a relatively large buf ring; this was found to eliminate
+ UDP transmit errors. It is a tunable, and if no UDP traffic is present it
+ can be reduced. The memory is allocated per queue.
+
+
+Known Limitations
+=================
+
+Network Memory Buffer allocation
+--------------------------------
+  FreeBSD may have a low number of network memory buffers (mbufs) by default.
+If your mbuf value is too low, it may cause the driver to fail to initialize
+and/or cause the system to become unresponsive. You can check to see if the
+system is mbuf-starved by running 'netstat -m'. Increase the number of mbufs
+by editing the lines below in /etc/sysctl.conf:
+
+         kern.ipc.nmbclusters
+         kern.ipc.nmbjumbop    
+         kern.ipc.nmbjumbo9
+         kern.ipc.nmbjumbo16
+         kern.ipc.nmbufs
+
+The amount of memory that you allocate is system specific, and may
+require some trial and error.
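+
+As an illustrative sketch only (the values below are examples, not
+recommendations, and must be tuned for your system and workload):
+
+         kern.ipc.nmbclusters=262144
+         kern.ipc.nmbjumbop=131072
+         kern.ipc.nmbjumbo9=65536
+         kern.ipc.nmbjumbo16=32768
+         kern.ipc.nmbufs=1048576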
+
+Also, increasing the following in /etc/sysctl.conf could help increase
+network performance:
+         
+         kern.ipc.maxsockbuf
+         net.inet.tcp.sendspace
+         net.inet.tcp.recvspace
+         net.inet.udp.maxdgram
+         net.inet.udp.recvspace
+                  
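+For example (again, illustrative values only):
+
+         kern.ipc.maxsockbuf=16777216
+         net.inet.tcp.sendspace=262144
+         net.inet.tcp.recvspace=262144
+         net.inet.udp.maxdgram=57344
+         net.inet.udp.recvspace=131072
+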
+
+UDP Stress Test Dropped Packet Issue
+------------------------------------
+Under small-packet UDP stress tests with the ixl driver, the FreeBSD system
+may drop UDP packets because the socket buffers fill up. You may want to
+change the driver's flow control variables to the minimum value for
+controlling packet reception.
+
+
+Disable LRO when routing/bridging
+---------------------------------
+LRO must be turned off when forwarding traffic.
+
+
+Lower than expected performance
+-------------------------------
+Some PCIe x8 slots are actually configured as x4 slots. These slots have
+insufficient bandwidth for full line rate with dual port and quad port
+devices.
+
+In addition, if you put a PCIe Generation 3-capable adapter into a PCIe
+Generation 2 slot, you cannot get full bandwidth. The driver detects this
+situation and writes the following message in the system log:
+
+  "PCI-Express bandwidth available for this card is not sufficient for
+   optimal performance. For optimal performance a x8 PCI-Express slot
+   is required."
+
+If this error occurs, moving your adapter to a true PCIe Generation 3 x8
+slot will resolve the issue.
+
+
+Support
+=======
+
+For general information and support, go to the Intel support website at:
+
+        http://support.intel.com
+
+If an issue is identified with the released source code on a supported kernel
+with a supported adapter, email the specific information related to the issue
+to freebsdnic at mailbox.intel.com.
+
+
+License
+=======
+
+This software program is released under the terms of a license agreement
+between you ('Licensee') and Intel. Do not use or load this software or any
+associated materials (collectively, the 'Software') until you have carefully
+read the full terms and conditions of the LICENSE located in this software
+package. By loading or using the Software, you agree to the terms of this
+Agreement. If you do not agree with the terms of this Agreement, do not
+install or use the Software.
+
+* Other names and brands may be claimed as the property of others.
+
+


Property changes on: trunk/sys/dev/ixl/README
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_adminq.c
===================================================================
--- trunk/sys/dev/ixl/i40e_adminq.c	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_adminq.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,1102 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_adminq.c 292100 2015-12-11 13:08:38Z smh $*/
+
+#include "i40e_status.h"
+#include "i40e_type.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+
+/**
+ * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+	return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
+		desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
+}
+
+/**
+ *  i40e_adminq_init_regs - Initialize AdminQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the alloc_asq and alloc_arq functions have already been called
+ **/
+static void i40e_adminq_init_regs(struct i40e_hw *hw)
+{
+	/* set head and tail registers in our local struct */
+	if (i40e_is_vf(hw)) {
+		hw->aq.asq.tail = I40E_VF_ATQT1;
+		hw->aq.asq.head = I40E_VF_ATQH1;
+		hw->aq.asq.len  = I40E_VF_ATQLEN1;
+		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
+		hw->aq.arq.tail = I40E_VF_ARQT1;
+		hw->aq.arq.head = I40E_VF_ARQH1;
+		hw->aq.arq.len  = I40E_VF_ARQLEN1;
+		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
+	} else {
+		hw->aq.asq.tail = I40E_PF_ATQT;
+		hw->aq.asq.head = I40E_PF_ATQH;
+		hw->aq.asq.len  = I40E_PF_ATQLEN;
+		hw->aq.asq.bal  = I40E_PF_ATQBAL;
+		hw->aq.asq.bah  = I40E_PF_ATQBAH;
+		hw->aq.arq.tail = I40E_PF_ARQT;
+		hw->aq.arq.head = I40E_PF_ARQH;
+		hw->aq.arq.len  = I40E_PF_ARQLEN;
+		hw->aq.arq.bal  = I40E_PF_ARQBAL;
+		hw->aq.arq.bah  = I40E_PF_ARQBAH;
+	}
+}
+
+/**
+ *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+					 i40e_mem_atq_ring,
+					 (hw->aq.num_asq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+	if (ret_code)
+		return ret_code;
+
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+					  (hw->aq.num_asq_entries *
+					  sizeof(struct i40e_asq_cmd_details)));
+	if (ret_code) {
+		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+		return ret_code;
+	}
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code;
+
+	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+					 i40e_mem_arq_ring,
+					 (hw->aq.num_arq_entries *
+					 sizeof(struct i40e_aq_desc)),
+					 I40E_ADMINQ_DESC_ALIGNMENT);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_adminq_asq - Free Admin Queue send rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted send buffers have already been cleaned
+ *  and de-allocated
+ **/
+void i40e_free_adminq_asq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ *  i40e_free_adminq_arq - Free Admin Queue receive rings
+ *  @hw: pointer to the hardware structure
+ *
+ *  This assumes the posted receive buffers have already been cleaned
+ *  and de-allocated
+ **/
+void i40e_free_adminq_arq(struct i40e_hw *hw)
+{
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ *  @hw: pointer to the hardware structure
+ **/
+static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* We'll be allocating the buffer info memory first, then we can
+	 * allocate the mapped buffers for the event processing
+	 */
+
+	/* buffer_info structures do not need alignment */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_arq_bufs;
+	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_arq_entries; i++) {
+		bi = &hw->aq.arq.r.arq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_arq_buf,
+						 hw->aq.arq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_arq_bufs;
+
+		/* now configure the descriptors for use */
+		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
+
+		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+		desc->opcode = 0;
+		/* This is in accordance with Admin queue design, there is no
+		 * register for buffer size configuration
+		 */
+		desc->datalen = CPU_TO_LE16((u16)bi->size);
+		desc->retval = 0;
+		desc->cookie_high = 0;
+		desc->cookie_low = 0;
+		desc->params.external.addr_high =
+			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+		desc->params.external.addr_low =
+			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+		desc->params.external.param0 = 0;
+		desc->params.external.param1 = 0;
+	}
+
+alloc_arq_bufs:
+	return ret_code;
+
+unwind_alloc_arq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ *  @hw: pointer to the hardware structure
+ **/
+static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code;
+	struct i40e_dma_mem *bi;
+	int i;
+
+	/* No mapped memory needed yet, just the buffer info structures */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
+	if (ret_code)
+		goto alloc_asq_bufs;
+	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
+
+	/* allocate the mapped buffers */
+	for (i = 0; i < hw->aq.num_asq_entries; i++) {
+		bi = &hw->aq.asq.r.asq_bi[i];
+		ret_code = i40e_allocate_dma_mem(hw, bi,
+						 i40e_mem_asq_buf,
+						 hw->aq.asq_buf_size,
+						 I40E_ADMINQ_DESC_ALIGNMENT);
+		if (ret_code)
+			goto unwind_alloc_asq_bufs;
+	}
+alloc_asq_bufs:
+	return ret_code;
+
+unwind_alloc_asq_bufs:
+	/* don't try to free the one that failed... */
+	i--;
+	for (; i >= 0; i--)
+		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_free_arq_bufs - Free receive queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct i40e_hw *hw)
+{
+	int i;
+
+	/* free descriptors */
+	for (i = 0; i < hw->aq.num_arq_entries; i++)
+		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ *  i40e_free_asq_bufs - Free send queue buffer info elements
+ *  @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct i40e_hw *hw)
+{
+	int i;
+
+	/* only unmap if the address is non-NULL */
+	for (i = 0; i < hw->aq.num_asq_entries; i++)
+		if (hw->aq.asq.r.asq_bi[i].pa)
+			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+	/* free the buffer info list */
+	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+	/* free the descriptor memory */
+	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+	/* free the dma header */
+	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ *  i40e_config_asq_regs - configure ASQ registers
+ *  @hw: pointer to the hardware structure
+ *
+ *  Configure base address and length registers for the transmit queue
+ **/
+static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u32 reg = 0;
+
+	/* Clear Head and Tail */
+	wr32(hw, hw->aq.asq.head, 0);
+	wr32(hw, hw->aq.asq.tail, 0);
+
+	/* set starting point */
+	if (!i40e_is_vf(hw))
+		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+					  I40E_PF_ATQLEN_ATQENABLE_MASK));
+	if (i40e_is_vf(hw))
+		wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+					  I40E_VF_ATQLEN1_ATQENABLE_MASK));
+	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
+	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));
+
+	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.asq.bal);
+	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
+}
+
+/**
+ *  i40e_config_arq_regs - ARQ register configuration
+ *  @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u32 reg = 0;
+
+	/* Clear Head and Tail */
+	wr32(hw, hw->aq.arq.head, 0);
+	wr32(hw, hw->aq.arq.tail, 0);
+
+	/* set starting point */
+	if (!i40e_is_vf(hw))
+		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+					  I40E_PF_ARQLEN_ARQENABLE_MASK));
+	if (i40e_is_vf(hw))
+		wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+					  I40E_VF_ARQLEN1_ARQENABLE_MASK));
+	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
+	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));
+
+	/* Update tail in the HW to post pre-allocated buffers */
+	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+	/* Check one register to verify that config was applied */
+	reg = rd32(hw, hw->aq.arq.bal);
+	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+	return ret_code;
+}
+
+/**
+ *  i40e_init_asq - main initialization routine for ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  This is the main initialization routine for the Admin Send Queue
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.arq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (hw->aq.asq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+	hw->aq.asq.count = hw->aq.num_asq_entries;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_asq_ring(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_asq_bufs(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	ret_code = i40e_config_asq_regs(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_free_rings;
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_init_arq - initialize ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main initialization routine for the Admin Receive (Event) Queue.
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.arq_buf_size
+ *
+ *  Do *NOT* hold the lock when calling this as the memory allocation routines
+ *  called are not going to be atomic context safe
+ **/
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (hw->aq.arq.count > 0) {
+		/* queue already initialized */
+		ret_code = I40E_ERR_NOT_READY;
+		goto init_adminq_exit;
+	}
+
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+	hw->aq.arq.count = hw->aq.num_arq_entries;
+
+	/* allocate the ring memory */
+	ret_code = i40e_alloc_adminq_arq_ring(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_exit;
+
+	/* allocate buffers in the rings */
+	ret_code = i40e_alloc_arq_bufs(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_free_rings;
+
+	/* initialize base registers */
+	ret_code = i40e_config_arq_regs(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_free_rings;
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_rings:
+	i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_asq - shutdown the ASQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Send Queue
+ **/
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (hw->aq.asq.count == 0)
+		return I40E_ERR_NOT_READY;
+
+	/* Stop firmware AdminQ processing */
+	wr32(hw, hw->aq.asq.head, 0);
+	wr32(hw, hw->aq.asq.tail, 0);
+	wr32(hw, hw->aq.asq.len, 0);
+	wr32(hw, hw->aq.asq.bal, 0);
+	wr32(hw, hw->aq.asq.bah, 0);
+
+	/* make sure spinlock is available */
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+	hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_asq_bufs(hw);
+
+	i40e_release_spinlock(&hw->aq.asq_spinlock);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_arq - shutdown ARQ
+ *  @hw: pointer to the hardware structure
+ *
+ *  The main shutdown routine for the Admin Receive Queue
+ **/
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (hw->aq.arq.count == 0)
+		return I40E_ERR_NOT_READY;
+
+	/* Stop firmware AdminQ processing */
+	wr32(hw, hw->aq.arq.head, 0);
+	wr32(hw, hw->aq.arq.tail, 0);
+	wr32(hw, hw->aq.arq.len, 0);
+	wr32(hw, hw->aq.arq.bal, 0);
+	wr32(hw, hw->aq.arq.bah, 0);
+
+	/* make sure spinlock is available */
+	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+	hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+	/* free ring buffers */
+	i40e_free_arq_bufs(hw);
+
+	i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_init_adminq - main initialization routine for Admin Queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  Prior to calling this function, drivers *MUST* set the following fields
+ *  in the hw->aq structure:
+ *     - hw->aq.num_asq_entries
+ *     - hw->aq.num_arq_entries
+ *     - hw->aq.arq_buf_size
+ *     - hw->aq.asq_buf_size
+ **/
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code;
+	u16 eetrack_lo, eetrack_hi;
+	u16 cfg_ptr, oem_hi, oem_lo;
+	int retry = 0;
+	/* verify input for valid configuration */
+	if ((hw->aq.num_arq_entries == 0) ||
+	    (hw->aq.num_asq_entries == 0) ||
+	    (hw->aq.arq_buf_size == 0) ||
+	    (hw->aq.asq_buf_size == 0)) {
+		ret_code = I40E_ERR_CONFIG;
+		goto init_adminq_exit;
+	}
+
+	/* initialize spin locks */
+	i40e_init_spinlock(&hw->aq.asq_spinlock);
+	i40e_init_spinlock(&hw->aq.arq_spinlock);
+
+	/* Set up register offsets */
+	i40e_adminq_init_regs(hw);
+
+	/* setup ASQ command write back timeout */
+	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+	/* allocate the ASQ */
+	ret_code = i40e_init_asq(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_destroy_spinlocks;
+
+	/* allocate the ARQ */
+	ret_code = i40e_init_arq(hw);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_free_asq;
+
+	/* VF has no need of firmware */
+	if (i40e_is_vf(hw))
+		goto init_adminq_exit;
+	/* There are some cases where the firmware may not be quite ready
+	 * for AdminQ operations, so we retry the AdminQ setup a few times
+	 * if we see timeouts in this first AQ call.
+	 */
+	do {
+		ret_code = i40e_aq_get_firmware_version(hw,
+							&hw->aq.fw_maj_ver,
+							&hw->aq.fw_min_ver,
+							&hw->aq.fw_build,
+							&hw->aq.api_maj_ver,
+							&hw->aq.api_min_ver,
+							NULL);
+		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+			break;
+		retry++;
+		i40e_msec_delay(100);
+		i40e_resume_aq(hw);
+	} while (retry < 10);
+	if (ret_code != I40E_SUCCESS)
+		goto init_adminq_free_arq;
+
+	/* get the NVM version info */
+	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
+			   &hw->nvm.version);
+	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
+	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
+	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
+	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
+	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
+			   &oem_hi);
+	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
+			   &oem_lo);
+	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
+
+	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
+		goto init_adminq_free_arq;
+	}
+
+	/* pre-emptive resource lock release */
+	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+	hw->aq.nvm_release_on_done = FALSE;
+	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
+	ret_code = i40e_aq_set_hmc_resource_profile(hw,
+						    I40E_HMC_PROFILE_DEFAULT,
+						    0,
+						    NULL);
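+	/* note: the return code from the HMC profile request is deliberately
+	 * discarded by the assignment below; the call is best effort
+	 */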
+	ret_code = I40E_SUCCESS;
+
+	/* success! */
+	goto init_adminq_exit;
+
+init_adminq_free_arq:
+	i40e_shutdown_arq(hw);
+init_adminq_free_asq:
+	i40e_shutdown_asq(hw);
+init_adminq_destroy_spinlocks:
+	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+init_adminq_exit:
+	return ret_code;
+}
+
+/**
+ *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
+ *  @hw: pointer to the hardware structure
+ **/
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (i40e_check_asq_alive(hw))
+		i40e_aq_queue_shutdown(hw, TRUE);
+
+	i40e_shutdown_asq(hw);
+	i40e_shutdown_arq(hw);
+
+	/* destroy the spinlocks */
+	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
+	i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+
+	if (hw->nvm_buff.va)
+		i40e_free_virt_mem(hw, &hw->nvm_buff);
+
+	return ret_code;
+}
+
+/**
+ *  i40e_clean_asq - cleans Admin send queue
+ *  @hw: pointer to the hardware structure
+ *
+ *  returns the number of free desc
+ **/
+u16 i40e_clean_asq(struct i40e_hw *hw)
+{
+	struct i40e_adminq_ring *asq = &(hw->aq.asq);
+	struct i40e_asq_cmd_details *details;
+	u16 ntc = asq->next_to_clean;
+	struct i40e_aq_desc desc_cb;
+	struct i40e_aq_desc *desc;
+
+	desc = I40E_ADMINQ_DESC(*asq, ntc);
+	details = I40E_ADMINQ_DETAILS(*asq, ntc);
+
+	while (rd32(hw, hw->aq.asq.head) != ntc) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
+		if (details->callback) {
+			I40E_ADMINQ_CALLBACK cb_func =
+					(I40E_ADMINQ_CALLBACK)details->callback;
+			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
+				    I40E_DMA_TO_DMA);
+			cb_func(hw, &desc_cb);
+		}
+		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
+		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
+		ntc++;
+		if (ntc == asq->count)
+			ntc = 0;
+		desc = I40E_ADMINQ_DESC(*asq, ntc);
+		details = I40E_ADMINQ_DETAILS(*asq, ntc);
+	}
+
+	asq->next_to_clean = ntc;
+
+	return I40E_DESC_UNUSED(asq);
+}
+
+/**
+ *  i40e_asq_done - check if FW has processed the Admin Send Queue
+ *  @hw: pointer to the hw struct
+ *
+ *  Returns TRUE if the firmware has processed all descriptors on the
+ *  admin send queue. Returns FALSE if there are still requests pending.
+ **/
+bool i40e_asq_done(struct i40e_hw *hw)
+{
+	/* AQ designers suggest use of head for better
+	 * timing reliability than DD bit
+	 */
+	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ *  i40e_asq_send_command - send command to Admin Queue
+ *  @hw: pointer to the hw struct
+ *  @desc: prefilled descriptor describing the command (non DMA mem)
+ *  @buff: buffer to use for indirect commands
+ *  @buff_size: size of buffer for indirect commands
+ *  @cmd_details: pointer to command details structure
+ *
+ *  This is the main send command driver routine for the Admin Queue send
+ *  queue.  It runs the queue, cleans the queue, etc
+ **/
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	struct i40e_dma_mem *dma_buff = NULL;
+	struct i40e_asq_cmd_details *details;
+	struct i40e_aq_desc *desc_on_ring;
+	bool cmd_completed = FALSE;
+	u16  retval = 0;
+	u32  val = 0;
+
+	hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
+	val = rd32(hw, hw->aq.asq.head);
+	if (val >= hw->aq.num_asq_entries) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: head overrun at %d\n", val);
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_exit;
+	}
+
+	if (hw->aq.asq.count == 0) {
+		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Admin queue not initialized.\n");
+		status = I40E_ERR_QUEUE_EMPTY;
+		goto asq_send_command_exit;
+	}
+
+	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+	if (cmd_details) {
+		i40e_memcpy(details,
+			    cmd_details,
+			    sizeof(struct i40e_asq_cmd_details),
+			    I40E_NONDMA_TO_NONDMA);
+
+		/* If the cmd_details are defined copy the cookie.  The
+		 * CPU_TO_LE32 is not needed here because the data is ignored
+		 * by the FW, only used by the driver
+		 */
+		if (details->cookie) {
+			desc->cookie_high =
+				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
+			desc->cookie_low =
+				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
+		}
+	} else {
+		i40e_memset(details, 0,
+			    sizeof(struct i40e_asq_cmd_details),
+			    I40E_NONDMA_MEM);
+	}
+
+	/* clear requested flags and then set additional flags if defined */
+	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
+	desc->flags |= CPU_TO_LE16(details->flags_ena);
+
+	i40e_acquire_spinlock(&hw->aq.asq_spinlock);
+
+	if (buff_size > hw->aq.asq_buf_size) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Invalid buffer size: %d.\n",
+			   buff_size);
+		status = I40E_ERR_INVALID_SIZE;
+		goto asq_send_command_error;
+	}
+
+	if (details->postpone && !details->async) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Async flag not set along with postpone flag");
+		status = I40E_ERR_PARAM;
+		goto asq_send_command_error;
+	}
+
+	/* call clean and check queue available function to reclaim the
+	 * descriptors that were processed by FW, the function returns the
+	 * number of desc available
+	 */
+	/* the clean function called here could be called in a separate thread
+	 * in case of asynchronous completions
+	 */
+	if (i40e_clean_asq(hw) == 0) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Error queue is full.\n");
+		status = I40E_ERR_ADMIN_QUEUE_FULL;
+		goto asq_send_command_error;
+	}
+
+	/* initialize the temp desc pointer with the right desc */
+	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+	/* if the desc is available copy the temp desc to the right place */
+	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
+		    I40E_NONDMA_TO_DMA);
+
+	/* if buff is not NULL assume indirect command */
+	if (buff != NULL) {
+		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
+		/* copy the user buff into the respective DMA buff */
+		i40e_memcpy(dma_buff->va, buff, buff_size,
+			    I40E_NONDMA_TO_DMA);
+		desc_on_ring->datalen = CPU_TO_LE16(buff_size);
+
+		/* Update the address values in the desc with the pa value
+		 * for respective buffer
+		 */
+		desc_on_ring->params.external.addr_high =
+				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
+		desc_on_ring->params.external.addr_low =
+				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
+	}
+
+	/* bump the tail */
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+		      buff, buff_size);
+	(hw->aq.asq.next_to_use)++;
+	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+		hw->aq.asq.next_to_use = 0;
+	if (!details->postpone)
+		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+	/* if cmd_details are not defined or async flag is not set,
+	 * we need to wait for desc write back
+	 */
+	if (!details->async && !details->postpone) {
+		u32 total_delay = 0;
+
+		do {
+			/* AQ designers suggest use of head for better
+			 * timing reliability than DD bit
+			 */
+			if (i40e_asq_done(hw))
+				break;
+			/* ugh! delay while spin_lock */
+			i40e_msec_delay(1);
+			total_delay++;
+		} while (total_delay < hw->aq.asq_cmd_timeout);
+	}
+
+	/* if ready, copy the desc back to temp */
+	if (i40e_asq_done(hw)) {
+		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
+			    I40E_DMA_TO_NONDMA);
+		if (buff != NULL)
+			i40e_memcpy(buff, dma_buff->va, buff_size,
+				    I40E_DMA_TO_NONDMA);
+		retval = LE16_TO_CPU(desc->retval);
+		if (retval != 0) {
+			i40e_debug(hw,
+				   I40E_DEBUG_AQ_MESSAGE,
+				   "AQTX: Command completed with error 0x%X.\n",
+				   retval);
+
+			/* strip off FW internal code */
+			retval &= 0xff;
+		}
+		cmd_completed = TRUE;
+		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+			status = I40E_SUCCESS;
+		else
+			status = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+	}
+
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+		   "AQTX: desc and buffer writeback:\n");
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+	/* save writeback aq if requested */
+	if (details->wb_desc)
+		i40e_memcpy(details->wb_desc, desc_on_ring,
+			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);
+
+	/* update the error if time out occurred */
+	if ((!cmd_completed) &&
+	    (!details->async && !details->postpone)) {
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQTX: Writeback timeout.\n");
+		status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+	}
+
+asq_send_command_error:
+	i40e_release_spinlock(&hw->aq.asq_spinlock);
+asq_send_command_exit:
+	return status;
+}
+
+/**
+ *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
+ *  @desc:     pointer to the temp descriptor (non DMA mem)
+ *  @opcode:   the opcode can be used to decide which flags to turn off or on
+ *
+ *  Fill the desc with default values
+ **/
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode)
+{
+	/* zero out the desc */
+	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
+		    I40E_NONDMA_MEM);
+	desc->opcode = CPU_TO_LE16(opcode);
+	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
+}
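+
+/* Illustrative sketch (not taken from this file) of how a caller typically
+ * pairs the helper above with i40e_asq_send_command() for a direct,
+ * buffer-less command; the opcode shown is only an example:
+ *
+ *	struct i40e_aq_desc desc;
+ *	enum i40e_status_code status;
+ *
+ *	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
+ *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+ */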
+
+/**
+ *  i40e_clean_arq_element
+ *  @hw: pointer to the hw struct
+ *  @e: event info from the receive descriptor, includes any buffers
+ *  @pending: number of events that could be left to process
+ *
+ *  This function cleans one Admin Receive Queue element and returns
+ *  the contents through e.  It can also return how many events are
+ *  left to process through 'pending'
+ **/
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *pending)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u16 ntc = hw->aq.arq.next_to_clean;
+	struct i40e_aq_desc *desc;
+	struct i40e_dma_mem *bi;
+	u16 desc_idx;
+	u16 datalen;
+	u16 flags;
+	u16 ntu;
+
+	/* take the lock before we start messing with the ring */
+	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
+
+	/* set next_to_use to head */
+	if (!i40e_is_vf(hw))
+		ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+	if (i40e_is_vf(hw))
+		ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
+	if (ntu == ntc) {
+		/* nothing to do - shouldn't need to update ring's values */
+		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+		goto clean_arq_element_out;
+	}
+
+	/* now clean the next descriptor */
+	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
+	desc_idx = ntc;
+
+	flags = LE16_TO_CPU(desc->flags);
+	if (flags & I40E_AQ_FLAG_ERR) {
+		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+		hw->aq.arq_last_status =
+			(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
+		i40e_debug(hw,
+			   I40E_DEBUG_AQ_MESSAGE,
+			   "AQRX: Event received with error 0x%X.\n",
+			   hw->aq.arq_last_status);
+	}
+
+	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
+		    I40E_DMA_TO_NONDMA);
+	datalen = LE16_TO_CPU(desc->datalen);
+	e->msg_len = min(datalen, e->buf_len);
+	if (e->msg_buf != NULL && (e->msg_len != 0))
+		i40e_memcpy(e->msg_buf,
+			    hw->aq.arq.r.arq_bi[desc_idx].va,
+			    e->msg_len, I40E_DMA_TO_NONDMA);
+
+	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+		      hw->aq.arq_buf_size);
+
+	/* Restore the original datalen and buffer address in the desc,
+	 * FW updates datalen to indicate the event message
+	 * size
+	 */
+	bi = &hw->aq.arq.r.arq_bi[ntc];
+	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);
+
+	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
+	desc->datalen = CPU_TO_LE16((u16)bi->size);
+	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
+	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
+
+	/* set tail = the last cleaned desc index. */
+	wr32(hw, hw->aq.arq.tail, ntc);
+	/* ntc is updated to tail + 1 */
+	ntc++;
+	if (ntc == hw->aq.num_arq_entries)
+		ntc = 0;
+	hw->aq.arq.next_to_clean = ntc;
+	hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+	/* Set pending if needed, unlock and return */
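+	/* pending = events still waiting between next_to_clean (ntc) and the
+	 * hardware head (ntu), adding one full ring length when the head has
+	 * wrapped around past ntc
+	 */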
+	if (pending != NULL)
+		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+	i40e_release_spinlock(&hw->aq.arq_spinlock);
+
+	if (i40e_is_nvm_update_op(&e->desc)) {
+		if (hw->aq.nvm_release_on_done) {
+			i40e_release_nvm(hw);
+			hw->aq.nvm_release_on_done = FALSE;
+		}
+
+		switch (hw->nvmupd_state) {
+		case I40E_NVMUPD_STATE_INIT_WAIT:
+			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+			break;
+
+		case I40E_NVMUPD_STATE_WRITE_WAIT:
+			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	return ret_code;
+}
+
+void i40e_resume_aq(struct i40e_hw *hw)
+{
+	/* Registers are reset after PF reset */
+	hw->aq.asq.next_to_use = 0;
+	hw->aq.asq.next_to_clean = 0;
+
+	i40e_config_asq_regs(hw);
+
+	hw->aq.arq.next_to_use = 0;
+	hw->aq.arq.next_to_clean = 0;
+
+	i40e_config_arq_regs(hw);
+}


Property changes on: trunk/sys/dev/ixl/i40e_adminq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_adminq.h
===================================================================
--- trunk/sys/dev/ixl/i40e_adminq.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_adminq.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,126 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_adminq.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_ADMINQ_H_
+#define _I40E_ADMINQ_H_
+
+#include "i40e_osdep.h"
+#include "i40e_status.h"
+#include "i40e_adminq_cmd.h"
+
+#define I40E_ADMINQ_DESC(R, i)   \
+	(&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define I40E_ADMINQ_DESC_ALIGNMENT 4096
+
+struct i40e_adminq_ring {
+	struct i40e_virt_mem dma_head;	/* space for dma structures */
+	struct i40e_dma_mem desc_buf;	/* descriptor ring memory */
+	struct i40e_virt_mem cmd_buf;	/* command buffer memory */
+
+	union {
+		struct i40e_dma_mem *asq_bi;
+		struct i40e_dma_mem *arq_bi;
+	} r;
+
+	u16 count;		/* Number of descriptors */
+	u16 rx_buf_len;		/* Admin Receive Queue buffer length */
+
+	/* used for interrupt processing */
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	/* used for queue tracking */
+	u32 head;
+	u32 tail;
+	u32 len;
+	u32 bah;
+	u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+	void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
+	u64 cookie;
+	u16 flags_ena;
+	u16 flags_dis;
+	bool async;
+	bool postpone;
+	struct i40e_aq_desc *wb_desc;
+};
+
+#define I40E_ADMINQ_DETAILS(R, i)   \
+	(&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+	struct i40e_aq_desc desc;
+	u16 msg_len;
+	u16 buf_len;
+	u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct i40e_adminq_info {
+	struct i40e_adminq_ring arq;    /* receive queue */
+	struct i40e_adminq_ring asq;    /* send queue */
+	u32 asq_cmd_timeout;            /* send queue cmd write back timeout*/
+	u16 num_arq_entries;            /* receive queue depth */
+	u16 num_asq_entries;            /* send queue depth */
+	u16 arq_buf_size;               /* receive queue buffer size */
+	u16 asq_buf_size;               /* send queue buffer size */
+	u16 fw_maj_ver;                 /* firmware major version */
+	u16 fw_min_ver;                 /* firmware minor version */
+	u32 fw_build;                   /* firmware build number */
+	u16 api_maj_ver;                /* api major version */
+	u16 api_min_ver;                /* api minor version */
+	bool nvm_release_on_done;
+
+	struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
+	struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
+
+	/* last status values on send and receive queues */
+	enum i40e_admin_queue_err asq_last_status;
+	enum i40e_admin_queue_err arq_last_status;
+};
+
+/* general information */
+#define I40E_AQ_LARGE_BUF		512
+#define I40E_ASQ_CMD_TIMEOUT		250  /* msecs */
+
+void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
+				       u16 opcode);
+
+#endif /* _I40E_ADMINQ_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_adminq.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_adminq_cmd.h
===================================================================
--- trunk/sys/dev/ixl/i40e_adminq_cmd.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_adminq_cmd.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,2425 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_adminq_cmd.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR	0x0001
+#ifdef X722_SUPPORT
+#define I40E_FW_API_VERSION_MINOR	0x0003
+#else
+#define I40E_FW_API_VERSION_MINOR	0x0004
+#endif
+
+struct i40e_aq_desc {
+	__le16 flags;
+	__le16 opcode;
+	__le16 datalen;
+	__le16 retval;
+	__le32 cookie_high;
+	__le32 cookie_low;
+	union {
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 param2;
+			__le32 param3;
+		} internal;
+		struct {
+			__le32 param0;
+			__le32 param1;
+			__le32 addr_high;
+			__le32 addr_low;
+		} external;
+		u8 raw[16];
+	} params;
+};
+
+/* Flags sub-structure
+ * |0  |1  |2  |3  |4  |5  |6  |7  |8  |9  |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * *  RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets*/
+#define I40E_AQ_FLAG_DD_SHIFT	0
+#define I40E_AQ_FLAG_CMP_SHIFT	1
+#define I40E_AQ_FLAG_ERR_SHIFT	2
+#define I40E_AQ_FLAG_VFE_SHIFT	3
+#define I40E_AQ_FLAG_LB_SHIFT	9
+#define I40E_AQ_FLAG_RD_SHIFT	10
+#define I40E_AQ_FLAG_VFC_SHIFT	11
+#define I40E_AQ_FLAG_BUF_SHIFT	12
+#define I40E_AQ_FLAG_SI_SHIFT	13
+#define I40E_AQ_FLAG_EI_SHIFT	14
+#define I40E_AQ_FLAG_FE_SHIFT	15
+
+#define I40E_AQ_FLAG_DD		(1 << I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
+#define I40E_AQ_FLAG_CMP	(1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
+#define I40E_AQ_FLAG_ERR	(1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
+#define I40E_AQ_FLAG_VFE	(1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
+#define I40E_AQ_FLAG_LB		(1 << I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
+#define I40E_AQ_FLAG_RD		(1 << I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
+#define I40E_AQ_FLAG_VFC	(1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
+#define I40E_AQ_FLAG_BUF	(1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI		(1 << I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
+#define I40E_AQ_FLAG_EI		(1 << I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
+#define I40E_AQ_FLAG_FE		(1 << I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
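+
+/* Usage sketch (illustrative): a command that attaches a buffer sets
+ * I40E_AQ_FLAG_BUF, and additionally I40E_AQ_FLAG_RD when the buffer
+ * carries data for the device to read rather than space for a response:
+ *
+ *	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ *	desc.datalen = CPU_TO_LE16(buff_size);
+ *
+ * On completion the firmware sets I40E_AQ_FLAG_DD, plus I40E_AQ_FLAG_ERR
+ * with a code from the i40e_admin_queue_err enum below if the command
+ * failed.
+ */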
+
+/* error codes */
+enum i40e_admin_queue_err {
+	I40E_AQ_RC_OK		= 0,  /* success */
+	I40E_AQ_RC_EPERM	= 1,  /* Operation not permitted */
+	I40E_AQ_RC_ENOENT	= 2,  /* No such element */
+	I40E_AQ_RC_ESRCH	= 3,  /* Bad opcode */
+	I40E_AQ_RC_EINTR	= 4,  /* operation interrupted */
+	I40E_AQ_RC_EIO		= 5,  /* I/O error */
+	I40E_AQ_RC_ENXIO	= 6,  /* No such resource */
+	I40E_AQ_RC_E2BIG	= 7,  /* Arg too long */
+	I40E_AQ_RC_EAGAIN	= 8,  /* Try again */
+	I40E_AQ_RC_ENOMEM	= 9,  /* Out of memory */
+	I40E_AQ_RC_EACCES	= 10, /* Permission denied */
+	I40E_AQ_RC_EFAULT	= 11, /* Bad address */
+	I40E_AQ_RC_EBUSY	= 12, /* Device or resource busy */
+	I40E_AQ_RC_EEXIST	= 13, /* object already exists */
+	I40E_AQ_RC_EINVAL	= 14, /* Invalid argument */
+	I40E_AQ_RC_ENOTTY	= 15, /* Not a typewriter */
+	I40E_AQ_RC_ENOSPC	= 16, /* No space left or alloc failure */
+	I40E_AQ_RC_ENOSYS	= 17, /* Function not implemented */
+	I40E_AQ_RC_ERANGE	= 18, /* Parameter out of range */
+	I40E_AQ_RC_EFLUSHED	= 19, /* Cmd flushed due to prev cmd error */
+	I40E_AQ_RC_BAD_ADDR	= 20, /* Descriptor contains a bad pointer */
+	I40E_AQ_RC_EMODE	= 21, /* Op not allowed in current dev mode */
+	I40E_AQ_RC_EFBIG	= 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+	/* aq commands */
+	i40e_aqc_opc_get_version	= 0x0001,
+	i40e_aqc_opc_driver_version	= 0x0002,
+	i40e_aqc_opc_queue_shutdown	= 0x0003,
+	i40e_aqc_opc_set_pf_context	= 0x0004,
+
+	/* resource ownership */
+	i40e_aqc_opc_request_resource	= 0x0008,
+	i40e_aqc_opc_release_resource	= 0x0009,
+
+	i40e_aqc_opc_list_func_capabilities	= 0x000A,
+	i40e_aqc_opc_list_dev_capabilities	= 0x000B,
+
+	/* LAA */
+	i40e_aqc_opc_mac_address_read	= 0x0107,
+	i40e_aqc_opc_mac_address_write	= 0x0108,
+
+	/* PXE */
+	i40e_aqc_opc_clear_pxe_mode	= 0x0110,
+
+	/* internal switch commands */
+	i40e_aqc_opc_get_switch_config		= 0x0200,
+	i40e_aqc_opc_add_statistics		= 0x0201,
+	i40e_aqc_opc_remove_statistics		= 0x0202,
+	i40e_aqc_opc_set_port_parameters	= 0x0203,
+	i40e_aqc_opc_get_switch_resource_alloc	= 0x0204,
+
+	i40e_aqc_opc_add_vsi			= 0x0210,
+	i40e_aqc_opc_update_vsi_parameters	= 0x0211,
+	i40e_aqc_opc_get_vsi_parameters		= 0x0212,
+
+	i40e_aqc_opc_add_pv			= 0x0220,
+	i40e_aqc_opc_update_pv_parameters	= 0x0221,
+	i40e_aqc_opc_get_pv_parameters		= 0x0222,
+
+	i40e_aqc_opc_add_veb			= 0x0230,
+	i40e_aqc_opc_update_veb_parameters	= 0x0231,
+	i40e_aqc_opc_get_veb_parameters		= 0x0232,
+
+	i40e_aqc_opc_delete_element		= 0x0243,
+
+	i40e_aqc_opc_add_macvlan		= 0x0250,
+	i40e_aqc_opc_remove_macvlan		= 0x0251,
+	i40e_aqc_opc_add_vlan			= 0x0252,
+	i40e_aqc_opc_remove_vlan		= 0x0253,
+	i40e_aqc_opc_set_vsi_promiscuous_modes	= 0x0254,
+	i40e_aqc_opc_add_tag			= 0x0255,
+	i40e_aqc_opc_remove_tag			= 0x0256,
+	i40e_aqc_opc_add_multicast_etag		= 0x0257,
+	i40e_aqc_opc_remove_multicast_etag	= 0x0258,
+	i40e_aqc_opc_update_tag			= 0x0259,
+	i40e_aqc_opc_add_control_packet_filter	= 0x025A,
+	i40e_aqc_opc_remove_control_packet_filter	= 0x025B,
+	i40e_aqc_opc_add_cloud_filters		= 0x025C,
+	i40e_aqc_opc_remove_cloud_filters	= 0x025D,
+
+	i40e_aqc_opc_add_mirror_rule	= 0x0260,
+	i40e_aqc_opc_delete_mirror_rule	= 0x0261,
+
+	/* DCB commands */
+	i40e_aqc_opc_dcb_ignore_pfc	= 0x0301,
+	i40e_aqc_opc_dcb_updated	= 0x0302,
+
+	/* TX scheduler */
+	i40e_aqc_opc_configure_vsi_bw_limit		= 0x0400,
+	i40e_aqc_opc_configure_vsi_ets_sla_bw_limit	= 0x0406,
+	i40e_aqc_opc_configure_vsi_tc_bw		= 0x0407,
+	i40e_aqc_opc_query_vsi_bw_config		= 0x0408,
+	i40e_aqc_opc_query_vsi_ets_sla_config		= 0x040A,
+	i40e_aqc_opc_configure_switching_comp_bw_limit	= 0x0410,
+
+	i40e_aqc_opc_enable_switching_comp_ets			= 0x0413,
+	i40e_aqc_opc_modify_switching_comp_ets			= 0x0414,
+	i40e_aqc_opc_disable_switching_comp_ets			= 0x0415,
+	i40e_aqc_opc_configure_switching_comp_ets_bw_limit	= 0x0416,
+	i40e_aqc_opc_configure_switching_comp_bw_config		= 0x0417,
+	i40e_aqc_opc_query_switching_comp_ets_config		= 0x0418,
+	i40e_aqc_opc_query_port_ets_config			= 0x0419,
+	i40e_aqc_opc_query_switching_comp_bw_config		= 0x041A,
+	i40e_aqc_opc_suspend_port_tx				= 0x041B,
+	i40e_aqc_opc_resume_port_tx				= 0x041C,
+	i40e_aqc_opc_configure_partition_bw			= 0x041D,
+
+	/* hmc */
+	i40e_aqc_opc_query_hmc_resource_profile	= 0x0500,
+	i40e_aqc_opc_set_hmc_resource_profile	= 0x0501,
+
+	/* phy commands*/
+	i40e_aqc_opc_get_phy_abilities		= 0x0600,
+	i40e_aqc_opc_set_phy_config		= 0x0601,
+	i40e_aqc_opc_set_mac_config		= 0x0603,
+	i40e_aqc_opc_set_link_restart_an	= 0x0605,
+	i40e_aqc_opc_get_link_status		= 0x0607,
+	i40e_aqc_opc_set_phy_int_mask		= 0x0613,
+	i40e_aqc_opc_get_local_advt_reg		= 0x0614,
+	i40e_aqc_opc_set_local_advt_reg		= 0x0615,
+	i40e_aqc_opc_get_partner_advt		= 0x0616,
+	i40e_aqc_opc_set_lb_modes		= 0x0618,
+	i40e_aqc_opc_get_phy_wol_caps		= 0x0621,
+	i40e_aqc_opc_set_phy_debug		= 0x0622,
+	i40e_aqc_opc_upload_ext_phy_fm		= 0x0625,
+
+	/* NVM commands */
+	i40e_aqc_opc_nvm_read			= 0x0701,
+	i40e_aqc_opc_nvm_erase			= 0x0702,
+	i40e_aqc_opc_nvm_update			= 0x0703,
+	i40e_aqc_opc_nvm_config_read		= 0x0704,
+	i40e_aqc_opc_nvm_config_write		= 0x0705,
+	i40e_aqc_opc_oem_post_update		= 0x0720,
+
+	/* virtualization commands */
+	i40e_aqc_opc_send_msg_to_pf		= 0x0801,
+	i40e_aqc_opc_send_msg_to_vf		= 0x0802,
+	i40e_aqc_opc_send_msg_to_peer		= 0x0803,
+
+	/* alternate structure */
+	i40e_aqc_opc_alternate_write		= 0x0900,
+	i40e_aqc_opc_alternate_write_indirect	= 0x0901,
+	i40e_aqc_opc_alternate_read		= 0x0902,
+	i40e_aqc_opc_alternate_read_indirect	= 0x0903,
+	i40e_aqc_opc_alternate_write_done	= 0x0904,
+	i40e_aqc_opc_alternate_set_mode		= 0x0905,
+	i40e_aqc_opc_alternate_clear_port	= 0x0906,
+
+	/* LLDP commands */
+	i40e_aqc_opc_lldp_get_mib	= 0x0A00,
+	i40e_aqc_opc_lldp_update_mib	= 0x0A01,
+	i40e_aqc_opc_lldp_add_tlv	= 0x0A02,
+	i40e_aqc_opc_lldp_update_tlv	= 0x0A03,
+	i40e_aqc_opc_lldp_delete_tlv	= 0x0A04,
+	i40e_aqc_opc_lldp_stop		= 0x0A05,
+	i40e_aqc_opc_lldp_start		= 0x0A06,
+	i40e_aqc_opc_get_cee_dcb_cfg	= 0x0A07,
+	i40e_aqc_opc_lldp_set_local_mib	= 0x0A08,
+	i40e_aqc_opc_lldp_stop_start_spec_agent	= 0x0A09,
+
+	/* Tunnel commands */
+	i40e_aqc_opc_add_udp_tunnel	= 0x0B00,
+	i40e_aqc_opc_del_udp_tunnel	= 0x0B01,
+#ifdef X722_SUPPORT
+	i40e_aqc_opc_set_rss_key	= 0x0B02,
+	i40e_aqc_opc_set_rss_lut	= 0x0B03,
+	i40e_aqc_opc_get_rss_key	= 0x0B04,
+	i40e_aqc_opc_get_rss_lut	= 0x0B05,
+#endif
+
+	/* Async Events */
+	i40e_aqc_opc_event_lan_overflow		= 0x1001,
+
+	/* OEM commands */
+	i40e_aqc_opc_oem_parameter_change	= 0xFE00,
+	i40e_aqc_opc_oem_device_status_change	= 0xFE01,
+	i40e_aqc_opc_oem_ocsd_initialize	= 0xFE02,
+	i40e_aqc_opc_oem_ocbb_initialize	= 0xFE03,
+
+	/* debug commands */
+	i40e_aqc_opc_debug_read_reg		= 0xFF03,
+	i40e_aqc_opc_debug_write_reg		= 0xFF04,
+	i40e_aqc_opc_debug_modify_reg		= 0xFF07,
+	i40e_aqc_opc_debug_dump_internals	= 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+	{ i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X)	I40E_CHECK_STRUCT_LEN(16, X)
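+
+/* For instance, I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version) below expands
+ * to (sketch):
+ *
+ *	enum i40e_static_assert_enum_i40e_aqc_get_version {
+ *		i40e_static_assert_i40e_aqc_get_version =
+ *		    (16)/((sizeof(struct i40e_aqc_get_version) == (16)) ? 1 : 0)
+ *	};
+ *
+ * which only compiles when the structure is exactly 16 bytes.
+ */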
+
+/* internal (0x00XX) commands */
+
+/* Get version (direct 0x0001) */
+struct i40e_aqc_get_version {
+	__le32 rom_ver;
+	__le32 fw_build;
+	__le16 fw_major;
+	__le16 fw_minor;
+	__le16 api_major;
+	__le16 api_minor;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version);
+
+/* Send driver version (indirect 0x0002) */
+struct i40e_aqc_driver_version {
+	u8	driver_major_ver;
+	u8	driver_minor_ver;
+	u8	driver_build_ver;
+	u8	driver_subbuild_ver;
+	u8	reserved[4];
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version);
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+	__le32	driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING	0x1
+	u8	reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+	u8	pf_id;
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+#define I40E_AQ_RESOURCE_NVM			1
+#define I40E_AQ_RESOURCE_SDP			2
+#define I40E_AQ_RESOURCE_ACCESS_READ		1
+#define I40E_AQ_RESOURCE_ACCESS_WRITE		2
+#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT	3000
+#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT	180000
+
+struct i40e_aqc_request_resource {
+	__le16	resource_id;
+	__le16	access_type;
+	__le32	timeout;
+	__le32	resource_number;
+	u8	reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
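+
+/* Fill-out sketch (illustrative, not upstream code): requesting write
+ * ownership of the NVM resource with the defines above; when the firmware
+ * answers EBUSY it reports the remaining hold time back through the same
+ * timeout field:
+ *
+ *	struct i40e_aqc_request_resource *cmd =
+ *		(struct i40e_aqc_request_resource *)&desc.params.raw;
+ *	cmd->resource_id = CPU_TO_LE16(I40E_AQ_RESOURCE_NVM);
+ *	cmd->access_type = CPU_TO_LE16(I40E_AQ_RESOURCE_ACCESS_WRITE);
+ *	cmd->timeout = CPU_TO_LE32(I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT);
+ */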
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct i40e_aqc_list_capabilites {
+	u8 command_flags;
+#define I40E_AQ_LIST_CAP_PF_INDEX_EN	1
+	u8 pf_index;
+	u8 reserved[2];
+	__le32 count;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites);
+
+struct i40e_aqc_list_capabilities_element_resp {
+	__le16	id;
+	u8	major_rev;
+	u8	minor_rev;
+	__le32	number;
+	__le32	logical_id;
+	__le32	phys_id;
+	u8	reserved[16];
+};
+
+/* list of caps */
+
+#define I40E_AQ_CAP_ID_SWITCH_MODE	0x0001
+#define I40E_AQ_CAP_ID_MNG_MODE		0x0002
+#define I40E_AQ_CAP_ID_NPAR_ACTIVE	0x0003
+#define I40E_AQ_CAP_ID_OS2BMC_CAP	0x0004
+#define I40E_AQ_CAP_ID_FUNCTIONS_VALID	0x0005
+#define I40E_AQ_CAP_ID_ALTERNATE_RAM	0x0006
+#define I40E_AQ_CAP_ID_SRIOV		0x0012
+#define I40E_AQ_CAP_ID_VF		0x0013
+#define I40E_AQ_CAP_ID_VMDQ		0x0014
+#define I40E_AQ_CAP_ID_8021QBG		0x0015
+#define I40E_AQ_CAP_ID_8021QBR		0x0016
+#define I40E_AQ_CAP_ID_VSI		0x0017
+#define I40E_AQ_CAP_ID_DCB		0x0018
+#define I40E_AQ_CAP_ID_FCOE		0x0021
+#define I40E_AQ_CAP_ID_ISCSI		0x0022
+#define I40E_AQ_CAP_ID_RSS		0x0040
+#define I40E_AQ_CAP_ID_RXQ		0x0041
+#define I40E_AQ_CAP_ID_TXQ		0x0042
+#define I40E_AQ_CAP_ID_MSIX		0x0043
+#define I40E_AQ_CAP_ID_VF_MSIX		0x0044
+#define I40E_AQ_CAP_ID_FLOW_DIRECTOR	0x0045
+#define I40E_AQ_CAP_ID_1588		0x0046
+#define I40E_AQ_CAP_ID_IWARP		0x0051
+#define I40E_AQ_CAP_ID_LED		0x0061
+#define I40E_AQ_CAP_ID_SDP		0x0062
+#define I40E_AQ_CAP_ID_MDIO		0x0063
+#define I40E_AQ_CAP_ID_FLEX10		0x00F1
+#define I40E_AQ_CAP_ID_CEM		0x00F2
+
+/* Set CPPM Configuration (direct 0x0103) */
+struct i40e_aqc_cppm_configuration {
+	__le16	command_flags;
+#define I40E_AQ_CPPM_EN_LTRC	0x0800
+#define I40E_AQ_CPPM_EN_DMCTH	0x1000
+#define I40E_AQ_CPPM_EN_DMCTLX	0x2000
+#define I40E_AQ_CPPM_EN_HPTC	0x4000
+#define I40E_AQ_CPPM_EN_DMARC	0x8000
+	__le16	ttlx;
+	__le32	dmacr;
+	__le16	dmcth;
+	u8	hptc;
+	u8	reserved;
+	__le32	pfltrc;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
+
+/* Set ARP Proxy command / response (indirect 0x0104) */
+struct i40e_aqc_arp_proxy_data {
+	__le16	command_flags;
+#define I40E_AQ_ARP_INIT_IPV4	0x0008
+#define I40E_AQ_ARP_UNSUP_CTL	0x0010
+#define I40E_AQ_ARP_ENA		0x0020
+#define I40E_AQ_ARP_ADD_IPV4	0x0040
+#define I40E_AQ_ARP_DEL_IPV4	0x0080
+	__le16	table_id;
+	__le32	pfpm_proxyfc;
+	__le32	ip_addr;
+	u8	mac_addr[6];
+	u8	reserved[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data);
+
+/* Set NS Proxy Table Entry Command (indirect 0x0105) */
+struct i40e_aqc_ns_proxy_data {
+	__le16	table_idx_mac_addr_0;
+	__le16	table_idx_mac_addr_1;
+	__le16	table_idx_ipv6_0;
+	__le16	table_idx_ipv6_1;
+	__le16	control;
+#define I40E_AQ_NS_PROXY_ADD_0		0x0100
+#define I40E_AQ_NS_PROXY_DEL_0		0x0200
+#define I40E_AQ_NS_PROXY_ADD_1		0x0400
+#define I40E_AQ_NS_PROXY_DEL_1		0x0800
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0	0x1000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0	0x2000
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1	0x4000
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1	0x8000
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ	0x0001
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL	0x0002
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL	0x0004
+	u8	mac_addr_0[6];
+	u8	mac_addr_1[6];
+	u8	local_mac_addr[6];
+	u8	ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */
+	u8	ipv6_addr_1[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
+
+/* Manage LAA Command (0x0106) - obsolete */
+struct i40e_aqc_mng_laa {
+	__le16	command_flags;
+#define I40E_AQ_LAA_FLAG_WR	0x8000
+	u8	reserved[2];
+	__le32	sal;
+	__le16	sah;
+	u8	reserved2[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
+
+/* Manage MAC Address Read Command (indirect 0x0107) */
+struct i40e_aqc_mac_address_read {
+	__le16	command_flags;
+#define I40E_AQC_LAN_ADDR_VALID		0x10
+#define I40E_AQC_SAN_ADDR_VALID		0x20
+#define I40E_AQC_PORT_ADDR_VALID	0x40
+#define I40E_AQC_WOL_ADDR_VALID		0x80
+#define I40E_AQC_MC_MAG_EN_VALID	0x100
+#define I40E_AQC_ADDR_VALID_MASK	0x1F0
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read);
+
+struct i40e_aqc_mac_address_read_data {
+	u8 pf_lan_mac[6];
+	u8 pf_san_mac[6];
+	u8 port_mac[6];
+	u8 pf_wol_mac[6];
+};
+
+I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
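+
+/* Decoding sketch (illustrative): command_flags in the completed
+ * descriptor indicates which entries of the response buffer were
+ * populated, e.g.:
+ *
+ *	if (LE16_TO_CPU(cmd->command_flags) & I40E_AQC_LAN_ADDR_VALID)
+ *		memcpy(mac, data->pf_lan_mac, 6);
+ *
+ * LE16_TO_CPU is assumed to come from i40e_osdep.h.
+ */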
+
+/* Manage MAC Address Write Command (0x0108) */
+struct i40e_aqc_mac_address_write {
+	__le16	command_flags;
+#define I40E_AQC_WRITE_TYPE_LAA_ONLY	0x0000
+#define I40E_AQC_WRITE_TYPE_LAA_WOL	0x4000
+#define I40E_AQC_WRITE_TYPE_PORT	0x8000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG	0xC000
+#define I40E_AQC_WRITE_TYPE_MASK	0xC000
+
+	__le16	mac_sah;
+	__le32	mac_sal;
+	u8	reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write);
+
+/* PXE commands (0x011x) */
+
+/* Clear PXE Command and response  (direct 0x0110) */
+struct i40e_aqc_clear_pxe {
+	u8	rx_cnt;
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+
+/* Switch configuration commands (0x02xx) */
+
+/* Used by many indirect commands that only pass a SEID and a buffer in the
+ * command
+ */
+struct i40e_aqc_switch_seid {
+	__le16	seid;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid);
+
+/* Get Switch Configuration command (indirect 0x0200)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_switch_config_header_resp {
+	__le16	num_reported;
+	__le16	num_total;
+	u8	reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
+
+struct i40e_aqc_switch_config_element_resp {
+	u8	element_type;
+#define I40E_AQ_SW_ELEM_TYPE_MAC	1
+#define I40E_AQ_SW_ELEM_TYPE_PF		2
+#define I40E_AQ_SW_ELEM_TYPE_VF		3
+#define I40E_AQ_SW_ELEM_TYPE_EMP	4
+#define I40E_AQ_SW_ELEM_TYPE_BMC	5
+#define I40E_AQ_SW_ELEM_TYPE_PV		16
+#define I40E_AQ_SW_ELEM_TYPE_VEB	17
+#define I40E_AQ_SW_ELEM_TYPE_PA		18
+#define I40E_AQ_SW_ELEM_TYPE_VSI	19
+	u8	revision;
+#define I40E_AQ_SW_ELEM_REV_1		1
+	__le16	seid;
+	__le16	uplink_seid;
+	__le16	downlink_seid;
+	u8	reserved[3];
+	u8	connection_type;
+#define I40E_AQ_CONN_TYPE_REGULAR	0x1
+#define I40E_AQ_CONN_TYPE_DEFAULT	0x2
+#define I40E_AQ_CONN_TYPE_CASCADED	0x3
+	__le16	scheduler_id;
+	__le16	element_info;
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp);
+
+/* Get Switch Configuration (indirect 0x0200)
+ *    an array of elements is returned in the response buffer;
+ *    the first entry is the header, the remainder are the elements
+ */
+struct i40e_aqc_get_switch_config_resp {
+	struct i40e_aqc_get_switch_config_header_resp	header;
+	struct i40e_aqc_switch_config_element_resp	element[1];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp);
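+
+/* Parsing sketch (illustrative): element[] is in effect a flexible array
+ * bounded by header.num_reported, so a walker looks like:
+ *
+ *	struct i40e_aqc_get_switch_config_resp *resp = buf;
+ *	u16 i, n = LE16_TO_CPU(resp->header.num_reported);
+ *
+ *	for (i = 0; i < n; i++)
+ *		handle_element(&resp->element[i]);
+ *
+ * handle_element() is a placeholder for driver code.
+ */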
+
+/* Add Statistics (direct 0x0201)
+ * Remove Statistics (direct 0x0202)
+ */
+struct i40e_aqc_add_remove_statistics {
+	__le16	seid;
+	__le16	vlan;
+	__le16	stat_index;
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
+
+/* Set Port Parameters command (direct 0x0203) */
+struct i40e_aqc_set_port_parameters {
+	__le16	command_flags;
+#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS	1
+#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS	2 /* must set! */
+#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA	4
+	__le16	bad_frame_vsi;
+	__le16	default_seid;        /* reserved for command */
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters);
+
+/* Get Switch Resource Allocation (indirect 0x0204) */
+struct i40e_aqc_get_switch_resource_alloc {
+	u8	num_entries;         /* reserved for command */
+	u8	reserved[7];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
+
+/* expect an array of these structs in the response buffer */
+struct i40e_aqc_switch_resource_alloc_element_resp {
+	u8	resource_type;
+#define I40E_AQ_RESOURCE_TYPE_VEB		0x0
+#define I40E_AQ_RESOURCE_TYPE_VSI		0x1
+#define I40E_AQ_RESOURCE_TYPE_MACADDR		0x2
+#define I40E_AQ_RESOURCE_TYPE_STAG		0x3
+#define I40E_AQ_RESOURCE_TYPE_ETAG		0x4
+#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH	0x5
+#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH	0x6
+#define I40E_AQ_RESOURCE_TYPE_VLAN		0x7
+#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY	0x8
+#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY	0x9
+#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL	0xA
+#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE	0xB
+#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS	0xC
+#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS	0xD
+#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS	0xF
+#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS	0x10
+#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS	0x11
+#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS		0x12
+#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS	0x13
+	u8	reserved1;
+	__le16	guaranteed;
+	__le16	total;
+	__le16	used;
+	__le16	total_unalloced;
+	u8	reserved2[6];
+};
+
+I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
+
+/* Add VSI (indirect 0x0210)
+ *    this indirect command uses struct i40e_aqc_vsi_properties_data
+ *    as the indirect buffer (128 bytes)
+ *
+ * Update VSI (indirect 0x0211)
+ *     uses the same data structure as Add VSI
+ *
+ * Get VSI (indirect 0x0212)
+ *     uses the same completion and data structure as Add VSI
+ */
+struct i40e_aqc_add_get_update_vsi {
+	__le16	uplink_seid;
+	u8	connection_type;
+#define I40E_AQ_VSI_CONN_TYPE_NORMAL	0x1
+#define I40E_AQ_VSI_CONN_TYPE_DEFAULT	0x2
+#define I40E_AQ_VSI_CONN_TYPE_CASCADED	0x3
+	u8	reserved1;
+	u8	vf_id;
+	u8	reserved2;
+	__le16	vsi_flags;
+#define I40E_AQ_VSI_TYPE_SHIFT		0x0
+#define I40E_AQ_VSI_TYPE_MASK		(0x3 << I40E_AQ_VSI_TYPE_SHIFT)
+#define I40E_AQ_VSI_TYPE_VF		0x0
+#define I40E_AQ_VSI_TYPE_VMDQ2		0x1
+#define I40E_AQ_VSI_TYPE_PF		0x2
+#define I40E_AQ_VSI_TYPE_EMP_MNG	0x3
+#define I40E_AQ_VSI_FLAG_CASCADED_PV	0x4
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi);
+
+struct i40e_aqc_add_get_update_vsi_completion {
+	__le16 seid;
+	__le16 vsi_number;
+	__le16 vsi_used;
+	__le16 vsi_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion);
+
+struct i40e_aqc_vsi_properties_data {
+	/* first 96 bytes are written by SW */
+	__le16	valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID		0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID		0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID		0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID		0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID	0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID	0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID	0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID	0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID		0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID		0x0200
+	/* switch section */
+	__le16	switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT		0x0000
+#define I40E_AQ_VSI_SW_ID_MASK		(0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG	0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB	0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB	0x4000
+	u8	sw_reserved[2];
+	/* security section */
+	u8	sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD	0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK	0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK	0x04
+	u8	sec_reserved;
+	/* VLAN section */
+	__le16	pvid; /* VLANS include priority bits */
+	__le16	fcoe_pvid;
+	u8	port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT	0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK	(0x03 << \
+					 I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED	0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED	0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL	0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID	0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT	0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK	(0x3 << \
+					 I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH	0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP	0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR	0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING	0x18
+	u8	pvlan_reserved[3];
+	/* ingress egress up sections */
+	__le32	ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT	0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT	3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT	6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT	9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT	12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT	15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT	18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT	21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK	(0x7 << \
+					 I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+	__le32	egress_table;   /* same defines as for ingress table */
+	/* cascaded PV section */
+	__le16	cas_pv_tag;
+	u8	cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK		(0x03 << \
+						 I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE		0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE		0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY		0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG		0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE		0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG	0x40
+	u8	cas_pv_reserved;
+	/* queue mapping section */
+	__le16	mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG	0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG	0x1
+	__le16	queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT		0x0
+#define I40E_AQ_VSI_QUEUE_MASK		(0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+	__le16	tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT	0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK	(0x1FF << \
+					 I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT	9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK	(0x7 << \
+					 I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+	/* queueing option section */
+	u8	queueing_opt_flags;
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA	0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA	0x08
+#endif
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA	0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA	0x20
+#ifdef X722_SUPPORT
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF	0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI	0x40
+#endif
+	u8	queueing_opt_reserved[3];
+	/* scheduler section */
+	u8	up_enable_bits;
+	u8	sched_reserved;
+	/* outer up section */
+	__le32	outer_up_table; /* same structure and defines as ingress table */
+	u8	cmd_reserved[8];
+	/* last 32 bytes are written by FW */
+	__le16	qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID	0xFFFF
+	__le16	stat_counter_idx;
+	__le16	sched_id;
+	u8	resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
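+
+/* Fill-out sketch (illustrative): software sets a bit in valid_sections
+ * for each section it fills and the firmware ignores the rest, e.g. to
+ * leave VLAN tags untouched on a VSI:
+ *
+ *	struct i40e_aqc_vsi_properties_data ctxt = {0};
+ *
+ *	ctxt.valid_sections = CPU_TO_LE16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ *	ctxt.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ *			       I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+ */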
+
+/* Add Port Virtualizer (direct 0x0220)
+ * also used for update PV (direct 0x0221) but only flags are used
+ * (IS_CTRL_PORT only works on add PV)
+ */
+struct i40e_aqc_add_update_pv {
+	__le16	command_flags;
+#define I40E_AQC_PV_FLAG_PV_TYPE		0x1
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN	0x2
+#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN	0x4
+#define I40E_AQC_PV_FLAG_IS_CTRL_PORT		0x8
+	__le16	uplink_seid;
+	__le16	connected_seid;
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
+
+struct i40e_aqc_add_update_pv_completion {
+	/* reserved for update; for add also encodes error if rc == ENOSPC */
+	__le16	pv_seid;
+#define I40E_AQC_PV_ERR_FLAG_NO_PV	0x1
+#define I40E_AQC_PV_ERR_FLAG_NO_SCHED	0x2
+#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER	0x4
+#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY	0x8
+	u8	reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion);
+
+/* Get PV Params (direct 0x0222)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+
+struct i40e_aqc_get_pv_params_completion {
+	__le16	seid;
+	__le16	default_stag;
+	__le16	pv_flags; /* same flags as add_pv */
+#define I40E_AQC_GET_PV_PV_TYPE			0x1
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG	0x2
+#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG	0x4
+	u8	reserved[8];
+	__le16	default_port_seid;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion);
+
+/* Add VEB (direct 0x0230) */
+struct i40e_aqc_add_veb {
+	__le16	uplink_seid;
+	__le16	downlink_seid;
+	__le16	veb_flags;
+#define I40E_AQC_ADD_VEB_FLOATING		0x1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT	1
+#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK		(0x3 << \
+					I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT	0x2
+#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA		0x4
+#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER	0x8
+	u8	enable_tcs;
+	u8	reserved[9];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb);
+
+struct i40e_aqc_add_veb_completion {
+	u8	reserved[6];
+	__le16	switch_seid;
+	/* also encodes error if rc == ENOSPC; codes are the same as add_pv */
+	__le16	veb_seid;
+#define I40E_AQC_VEB_ERR_FLAG_NO_VEB		0x1
+#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED		0x2
+#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER	0x4
+#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY		0x8
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+	__le16	seid;
+	__le16	switch_id;
+	__le16	veb_flags; /* only the first/last flags from 0x0230 are valid */
+	__le16	statistic_index;
+	__le16	vebs_used;
+	__le16	vebs_free;
+	u8	reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+/* Delete Element (direct 0x0243)
+ * uses the generic i40e_aqc_switch_seid
+ */
+
+/* Add MAC-VLAN (indirect 0x0250) */
+
+/* used as the command descriptor for most VLAN commands */
+struct i40e_aqc_macvlan {
+	__le16	num_addresses;
+	__le16	seid[3];
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK	(0x3FF << \
+					I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
+#define I40E_AQC_MACVLAN_CMD_SEID_VALID		0x8000
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan);
+
+/* indirect data for command and response */
+struct i40e_aqc_add_macvlan_element_data {
+	u8	mac_addr[6];
+	__le16	vlan_tag;
+	__le16	flags;
+#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH	0x0001
+#define I40E_AQC_MACVLAN_ADD_HASH_MATCH		0x0002
+#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN	0x0004
+#define I40E_AQC_MACVLAN_ADD_TO_QUEUE		0x0008
+	__le16	queue_number;
+#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT	0
+#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK		(0x7FF << \
+					I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT)
+	/* response section */
+	u8	match_method;
+#define I40E_AQC_MM_PERFECT_MATCH	0x01
+#define I40E_AQC_MM_HASH_MATCH		0x02
+#define I40E_AQC_MM_ERR_NO_RES		0xFF
+	u8	reserved1[3];
+};
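+
+/* Element sketch (illustrative): one perfect-match filter for a known
+ * MAC on VLAN 100; `mac` is a placeholder for the caller's address:
+ *
+ *	struct i40e_aqc_add_macvlan_element_data e = {0};
+ *
+ *	memcpy(e.mac_addr, mac, 6);
+ *	e.vlan_tag = CPU_TO_LE16(100);
+ *	e.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ */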
+
+struct i40e_aqc_add_remove_macvlan_completion {
+	__le16 perfect_mac_used;
+	__le16 perfect_mac_free;
+	__le16 unicast_hash_free;
+	__le16 multicast_hash_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion);
+
+/* Remove MAC-VLAN (indirect 0x0251)
+ * uses i40e_aqc_macvlan for the descriptor
+ * data points to an array of num_addresses of elements
+ */
+
+struct i40e_aqc_remove_macvlan_element_data {
+	u8	mac_addr[6];
+	__le16	vlan_tag;
+	u8	flags;
+#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH	0x01
+#define I40E_AQC_MACVLAN_DEL_HASH_MATCH		0x02
+#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN	0x08
+#define I40E_AQC_MACVLAN_DEL_ALL_VSIS		0x10
+	u8	reserved[3];
+	/* reply section */
+	u8	error_code;
+#define I40E_AQC_REMOVE_MACVLAN_SUCCESS		0x0
+#define I40E_AQC_REMOVE_MACVLAN_FAIL		0xFF
+	u8	reply_reserved[3];
+};
+
+/* Add VLAN (indirect 0x0252)
+ * Remove VLAN (indirect 0x0253)
+ * use the generic i40e_aqc_macvlan for the command
+ */
+struct i40e_aqc_add_remove_vlan_element_data {
+	__le16	vlan_tag;
+	u8	vlan_flags;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_LOCAL			0x1
+#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT		1
+#define I40E_AQC_ADD_PVLAN_TYPE_MASK	(0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
+#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR		0x0
+#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY		0x2
+#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY	0x4
+#define I40E_AQC_VLAN_PTYPE_SHIFT		3
+#define I40E_AQC_VLAN_PTYPE_MASK	(0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
+#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI		0x0
+#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI		0x8
+#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI	0x10
+#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI	0x18
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_ALL	0x1
+	u8	reserved;
+	u8	result;
+/* flags for add VLAN */
+#define I40E_AQC_ADD_VLAN_SUCCESS	0x0
+#define I40E_AQC_ADD_VLAN_FAIL_REQUEST	0xFE
+#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE	0xFF
+/* flags for remove VLAN */
+#define I40E_AQC_REMOVE_VLAN_SUCCESS	0x0
+#define I40E_AQC_REMOVE_VLAN_FAIL	0xFF
+	u8	reserved1[3];
+};
+
+struct i40e_aqc_add_remove_vlan_completion {
+	u8	reserved[4];
+	__le16	vlans_used;
+	__le16	vlans_free;
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+/* Set VSI Promiscuous Modes (direct 0x0254) */
+struct i40e_aqc_set_vsi_promiscuous_modes {
+	__le16	promiscuous_flags;
+	__le16	valid_flags;
+/* flags used for both fields above */
+#define I40E_AQC_SET_VSI_PROMISC_UNICAST	0x01
+#define I40E_AQC_SET_VSI_PROMISC_MULTICAST	0x02
+#define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
+#define I40E_AQC_SET_VSI_DEFAULT		0x08
+#define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
+	__le16	seid;
+#define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
+	__le16	vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK		0x0FFF
+#define I40E_AQC_SET_VSI_VLAN_VALID		0x8000
+	u8	reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
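+
+/* Usage sketch (illustrative): valid_flags selects which bits of
+ * promiscuous_flags the firmware should apply, so unicast promiscuous
+ * can be toggled without disturbing the other modes:
+ *
+ *	cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ *	cmd->seid = CPU_TO_LE16(seid & I40E_AQC_VSI_PROM_CMD_SEID_MASK);
+ */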
+
+/* Add S/E-tag command (direct 0x0255)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_add_tag {
+	__le16	flags;
+#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE		0x0001
+	__le16	seid;
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
+					I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
+	__le16	tag;
+	__le16	queue_number;
+	u8	reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag);
+
+struct i40e_aqc_add_remove_tag_completion {
+	u8	reserved[12];
+	__le16	tags_used;
+	__le16	tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
+
+/* Remove S/E-tag command (direct 0x0256)
+ * Uses generic i40e_aqc_add_remove_tag_completion for completion
+ */
+struct i40e_aqc_remove_tag {
+	__le16	seid;
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
+					I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
+	__le16	tag;
+	u8	reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag);
+
+/* Add multicast E-Tag (direct 0x0257)
+ * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields
+ * and no external data
+ */
+struct i40e_aqc_add_remove_mcast_etag {
+	__le16	pv_seid;
+	__le16	etag;
+	u8	num_unicast_etags;
+	u8	reserved[3];
+	__le32	addr_high;          /* address of array of 2-byte s-tags */
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag);
+
+struct i40e_aqc_add_remove_mcast_etag_completion {
+	u8	reserved[4];
+	__le16	mcast_etags_used;
+	__le16	mcast_etags_free;
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
+
+/* Update S/E-Tag (direct 0x0259) */
+struct i40e_aqc_update_tag {
+	__le16	seid;
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK	(0x3FF << \
+					I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
+	__le16	old_tag;
+	__le16	new_tag;
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag);
+
+struct i40e_aqc_update_tag_completion {
+	u8	reserved[12];
+	__le16	tags_used;
+	__le16	tags_free;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion);
+
+/* Add Control Packet filter (direct 0x025A)
+ * Remove Control Packet filter (direct 0x025B)
+ * uses the i40e_aqc_add_remove_control_packet_filter struct below,
+ * and the generic direct completion structure
+ */
+struct i40e_aqc_add_remove_control_packet_filter {
+	u8	mac[6];
+	__le16	etype;
+	__le16	flags;
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC	0x0001
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP		0x0002
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE	0x0004
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX		0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX		0x0000
+	__le16	seid;
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK	(0x3FF << \
+				I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
+	__le16	queue;
+	u8	reserved[2];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter);
+
+struct i40e_aqc_add_remove_control_packet_filter_completion {
+	__le16	mac_etype_used;
+	__le16	etype_used;
+	__le16	mac_etype_free;
+	__le16	etype_free;
+	u8	reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion);
+
+/* Add Cloud filters (indirect 0x025C)
+ * Remove Cloud filters (indirect 0x025D)
+ * uses the i40e_aqc_add_remove_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_aqc_add_remove_cloud_filters {
+	u8	num_filters;
+	u8	reserved;
+	__le16	seid;
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT	0
+#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK	(0x3FF << \
+					I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
+	u8	reserved2[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
+
+struct i40e_aqc_add_remove_cloud_filters_element_data {
+	u8	outer_mac[6];
+	u8	inner_mac[6];
+	__le16	inner_vlan;
+	union {
+		struct {
+			u8 reserved[12];
+			u8 data[4];
+		} v4;
+		struct {
+			u8 data[16];
+		} v6;
+	} ipaddr;
+	__le16	flags;
+#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT			0
+#define I40E_AQC_ADD_CLOUD_FILTER_MASK	(0x3F << \
+					I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
+/* 0x0000 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OIP			0x0001
+/* 0x0002 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN		0x0003
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID	0x0004
+/* 0x0005 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID		0x0006
+/* 0x0007 reserved */
+/* 0x0008 reserved */
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC			0x0009
+#define I40E_AQC_ADD_CLOUD_FILTER_IMAC			0x000A
+#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC	0x000B
+#define I40E_AQC_ADD_CLOUD_FILTER_IIP			0x000C
+
+#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE		0x0080
+#define I40E_AQC_ADD_CLOUD_VNK_SHIFT			6
+#define I40E_AQC_ADD_CLOUD_VNK_MASK			0x00C0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4			0
+#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6			0x0100
+
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT		9
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK		0x1E00
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN		0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC		1
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE			2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP			3
+
+	__le32	tenant_id;
+	u8	reserved[4];
+	__le16	queue_number;
+#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT		0
+#define I40E_AQC_ADD_CLOUD_QUEUE_MASK		(0x7FF << \
+						 I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
+	u8	reserved2[14];
+	/* response section */
+	u8	allocation_result;
+#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS	0x0
+#define I40E_AQC_ADD_CLOUD_FILTER_FAIL		0xFF
+	u8	response_reserved[7];
+};
+
+struct i40e_aqc_remove_cloud_filters_completion {
+	__le16 perfect_ovlan_used;
+	__le16 perfect_ovlan_free;
+	__le16 vlan_used;
+	__le16 vlan_free;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+
+/* Add Mirror Rule (indirect or direct 0x0260)
+ * Delete Mirror Rule (indirect or direct 0x0261)
+ * note: some rule types (4,5) do not use an external buffer.
+ *       take care to set the flags correctly.
+ */
+struct i40e_aqc_add_delete_mirror_rule {
+	__le16 seid;
+	__le16 rule_type;
+#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT		0
+#define I40E_AQC_MIRROR_RULE_TYPE_MASK		(0x7 << \
+						I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS	1
+#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS	2
+#define I40E_AQC_MIRROR_RULE_TYPE_VLAN		3
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS	4
+#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS	5
+	__le16 num_entries;
+	__le16 destination;  /* VSI for add, rule id for delete */
+	__le32 addr_high;    /* address of array of 2-byte VSI or VLAN ids */
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule);
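+
+/* Fill-out sketch (illustrative): a VLAN mirror rule (type 3) passes its
+ * VLAN ids in an external buffer of num_entries __le16 values, whereas an
+ * all-ingress rule (type 4) is purely direct and must not set the buffer
+ * flags:
+ *
+ *	cmd->rule_type = CPU_TO_LE16(I40E_AQC_MIRROR_RULE_TYPE_VLAN);
+ *	cmd->num_entries = CPU_TO_LE16(nvlans);
+ *	cmd->destination = CPU_TO_LE16(dst_vsi_seid);
+ */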
+
+struct i40e_aqc_add_delete_mirror_rule_completion {
+	u8	reserved[2];
+	__le16	rule_id;  /* only used on add */
+	__le16	mirror_rules_used;
+	__le16	mirror_rules_free;
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+
+/* DCB 0x03xx */
+
+/* PFC Ignore (direct 0x0301)
+ *    the command and response use the same descriptor structure
+ */
+struct i40e_aqc_pfc_ignore {
+	u8	tc_bitmap;
+	u8	command_flags; /* unused on response */
+#define I40E_AQC_PFC_IGNORE_SET		0x80
+#define I40E_AQC_PFC_IGNORE_CLEAR	0x0
+	u8	reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore);
+
+/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure
+ * with no parameters
+ */
+
+/* TX scheduler 0x04xx */
+
+/* Almost all the indirect commands use
+ * this generic struct to pass the SEID in param0
+ */
+struct i40e_aqc_tx_sched_ind {
+	__le16	vsi_seid;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind);
+
+/* Several commands respond with a set of queue set handles */
+struct i40e_aqc_qs_handles_resp {
+	__le16 qs_handles[8];
+};
+
+/* Configure VSI BW limits (direct 0x0400) */
+struct i40e_aqc_configure_vsi_bw_limit {
+	__le16	vsi_seid;
+	u8	reserved[2];
+	__le16	credit;
+	u8	reserved1[2];
+	u8	max_credit; /* 0-3, limit = 2^max */
+	u8	reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit);
+
+/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_ets_sla_bw_data {
+	u8	tc_valid_bits;
+	u8	reserved[15];
+	__le16	tc_bw_credits[8]; /* FW writes back QS handles here */
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16	tc_bw_max[2];
+	u8	reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_vsi_ets_sla_bw_data);
+
+/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407)
+ *    responds with i40e_aqc_qs_handles_resp
+ */
+struct i40e_aqc_configure_vsi_tc_bw_data {
+	u8	tc_valid_bits;
+	u8	reserved[3];
+	u8	tc_bw_credits[8];
+	u8	reserved1[4];
+	__le16	qs_handles[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data);
+
+/* Query vsi bw configuration (indirect 0x0408) */
+struct i40e_aqc_query_vsi_bw_config_resp {
+	u8	tc_valid_bits;
+	u8	tc_suspended_bits;
+	u8	reserved[14];
+	__le16	qs_handles[8];
+	u8	reserved1[4];
+	__le16	port_bw_limit;
+	u8	reserved2[2];
+	u8	max_bw; /* 0-3, limit = 2^max */
+	u8	reserved3[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp);
+
+/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */
+struct i40e_aqc_query_vsi_ets_sla_config_resp {
+	u8	tc_valid_bits;
+	u8	reserved[3];
+	u8	share_credits[8];
+	__le16	credits[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16	tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp);
+
+/* Configure Switching Component Bandwidth Limit (direct 0x0410) */
+struct i40e_aqc_configure_switching_comp_bw_limit {
+	__le16	seid;
+	u8	reserved[2];
+	__le16	credit;
+	u8	reserved1[2];
+	u8	max_bw; /* 0-3, limit = 2^max */
+	u8	reserved2[7];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
+
+/* Enable  Physical Port ETS (indirect 0x0413)
+ * Modify  Physical Port ETS (indirect 0x0414)
+ * Disable Physical Port ETS (indirect 0x0415)
+ */
+struct i40e_aqc_configure_switching_comp_ets_data {
+	u8	reserved[4];
+	u8	tc_valid_bits;
+	u8	seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK	0x1
+	u8	tc_strict_priority_flags;
+	u8	reserved1[17];
+	u8	tc_bw_share_credits[8];
+	u8	reserved2[96];
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data);
+
+/* Configure Switching Component Bandwidth Limits per TC (indirect 0x0416) */
+struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
+	u8	tc_valid_bits;
+	u8	reserved[15];
+	__le16	tc_bw_credit[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16	tc_bw_max[2];
+	u8	reserved1[28];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+
+/* Configure Switching Component Bandwidth Allocation per TC
+ * (indirect 0x0417)
+ */
+struct i40e_aqc_configure_switching_comp_bw_config_data {
+	u8	tc_valid_bits;
+	u8	reserved[2];
+	u8	absolute_credits; /* bool */
+	u8	tc_bw_share_credits[8];
+	u8	reserved1[20];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data);
+
+/* Query Switching Component Configuration (indirect 0x0418) */
+struct i40e_aqc_query_switching_comp_ets_config_resp {
+	u8	tc_valid_bits;
+	u8	reserved[35];
+	__le16	port_bw_limit;
+	u8	reserved1[2];
+	u8	tc_bw_max; /* 0-3, limit = 2^max */
+	u8	reserved2[23];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp);
+
+/* Query Physical Port ETS Configuration (indirect 0x0419) */
+struct i40e_aqc_query_port_ets_config_resp {
+	u8	reserved[4];
+	u8	tc_valid_bits;
+	u8	reserved1;
+	u8	tc_strict_priority_bits;
+	u8	reserved2;
+	u8	tc_bw_share_credits[8];
+	__le16	tc_bw_limits[8];
+
+	/* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */
+	__le16	tc_bw_max[2];
+	u8	reserved3[32];
+};
+
+I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp);
+
+/* Query Switching Component Bandwidth Allocation per Traffic Type
+ * (indirect 0x041A)
+ */
+struct i40e_aqc_query_switching_comp_bw_config_resp {
+	u8	tc_valid_bits;
+	u8	reserved[2];
+	u8	absolute_credits_enable; /* bool */
+	u8	tc_bw_share_credits[8];
+	__le16	tc_bw_limits[8];
+
+	/* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */
+	__le16	tc_bw_max[2];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp);
+
+/* Suspend/resume port TX traffic
+ * (direct 0x041B and 0x041C) uses the generic SEID struct
+ */
+
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+	__le16	pf_valid_bits;
+	u8	min_bw[16];      /* guaranteed bandwidth */
+	u8	max_bw[16];      /* bandwidth limit */
+};
+
+I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+	u8	pm_profile;
+	u8	pe_vf_enabled;
+	u8	reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+	/* I40E_HMC_PROFILE_NO_CHANGE    = 0, reserved */
+	I40E_HMC_PROFILE_DEFAULT	= 1,
+	I40E_HMC_PROFILE_FAVOR_VF	= 2,
+	I40E_HMC_PROFILE_EQUAL		= 3,
+};
+
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK	0xF
+#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK	0x3F
+
+/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
+
+/* set in param0 for get phy abilities to report qualified modules */
+#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES	0x0001
+#define I40E_AQ_PHY_REPORT_INITIAL_VALUES	0x0002
+
+enum i40e_aq_phy_type {
+	I40E_PHY_TYPE_SGMII			= 0x0,
+	I40E_PHY_TYPE_1000BASE_KX		= 0x1,
+	I40E_PHY_TYPE_10GBASE_KX4		= 0x2,
+	I40E_PHY_TYPE_10GBASE_KR		= 0x3,
+	I40E_PHY_TYPE_40GBASE_KR4		= 0x4,
+	I40E_PHY_TYPE_XAUI			= 0x5,
+	I40E_PHY_TYPE_XFI			= 0x6,
+	I40E_PHY_TYPE_SFI			= 0x7,
+	I40E_PHY_TYPE_XLAUI			= 0x8,
+	I40E_PHY_TYPE_XLPPI			= 0x9,
+	I40E_PHY_TYPE_40GBASE_CR4_CU		= 0xA,
+	I40E_PHY_TYPE_10GBASE_CR1_CU		= 0xB,
+	I40E_PHY_TYPE_10GBASE_AOC		= 0xC,
+	I40E_PHY_TYPE_40GBASE_AOC		= 0xD,
+	I40E_PHY_TYPE_100BASE_TX		= 0x11,
+	I40E_PHY_TYPE_1000BASE_T		= 0x12,
+	I40E_PHY_TYPE_10GBASE_T			= 0x13,
+	I40E_PHY_TYPE_10GBASE_SR		= 0x14,
+	I40E_PHY_TYPE_10GBASE_LR		= 0x15,
+	I40E_PHY_TYPE_10GBASE_SFPP_CU		= 0x16,
+	I40E_PHY_TYPE_10GBASE_CR1		= 0x17,
+	I40E_PHY_TYPE_40GBASE_CR4		= 0x18,
+	I40E_PHY_TYPE_40GBASE_SR4		= 0x19,
+	I40E_PHY_TYPE_40GBASE_LR4		= 0x1A,
+	I40E_PHY_TYPE_1000BASE_SX		= 0x1B,
+	I40E_PHY_TYPE_1000BASE_LX		= 0x1C,
+	I40E_PHY_TYPE_1000BASE_T_OPTICAL	= 0x1D,
+	I40E_PHY_TYPE_20GBASE_KR2		= 0x1E,
+	I40E_PHY_TYPE_MAX
+};
+
+#define I40E_LINK_SPEED_100MB_SHIFT	0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT	0x2
+#define I40E_LINK_SPEED_10GB_SHIFT	0x3
+#define I40E_LINK_SPEED_40GB_SHIFT	0x4
+#define I40E_LINK_SPEED_20GB_SHIFT	0x5
+
+enum i40e_aq_link_speed {
+	I40E_LINK_SPEED_UNKNOWN	= 0,
+	I40E_LINK_SPEED_100MB	= (1 << I40E_LINK_SPEED_100MB_SHIFT),
+	I40E_LINK_SPEED_1GB	= (1 << I40E_LINK_SPEED_1000MB_SHIFT),
+	I40E_LINK_SPEED_10GB	= (1 << I40E_LINK_SPEED_10GB_SHIFT),
+	I40E_LINK_SPEED_40GB	= (1 << I40E_LINK_SPEED_40GB_SHIFT),
+	I40E_LINK_SPEED_20GB	= (1 << I40E_LINK_SPEED_20GB_SHIFT)
+};
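+
+/* Conversion sketch (illustrative): link_speed is a one-hot bitmap, so a
+ * report helper switches on it rather than shifting; values in Mb/s:
+ *
+ *	switch (link_speed) {
+ *	case I40E_LINK_SPEED_40GB:  return 40000;
+ *	case I40E_LINK_SPEED_20GB:  return 20000;
+ *	case I40E_LINK_SPEED_10GB:  return 10000;
+ *	case I40E_LINK_SPEED_1GB:   return 1000;
+ *	case I40E_LINK_SPEED_100MB: return 100;
+ *	default:                    return 0;
+ *	}
+ */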
+
+struct i40e_aqc_module_desc {
+	u8 oui[3];
+	u8 reserved1;
+	u8 part_number[16];
+	u8 revision[4];
+	u8 reserved2[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc);
+
+struct i40e_aq_get_phy_abilities_resp {
+	__le32	phy_type;       /* bitmap using the above enum for offsets */
+	u8	link_speed;     /* bitmap using the above enum bit patterns */
+	u8	abilities;
+#define I40E_AQ_PHY_FLAG_PAUSE_TX	0x01
+#define I40E_AQ_PHY_FLAG_PAUSE_RX	0x02
+#define I40E_AQ_PHY_FLAG_LOW_POWER	0x04
+#define I40E_AQ_PHY_LINK_ENABLED	0x08
+#define I40E_AQ_PHY_AN_ENABLED		0x10
+#define I40E_AQ_PHY_FLAG_MODULE_QUAL	0x20
+	__le16	eee_capability;
+#define I40E_AQ_EEE_100BASE_TX		0x0002
+#define I40E_AQ_EEE_1000BASE_T		0x0004
+#define I40E_AQ_EEE_10GBASE_T		0x0008
+#define I40E_AQ_EEE_1000BASE_KX		0x0010
+#define I40E_AQ_EEE_10GBASE_KX4		0x0020
+#define I40E_AQ_EEE_10GBASE_KR		0x0040
+	__le32	eeer_val;
+	u8	d3_lpan;
+#define I40E_AQ_SET_PHY_D3_LPAN_ENA	0x01
+	u8	reserved[3];
+	u8	phy_id[4];
+	u8	module_type[3];
+	u8	qualified_module_count;
+#define I40E_AQ_PHY_MAX_QMS		16
+	struct i40e_aqc_module_desc	qualified_module[I40E_AQ_PHY_MAX_QMS];
+};
+
+I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp);
+
+/* Set PHY Config (direct 0x0601) */
+struct i40e_aq_set_phy_config { /* same bits as in get_phy_abilities_resp */
+	__le32	phy_type;
+	u8	link_speed;
+	u8	abilities;
+/* bits 0-2 use the values from get_phy_abilities_resp */
+#define I40E_AQ_PHY_ENABLE_LINK		0x08
+#define I40E_AQ_PHY_ENABLE_AN		0x10
+#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK	0x20
+	__le16	eee_capability;
+	__le32	eeer;
+	u8	low_power_ctrl;
+	u8	reserved[3];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
+
+/* Set MAC Config command data structure (direct 0x0603) */
+struct i40e_aq_set_mac_config {
+	__le16	max_frame_size;
+	u8	params;
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN		0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK	0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT	3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE	0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX	0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX	0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX	0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX	0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX	0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX	0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX	0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX	0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX	0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX	0x1
+	u8	tx_timer_priority; /* bitmap */
+	__le16	tx_timer_value;
+	__le16	fc_refresh_threshold;
+	u8	reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config);
+
+/* Restart Auto-Negotiation (direct 0x0605) */
+struct i40e_aqc_set_link_restart_an {
+	u8	command;
+#define I40E_AQ_PHY_RESTART_AN	0x02
+#define I40E_AQ_PHY_LINK_ENABLE	0x04
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
+
+/* Get Link Status cmd & response data structure (direct 0x0607) */
+struct i40e_aqc_get_link_status {
+	__le16	command_flags; /* only field set on command */
+#define I40E_AQ_LSE_MASK		0x3
+#define I40E_AQ_LSE_NOP			0x0
+#define I40E_AQ_LSE_DISABLE		0x2
+#define I40E_AQ_LSE_ENABLE		0x3
+/* only response uses this flag */
+#define I40E_AQ_LSE_IS_ENABLED		0x1
+	u8	phy_type;    /* i40e_aq_phy_type   */
+	u8	link_speed;  /* i40e_aq_link_speed */
+	u8	link_info;
+#define I40E_AQ_LINK_UP			0x01    /* obsolete */
+#define I40E_AQ_LINK_UP_FUNCTION	0x01
+#define I40E_AQ_LINK_FAULT		0x02
+#define I40E_AQ_LINK_FAULT_TX		0x04
+#define I40E_AQ_LINK_FAULT_RX		0x08
+#define I40E_AQ_LINK_FAULT_REMOTE	0x10
+#define I40E_AQ_LINK_UP_PORT		0x20
+#define I40E_AQ_MEDIA_AVAILABLE		0x40
+#define I40E_AQ_SIGNAL_DETECT		0x80
+	u8	an_info;
+#define I40E_AQ_AN_COMPLETED		0x01
+#define I40E_AQ_LP_AN_ABILITY		0x02
+#define I40E_AQ_PD_FAULT		0x04
+#define I40E_AQ_FEC_EN			0x08
+#define I40E_AQ_PHY_LOW_POWER		0x10
+#define I40E_AQ_LINK_PAUSE_TX		0x20
+#define I40E_AQ_LINK_PAUSE_RX		0x40
+#define I40E_AQ_QUALIFIED_MODULE	0x80
+	u8	ext_info;
+#define I40E_AQ_LINK_PHY_TEMP_ALARM	0x01
+#define I40E_AQ_LINK_XCESSIVE_ERRORS	0x02
+#define I40E_AQ_LINK_TX_SHIFT		0x02
+#define I40E_AQ_LINK_TX_MASK		(0x03 << I40E_AQ_LINK_TX_SHIFT)
+#define I40E_AQ_LINK_TX_ACTIVE		0x00
+#define I40E_AQ_LINK_TX_DRAINED		0x01
+#define I40E_AQ_LINK_TX_FLUSHED		0x03
+#define I40E_AQ_LINK_FORCED_40G		0x10
+	u8	loopback; /* use defines from i40e_aqc_set_lb_mode */
+	__le16	max_frame_size;
+	u8	config;
+#define I40E_AQ_CONFIG_CRC_ENA		0x04
+#define I40E_AQ_CONFIG_PACING_MASK	0x78
+	u8	reserved[5];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
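+
+/* Decoding sketch (illustrative): a driver typically reports the link as
+ * usable when the UP bit is set and warns separately when no media is
+ * present:
+ *
+ *	link_up = !!(status->link_info & I40E_AQ_LINK_UP_FUNCTION);
+ *	if (!(status->link_info & I40E_AQ_MEDIA_AVAILABLE))
+ *		warn_no_media();
+ *
+ * warn_no_media() is a placeholder for driver code.
+ */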
+
+/* Set event mask command (direct 0x0613) */
+struct i40e_aqc_set_phy_int_mask {
+	u8	reserved[8];
+	__le16	event_mask;
+#define I40E_AQ_EVENT_LINK_UPDOWN	0x0002
+#define I40E_AQ_EVENT_MEDIA_NA		0x0004
+#define I40E_AQ_EVENT_LINK_FAULT	0x0008
+#define I40E_AQ_EVENT_PHY_TEMP_ALARM	0x0010
+#define I40E_AQ_EVENT_EXCESSIVE_ERRORS	0x0020
+#define I40E_AQ_EVENT_SIGNAL_DETECT	0x0040
+#define I40E_AQ_EVENT_AN_COMPLETED	0x0080
+#define I40E_AQ_EVENT_MODULE_QUAL_FAIL	0x0100
+#define I40E_AQ_EVENT_PORT_TX_SUSPENDED	0x0200
+	u8	reserved1[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask);
+
+/* Get Local AN advt register (direct 0x0614)
+ * Set Local AN advt register (direct 0x0615)
+ * Get Link Partner AN advt register (direct 0x0616)
+ */
+struct i40e_aqc_an_advt_reg {
+	__le32	local_an_reg0;
+	__le16	local_an_reg1;
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg);
+
+/* Set Loopback mode (0x0618) */
+struct i40e_aqc_set_lb_mode {
+	__le16	lb_mode;
+#define I40E_AQ_LB_PHY_LOCAL	0x01
+#define I40E_AQ_LB_PHY_REMOTE	0x02
+#define I40E_AQ_LB_MAC_LOCAL	0x04
+	u8	reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
+
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+	u8	command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL	0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT	2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK	(0x03 << \
+					I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE	0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD	0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT	0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW	0x10
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
+
+enum i40e_aq_phy_reg_type {
+	I40E_AQC_PHY_REG_INTERNAL	= 0x1,
+	I40E_AQC_PHY_REG_EXERNAL_BASET	= 0x2,
+	I40E_AQC_PHY_REG_EXERNAL_MODULE	= 0x3
+};
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Update commands (indirect 0x0703)
+ */
+struct i40e_aqc_nvm_update {
+	u8	command_flags;
+#define I40E_AQ_NVM_LAST_CMD	0x01
+#define I40E_AQ_NVM_FLASH_ONLY	0x80
+	u8	module_pointer;
+	__le16	length;
+	__le32	offset;
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
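+
+/* Fill-out sketch (illustrative): a shadow RAM read of `length` bytes at
+ * byte `offset` in module 0, flagged as the last command of a sequence:
+ *
+ *	cmd->module_pointer = 0;
+ *	cmd->offset = CPU_TO_LE32(offset);
+ *	cmd->length = CPU_TO_LE16(length);
+ *	cmd->command_flags = last_command ? I40E_AQ_NVM_LAST_CMD : 0;
+ */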
+
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+	__le16	cmd_flags;
+#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK	1
+#define I40E_AQ_ANVM_READ_SINGLE_FEATURE		0
+#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES		1
+	__le16	element_count;
+	__le16	element_id;     /* Feature/field ID */
+	__le16	element_id_msw;	/* MSWord of field ID */
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+	__le16	cmd_flags;
+	__le16	element_count;
+	u8	reserved[4];
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT		1
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK		(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE				0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD	(1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+struct i40e_aqc_nvm_config_data_feature {
+	__le16 feature_id;
+#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY		0x01
+#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP		0x08
+#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR		0x10
+	__le16 feature_options;
+	__le16 feature_selection;
+};
+
+I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature);
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+	__le32 field_id;
+	__le32 field_value;
+	__le16 field_options;
+	__le16 reserved;
+};
+
+I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
+
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA	0x01
+	u8 sel_data;
+	u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+	u8 str_len;
+	u8 dev_addr;
+	__le16 eeprom_addr;
+	u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
+/* Send to PF command (indirect 0x0801); the id field is only used by the PF
+ * Send to VF command (indirect 0x0802); the id field is only used by the PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+	__le32	id;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+/* Alternate structure */
+
+/* Direct write (direct 0x0900)
+ * Direct read (direct 0x0902)
+ */
+struct i40e_aqc_alternate_write {
+	__le32 address0;
+	__le32 data0;
+	__le32 address1;
+	__le32 data1;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write);
+
+/* Indirect write (indirect 0x0901)
+ * Indirect read (indirect 0x0903)
+ */
+
+struct i40e_aqc_alternate_ind_write {
+	__le32 address;
+	__le32 length;
+	__le32 addr_high;
+	__le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
+
+/* Done alternate write (direct 0x0904)
+ * uses i40e_aq_desc
+ */
+struct i40e_aqc_alternate_write_done {
+	__le16	cmd_flags;
+#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK	1
+#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY	0
+#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI	1
+#define I40E_AQ_ALTERNATE_RESET_NEEDED		2
+	u8	reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
+
+/* Set OEM mode (direct 0x0905) */
+struct i40e_aqc_alternate_set_mode {
+	__le32	mode;
+#define I40E_AQ_ALTERNATE_MODE_NONE	0
+#define I40E_AQ_ALTERNATE_MODE_OEM	1
+	u8	reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode);
+
+/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */
+
+/* async events 0x10xx */
+
+/* Lan Queue Overflow Event (direct, 0x1001) */
+struct i40e_aqc_lan_overflow {
+	__le32	prtdcb_rupto;
+	__le32	otx_ctl;
+	u8	reserved[8];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow);
+
+/* Get LLDP MIB (indirect 0x0A00) */
+struct i40e_aqc_lldp_get_mib {
+	u8	type;
+	u8	reserved1;
+#define I40E_AQ_LLDP_MIB_TYPE_MASK		0x3
+#define I40E_AQ_LLDP_MIB_LOCAL			0x0
+#define I40E_AQ_LLDP_MIB_REMOTE			0x1
+#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE	0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK		0xC
+#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT		0x2
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE	0x0
+#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR	0x1
+#define I40E_AQ_LLDP_TX_SHIFT			0x4
+#define I40E_AQ_LLDP_TX_MASK			(0x03 << I40E_AQ_LLDP_TX_SHIFT)
+/* TX pause flags use I40E_AQ_LINK_TX_* above */
+	__le16	local_len;
+	__le16	remote_len;
+	u8	reserved2[2];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
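+/* Illustrative sketch only ("cmd" is a hypothetical pointer to the
+ * struct above): both subfields share the one-byte type field, so a
+ * request for the remote MIB from the nearest bridge packs them as:
+ *
+ *	cmd->type = I40E_AQ_LLDP_MIB_REMOTE |
+ *	    (I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE <<
+ *	     I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT);
+ */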
+
+/* Configure LLDP MIB Change Event (direct 0x0A01)
+ * also used for the event (with type in the command field)
+ */
+struct i40e_aqc_lldp_update_mib {
+	u8	command;
+#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE	0x0
+#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE	0x1
+	u8	reserved[7];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib);
+
+/* Add LLDP TLV (indirect 0x0A02)
+ * Delete LLDP TLV (indirect 0x0A04)
+ */
+struct i40e_aqc_lldp_add_tlv {
+	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8	reserved1[1];
+	__le16	len;
+	u8	reserved2[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv);
+
+/* Update LLDP TLV (indirect 0x0A03) */
+struct i40e_aqc_lldp_update_tlv {
+	u8	type; /* only nearest bridge and non-TPMR from 0x0A00 */
+	u8	reserved;
+	__le16	old_len;
+	__le16	new_offset;
+	__le16	new_len;
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
+
+/* Stop LLDP (direct 0x0A05) */
+struct i40e_aqc_lldp_stop {
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_STOP		0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN	0x1
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
+
+/* Start LLDP (direct 0x0A06) */
+
+struct i40e_aqc_lldp_start {
+	u8	command;
+#define I40E_AQ_LLDP_AGENT_START	0x1
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * uses the generic descriptor struct
+ * returns below as indirect response
+ */
+
+#define I40E_AQC_CEE_APP_FCOE_SHIFT	0x0
+#define I40E_AQC_CEE_APP_FCOE_MASK	(0x7 << I40E_AQC_CEE_APP_FCOE_SHIFT)
+#define I40E_AQC_CEE_APP_ISCSI_SHIFT	0x3
+#define I40E_AQC_CEE_APP_ISCSI_MASK	(0x7 << I40E_AQC_CEE_APP_ISCSI_SHIFT)
+#define I40E_AQC_CEE_APP_FIP_SHIFT	0x8
+#define I40E_AQC_CEE_APP_FIP_MASK	(0x7 << I40E_AQC_CEE_APP_FIP_SHIFT)
+
+#define I40E_AQC_CEE_PG_STATUS_SHIFT	0x0
+#define I40E_AQC_CEE_PG_STATUS_MASK	(0x7 << I40E_AQC_CEE_PG_STATUS_SHIFT)
+#define I40E_AQC_CEE_PFC_STATUS_SHIFT	0x3
+#define I40E_AQC_CEE_PFC_STATUS_MASK	(0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
+#define I40E_AQC_CEE_APP_STATUS_SHIFT	0x8
+#define I40E_AQC_CEE_APP_STATUS_MASK	(0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT	0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK	(0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT	0xB
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK	(0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT	0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK	(0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
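+/* Illustrative sketch only ("resp" is a hypothetical pointer to the
+ * i40e_aqc_get_cee_dcb_cfg_resp defined below): each status is a
+ * 3-bit field within tlv_status, recovered with its mask/shift pair:
+ *
+ *	u32 tlv_status = LE32_TO_CPU(resp->tlv_status);
+ *	u32 pg_status = (tlv_status & I40E_AQC_CEE_PG_STATUS_MASK) >>
+ *			I40E_AQC_CEE_PG_STATUS_SHIFT;
+ *	u32 fip_status = (tlv_status & I40E_AQC_CEE_FIP_STATUS_MASK) >>
+ *			I40E_AQC_CEE_FIP_STATUS_SHIFT;
+ */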
+
+/* struct i40e_aqc_get_cee_dcb_cfg_v1_resp was originally defined with
+ * word boundary layout issues, which the Linux compilers silently deal
+ * with by adding padding, making the actual struct larger than designed.
+ * However, the FW compiler for the NIC is less lenient and complains
+ * about the struct.  Hence, the struct defined here has an extra byte in
+ * fields reserved3 and reserved4 to directly acknowledge that padding,
+ * and the new length is used in the length check macro.
+ */
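+/* (Concretely: without the extra bytes, oper_app_prio would start at
+ * odd offset 17 and the compiler would silently pad it to 18; widening
+ * reserved3 to two bytes makes offset 18 explicit, and likewise for
+ * reserved4, giving the 0x18-byte total checked below.)
+ */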
+struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
+	u8	reserved1;
+	u8	oper_num_tc;
+	u8	oper_prio_tc[4];
+	u8	reserved2;
+	u8	oper_tc_bw[8];
+	u8	oper_pfc_en;
+	u8	reserved3[2];
+	__le16	oper_app_prio;
+	u8	reserved4[2];
+	__le16	tlv_status;
+};
+
+I40E_CHECK_STRUCT_LEN(0x18, i40e_aqc_get_cee_dcb_cfg_v1_resp);
+
+struct i40e_aqc_get_cee_dcb_cfg_resp {
+	u8	oper_num_tc;
+	u8	oper_prio_tc[4];
+	u8	oper_tc_bw[8];
+	u8	oper_pfc_en;
+	__le16	oper_app_prio;
+	__le32	tlv_status;
+	u8	reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
+
+/*	Set Local LLDP MIB (indirect 0x0A08)
+ *	Used to replace the local MIB of a given LLDP agent, e.g. DCBX
+ */
+struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT	0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK		(1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+	u8	type;
+	u8	reserved0;
+	__le16	length;
+	u8	reserved1[4];
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
+
+/*	Stop/Start LLDP Agent (direct 0x0A09)
+ *	Used for stopping/starting a specific LLDP agent, e.g. DCBX
+ */
+struct i40e_aqc_lldp_stop_start_specific_agent {
+#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT	0
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK	(1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+	u8	command;
+	u8	reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+
+/* Add Udp Tunnel command and completion (direct 0x0B00) */
+struct i40e_aqc_add_udp_tunnel {
+	__le16	udp_port;
+	u8	reserved0[3];
+	u8	protocol_type;
+#define I40E_AQC_TUNNEL_TYPE_VXLAN	0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE	0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO	0x10
+	u8	reserved1[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
+
+struct i40e_aqc_add_udp_tunnel_completion {
+	__le16 udp_port;
+	u8	filter_entry_index;
+	u8	multiple_pfs;
+#define I40E_AQC_SINGLE_PF		0x0
+#define I40E_AQC_MULTIPLE_PFS		0x1
+	u8	total_filters;
+	u8	reserved[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion);
+
+/* remove UDP Tunnel command (0x0B01) */
+struct i40e_aqc_remove_udp_tunnel {
+	u8	reserved[2];
+	u8	index; /* 0 to 15 */
+	u8	reserved2[13];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel);
+
+struct i40e_aqc_del_udp_tunnel_completion {
+	__le16	udp_port;
+	u8	index; /* 0 to 15 */
+	u8	multiple_pfs;
+	u8	total_filters_used;
+	u8	reserved1[11];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+#ifdef X722_SUPPORT
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+	__le16	vsi_id;
+	u8	reserved[6];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+	u8 standard_rss_key[0x28];
+	u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct  i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID		(0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK	(0x3FF << \
+					I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+	__le16	vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK	(0x1 << \
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI	0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF	1
+	__le16	flags;
+	u8	reserved[4];
+	__le32	addr_high;
+	__le32	addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+#endif
+
+/* tunnel key structure 0x0B10 */
+
+struct i40e_aqc_tunnel_key_structure {
+	u8	key1_off;
+	u8	key2_off;
+	u8	key1_len;  /* 0 to 15 */
+	u8	key2_len;  /* 0 to 15 */
+	u8	flags;
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE	0x01
+/* response flags */
+#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS	0x01
+#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED	0x02
+#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN	0x03
+	u8	network_key_index;
+#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN		0x0
+#define I40E_AQC_NETWORK_KEY_INDEX_NGE			0x1
+#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP	0x2
+#define I40E_AQC_NETWORK_KEY_INDEX_GRE			0x3
+	u8	reserved[10];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
+
+/* OEM mode commands (direct 0xFE0x) */
+struct i40e_aqc_oem_param_change {
+	__le32	param_type;
+#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL	0
+#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL	1
+#define I40E_AQ_OEM_PARAM_MAC		2
+	__le32	param_value1;
+	__le16	param_value2;
+	u8	reserved[6];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
+
+struct i40e_aqc_oem_state_change {
+	__le32	state;
+#define I40E_AQ_OEM_STATE_LINK_DOWN	0x0
+#define I40E_AQ_OEM_STATE_LINK_UP	0x1
+	u8	reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change);
+
+/* Initialize OCSD (0xFE02, direct) */
+struct i40e_aqc_opc_oem_ocsd_initialize {
+	u8 type_status;
+	u8 reserved1[3];
+	__le32 ocsd_memory_block_addr_high;
+	__le32 ocsd_memory_block_addr_low;
+	__le32 requested_update_interval;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize);
+
+/* Initialize OCBB  (0xFE03, direct) */
+struct i40e_aqc_opc_oem_ocbb_initialize {
+	u8 type_status;
+	u8 reserved1[3];
+	__le32 ocbb_memory_block_addr_high;
+	__le32 ocbb_memory_block_addr_low;
+	u8 reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
+
+/* debug commands */
+
+/* get device id (0xFF00) uses the generic structure */
+
+/* set test mode (0xFF01, internal) */
+
+struct i40e_acq_set_test_mode {
+	u8	mode;
+#define I40E_AQ_TEST_PARTIAL	0
+#define I40E_AQ_TEST_FULL	1
+#define I40E_AQ_TEST_NVM	2
+	u8	reserved[3];
+	u8	command;
+#define I40E_AQ_TEST_OPEN	0
+#define I40E_AQ_TEST_CLOSE	1
+#define I40E_AQ_TEST_INC	2
+	u8	reserved2[3];
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode);
+
+/* Debug Read Register command (0xFF03)
+ * Debug Write Register command (0xFF04)
+ */
+struct i40e_aqc_debug_reg_read_write {
+	__le32 reserved;
+	__le32 address;
+	__le32 value_high;
+	__le32 value_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write);
+
+/* Scatter/gather Reg Read  (indirect 0xFF05)
+ * Scatter/gather Reg Write (indirect 0xFF06)
+ */
+
+/* i40e_aq_desc is used for the command */
+struct i40e_aqc_debug_reg_sg_element_data {
+	__le32 address;
+	__le32 value;
+};
+
+/* Debug Modify register (direct 0xFF07) */
+struct i40e_aqc_debug_modify_reg {
+	__le32 address;
+	__le32 value;
+	__le32 clear_mask;
+	__le32 set_mask;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
+
+/* dump internal data (0xFF08, indirect) */
+
+#define I40E_AQ_CLUSTER_ID_AUX		0
+#define I40E_AQ_CLUSTER_ID_SWITCH_FLU	1
+#define I40E_AQ_CLUSTER_ID_TXSCHED	2
+#define I40E_AQ_CLUSTER_ID_HMC		3
+#define I40E_AQ_CLUSTER_ID_MAC0		4
+#define I40E_AQ_CLUSTER_ID_MAC1		5
+#define I40E_AQ_CLUSTER_ID_MAC2		6
+#define I40E_AQ_CLUSTER_ID_MAC3		7
+#define I40E_AQ_CLUSTER_ID_DCB		8
+#define I40E_AQ_CLUSTER_ID_EMP_MEM	9
+#define I40E_AQ_CLUSTER_ID_PKT_BUF	10
+#define I40E_AQ_CLUSTER_ID_ALTRAM	11
+
+struct i40e_aqc_debug_dump_internals {
+	u8	cluster_id;
+	u8	table_id;
+	__le16	data_size;
+	__le32	idx;
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals);
+
+struct i40e_aqc_debug_modify_internals {
+	u8	cluster_id;
+	u8	cluster_specific_params[7];
+	__le32	address_high;
+	__le32	address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals);
+
+#endif


Property changes on: trunk/sys/dev/ixl/i40e_adminq_cmd.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_alloc.h
===================================================================
--- trunk/sys/dev/ixl/i40e_alloc.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_alloc.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,67 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2014, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_alloc.h 292099 2015-12-11 13:05:18Z smh $*/
+
+#ifndef _I40E_ALLOC_H_
+#define _I40E_ALLOC_H_
+
+struct i40e_hw;
+
+/* Memory allocation types */
+enum i40e_memory_type {
+	i40e_mem_arq_buf = 0,		/* ARQ indirect command buffer */
+	i40e_mem_asq_buf = 1,
+	i40e_mem_atq_buf = 2,		/* ATQ indirect command buffer */
+	i40e_mem_arq_ring = 3,		/* ARQ descriptor ring */
+	i40e_mem_atq_ring = 4,		/* ATQ descriptor ring */
+	i40e_mem_pd = 5,		/* Page Descriptor */
+	i40e_mem_bp = 6,		/* Backing Page - 4KB */
+	i40e_mem_bp_jumbo = 7,		/* Backing Page - > 4KB */
+	i40e_mem_reserved
+};
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40e_status_code i40e_allocate_dma_mem(struct i40e_hw *hw,
+					    struct i40e_dma_mem *mem,
+					    enum i40e_memory_type type,
+					    u64 size, u32 alignment);
+enum i40e_status_code i40e_free_dma_mem(struct i40e_hw *hw,
+					struct i40e_dma_mem *mem);
+enum i40e_status_code i40e_allocate_virt_mem(struct i40e_hw *hw,
+					     struct i40e_virt_mem *mem,
+					     u32 size);
+enum i40e_status_code i40e_free_virt_mem(struct i40e_hw *hw,
+					 struct i40e_virt_mem *mem);
+
+#endif /* _I40E_ALLOC_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_alloc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_common.c
===================================================================
--- trunk/sys/dev/ixl/i40e_common.c	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_common.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,5688 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_common.c 292100 2015-12-11 13:08:38Z smh $*/
+
+#include "i40e_type.h"
+#include "i40e_adminq.h"
+#include "i40e_prototype.h"
+#include "i40e_virtchnl.h"
+
+
+/**
+ * i40e_set_mac_type - Sets MAC type
+ * @hw: pointer to the HW structure
+ *
+ * This function sets the mac type of the adapter based on the
+ * vendor ID and device ID stored in the hw structure.
+ **/
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	DEBUGFUNC("i40e_set_mac_type");
+
+	if (hw->vendor_id == I40E_INTEL_VENDOR_ID) {
+		switch (hw->device_id) {
+		case I40E_DEV_ID_SFP_XL710:
+		case I40E_DEV_ID_QEMU:
+		case I40E_DEV_ID_KX_A:
+		case I40E_DEV_ID_KX_B:
+		case I40E_DEV_ID_KX_C:
+		case I40E_DEV_ID_QSFP_A:
+		case I40E_DEV_ID_QSFP_B:
+		case I40E_DEV_ID_QSFP_C:
+		case I40E_DEV_ID_10G_BASE_T:
+		case I40E_DEV_ID_10G_BASE_T4:
+		case I40E_DEV_ID_20G_KR2:
+		case I40E_DEV_ID_20G_KR2_A:
+			hw->mac.type = I40E_MAC_XL710;
+			break;
+#ifdef X722_SUPPORT
+		case I40E_DEV_ID_SFP_X722:
+		case I40E_DEV_ID_1G_BASE_T_X722:
+		case I40E_DEV_ID_10G_BASE_T_X722:
+			hw->mac.type = I40E_MAC_X722;
+			break;
+#endif
+#ifdef X722_SUPPORT
+		case I40E_DEV_ID_X722_VF:
+		case I40E_DEV_ID_X722_VF_HV:
+			hw->mac.type = I40E_MAC_X722_VF;
+			break;
+#endif
+		case I40E_DEV_ID_VF:
+		case I40E_DEV_ID_VF_HV:
+			hw->mac.type = I40E_MAC_VF;
+			break;
+		default:
+			hw->mac.type = I40E_MAC_GENERIC;
+			break;
+		}
+	} else {
+		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	DEBUGOUT2("i40e_set_mac_type found mac: %d, returns: %d\n",
+		  hw->mac.type, status);
+	return status;
+}
+
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+	switch (aq_err) {
+	case I40E_AQ_RC_OK:
+		return "OK";
+	case I40E_AQ_RC_EPERM:
+		return "I40E_AQ_RC_EPERM";
+	case I40E_AQ_RC_ENOENT:
+		return "I40E_AQ_RC_ENOENT";
+	case I40E_AQ_RC_ESRCH:
+		return "I40E_AQ_RC_ESRCH";
+	case I40E_AQ_RC_EINTR:
+		return "I40E_AQ_RC_EINTR";
+	case I40E_AQ_RC_EIO:
+		return "I40E_AQ_RC_EIO";
+	case I40E_AQ_RC_ENXIO:
+		return "I40E_AQ_RC_ENXIO";
+	case I40E_AQ_RC_E2BIG:
+		return "I40E_AQ_RC_E2BIG";
+	case I40E_AQ_RC_EAGAIN:
+		return "I40E_AQ_RC_EAGAIN";
+	case I40E_AQ_RC_ENOMEM:
+		return "I40E_AQ_RC_ENOMEM";
+	case I40E_AQ_RC_EACCES:
+		return "I40E_AQ_RC_EACCES";
+	case I40E_AQ_RC_EFAULT:
+		return "I40E_AQ_RC_EFAULT";
+	case I40E_AQ_RC_EBUSY:
+		return "I40E_AQ_RC_EBUSY";
+	case I40E_AQ_RC_EEXIST:
+		return "I40E_AQ_RC_EEXIST";
+	case I40E_AQ_RC_EINVAL:
+		return "I40E_AQ_RC_EINVAL";
+	case I40E_AQ_RC_ENOTTY:
+		return "I40E_AQ_RC_ENOTTY";
+	case I40E_AQ_RC_ENOSPC:
+		return "I40E_AQ_RC_ENOSPC";
+	case I40E_AQ_RC_ENOSYS:
+		return "I40E_AQ_RC_ENOSYS";
+	case I40E_AQ_RC_ERANGE:
+		return "I40E_AQ_RC_ERANGE";
+	case I40E_AQ_RC_EFLUSHED:
+		return "I40E_AQ_RC_EFLUSHED";
+	case I40E_AQ_RC_BAD_ADDR:
+		return "I40E_AQ_RC_BAD_ADDR";
+	case I40E_AQ_RC_EMODE:
+		return "I40E_AQ_RC_EMODE";
+	case I40E_AQ_RC_EFBIG:
+		return "I40E_AQ_RC_EFBIG";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+	return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
+{
+	switch (stat_err) {
+	case I40E_SUCCESS:
+		return "OK";
+	case I40E_ERR_NVM:
+		return "I40E_ERR_NVM";
+	case I40E_ERR_NVM_CHECKSUM:
+		return "I40E_ERR_NVM_CHECKSUM";
+	case I40E_ERR_PHY:
+		return "I40E_ERR_PHY";
+	case I40E_ERR_CONFIG:
+		return "I40E_ERR_CONFIG";
+	case I40E_ERR_PARAM:
+		return "I40E_ERR_PARAM";
+	case I40E_ERR_MAC_TYPE:
+		return "I40E_ERR_MAC_TYPE";
+	case I40E_ERR_UNKNOWN_PHY:
+		return "I40E_ERR_UNKNOWN_PHY";
+	case I40E_ERR_LINK_SETUP:
+		return "I40E_ERR_LINK_SETUP";
+	case I40E_ERR_ADAPTER_STOPPED:
+		return "I40E_ERR_ADAPTER_STOPPED";
+	case I40E_ERR_INVALID_MAC_ADDR:
+		return "I40E_ERR_INVALID_MAC_ADDR";
+	case I40E_ERR_DEVICE_NOT_SUPPORTED:
+		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+	case I40E_ERR_MASTER_REQUESTS_PENDING:
+		return "I40E_ERR_MASTER_REQUESTS_PENDING";
+	case I40E_ERR_INVALID_LINK_SETTINGS:
+		return "I40E_ERR_INVALID_LINK_SETTINGS";
+	case I40E_ERR_AUTONEG_NOT_COMPLETE:
+		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+	case I40E_ERR_RESET_FAILED:
+		return "I40E_ERR_RESET_FAILED";
+	case I40E_ERR_SWFW_SYNC:
+		return "I40E_ERR_SWFW_SYNC";
+	case I40E_ERR_NO_AVAILABLE_VSI:
+		return "I40E_ERR_NO_AVAILABLE_VSI";
+	case I40E_ERR_NO_MEMORY:
+		return "I40E_ERR_NO_MEMORY";
+	case I40E_ERR_BAD_PTR:
+		return "I40E_ERR_BAD_PTR";
+	case I40E_ERR_RING_FULL:
+		return "I40E_ERR_RING_FULL";
+	case I40E_ERR_INVALID_PD_ID:
+		return "I40E_ERR_INVALID_PD_ID";
+	case I40E_ERR_INVALID_QP_ID:
+		return "I40E_ERR_INVALID_QP_ID";
+	case I40E_ERR_INVALID_CQ_ID:
+		return "I40E_ERR_INVALID_CQ_ID";
+	case I40E_ERR_INVALID_CEQ_ID:
+		return "I40E_ERR_INVALID_CEQ_ID";
+	case I40E_ERR_INVALID_AEQ_ID:
+		return "I40E_ERR_INVALID_AEQ_ID";
+	case I40E_ERR_INVALID_SIZE:
+		return "I40E_ERR_INVALID_SIZE";
+	case I40E_ERR_INVALID_ARP_INDEX:
+		return "I40E_ERR_INVALID_ARP_INDEX";
+	case I40E_ERR_INVALID_FPM_FUNC_ID:
+		return "I40E_ERR_INVALID_FPM_FUNC_ID";
+	case I40E_ERR_QP_INVALID_MSG_SIZE:
+		return "I40E_ERR_QP_INVALID_MSG_SIZE";
+	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+	case I40E_ERR_INVALID_FRAG_COUNT:
+		return "I40E_ERR_INVALID_FRAG_COUNT";
+	case I40E_ERR_QUEUE_EMPTY:
+		return "I40E_ERR_QUEUE_EMPTY";
+	case I40E_ERR_INVALID_ALIGNMENT:
+		return "I40E_ERR_INVALID_ALIGNMENT";
+	case I40E_ERR_FLUSHED_QUEUE:
+		return "I40E_ERR_FLUSHED_QUEUE";
+	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+	case I40E_ERR_INVALID_IMM_DATA_SIZE:
+		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+	case I40E_ERR_TIMEOUT:
+		return "I40E_ERR_TIMEOUT";
+	case I40E_ERR_OPCODE_MISMATCH:
+		return "I40E_ERR_OPCODE_MISMATCH";
+	case I40E_ERR_CQP_COMPL_ERROR:
+		return "I40E_ERR_CQP_COMPL_ERROR";
+	case I40E_ERR_INVALID_VF_ID:
+		return "I40E_ERR_INVALID_VF_ID";
+	case I40E_ERR_INVALID_HMCFN_ID:
+		return "I40E_ERR_INVALID_HMCFN_ID";
+	case I40E_ERR_BACKING_PAGE_ERROR:
+		return "I40E_ERR_BACKING_PAGE_ERROR";
+	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+	case I40E_ERR_INVALID_PBLE_INDEX:
+		return "I40E_ERR_INVALID_PBLE_INDEX";
+	case I40E_ERR_INVALID_SD_INDEX:
+		return "I40E_ERR_INVALID_SD_INDEX";
+	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+	case I40E_ERR_INVALID_SD_TYPE:
+		return "I40E_ERR_INVALID_SD_TYPE";
+	case I40E_ERR_MEMCPY_FAILED:
+		return "I40E_ERR_MEMCPY_FAILED";
+	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+	case I40E_ERR_SRQ_ENABLED:
+		return "I40E_ERR_SRQ_ENABLED";
+	case I40E_ERR_ADMIN_QUEUE_ERROR:
+		return "I40E_ERR_ADMIN_QUEUE_ERROR";
+	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+	case I40E_ERR_BUF_TOO_SHORT:
+		return "I40E_ERR_BUF_TOO_SHORT";
+	case I40E_ERR_ADMIN_QUEUE_FULL:
+		return "I40E_ERR_ADMIN_QUEUE_FULL";
+	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+	case I40E_ERR_BAD_IWARP_CQE:
+		return "I40E_ERR_BAD_IWARP_CQE";
+	case I40E_ERR_NVM_BLANK_MODE:
+		return "I40E_ERR_NVM_BLANK_MODE";
+	case I40E_ERR_NOT_IMPLEMENTED:
+		return "I40E_ERR_NOT_IMPLEMENTED";
+	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+	case I40E_ERR_DIAG_TEST_FAILED:
+		return "I40E_ERR_DIAG_TEST_FAILED";
+	case I40E_ERR_NOT_READY:
+		return "I40E_ERR_NOT_READY";
+	case I40E_NOT_SUPPORTED:
+		return "I40E_NOT_SUPPORTED";
+	case I40E_ERR_FIRMWARE_API_VERSION:
+		return "I40E_ERR_FIRMWARE_API_VERSION";
+	}
+
+	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+	return hw->err_str;
+}
+
+/**
+ * i40e_debug_aq
+ * @hw: pointer to the hw struct
+ * @mask: debug mask
+ * @desc: pointer to admin queue descriptor
+ * @buffer: pointer to command buffer
+ * @buf_len: max length of buffer
+ *
+ * Dumps debug log about adminq command with descriptor contents.
+ **/
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
+		   void *buffer, u16 buf_len)
+{
+	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+	u16 len = LE16_TO_CPU(aq_desc->datalen);
+	u8 *buf = (u8 *)buffer;
+	u16 i = 0;
+
+	if ((!(mask & hw->debug_mask)) || (desc == NULL))
+		return;
+
+	i40e_debug(hw, mask,
+		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
+		   LE16_TO_CPU(aq_desc->opcode),
+		   LE16_TO_CPU(aq_desc->flags),
+		   LE16_TO_CPU(aq_desc->datalen),
+		   LE16_TO_CPU(aq_desc->retval));
+	i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+		   LE32_TO_CPU(aq_desc->cookie_high),
+		   LE32_TO_CPU(aq_desc->cookie_low));
+	i40e_debug(hw, mask, "\tparam (0,1)  0x%08X 0x%08X\n",
+		   LE32_TO_CPU(aq_desc->params.internal.param0),
+		   LE32_TO_CPU(aq_desc->params.internal.param1));
+	i40e_debug(hw, mask, "\taddr (h,l)   0x%08X 0x%08X\n",
+		   LE32_TO_CPU(aq_desc->params.external.addr_high),
+		   LE32_TO_CPU(aq_desc->params.external.addr_low));
+
+	if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
+		if (buf_len < len)
+			len = buf_len;
+		/* write the full 16-byte chunks */
+		for (i = 0; i < (len - 16); i += 16)
+			i40e_debug(hw, mask,
+				   "\t0x%04X  %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+				   i, buf[i], buf[i+1], buf[i+2], buf[i+3],
+				   buf[i+4], buf[i+5], buf[i+6], buf[i+7],
+				   buf[i+8], buf[i+9], buf[i+10], buf[i+11],
+				   buf[i+12], buf[i+13], buf[i+14], buf[i+15]);
+		/* write whatever's left over without overrunning the buffer */
+		if (i < len) {
+			char d_buf[80];
+			int j = 0;
+
+			memset(d_buf, 0, sizeof(d_buf));
+			j += sprintf(d_buf, "\t0x%04X ", i);
+			while (i < len)
+				j += sprintf(&d_buf[j], " %02X", buf[i++]);
+			i40e_debug(hw, mask, "%s\n", d_buf);
+		}
+	}
+}
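+
+/* Illustrative usage sketch ("desc", "buf" and "buf_len" stand for a
+ * hypothetical command just sent; I40E_DEBUG_AQ_MESSAGE is one of the
+ * enum i40e_debug_mask values):
+ *
+ *	hw->debug_mask |= I40E_DEBUG_AQ_MESSAGE;
+ *	i40e_debug_aq(hw, I40E_DEBUG_AQ_MESSAGE, &desc, buf, buf_len);
+ */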
+
+/**
+ * i40e_check_asq_alive
+ * @hw: pointer to the hw struct
+ *
+ * Returns TRUE if Queue is enabled else FALSE.
+ **/
+bool i40e_check_asq_alive(struct i40e_hw *hw)
+{
+	if (hw->aq.asq.len) {
+		if (!i40e_is_vf(hw))
+			return !!(rd32(hw, hw->aq.asq.len) &
+				I40E_PF_ATQLEN_ATQENABLE_MASK);
+		else
+			return !!(rd32(hw, hw->aq.asq.len) &
+				I40E_VF_ATQLEN1_ATQENABLE_MASK);
+	}
+	return FALSE;
+}
+
+/**
+ * i40e_aq_queue_shutdown
+ * @hw: pointer to the hw struct
+ * @unloading: is the driver unloading itself
+ *
+ * Tell the Firmware that we're shutting down the AdminQ and whether
+ * or not the driver is unloading as well.
+ **/
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
+					     bool unloading)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_queue_shutdown *cmd =
+		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_queue_shutdown);
+
+	if (unloading)
+		cmd->driver_unloading = CPU_TO_LE32(I40E_AQ_DRIVER_UNLOADING);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+#ifdef X722_SUPPORT
+
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set TRUE to set the table, FALSE to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+						     u16 vsi_id, bool pf_lut,
+						     u8 *lut, u16 lut_size,
+						     bool set)
+{
+	enum i40e_status_code status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_lut *cmd_resp =
+		   (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+	if (set)
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_set_rss_lut);
+	else
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_get_rss_lut);
+
+	/* Indirect command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			CPU_TO_LE16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+	cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+	if (pf_lut)
+		cmd_resp->flags |= CPU_TO_LE16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+	else
+		cmd_resp->flags |= CPU_TO_LE16((u16)
+					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+	cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)lut));
+	cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)lut));
+
+	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+					  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+				       FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+					  bool pf_lut, u8 *lut, u16 lut_size)
+{
+	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set TRUE to set the key, FALSE to get the key
+ *
+ * Internal function to get or set the RSS key per VSI
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key,
+				      bool set)
+{
+	enum i40e_status_code status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_set_rss_key *cmd_resp =
+			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+	if (set)
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_set_rss_key);
+	else
+		i40e_fill_default_direct_cmd_desc(&desc,
+						  i40e_aqc_opc_get_rss_key);
+
+	/* Indirect command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+	cmd_resp->vsi_id =
+			CPU_TO_LE16((u16)((vsi_id <<
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+	cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+	cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_WORD((u64)key));
+	cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)key));
+
+	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+				      u16 vsi_id,
+				      struct i40e_aqc_get_set_rss_key_data *key)
+{
+	return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE);
+}
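+
+/* Illustrative usage sketch ("seed" is a hypothetical buffer of random
+ * bytes and "vsi_id" a hypothetical VSI index): program a VSI's RSS
+ * key, then read it back:
+ *
+ *	struct i40e_aqc_get_set_rss_key_data key;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	memcpy(key.standard_rss_key, seed, sizeof(key.standard_rss_key));
+ *	status = i40e_aq_set_rss_key(hw, vsi_id, &key);
+ *	if (status == I40E_SUCCESS)
+ *		status = i40e_aq_get_rss_key(hw, vsi_id, &key);
+ */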
+#endif /* X722_SUPPORT */
+
+/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ *
+ * Typical work flow:
+ *
+ * IF NOT i40e_ptype_lookup[ptype].known
+ * THEN
+ *      Packet is unknown
+ * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
+ *      Use the rest of the fields to look at the tunnels, inner protocols, etc
+ * ELSE
+ *      Use the enum i40e_rx_l2_ptype to decode the packet type
+ * ENDIF
+ */
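+
+/* A sketch of that flow in C ("ptype" is the hypothetical 8-bit value
+ * taken from the Rx descriptor; the field names come from struct
+ * i40e_rx_ptype_decoded in i40e_type.h, and mark_as_tcp() is a
+ * stand-in for whatever the caller does with the result):
+ *
+ *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
+ *
+ *	if (!decoded.known)
+ *		return;
+ *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ *	    decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP)
+ *		mark_as_tcp();
+ */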
+
+/* macro to make the table lines short */
+#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
+	{	PTYPE, \
+		1, \
+		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
+		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
+		I40E_RX_PTYPE_##OUTER_FRAG, \
+		I40E_RX_PTYPE_TUNNEL_##T, \
+		I40E_RX_PTYPE_TUNNEL_END_##TE, \
+		I40E_RX_PTYPE_##TEF, \
+		I40E_RX_PTYPE_INNER_PROT_##I, \
+		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
+
+#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
+		{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
+
+/* shorter macros make the table fit but are terse */
+#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
+#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
+#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC
+
+/* Lookup table mapping the HW PTYPE to the bit field for decoding */
+struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
+	/* L2 Packet types */
+	I40E_PTT_UNUSED_ENTRY(0),
+	I40E_PTT(1,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT(2,  L2, NONE, NOF, NONE, NONE, NOF, TS,   PAY2),
+	I40E_PTT(3,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT_UNUSED_ENTRY(4),
+	I40E_PTT_UNUSED_ENTRY(5),
+	I40E_PTT(6,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT(7,  L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT_UNUSED_ENTRY(8),
+	I40E_PTT_UNUSED_ENTRY(9),
+	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
+	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
+	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
+
+	/* Non Tunneled IPv4 */
+	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(25),
+	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP,  PAY4),
+	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
+	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+	/* IPv4 --> IPv4 */
+	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(32),
+	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> IPv6 */
+	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(39),
+	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT */
+	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 --> GRE/NAT --> IPv4 */
+	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(47),
+	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> IPv6 */
+	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(54),
+	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> MAC */
+	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
+	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(62),
+	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
+	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(69),
+	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv4 --> GRE/NAT --> MAC/VLAN */
+	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
+	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(77),
+	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
+	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(84),
+	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+	/* Non Tunneled IPv6 */
+	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
+	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP,  PAY3),
+	I40E_PTT_UNUSED_ENTRY(91),
+	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP,  PAY4),
+	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
+	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
+
+	/* IPv6 --> IPv4 */
+	I40E_PTT(95,  IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(96,  IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(97,  IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(98),
+	I40E_PTT(99,  IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> IPv6 */
+	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(105),
+	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT */
+	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT -> IPv4 */
+	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(113),
+	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> IPv6 */
+	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(120),
+	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC */
+	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
+	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(128),
+	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
+	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(135),
+	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC/VLAN */
+	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
+
+	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
+	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
+	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
+	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(143),
+	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP,  PAY4),
+	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
+	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
+
+	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
+	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
+	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
+	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP,  PAY4),
+	I40E_PTT_UNUSED_ENTRY(150),
+	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP,  PAY4),
+	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
+	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
+
+	/* unused entries */
+	I40E_PTT_UNUSED_ENTRY(154),
+	I40E_PTT_UNUSED_ENTRY(155),
+	I40E_PTT_UNUSED_ENTRY(156),
+	I40E_PTT_UNUSED_ENTRY(157),
+	I40E_PTT_UNUSED_ENTRY(158),
+	I40E_PTT_UNUSED_ENTRY(159),
+
+	I40E_PTT_UNUSED_ENTRY(160),
+	I40E_PTT_UNUSED_ENTRY(161),
+	I40E_PTT_UNUSED_ENTRY(162),
+	I40E_PTT_UNUSED_ENTRY(163),
+	I40E_PTT_UNUSED_ENTRY(164),
+	I40E_PTT_UNUSED_ENTRY(165),
+	I40E_PTT_UNUSED_ENTRY(166),
+	I40E_PTT_UNUSED_ENTRY(167),
+	I40E_PTT_UNUSED_ENTRY(168),
+	I40E_PTT_UNUSED_ENTRY(169),
+
+	I40E_PTT_UNUSED_ENTRY(170),
+	I40E_PTT_UNUSED_ENTRY(171),
+	I40E_PTT_UNUSED_ENTRY(172),
+	I40E_PTT_UNUSED_ENTRY(173),
+	I40E_PTT_UNUSED_ENTRY(174),
+	I40E_PTT_UNUSED_ENTRY(175),
+	I40E_PTT_UNUSED_ENTRY(176),
+	I40E_PTT_UNUSED_ENTRY(177),
+	I40E_PTT_UNUSED_ENTRY(178),
+	I40E_PTT_UNUSED_ENTRY(179),
+
+	I40E_PTT_UNUSED_ENTRY(180),
+	I40E_PTT_UNUSED_ENTRY(181),
+	I40E_PTT_UNUSED_ENTRY(182),
+	I40E_PTT_UNUSED_ENTRY(183),
+	I40E_PTT_UNUSED_ENTRY(184),
+	I40E_PTT_UNUSED_ENTRY(185),
+	I40E_PTT_UNUSED_ENTRY(186),
+	I40E_PTT_UNUSED_ENTRY(187),
+	I40E_PTT_UNUSED_ENTRY(188),
+	I40E_PTT_UNUSED_ENTRY(189),
+
+	I40E_PTT_UNUSED_ENTRY(190),
+	I40E_PTT_UNUSED_ENTRY(191),
+	I40E_PTT_UNUSED_ENTRY(192),
+	I40E_PTT_UNUSED_ENTRY(193),
+	I40E_PTT_UNUSED_ENTRY(194),
+	I40E_PTT_UNUSED_ENTRY(195),
+	I40E_PTT_UNUSED_ENTRY(196),
+	I40E_PTT_UNUSED_ENTRY(197),
+	I40E_PTT_UNUSED_ENTRY(198),
+	I40E_PTT_UNUSED_ENTRY(199),
+
+	I40E_PTT_UNUSED_ENTRY(200),
+	I40E_PTT_UNUSED_ENTRY(201),
+	I40E_PTT_UNUSED_ENTRY(202),
+	I40E_PTT_UNUSED_ENTRY(203),
+	I40E_PTT_UNUSED_ENTRY(204),
+	I40E_PTT_UNUSED_ENTRY(205),
+	I40E_PTT_UNUSED_ENTRY(206),
+	I40E_PTT_UNUSED_ENTRY(207),
+	I40E_PTT_UNUSED_ENTRY(208),
+	I40E_PTT_UNUSED_ENTRY(209),
+
+	I40E_PTT_UNUSED_ENTRY(210),
+	I40E_PTT_UNUSED_ENTRY(211),
+	I40E_PTT_UNUSED_ENTRY(212),
+	I40E_PTT_UNUSED_ENTRY(213),
+	I40E_PTT_UNUSED_ENTRY(214),
+	I40E_PTT_UNUSED_ENTRY(215),
+	I40E_PTT_UNUSED_ENTRY(216),
+	I40E_PTT_UNUSED_ENTRY(217),
+	I40E_PTT_UNUSED_ENTRY(218),
+	I40E_PTT_UNUSED_ENTRY(219),
+
+	I40E_PTT_UNUSED_ENTRY(220),
+	I40E_PTT_UNUSED_ENTRY(221),
+	I40E_PTT_UNUSED_ENTRY(222),
+	I40E_PTT_UNUSED_ENTRY(223),
+	I40E_PTT_UNUSED_ENTRY(224),
+	I40E_PTT_UNUSED_ENTRY(225),
+	I40E_PTT_UNUSED_ENTRY(226),
+	I40E_PTT_UNUSED_ENTRY(227),
+	I40E_PTT_UNUSED_ENTRY(228),
+	I40E_PTT_UNUSED_ENTRY(229),
+
+	I40E_PTT_UNUSED_ENTRY(230),
+	I40E_PTT_UNUSED_ENTRY(231),
+	I40E_PTT_UNUSED_ENTRY(232),
+	I40E_PTT_UNUSED_ENTRY(233),
+	I40E_PTT_UNUSED_ENTRY(234),
+	I40E_PTT_UNUSED_ENTRY(235),
+	I40E_PTT_UNUSED_ENTRY(236),
+	I40E_PTT_UNUSED_ENTRY(237),
+	I40E_PTT_UNUSED_ENTRY(238),
+	I40E_PTT_UNUSED_ENTRY(239),
+
+	I40E_PTT_UNUSED_ENTRY(240),
+	I40E_PTT_UNUSED_ENTRY(241),
+	I40E_PTT_UNUSED_ENTRY(242),
+	I40E_PTT_UNUSED_ENTRY(243),
+	I40E_PTT_UNUSED_ENTRY(244),
+	I40E_PTT_UNUSED_ENTRY(245),
+	I40E_PTT_UNUSED_ENTRY(246),
+	I40E_PTT_UNUSED_ENTRY(247),
+	I40E_PTT_UNUSED_ENTRY(248),
+	I40E_PTT_UNUSED_ENTRY(249),
+
+	I40E_PTT_UNUSED_ENTRY(250),
+	I40E_PTT_UNUSED_ENTRY(251),
+	I40E_PTT_UNUSED_ENTRY(252),
+	I40E_PTT_UNUSED_ENTRY(253),
+	I40E_PTT_UNUSED_ENTRY(254),
+	I40E_PTT_UNUSED_ENTRY(255)
+};
+
+
+/**
+ * i40e_validate_mac_addr - Validate unicast MAC address
+ * @mac_addr: pointer to MAC address
+ *
+ * Tests a MAC address to ensure it is a valid Individual Address
+ **/
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	DEBUGFUNC("i40e_validate_mac_addr");
+
+	/* Broadcast addresses ARE multicast addresses
+	 * Make sure it is not a multicast address
+	 * Reject the zero address
+	 */
+	if (I40E_IS_MULTICAST(mac_addr) ||
+	    (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+	      mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0))
+		status = I40E_ERR_INVALID_MAC_ADDR;
+
+	return status;
+}
+
+/**
+ * i40e_init_shared_code - Initialize the shared code
+ * @hw: pointer to hardware structure
+ *
+ * This assigns the MAC type and PHY code and inits the NVM.
+ * Does not touch the hardware. This function must be called prior to any
+ * other function in the shared code. The i40e_hw structure should be
+ * memset to 0 prior to calling this function.  The following fields in
+ * hw structure should be filled in prior to calling this function:
+ * hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ * subsystem_vendor_id, and revision_id
+ **/
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	u32 port, ari, func_rid;
+
+	DEBUGFUNC("i40e_init_shared_code");
+
+	i40e_set_mac_type(hw);
+
+	switch (hw->mac.type) {
+	case I40E_MAC_XL710:
+#ifdef X722_SUPPORT
+	case I40E_MAC_X722:
+#endif
+		break;
+	default:
+		return I40E_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	hw->phy.get_link_info = TRUE;
+
+	/* Determine port number and PF number */
+	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
+					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
+	hw->port = (u8)port;
+	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
+						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
+	func_rid = rd32(hw, I40E_PF_FUNC_RID);
+	if (ari)
+		hw->pf_id = (u8)(func_rid & 0xff);
+	else
+		hw->pf_id = (u8)(func_rid & 0x7);
+
+	status = i40e_init_nvm(hw);
+	return status;
+}
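+
+/* Illustrative usage sketch of that pre-fill on the FreeBSD attach
+ * path ("dev" is a hypothetical device_t, "sc" a hypothetical softc,
+ * and "vaddr" the hypothetical mapped BAR address):
+ *
+ *	memset(hw, 0, sizeof(*hw));
+ *	hw->back = sc;
+ *	hw->hw_addr = vaddr;
+ *	hw->vendor_id = pci_get_vendor(dev);
+ *	hw->device_id = pci_get_device(dev);
+ *	hw->subsystem_vendor_id = pci_get_subvendor(dev);
+ *	hw->subsystem_device_id = pci_get_subdevice(dev);
+ *	hw->revision_id = pci_get_revid(dev);
+ *	status = i40e_init_shared_code(hw);
+ */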
+
+/**
+ * i40e_aq_mac_address_read - Retrieve the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: a return indicator of what addresses were added to the addr store
+ * @addrs: the requestor's mac addr store
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+static enum i40e_status_code i40e_aq_mac_address_read(struct i40e_hw *hw,
+				   u16 *flags,
+				   struct i40e_aqc_mac_address_read_data *addrs,
+				   struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_mac_address_read *cmd_data =
+		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+
+	status = i40e_asq_send_command(hw, &desc, addrs,
+				       sizeof(*addrs), cmd_details);
+	*flags = LE16_TO_CPU(cmd_data->command_flags);
+
+	return status;
+}
+
+/**
+ * i40e_aq_mac_address_write - Change the MAC addresses
+ * @hw: pointer to the hw struct
+ * @flags: indicates which MAC to be written
+ * @mac_addr: address to write
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+				    u16 flags, u8 *mac_addr,
+				    struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_mac_address_write *cmd_data =
+		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_mac_address_write);
+	cmd_data->command_flags = CPU_TO_LE16(flags);
+	cmd_data->mac_sah = CPU_TO_LE16((u16)mac_addr[0] << 8 | mac_addr[1]);
+	cmd_data->mac_sal = CPU_TO_LE32(((u32)mac_addr[2] << 24) |
+					((u32)mac_addr[3] << 16) |
+					((u32)mac_addr[4] << 8) |
+					mac_addr[5]);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_get_mac_addr - get MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to MAC address
+ *
+ * Reads the adapter's MAC address from register
+ **/
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+	struct i40e_aqc_mac_address_read_data addrs;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+
+	if (flags & I40E_AQC_LAN_ADDR_VALID)
+		memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+
+	return status;
+}
+
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+	struct i40e_aqc_mac_address_read_data addrs;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+	if (status)
+		return status;
+
+	if (flags & I40E_AQC_PORT_ADDR_VALID)
+		memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+	else
+		status = I40E_ERR_INVALID_MAC_ADDR;
+
+	return status;
+}
+
+/**
+ * i40e_pre_tx_queue_cfg - pre tx queue configure
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles hw requirement to indicate intention to enable
+ * or disable target queue.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+	u32 reg_block = 0;
+	u32 reg_val;
+
+	if (abs_queue_idx >= 128) {
+		reg_block = abs_queue_idx / 128;
+		abs_queue_idx %= 128;
+	}
+
+	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+	if (enable)
+		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+	else
+		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
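+
+/* Worked example of the indexing above: with base_queue 0 and queue
+ * 200, reg_block = 200 / 128 = 1 and abs_queue_idx = 200 % 128 = 72,
+ * so QDIS register block 1 is programmed with queue index 72.
+ */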
+
+/**
+ *  i40e_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+					    u32 pba_num_size)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+	u16 pba_word = 0;
+	u16 pba_size = 0;
+	u16 pba_ptr = 0;
+	u16 i = 0;
+
+	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
+	if (status != I40E_SUCCESS) {
+		DEBUGOUT("Failed to read PBA flags.\n");
+		return status;
+	}
+	if (pba_word != 0xFAFA) {
+		DEBUGOUT1("PBA flags word 0x%04x is invalid.\n", pba_word);
+		return I40E_ERR_NVM;
+	}
+
+	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
+	if (status != I40E_SUCCESS) {
+		DEBUGOUT("Failed to read PBA Block pointer.\n");
+		return status;
+	}
+
+	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
+	if (status != I40E_SUCCESS) {
+		DEBUGOUT("Failed to read PBA Block size.\n");
+		return status;
+	}
+
+	/* Subtract one to get PBA word count (PBA Size word is included in
+	 * total size)
+	 */
+	pba_size--;
+	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+		DEBUGOUT("Buffer to small for PBA data.\n");
+		return I40E_ERR_PARAM;
+	}
+
+	for (i = 0; i < pba_size; i++) {
+		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+		if (status != I40E_SUCCESS) {
+			DEBUGOUT1("Failed to read PBA Block word %d.\n", i);
+			return status;
+		}
+
+		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+		pba_num[(i * 2) + 1] = pba_word & 0xFF;
+	}
+	pba_num[(pba_size * 2)] = '\0';
+
+	return status;
+}
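+
+/*
+ * Example usage (sketch): the output buffer must hold two characters per
+ * PBA data word plus a terminating NUL, so a fixed-size buffer like the
+ * one below covers typical part number lengths.
+ *
+ *	u8 pba[32];
+ *
+ *	if (i40e_read_pba_string(hw, pba, sizeof(pba)) == I40E_SUCCESS)
+ *		DEBUGOUT1("PBA: %s\n", pba);
+ */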
+
+/**
+ * i40e_get_media_type - Gets media type
+ * @hw: pointer to the hardware structure
+ **/
+static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
+{
+	enum i40e_media_type media;
+
+	switch (hw->phy.link_info.phy_type) {
+	case I40E_PHY_TYPE_10GBASE_SR:
+	case I40E_PHY_TYPE_10GBASE_LR:
+	case I40E_PHY_TYPE_1000BASE_SX:
+	case I40E_PHY_TYPE_1000BASE_LX:
+	case I40E_PHY_TYPE_40GBASE_SR4:
+	case I40E_PHY_TYPE_40GBASE_LR4:
+		media = I40E_MEDIA_TYPE_FIBER;
+		break;
+	case I40E_PHY_TYPE_100BASE_TX:
+	case I40E_PHY_TYPE_1000BASE_T:
+	case I40E_PHY_TYPE_10GBASE_T:
+		media = I40E_MEDIA_TYPE_BASET;
+		break;
+	case I40E_PHY_TYPE_10GBASE_CR1_CU:
+	case I40E_PHY_TYPE_40GBASE_CR4_CU:
+	case I40E_PHY_TYPE_10GBASE_CR1:
+	case I40E_PHY_TYPE_40GBASE_CR4:
+	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+	case I40E_PHY_TYPE_40GBASE_AOC:
+	case I40E_PHY_TYPE_10GBASE_AOC:
+		media = I40E_MEDIA_TYPE_DA;
+		break;
+	case I40E_PHY_TYPE_1000BASE_KX:
+	case I40E_PHY_TYPE_10GBASE_KX4:
+	case I40E_PHY_TYPE_10GBASE_KR:
+	case I40E_PHY_TYPE_40GBASE_KR4:
+	case I40E_PHY_TYPE_20GBASE_KR2:
+		media = I40E_MEDIA_TYPE_BACKPLANE;
+		break;
+	case I40E_PHY_TYPE_SGMII:
+	case I40E_PHY_TYPE_XAUI:
+	case I40E_PHY_TYPE_XFI:
+	case I40E_PHY_TYPE_XLAUI:
+	case I40E_PHY_TYPE_XLPPI:
+	default:
+		media = I40E_MEDIA_TYPE_UNKNOWN;
+		break;
+	}
+
+	return media;
+}
+
+#define I40E_PF_RESET_WAIT_COUNT	200
+/**
+ * i40e_pf_reset - Reset the PF
+ * @hw: pointer to the hardware structure
+ *
+ * Assuming someone else has triggered a global reset,
+ * assure the global reset is complete and then reset the PF
+ **/
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
+{
+	u32 cnt = 0;
+	u32 cnt1 = 0;
+	u32 reg = 0;
+	u32 grst_del;
+
+	/* Poll for Global Reset steady state in case of recent GRST.
+	 * The grst delay value is in 100ms units, and we'll wait a
+	 * couple counts longer to be sure we don't just miss the end.
+	 */
+	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+			I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+			I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+	for (cnt = 0; cnt < grst_del + 10; cnt++) {
+		reg = rd32(hw, I40E_GLGEN_RSTAT);
+		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+			break;
+		i40e_msec_delay(100);
+	}
+	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+		DEBUGOUT("Global reset polling failed to complete.\n");
+		return I40E_ERR_RESET_FAILED;
+	}
+
+	/* Now Wait for the FW to be ready */
+	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
+		reg = rd32(hw, I40E_GLNVM_ULD);
+		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
+		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
+			DEBUGOUT1("Core and Global modules ready %d\n", cnt1);
+			break;
+		}
+		i40e_msec_delay(10);
+	}
+	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
+		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
+		DEBUGOUT("wait for FW Reset complete timedout\n");
+		DEBUGOUT1("I40E_GLNVM_ULD = 0x%x\n", reg);
+		return I40E_ERR_RESET_FAILED;
+	}
+
+	/* If there was a Global Reset in progress when we got here,
+	 * we don't need to do the PF Reset
+	 */
+	if (!cnt) {
+		reg = rd32(hw, I40E_PFGEN_CTRL);
+		wr32(hw, I40E_PFGEN_CTRL,
+		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
+		for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
+			reg = rd32(hw, I40E_PFGEN_CTRL);
+			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
+				break;
+			i40e_msec_delay(1);
+		}
+		if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+			DEBUGOUT("PF reset polling failed to complete.\n");
+			return I40E_ERR_RESET_FAILED;
+		}
+	}
+
+	i40e_clear_pxe_mode(hw);
+
+	return I40E_SUCCESS;
+}
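+
+/*
+ * Example usage (sketch): a PF driver would typically issue this reset
+ * early in attach, before any admin queue traffic; the hypothetical
+ * error path below is the caller's choice.
+ *
+ *	if (i40e_pf_reset(hw) != I40E_SUCCESS)
+ *		return;	// abort attach; device is not usable
+ */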
+
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+	u32 num_queues, base_queue;
+	u32 num_pf_int;
+	u32 num_vf_int;
+	u32 num_vfs;
+	u32 i, j;
+	u32 val;
+	u32 eol = 0x7ff;
+
+	/* get number of interrupts, queues, and vfs */
+	val = rd32(hw, I40E_GLPCI_CNF2);
+	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+			I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+			I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+	val = rd32(hw, I40E_PFLAN_QALLOC);
+	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+			I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+			I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+		num_queues = (j - base_queue) + 1;
+	else
+		num_queues = 0;
+
+	val = rd32(hw, I40E_PF_VT_PFALLOC);
+	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+			I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+			I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+		num_vfs = (j - i) + 1;
+	else
+		num_vfs = 0;
+
+	/* stop all the interrupts */
+	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+	for (i = 0; i < num_pf_int - 2; i++)
+		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+	wr32(hw, I40E_PFINT_LNKLST0, val);
+	for (i = 0; i < num_pf_int - 2; i++)
+		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+	for (i = 0; i < num_vfs; i++)
+		wr32(hw, I40E_VPINT_LNKLST0(i), val);
+	for (i = 0; i < num_vf_int - 2; i++)
+		wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+	/* warn the HW of the coming Tx disables */
+	for (i = 0; i < num_queues; i++) {
+		u32 abs_queue_idx = base_queue + i;
+		u32 reg_block = 0;
+
+		if (abs_queue_idx >= 128) {
+			reg_block = abs_queue_idx / 128;
+			abs_queue_idx %= 128;
+		}
+
+		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+	}
+	i40e_usec_delay(400);
+
+	/* stop all the queues */
+	for (i = 0; i < num_queues; i++) {
+		wr32(hw, I40E_QINT_TQCTL(i), 0);
+		wr32(hw, I40E_QTX_ENA(i), 0);
+		wr32(hw, I40E_QINT_RQCTL(i), 0);
+		wr32(hw, I40E_QRX_ENA(i), 0);
+	}
+
+	/* short wait for all queue disables to settle */
+	i40e_usec_delay(50);
+}
+
+/**
+ * i40e_clear_pxe_mode - clear pxe operations mode
+ * @hw: pointer to the hw struct
+ *
+ * Make sure all PXE mode settings are cleared, including things
+ * like descriptor fetch/write-back mode.
+ **/
+void i40e_clear_pxe_mode(struct i40e_hw *hw)
+{
+	if (i40e_check_asq_alive(hw))
+		i40e_aq_clear_pxe_mode(hw, NULL);
+}
+
+/**
+ * i40e_led_is_mine - helper to find matching led
+ * @hw: pointer to the hw struct
+ * @idx: index into GPIO registers
+ *
+ * returns: 0 if no match, otherwise the value of the GPIO_CTL register
+ **/
+static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
+{
+	u32 gpio_val = 0;
+	u32 port;
+
+	if (!hw->func_caps.led[idx])
+		return 0;
+
+	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
+	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
+		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
+
+	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
+	 * if it is not our port then ignore
+	 */
+	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
+	    (port != hw->port))
+		return 0;
+
+	return gpio_val;
+}
+
+#define I40E_COMBINED_ACTIVITY 0xA
+#define I40E_FILTER_ACTIVITY 0xE
+#define I40E_LINK_ACTIVITY 0xC
+#define I40E_MAC_ACTIVITY 0xD
+#define I40E_LED0 22
+
+/**
+ * i40e_led_get - return current on/off mode
+ * @hw: pointer to the hw struct
+ *
+ * The value returned is the 'mode' field as defined in the
+ * GPIO register definitions: 0x0 = off, 0xf = on, and other
+ * values are variations of possible behaviors relating to
+ * blink, link, and wire.
+ **/
+u32 i40e_led_get(struct i40e_hw *hw)
+{
+	u32 current_mode = 0;
+	u32 mode = 0;
+	int i;
+
+	/* as per the documentation GPIO 22-29 are the LED
+	 * GPIO pins named LED0..LED7
+	 */
+	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+		u32 gpio_val = i40e_led_is_mine(hw, i);
+
+		if (!gpio_val)
+			continue;
+
+		/* ignore gpio LED src mode entries related to the activity
+		 * LEDs
+		 */
+		current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+				>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+		switch (current_mode) {
+		case I40E_COMBINED_ACTIVITY:
+		case I40E_FILTER_ACTIVITY:
+		case I40E_MAC_ACTIVITY:
+			continue;
+		default:
+			break;
+		}
+
+		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
+			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
+		break;
+	}
+
+	return mode;
+}
+
+/**
+ * i40e_led_set - set new on/off mode
+ * @hw: pointer to the hw struct
+ * @mode: 0=off, 0xf=on (else see manual for mode details)
+ * @blink: TRUE if the LED should blink when on, FALSE if steady
+ *
+ * if this function is used to turn on the blink it should
+ * be used to disable the blink when restoring the original state.
+ **/
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
+{
+	u32 current_mode = 0;
+	int i;
+
+	if (mode & 0xfffffff0)
+		DEBUGOUT1("invalid mode passed in %X\n", mode);
+
+	/* as per the documentation GPIO 22-29 are the LED
+	 * GPIO pins named LED0..LED7
+	 */
+	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
+		u32 gpio_val = i40e_led_is_mine(hw, i);
+
+		if (!gpio_val)
+			continue;
+
+		/* ignore gpio LED src mode entries related to the activity
+		 * LEDs
+		 */
+		current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
+				>> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
+		switch (current_mode) {
+		case I40E_COMBINED_ACTIVITY:
+		case I40E_FILTER_ACTIVITY:
+		case I40E_MAC_ACTIVITY:
+			continue;
+		default:
+			break;
+		}
+
+		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
+		/* this & is a bit of paranoia, but serves as a range check */
+		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
+			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);
+
+		if (mode == I40E_LINK_ACTIVITY)
+			blink = FALSE;
+
+		if (blink)
+			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+		else
+			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+
+		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
+		break;
+	}
+}
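+
+/*
+ * Example usage (sketch): blink the port LED to identify an adapter and
+ * restore the original mode afterwards.
+ *
+ *	u32 led_mode = i40e_led_get(hw);
+ *
+ *	i40e_led_set(hw, 0xf, TRUE);		// on, blinking
+ *	// ... wait while the user locates the port ...
+ *	i40e_led_set(hw, led_mode, FALSE);	// restore saved mode
+ */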
+
+/* Admin command wrappers */
+
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @abilities: structure for PHY capabilities to be filled
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+			bool qualified_modules, bool report_init,
+			struct i40e_aq_get_phy_abilities_resp *abilities,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+	if (!abilities)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_phy_abilities);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (abilities_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	if (qualified_modules)
+		desc.params.external.param0 |=
+			CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+	if (report_init)
+		desc.params.external.param0 |=
+			CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+	status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+				    cmd_details);
+
+	if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+		status = I40E_ERR_UNKNOWN_PHY;
+
+	return status;
+}
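+
+/*
+ * Example usage (sketch): query the currently active PHY abilities;
+ * both qualified-module reporting and init-time values are off here.
+ *
+ *	struct i40e_aq_get_phy_abilities_resp abilities;
+ *
+ *	if (i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+ *	    NULL) == I40E_SUCCESS)
+ *		DEBUGOUT1("phy_type mask 0x%x\n", abilities.phy_type);
+ */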
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+				struct i40e_aq_set_phy_config *config,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aq_set_phy_config *cmd =
+		(struct i40e_aq_set_phy_config *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (!config)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_phy_config);
+
+	*cmd = *config;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return AdminQ failure information
+ * @atomic_restart: whether to automatically restart the link
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+				  bool atomic_restart)
+{
+	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	struct i40e_aq_set_phy_config config;
+	enum i40e_status_code status;
+	u8 pause_mask = 0x0;
+
+	*aq_failures = 0x0;
+
+	switch (fc_mode) {
+	case I40E_FC_FULL:
+		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+		break;
+	case I40E_FC_RX_PAUSE:
+		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+		break;
+	case I40E_FC_TX_PAUSE:
+		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+		break;
+	default:
+		break;
+	}
+
+	/* Get the current phy config */
+	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+					      NULL);
+	if (status) {
+		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+		return status;
+	}
+
+	memset(&config, 0, sizeof(config));
+	/* clear the old pause settings */
+	config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+	/* set the new abilities */
+	config.abilities |= pause_mask;
+	/* If the abilities have changed, then set the new config */
+	if (config.abilities != abilities.abilities) {
+		/* Auto restart link so settings take effect */
+		if (atomic_restart)
+			config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+		/* Copy over all the old settings */
+		config.phy_type = abilities.phy_type;
+		config.link_speed = abilities.link_speed;
+		config.eee_capability = abilities.eee_capability;
+		config.eeer = abilities.eeer_val;
+		config.low_power_ctrl = abilities.d3_lpan;
+		status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+		if (status)
+			*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+	}
+	/* Update the link info */
+	status = i40e_update_link_info(hw);
+	if (status) {
+		/* Wait a little bit (on 40G cards it sometimes takes a really
+		 * long time for link to come back from the atomic reset)
+		 * and try once more
+		 */
+		i40e_msec_delay(1000);
+		status = i40e_update_link_info(hw);
+	}
+	if (status)
+		*aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+	return status;
+}
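+
+/*
+ * Example usage (sketch): request full flow control and let i40e_set_fc
+ * restart the link so the new setting takes effect.
+ *
+ *	u8 aq_failures = 0;
+ *
+ *	hw->fc.requested_mode = I40E_FC_FULL;
+ *	if (i40e_set_fc(hw, &aq_failures, TRUE) != I40E_SUCCESS &&
+ *	    (aq_failures & I40E_SET_FC_AQ_FAIL_SET))
+ *		DEBUGOUT("set_phy_config for flow control failed\n");
+ */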
+
+/**
+ * i40e_aq_set_mac_config
+ * @hw: pointer to the hw struct
+ * @max_frame_size: Maximum Frame Size to be supported by the port
+ * @crc_en: Tell HW to append a CRC to outgoing frames
+ * @pacing: Pacing configurations
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Configure MAC settings for frame size, jumbo frame support and the
+ * addition of a CRC by the hardware.
+ **/
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+				u16 max_frame_size,
+				bool crc_en, u16 pacing,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aq_set_mac_config *cmd =
+		(struct i40e_aq_set_mac_config *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (max_frame_size == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_mac_config);
+
+	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
+	cmd->params = ((u8)pacing & 0x0F) << 3;
+	if (crc_en)
+		cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	enum i40e_status_code status;
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_clear_pxe *cmd =
+		(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_clear_pxe_mode);
+
+	cmd->rx_cnt = 0x2;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_link_restart_an
+ * @hw: pointer to the hw struct
+ * @enable_link: if TRUE: enable link, if FALSE: disable link
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets up the link and restarts the Auto-Negotiation over the link.
+ **/
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+		bool enable_link, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_link_restart_an *cmd =
+		(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_link_restart_an);
+
+	cmd->command = I40E_AQ_PHY_RESTART_AN;
+	if (enable_link)
+		cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+	else
+		cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the link status of the adapter.
+ **/
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+				bool enable_lse, struct i40e_link_status *link,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_link_status *resp =
+		(struct i40e_aqc_get_link_status *)&desc.params.raw;
+	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+	enum i40e_status_code status;
+	bool tx_pause, rx_pause;
+	u16 command_flags;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
+
+	if (enable_lse)
+		command_flags = I40E_AQ_LSE_ENABLE;
+	else
+		command_flags = I40E_AQ_LSE_DISABLE;
+	resp->command_flags = CPU_TO_LE16(command_flags);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status != I40E_SUCCESS)
+		goto aq_get_link_info_exit;
+
+	/* save off old link status information */
+	i40e_memcpy(&hw->phy.link_info_old, hw_link_info,
+		    sizeof(*hw_link_info), I40E_NONDMA_TO_NONDMA);
+
+	/* update link status */
+	hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type;
+	hw->phy.media_type = i40e_get_media_type(hw);
+	hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
+	hw_link_info->link_info = resp->link_info;
+	hw_link_info->an_info = resp->an_info;
+	hw_link_info->ext_info = resp->ext_info;
+	hw_link_info->loopback = resp->loopback;
+	hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
+	hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+	/* update fc info */
+	tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+	rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+	if (tx_pause && rx_pause)
+		hw->fc.current_mode = I40E_FC_FULL;
+	else if (tx_pause)
+		hw->fc.current_mode = I40E_FC_TX_PAUSE;
+	else if (rx_pause)
+		hw->fc.current_mode = I40E_FC_RX_PAUSE;
+	else
+		hw->fc.current_mode = I40E_FC_NONE;
+
+	if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+		hw_link_info->crc_enable = TRUE;
+	else
+		hw_link_info->crc_enable = FALSE;
+
+	if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+		hw_link_info->lse_enable = TRUE;
+	else
+		hw_link_info->lse_enable = FALSE;
+
+	if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+	     hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
+		hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
+
+	/* save link status information */
+	if (link)
+		i40e_memcpy(link, hw_link_info, sizeof(*hw_link_info),
+			    I40E_NONDMA_TO_NONDMA);
+
+	/* flag cleared so helper functions don't call AQ again */
+	hw->phy.get_link_info = FALSE;
+
+aq_get_link_info_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_set_phy_int_mask
+ * @hw: pointer to the hw struct
+ * @mask: interrupt mask to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set link interrupt mask.
+ **/
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+				u16 mask,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_phy_int_mask *cmd =
+		(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_phy_int_mask);
+
+	cmd->event_mask = CPU_TO_LE16(mask);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+				u64 *advt_reg,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_an_advt_reg *resp =
+		(struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_local_advt_reg);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status != I40E_SUCCESS)
+		goto aq_get_local_advt_reg_exit;
+
+	*advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+	*advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_local_advt_reg_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_set_local_advt_reg
+ * @hw: pointer to the hw struct
+ * @advt_reg: local AN advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the Local AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+				u64 advt_reg,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_an_advt_reg *cmd =
+		(struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_local_advt_reg);
+
+	cmd->local_an_reg0 = CPU_TO_LE32(I40E_LO_DWORD(advt_reg));
+	cmd->local_an_reg1 = CPU_TO_LE16(I40E_HI_DWORD(advt_reg));
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_partner_advt
+ * @hw: pointer to the hw struct
+ * @advt_reg: AN partner advertisement register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the link partner AN advertisement register value.
+ **/
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+				u64 *advt_reg,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_an_advt_reg *resp =
+		(struct i40e_aqc_an_advt_reg *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_partner_advt);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status != I40E_SUCCESS)
+		goto aq_get_partner_advt_exit;
+
+	*advt_reg = (u64)(LE16_TO_CPU(resp->local_an_reg1)) << 32;
+	*advt_reg |= LE32_TO_CPU(resp->local_an_reg0);
+
+aq_get_partner_advt_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_set_lb_modes
+ * @hw: pointer to the hw struct
+ * @lb_modes: loopback mode to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Sets loopback modes.
+ **/
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw,
+				u16 lb_modes,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_lb_mode *cmd =
+		(struct i40e_aqc_set_lb_mode *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_lb_modes);
+
+	cmd->lb_mode = CPU_TO_LE16(lb_modes);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_phy_debug
+ * @hw: pointer to the hw struct
+ * @cmd_flags: debug command flags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Reset the external PHY.
+ **/
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_phy_debug *cmd =
+		(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_phy_debug);
+
+	cmd->command_flags = cmd_flags;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_vsi
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add a VSI context to the hardware.
+**/
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_vsi);
+
+	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->uplink_seid);
+	cmd->connection_type = vsi_ctx->connection_type;
+	cmd->vf_id = vsi_ctx->vf_num;
+	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+				    sizeof(vsi_ctx->info), cmd_details);
+
+	if (status != I40E_SUCCESS)
+		goto aq_add_vsi_exit;
+
+	vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+	vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+	vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_add_vsi_exit:
+	return status;
+}
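+
+/*
+ * Example usage (sketch): add a PF LAN VSI under an uplink SEID that the
+ * caller discovered from the switch configuration; uplink_seid here is a
+ * placeholder.
+ *
+ *	struct i40e_vsi_context ctx;
+ *
+ *	memset(&ctx, 0, sizeof(ctx));
+ *	ctx.uplink_seid = uplink_seid;
+ *	ctx.connection_type = 0x1;	// regular data port
+ *	ctx.flags = I40E_AQ_VSI_TYPE_PF;
+ *	if (i40e_aq_add_vsi(hw, &ctx, NULL) == I40E_SUCCESS)
+ *		DEBUGOUT1("new VSI seid %d\n", ctx.seid);
+ */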
+
+/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
+				u16 seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)
+		&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	cmd->promiscuous_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+	cmd->seid = CPU_TO_LE16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_unicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set unicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+				u16 seid, bool set,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set)
+		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+
+	cmd->seid = CPU_TO_LE16(seid);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_multicast_promiscuous
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set: set multicast promiscuous enable/disable
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+				u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set)
+		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+
+	cmd->seid = CPU_TO_LE16(seid);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (enable)
+		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 flags = 0;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (enable)
+		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+	cmd->promiscuous_flags = CPU_TO_LE16(flags);
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_vsi_broadcast
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @set_filter: TRUE to set filter, FALSE to clear filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear the broadcast promiscuous flag (filter) for a given VSI.
+ **/
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+				u16 seid, bool set_filter,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+	if (set_filter)
+		cmd->promiscuous_flags
+			    |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	else
+		cmd->promiscuous_flags
+			    &= CPU_TO_LE16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+	cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+	cmd->seid = CPU_TO_LE16(seid);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
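+
+/*
+ * Example usage (sketch): mirror a promiscuous-mode toggle onto a VSI by
+ * setting unicast and multicast promiscuous together; seid and promisc
+ * are supplied by the caller.
+ *
+ *	i40e_aq_set_vsi_unicast_promiscuous(hw, seid, promisc, NULL);
+ *	i40e_aq_set_vsi_multicast_promiscuous(hw, seid, promisc, NULL);
+ */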
+
+/**
+ * i40e_aq_get_vsi_params - get VSI configuration info
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	struct i40e_aqc_add_get_update_vsi_completion *resp =
+		(struct i40e_aqc_add_get_update_vsi_completion *)
+		&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_vsi_parameters);
+
+	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+				    sizeof(vsi_ctx->info), cmd_details);
+
+	if (status != I40E_SUCCESS)
+		goto aq_get_vsi_params_exit;
+
+	vsi_ctx->seid = LE16_TO_CPU(resp->seid);
+	vsi_ctx->vsi_number = LE16_TO_CPU(resp->vsi_number);
+	vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
+aq_get_vsi_params_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_update_vsi_params
+ * @hw: pointer to the hw struct
+ * @vsi_ctx: pointer to a vsi context struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update a VSI context.
+ **/
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_get_update_vsi *cmd =
+		(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_update_vsi_parameters);
+	cmd->uplink_seid = CPU_TO_LE16(vsi_ctx->seid);
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+
+	status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
+				    sizeof(vsi_ctx->info), cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_switch_config
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the result buffer
+ * @buf_size: length of input buffer
+ * @start_seid: seid to start for the report, 0 == beginning
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Fill the buf with switch configuration returned from AdminQ command
+ **/
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+				struct i40e_aqc_get_switch_config_resp *buf,
+				u16 buf_size, u16 *start_seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_switch_seid *scfg =
+		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_switch_config);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	scfg->seid = CPU_TO_LE16(*start_seid);
+
+	status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details);
+	*start_seid = LE16_TO_CPU(scfg->seid);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_firmware_version
+ * @hw: pointer to the hw struct
+ * @fw_major_version: firmware major version
+ * @fw_minor_version: firmware minor version
+ * @fw_build: firmware build number
+ * @api_major_version: admin queue API major version
+ * @api_minor_version: admin queue API minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the firmware version from the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+				u16 *fw_major_version, u16 *fw_minor_version,
+				u32 *fw_build,
+				u16 *api_major_version, u16 *api_minor_version,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_version *resp =
+		(struct i40e_aqc_get_version *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status == I40E_SUCCESS) {
+		if (fw_major_version != NULL)
+			*fw_major_version = LE16_TO_CPU(resp->fw_major);
+		if (fw_minor_version != NULL)
+			*fw_minor_version = LE16_TO_CPU(resp->fw_minor);
+		if (fw_build != NULL)
+			*fw_build = LE32_TO_CPU(resp->fw_build);
+		if (api_major_version != NULL)
+			*api_major_version = LE16_TO_CPU(resp->api_major);
+		if (api_minor_version != NULL)
+			*api_minor_version = LE16_TO_CPU(resp->api_minor);
+
+		/* A workaround to fix the API version in SW */
+		if (api_major_version && api_minor_version &&
+		    fw_major_version && fw_minor_version &&
+		    ((*api_major_version == 1) && (*api_minor_version == 1)) &&
+		    (((*fw_major_version == 4) && (*fw_minor_version >= 2)) ||
+		     (*fw_major_version > 4)))
+			*api_minor_version = 2;
+	}
+
+	return status;
+}
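+
+/*
+ * Example usage (sketch): fetch the firmware and AQ API versions; any
+ * output pointer the caller does not need may be NULL.
+ *
+ *	u16 fw_maj, fw_min, api_maj, api_min;
+ *	u32 fw_build;
+ *
+ *	if (i40e_aq_get_firmware_version(hw, &fw_maj, &fw_min, &fw_build,
+ *	    &api_maj, &api_min, NULL) == I40E_SUCCESS)
+ *		DEBUGOUT2("fw %d.%d\n", fw_maj, fw_min);
+ */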
+
+/**
+ * i40e_aq_send_driver_version
+ * @hw: pointer to the hw struct
+ * @dv: driver's major, minor version
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Send the driver version to the firmware
+ **/
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+				struct i40e_driver_version *dv,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_driver_version *cmd =
+		(struct i40e_aqc_driver_version *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 len;
+
+	if (dv == NULL)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version);
+
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+	cmd->driver_major_ver = dv->major_version;
+	cmd->driver_minor_ver = dv->minor_version;
+	cmd->driver_build_ver = dv->build_version;
+	cmd->driver_subbuild_ver = dv->subbuild_version;
+
+	len = 0;
+	while (len < sizeof(dv->driver_string) &&
+	       (dv->driver_string[len] < 0x80) &&
+	       dv->driver_string[len])
+		len++;
+	status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+				       len, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_get_link_status - get status of the HW network link
+ * @hw: pointer to the hw struct
+ * @link_up: pointer to bool (TRUE/FALSE = linkup/linkdown)
+ *
+ * Variable link_up is set TRUE if link is up, FALSE if link is down.
+ * The value of link_up is invalid if the returned status != I40E_SUCCESS.
+ *
+ * Side effect: LinkStatusEvent reporting becomes enabled
+ **/
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+{
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	if (hw->phy.get_link_info) {
+		status = i40e_update_link_info(hw);
+
+		if (status != I40E_SUCCESS)
+			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
+				   status);
+	}
+
+	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+
+	return status;
+}
+
+/**
+ * i40e_update_link_info - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
+{
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+	if (status)
+		return status;
+
+	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+					      NULL);
+	if (status)
+		return status;
+
+	memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+		sizeof(hw->phy.link_info.module_type));
+
+	return status;
+}
+
+/**
+ * i40e_get_link_speed
+ * @hw: pointer to the hw struct
+ *
+ * Returns the link speed of the adapter.
+ **/
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw)
+{
+	enum i40e_aq_link_speed speed = I40E_LINK_SPEED_UNKNOWN;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	if (hw->phy.get_link_info) {
+		status = i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+
+		if (status != I40E_SUCCESS)
+			goto i40e_link_speed_exit;
+	}
+
+	speed = hw->phy.link_info.link_speed;
+
+i40e_link_speed_exit:
+	return speed;
+}
+
+/**
+ * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
+ * @hw: pointer to the hw struct
+ * @uplink_seid: the MAC or other gizmo SEID
+ * @downlink_seid: the VSI SEID
+ * @enabled_tc: bitmap of TCs to be enabled
+ * @default_port: TRUE for default port VSI, FALSE for control port
+ * @enable_l2_filtering: TRUE to add L2 filter table rules to regular
+ *			 forwarding rules for cloud support
+ * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This asks the FW to add a VEB between the uplink and downlink
+ * elements.  If the uplink SEID is 0, this will be a floating VEB.
+ **/
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+				u16 downlink_seid, u8 enabled_tc,
+				bool default_port, bool enable_l2_filtering,
+				u16 *veb_seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_veb *cmd =
+		(struct i40e_aqc_add_veb *)&desc.params.raw;
+	struct i40e_aqc_add_veb_completion *resp =
+		(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 veb_flags = 0;
+
+	/* SEIDs need to either both be set or both be 0 for floating VEB */
+	if (!!uplink_seid != !!downlink_seid)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb);
+
+	cmd->uplink_seid = CPU_TO_LE16(uplink_seid);
+	cmd->downlink_seid = CPU_TO_LE16(downlink_seid);
+	cmd->enable_tcs = enabled_tc;
+	if (!uplink_seid)
+		veb_flags |= I40E_AQC_ADD_VEB_FLOATING;
+	if (default_port)
+		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT;
+	else
+		veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
+
+	if (enable_l2_filtering)
+		veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+
+	cmd->veb_flags = CPU_TO_LE16(veb_flags);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && veb_seid)
+		*veb_seid = LE16_TO_CPU(resp->veb_seid);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_veb_parameters - Retrieve VEB parameters
+ * @hw: pointer to the hw struct
+ * @veb_seid: the SEID of the VEB to query
+ * @switch_id: the uplink switch id
+ * @floating: set to TRUE if the VEB is floating
+ * @statistic_index: index of the stats counter block for this VEB
+ * @vebs_used: number of VEBs used by the function
+ * @vebs_free: total VEBs not reserved by any function
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This retrieves the parameters for a particular VEB, specified by
+ * uplink_seid, and returns them to the caller.
+ **/
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+				u16 veb_seid, u16 *switch_id,
+				bool *floating, u16 *statistic_index,
+				u16 *vebs_used, u16 *vebs_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
+		(struct i40e_aqc_get_veb_parameters_completion *)
+		&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (veb_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_get_veb_parameters);
+	cmd_resp->seid = CPU_TO_LE16(veb_seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (status)
+		goto get_veb_exit;
+
+	if (switch_id)
+		*switch_id = LE16_TO_CPU(cmd_resp->switch_id);
+	if (statistic_index)
+		*statistic_index = LE16_TO_CPU(cmd_resp->statistic_index);
+	if (vebs_used)
+		*vebs_used = LE16_TO_CPU(cmd_resp->vebs_used);
+	if (vebs_free)
+		*vebs_free = LE16_TO_CPU(cmd_resp->vebs_free);
+	if (floating) {
+		u16 flags = LE16_TO_CPU(cmd_resp->veb_flags);
+
+		if (flags & I40E_AQC_ADD_VEB_FLOATING)
+			*floating = TRUE;
+		else
+			*floating = FALSE;
+	}
+
+get_veb_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_add_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add MAC/VLAN addresses to the HW filtering
+ **/
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_add_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(*mv_list);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+				    cmd_details);
+
+	return status;
+}
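+
+/*
+ * Example usage (sketch): add a single perfect-match MAC filter that
+ * ignores the VLAN field; mac and vsi_seid are supplied by the caller.
+ *
+ *	struct i40e_aqc_add_macvlan_element_data e;
+ *
+ *	memset(&e, 0, sizeof(e));
+ *	memcpy(e.mac_addr, mac, 6);
+ *	e.flags = CPU_TO_LE16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH |
+ *	    I40E_AQC_MACVLAN_ADD_IGNORE_VLAN);
+ *	i40e_aq_add_macvlan(hw, vsi_seid, &e, 1, NULL);
+ */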
+
+/**
+ * i40e_aq_remove_macvlan
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the mac address
+ * @mv_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Remove MAC/VLAN addresses from the HW filtering
+ **/
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !mv_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(*mv_list);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_vlan - Add VLAN ids to the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of vlan filters to be added
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !v_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(*v_list);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_vlan);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_vlan - Remove VLANs from the HW filtering
+ * @hw: pointer to the hw struct
+ * @seid: VSI for the vlan filters
+ * @v_list: list of macvlans to be removed
+ * @count: length of the list
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_macvlan *cmd =
+		(struct i40e_aqc_macvlan *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 buf_size;
+
+	if (count == 0 || !v_list || !hw)
+		return I40E_ERR_PARAM;
+
+	buf_size = count * sizeof(*v_list);
+
+	/* prep the rest of the request */
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_vlan);
+	cmd->num_addresses = CPU_TO_LE16(count);
+	cmd->seid[0] = CPU_TO_LE16(seid | I40E_AQC_MACVLAN_CMD_SEID_VALID);
+	cmd->seid[1] = 0;
+	cmd->seid[2] = 0;
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, v_list, buf_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: vf id to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * send msg to vf
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_pf_vf_message *cmd =
+		(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
+	cmd->id = CPU_TO_LE32(vfid);
+	desc.cookie_high = CPU_TO_LE32(v_opcode);
+	desc.cookie_low = CPU_TO_LE32(v_retval);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+	if (msglen) {
+		desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF |
+						I40E_AQ_FLAG_RD));
+		if (msglen > I40E_AQ_LARGE_BUF)
+			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = CPU_TO_LE16(msglen);
+	}
+	status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_debug_read_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+				u32 reg_addr, u64 *reg_val,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_debug_reg_read_write *cmd_resp =
+		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (reg_val == NULL)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg);
+
+	cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (status == I40E_SUCCESS) {
+		*reg_val = ((u64)LE32_TO_CPU(cmd_resp->value_high) << 32) |
+			   (u64)LE32_TO_CPU(cmd_resp->value_low);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+				u32 reg_addr, u64 reg_val,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_debug_reg_read_write *cmd =
+		(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+	cmd->address = CPU_TO_LE32(reg_addr);
+	cmd->value_high = CPU_TO_LE32((u32)(reg_val >> 32));
+	cmd->value_low = CPU_TO_LE32((u32)(reg_val & 0xFFFFFFFF));
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
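+
+/*
+ * Example usage (sketch): read-modify-write a register through firmware
+ * rather than by direct BAR access; reg and bit are caller-supplied.
+ *
+ *	u64 val;
+ *
+ *	if (i40e_aq_debug_read_register(hw, reg, &val, NULL) == I40E_SUCCESS) {
+ *		val |= bit;
+ *		i40e_aq_debug_write_register(hw, reg, val, NULL);
+ *	}
+ */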
+
+/**
+ * i40e_aq_get_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * query the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+				enum i40e_aq_hmc_profile *profile,
+				u8 *pe_vf_enabled_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aq_get_set_hmc_resource_profile *resp =
+		(struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_query_hmc_resource_profile);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (status != I40E_SUCCESS)
+		return status;
+
+	*profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
+		   I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
+	*pe_vf_enabled_count = resp->pe_vf_enabled &
+			       I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_hmc_resource_profile
+ * @hw: pointer to the hw struct
+ * @profile: type of profile the HMC is to be set as
+ * @pe_vf_enabled_count: the number of PE enabled VFs the system has
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * set the HMC profile of the device.
+ **/
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+				enum i40e_aq_hmc_profile profile,
+				u8 pe_vf_enabled_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aq_get_set_hmc_resource_profile *cmd =
+		(struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_set_hmc_resource_profile);
+
+	cmd->pm_profile = (u8)profile;
+	cmd->pe_vf_enabled = pe_vf_enabled_count;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_request_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Request a common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				enum i40e_aq_resource_access_type access,
+				u8 sdp_number, u64 *timeout,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_request_resource *cmd_resp =
+		(struct i40e_aqc_request_resource *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	DEBUGFUNC("i40e_aq_request_resource");
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
+
+	cmd_resp->resource_id = CPU_TO_LE16(resource);
+	cmd_resp->access_type = CPU_TO_LE16(access);
+	cmd_resp->resource_number = CPU_TO_LE32(sdp_number);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	/* The completion specifies the maximum time in ms that the driver
+	 * may hold the resource in the Timeout field.
+	 * If the resource is held by someone else, the command completes with
+	 * busy return value and the timeout field indicates the maximum time
+	 * the current owner of the resource has to free it.
+	 */
+	if (status == I40E_SUCCESS || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY)
+		*timeout = LE32_TO_CPU(cmd_resp->timeout);
+
+	return status;
+}
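+
+/*
+ * Usage sketch (illustrative only): request/release normally bracket NVM
+ * access.  On success, timeout is the hold time granted in ms; on an
+ * EBUSY completion it is how long the current owner may still hold the
+ * resource.  I40E_NVM_RESOURCE_ID and I40E_RESOURCE_READ come from the
+ * shared type definitions.
+ *
+ *	u64 timeout = 0;
+ *
+ *	if (i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID,
+ *	    I40E_RESOURCE_READ, 0, &timeout, NULL) == I40E_SUCCESS) {
+ *		... access the NVM here ...
+ *		i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ *	}
+ */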
+
+/**
+ * i40e_aq_release_resource
+ * @hw: pointer to the hw struct
+ * @resource: resource id
+ * @sdp_number: resource number
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Release a common resource using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				u8 sdp_number,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_request_resource *cmd =
+		(struct i40e_aqc_request_resource *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	DEBUGFUNC("i40e_aq_release_resource");
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
+
+	cmd->resource_id = CPU_TO_LE16(resource);
+	cmd->resource_number = CPU_TO_LE32(sdp_number);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_read_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Read the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	DEBUGFUNC("i40e_aq_read_nvm");
+
+	/* The highest byte of the offset must be zero. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_read_nvm_exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read);
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_read_nvm_exit:
+	return status;
+}
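+
+/*
+ * Usage sketch (illustrative only): reading len bytes from a module in
+ * 4 KB chunks and flagging the final command of the series.  The caller
+ * is assumed to already hold the NVM resource (see
+ * i40e_aq_request_resource above).
+ *
+ *	u32 off = 0;
+ *	u16 chunk;
+ *	enum i40e_status_code st = I40E_SUCCESS;
+ *
+ *	while (len > 0 && st == I40E_SUCCESS) {
+ *		chunk = (len > 4096) ? 4096 : (u16)len;
+ *		st = i40e_aq_read_nvm(hw, module, off, chunk,
+ *		    (u8 *)data + off, (len == chunk), NULL);
+ *		off += chunk;
+ *		len -= chunk;
+ *	}
+ */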
+
+/**
+ * i40e_aq_read_nvm_config - read an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature id
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: pointer to count of elements read by FW
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, u32 field_id, void *data,
+				u16 buf_size, u16 *element_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_config_read *cmd =
+		(struct i40e_aqc_nvm_config_read *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_read);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+	cmd->element_id = CPU_TO_LE16((u16)(0xffff & field_id));
+	if (cmd_flags & I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK)
+		cmd->element_id_msw = CPU_TO_LE16((u16)(field_id >> 16));
+	else
+		cmd->element_id_msw = 0;
+
+	status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+	if (!status && element_count)
+		*element_count = LE16_TO_CPU(cmd->element_count);
+
+	return status;
+}
+
+/**
+ * i40e_aq_write_nvm_config - write an nvm config block
+ * @hw: pointer to the hw struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @element_count: count of elements to be written
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, void *data, u16 buf_size,
+				u16 element_count,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_config_write *cmd =
+		(struct i40e_aqc_nvm_config_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_config_write);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buf_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->element_count = CPU_TO_LE16(element_count);
+	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+	status = i40e_asq_send_command(hw, &desc, data, buf_size, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_oem_post_update - triggers an OEM specific flow after update
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (currently unused by this command)
+ * @buff_size: size of the command buffer (currently unused)
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_oem_post_update);
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+	if (status && LE16_TO_CPU(desc.retval) == I40E_AQ_RC_ESRCH)
+		status = I40E_ERR_NOT_IMPLEMENTED;
+
+	return status;
+}
+
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in the module (expressed in 4 KB from module's beginning)
+ * @length: length of the section to be erased (expressed in 4 KB)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, bool last_command,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	DEBUGFUNC("i40e_aq_erase_nvm");
+
+	/* The highest byte of the offset must be zero. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_erase_nvm_exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+	return status;
+}
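+
+/*
+ * Usage sketch (illustrative only): unlike the byte-based read/update
+ * commands, erase offset and length are expressed in 4 KB sectors, so
+ * erasing the first 64 KB of a module is:
+ *
+ *	(void)i40e_aq_erase_nvm(hw, module, 0, 16, TRUE, NULL);
+ */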
+
+#define I40E_DEV_FUNC_CAP_SWITCH_MODE	0x01
+#define I40E_DEV_FUNC_CAP_MGMT_MODE	0x02
+#define I40E_DEV_FUNC_CAP_NPAR		0x03
+#define I40E_DEV_FUNC_CAP_OS2BMC	0x04
+#define I40E_DEV_FUNC_CAP_VALID_FUNC	0x05
+#define I40E_DEV_FUNC_CAP_SRIOV_1_1	0x12
+#define I40E_DEV_FUNC_CAP_VF		0x13
+#define I40E_DEV_FUNC_CAP_VMDQ		0x14
+#define I40E_DEV_FUNC_CAP_802_1_QBG	0x15
+#define I40E_DEV_FUNC_CAP_802_1_QBH	0x16
+#define I40E_DEV_FUNC_CAP_VSI		0x17
+#define I40E_DEV_FUNC_CAP_DCB		0x18
+#define I40E_DEV_FUNC_CAP_FCOE		0x21
+#define I40E_DEV_FUNC_CAP_ISCSI		0x22
+#define I40E_DEV_FUNC_CAP_RSS		0x40
+#define I40E_DEV_FUNC_CAP_RX_QUEUES	0x41
+#define I40E_DEV_FUNC_CAP_TX_QUEUES	0x42
+#define I40E_DEV_FUNC_CAP_MSIX		0x43
+#define I40E_DEV_FUNC_CAP_MSIX_VF	0x44
+#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR	0x45
+#define I40E_DEV_FUNC_CAP_IEEE_1588	0x46
+#define I40E_DEV_FUNC_CAP_FLEX10	0xF1
+#define I40E_DEV_FUNC_CAP_CEM		0xF2
+#define I40E_DEV_FUNC_CAP_IWARP		0x51
+#define I40E_DEV_FUNC_CAP_LED		0x61
+#define I40E_DEV_FUNC_CAP_SDP		0x62
+#define I40E_DEV_FUNC_CAP_MDIO		0x63
+#define I40E_DEV_FUNC_CAP_WR_CSR_PROT	0x64
+
+/**
+ * i40e_parse_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: pointer to a buffer containing device/function capability records
+ * @cap_count: number of capability records in the list
+ * @list_type_opc: type of capabilities list to parse
+ *
+ * Parse the device/function capabilities list.
+ **/
+static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
+				     u32 cap_count,
+				     enum i40e_admin_queue_opc list_type_opc)
+{
+	struct i40e_aqc_list_capabilities_element_resp *cap;
+	u32 valid_functions, num_functions;
+	u32 number, logical_id, phys_id;
+	struct i40e_hw_capabilities *p;
+	u8 major_rev;
+	u32 i = 0;
+	u16 id;
+
+	cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
+
+	if (list_type_opc == i40e_aqc_opc_list_dev_capabilities)
+		p = (struct i40e_hw_capabilities *)&hw->dev_caps;
+	else if (list_type_opc == i40e_aqc_opc_list_func_capabilities)
+		p = (struct i40e_hw_capabilities *)&hw->func_caps;
+	else
+		return;
+
+	for (i = 0; i < cap_count; i++, cap++) {
+		id = LE16_TO_CPU(cap->id);
+		number = LE32_TO_CPU(cap->number);
+		logical_id = LE32_TO_CPU(cap->logical_id);
+		phys_id = LE32_TO_CPU(cap->phys_id);
+		major_rev = cap->major_rev;
+
+		switch (id) {
+		case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+			p->switch_mode = number;
+			break;
+		case I40E_DEV_FUNC_CAP_MGMT_MODE:
+			p->management_mode = number;
+			break;
+		case I40E_DEV_FUNC_CAP_NPAR:
+			p->npar_enable = number;
+			break;
+		case I40E_DEV_FUNC_CAP_OS2BMC:
+			p->os2bmc = number;
+			break;
+		case I40E_DEV_FUNC_CAP_VALID_FUNC:
+			p->valid_functions = number;
+			break;
+		case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+			if (number == 1)
+				p->sr_iov_1_1 = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_VF:
+			p->num_vfs = number;
+			p->vf_base_id = logical_id;
+			break;
+		case I40E_DEV_FUNC_CAP_VMDQ:
+			if (number == 1)
+				p->vmdq = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_802_1_QBG:
+			if (number == 1)
+				p->evb_802_1_qbg = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_802_1_QBH:
+			if (number == 1)
+				p->evb_802_1_qbh = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_VSI:
+			p->num_vsis = number;
+			break;
+		case I40E_DEV_FUNC_CAP_DCB:
+			if (number == 1) {
+				p->dcb = TRUE;
+				p->enabled_tcmap = logical_id;
+				p->maxtc = phys_id;
+			}
+			break;
+		case I40E_DEV_FUNC_CAP_FCOE:
+			if (number == 1)
+				p->fcoe = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_ISCSI:
+			if (number == 1)
+				p->iscsi = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_RSS:
+			p->rss = TRUE;
+			p->rss_table_size = number;
+			p->rss_table_entry_width = logical_id;
+			break;
+		case I40E_DEV_FUNC_CAP_RX_QUEUES:
+			p->num_rx_qp = number;
+			p->base_queue = phys_id;
+			break;
+		case I40E_DEV_FUNC_CAP_TX_QUEUES:
+			p->num_tx_qp = number;
+			p->base_queue = phys_id;
+			break;
+		case I40E_DEV_FUNC_CAP_MSIX:
+			p->num_msix_vectors = number;
+			break;
+		case I40E_DEV_FUNC_CAP_MSIX_VF:
+			p->num_msix_vectors_vf = number;
+			break;
+		case I40E_DEV_FUNC_CAP_FLEX10:
+			if (major_rev == 1) {
+				if (number == 1) {
+					p->flex10_enable = TRUE;
+					p->flex10_capable = TRUE;
+				}
+			} else {
+				/* Capability revision >= 2 */
+				if (number & 1)
+					p->flex10_enable = TRUE;
+				if (number & 2)
+					p->flex10_capable = TRUE;
+			}
+			p->flex10_mode = logical_id;
+			p->flex10_status = phys_id;
+			break;
+		case I40E_DEV_FUNC_CAP_CEM:
+			if (number == 1)
+				p->mgmt_cem = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_IWARP:
+			if (number == 1)
+				p->iwarp = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_LED:
+			if (phys_id < I40E_HW_CAP_MAX_GPIO)
+				p->led[phys_id] = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_SDP:
+			if (phys_id < I40E_HW_CAP_MAX_GPIO)
+				p->sdp[phys_id] = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_MDIO:
+			if (number == 1) {
+				p->mdio_port_num = phys_id;
+				p->mdio_port_mode = logical_id;
+			}
+			break;
+		case I40E_DEV_FUNC_CAP_IEEE_1588:
+			if (number == 1)
+				p->ieee_1588 = TRUE;
+			break;
+		case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+			p->fd = TRUE;
+			p->fd_filters_guaranteed = number;
+			p->fd_filters_best_effort = logical_id;
+			break;
+		case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+			p->wr_csr_prot = (u64)number;
+			p->wr_csr_prot |= (u64)logical_id << 32;
+			break;
+		default:
+			break;
+		}
+	}
+
+	if (p->fcoe)
+		i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
+
+	/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
+	p->fcoe = FALSE;
+
+	/* count the enabled ports (aka the "not disabled" ports) */
+	hw->num_ports = 0;
+	for (i = 0; i < 4; i++) {
+		u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i);
+		u64 port_cfg = 0;
+
+		/* use AQ read to get the physical register offset instead
+		 * of the port relative offset
+		 */
+		i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL);
+		if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK))
+			hw->num_ports++;
+	}
+
+	valid_functions = p->valid_functions;
+	num_functions = 0;
+	while (valid_functions) {
+		if (valid_functions & 1)
+			num_functions++;
+		valid_functions >>= 1;
+	}
+
+	/* partition id is 1-based, and functions are evenly spread
+	 * across the ports as partitions
+	 */
+	hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+	hw->num_partitions = num_functions / hw->num_ports;
+
+	/* additional HW specific goodies that might
+	 * someday be HW version specific
+	 */
+	p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS;
+}
+
+/**
+ * i40e_aq_discover_capabilities
+ * @hw: pointer to the hw struct
+ * @buff: a virtual buffer to hold the capabilities
+ * @buff_size: Size of the virtual buffer
+ * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM
+ * @list_type_opc: capabilities type to discover - pass in the command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get the device capabilities descriptions from the firmware
+ **/
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+				void *buff, u16 buff_size, u16 *data_size,
+				enum i40e_admin_queue_opc list_type_opc,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aqc_list_capabilites *cmd;
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status = I40E_SUCCESS;
+
+	cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
+
+	if (list_type_opc != i40e_aqc_opc_list_func_capabilities &&
+		list_type_opc != i40e_aqc_opc_list_dev_capabilities) {
+		status = I40E_ERR_PARAM;
+		goto exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, list_type_opc);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	*data_size = LE16_TO_CPU(desc.datalen);
+
+	if (status)
+		goto exit;
+
+	i40e_parse_discover_capabilities(hw, buff, LE32_TO_CPU(cmd->count),
+					 list_type_opc);
+
+exit:
+	return status;
+}
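+
+/*
+ * Usage sketch (illustrative only): callers typically retry with a larger
+ * buffer when firmware reports ENOMEM, using the data_size hint returned
+ * above.  The allocation details are assumptions for the example.
+ *
+ *	u16 size = 1024, needed = 0;
+ *	void *buf = malloc(size, M_DEVBUF, M_NOWAIT);
+ *	enum i40e_status_code st;
+ *
+ *	st = i40e_aq_discover_capabilities(hw, buf, size, &needed,
+ *	    i40e_aqc_opc_list_func_capabilities, NULL);
+ *	if (st && hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM)
+ *		... reallocate needed bytes and retry ...
+ */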
+
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_nvm_update *cmd =
+		(struct i40e_aqc_nvm_update *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	DEBUGFUNC("i40e_aq_update_nvm");
+
+	/* The highest byte of the offset must be zero. */
+	if (offset & 0xFF000000) {
+		status = I40E_ERR_PARAM;
+		goto i40e_aq_update_nvm_exit;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+	/* If this is the last command in a series, set the proper flag. */
+	if (last_command)
+		cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+	cmd->module_pointer = module_pointer;
+	cmd->offset = CPU_TO_LE32(offset);
+	cmd->length = CPU_TO_LE16(length);
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+	return status;
+}
+
+/**
+ * i40e_aq_get_lldp_mib
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet).
+ **/
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+				u8 mib_type, void *buff, u16 buff_size,
+				u16 *local_len, u16 *remote_len,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_get_mib *cmd =
+		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	struct i40e_aqc_lldp_get_mib *resp =
+		(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib);
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+
+	cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK;
+	cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+		       I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (local_len != NULL)
+			*local_len = LE16_TO_CPU(resp->local_len);
+		if (remote_len != NULL)
+			*remote_len = LE16_TO_CPU(resp->remote_len);
+	}
+
+	return status;
+}
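+
+/*
+ * Usage sketch (illustrative only): fetching the locally generated MIB
+ * for the nearest bridge; the buffer size is an assumption.
+ *
+ *	u8 mib[1024];
+ *	u16 llen = 0, rlen = 0;
+ *
+ *	(void)i40e_aq_get_lldp_mib(hw,
+ *	    I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
+ *	    I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib), &llen, &rlen, NULL);
+ */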
+
+/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+				u8 mib_type, void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_set_local_mib *cmd =
+		(struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_lldp_set_local_mib);
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	cmd->type = mib_type;
+	cmd->length = CPU_TO_LE16(buff_size);
+	cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff));
+	cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	return status;
+}
+
+/**
+ * i40e_aq_cfg_lldp_mib_change_event
+ * @hw: pointer to the hw struct
+ * @enable_update: Enable or Disable event posting
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes
+ **/
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+				bool enable_update,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_update_mib *cmd =
+		(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
+
+	if (!enable_update)
+		cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to add
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be added
+ * @mib_len: length of the LLDP MIB returned in response
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Add the specified TLV to the LLDP Local MIB for the given bridge type.
+ * It is the responsibility of the caller to make sure that the TLV is not
+ * already present in the LLDPDU.
+ * In return, firmware will write the complete LLDP MIB with the newly
+ * added TLV into the response buffer.
+ **/
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+				void *buff, u16 buff_size, u16 tlv_len,
+				u16 *mib_len,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_add_tlv *cmd =
+		(struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff || tlv_len == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
+
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+		      I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+	cmd->len = CPU_TO_LE16(tlv_len);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (mib_len != NULL)
+			*mib_len = LE16_TO_CPU(desc.datalen);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_update_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: buffer with TLV to update
+ * @buff_size: size of the buffer holding original and updated TLVs
+ * @old_len: Length of the Original TLV
+ * @new_len: Length of the Updated TLV
+ * @offset: offset of the updated TLV in the buff
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the specified TLV to the LLDP Local MIB for the given bridge type.
+ * Firmware will place the complete LLDP MIB in response buffer with the
+ * updated TLV.
+ **/
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+				u8 bridge_type, void *buff, u16 buff_size,
+				u16 old_len, u16 new_len, u16 offset,
+				u16 *mib_len,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_update_tlv *cmd =
+		(struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff || offset == 0 ||
+	    old_len == 0 || new_len == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
+
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+		      I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+	cmd->old_len = CPU_TO_LE16(old_len);
+	cmd->new_offset = CPU_TO_LE16(offset);
+	cmd->new_len = CPU_TO_LE16(new_len);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (mib_len != NULL)
+			*mib_len = LE16_TO_CPU(desc.datalen);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_delete_lldp_tlv
+ * @hw: pointer to the hw struct
+ * @bridge_type: type of bridge
+ * @buff: pointer to a user supplied buffer that has the TLV
+ * @buff_size: length of the buffer
+ * @tlv_len: length of the TLV to be deleted
+ * @mib_len: length of the returned LLDP MIB
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Delete the specified TLV from LLDP Local MIB for the given bridge type.
+ * The firmware places the entire LLDP MIB in the response buffer.
+ **/
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+				u8 bridge_type, void *buff, u16 buff_size,
+				u16 tlv_len, u16 *mib_len,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_add_tlv *cmd =
+		(struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+	desc.datalen = CPU_TO_LE16(buff_size);
+	cmd->len = CPU_TO_LE16(tlv_len);
+	cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
+		      I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (mib_len != NULL)
+			*mib_len = LE16_TO_CPU(desc.datalen);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_stop_lldp
+ * @hw: pointer to the hw struct
+ * @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent
+ **/
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_stop *cmd =
+		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
+
+	if (shutdown_agent)
+		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_start_lldp
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports.
+ **/
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_start *cmd =
+		(struct i40e_aqc_lldp_start *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
+
+	cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
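+
+/*
+ * Usage sketch (illustrative only): a host agent that wants to manage
+ * LLDP/DCBX itself typically shuts the firmware agent down first and
+ * restarts it on teardown.
+ *
+ *	(void)i40e_aq_stop_lldp(hw, TRUE, NULL);
+ *	... host-managed LLDP runs here ...
+ *	(void)i40e_aq_start_lldp(hw, NULL);
+ */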
+
+/**
+ * i40e_aq_get_cee_dcb_config
+ * @hw: pointer to the hw struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @buff_size: size of the buffer passed
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware
+ **/
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the hw struct
+ * @start_agent: True if DCBx Agent needs to be Started
+ *				False if DCBx Agent needs to be Stopped
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded dcbx Agent
+ **/
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+				bool start_agent,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_lldp_stop_start_specific_agent *cmd =
+		(struct i40e_aqc_lldp_stop_start_specific_agent *)
+				&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_lldp_stop_start_spec_agent);
+
+	if (start_agent)
+		cmd->command = I40E_AQC_START_SPECIFIC_AGENT_MASK;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @udp_port: the UDP port to add
+ * @protocol_index: protocol index type
+ * @filter_index: pointer to filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+				u16 udp_port, u8 protocol_index,
+				u8 *filter_index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_udp_tunnel *cmd =
+		(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
+	struct i40e_aqc_del_udp_tunnel_completion *resp =
+		(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
+
+	cmd->udp_port = CPU_TO_LE16(udp_port);
+	cmd->protocol_type = protocol_index;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && filter_index)
+		*filter_index = resp->index;
+
+	return status;
+}
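+
+/*
+ * Usage sketch (illustrative only): registering the IANA-assigned VXLAN
+ * port and later removing it via the returned filter index.
+ *
+ *	u8 idx = 0;
+ *
+ *	if (i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
+ *	    &idx, NULL) == I40E_SUCCESS)
+ *		... later: i40e_aq_del_udp_tunnel(hw, idx, NULL) ...
+ */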
+
+/**
+ * i40e_aq_del_udp_tunnel
+ * @hw: pointer to the hw struct
+ * @index: filter index
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_remove_udp_tunnel *cmd =
+		(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
+
+	cmd->index = index;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_get_switch_resource_alloc (0x0204)
+ * @hw: pointer to the hw struct
+ * @num_entries: pointer to u8 to store the number of resource entries returned
+ * @buf: pointer to a user supplied buffer.  This buffer must be large enough
+ *        to store the resource information for all resource types.  Each
+ *        resource type is an i40e_aqc_switch_resource_alloc_element_resp
+ *        structure.
+ * @count: size, in bytes, of the buffer provided
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Query the resources allocated to a function.
+ **/
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+			u8 *num_entries,
+			struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+			u16 count,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_get_switch_resource_alloc *cmd_resp =
+		(struct i40e_aqc_get_switch_resource_alloc *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 length = count * sizeof(*buf);
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					i40e_aqc_opc_get_switch_resource_alloc);
+
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+	if (!status && num_entries)
+		*num_entries = cmd_resp->num_entries;
+
+	return status;
+}
+
+/**
+ * i40e_aq_delete_element - Delete switch element
+ * @hw: pointer to the hw struct
+ * @seid: the SEID to delete from the switch
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes a switch element from the switch.
+ **/
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_switch_seid *cmd =
+		(struct i40e_aqc_switch_seid *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
+
+	cmd->seid = CPU_TO_LE16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * @hw: pointer to the hw struct
+ * @flags: component flags
+ * @mac_seid: uplink seid (MAC SEID)
+ * @vsi_seid: connected vsi seid
+ * @ret_seid: seid of the created pv component
+ *
+ * This instantiates an i40e port virtualizer with the specified flags.
+ * Depending on the specified flags the port virtualizer can act as a
+ * 802.1Qbr port virtualizer or a 802.1Qbg S-component.
+ **/
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+				       u16 mac_seid, u16 vsi_seid,
+				       u16 *ret_seid)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_update_pv *cmd =
+		(struct i40e_aqc_add_update_pv *)&desc.params.raw;
+	struct i40e_aqc_add_update_pv_completion *resp =
+		(struct i40e_aqc_add_update_pv_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (vsi_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_pv);
+	cmd->command_flags = CPU_TO_LE16(flags);
+	cmd->uplink_seid = CPU_TO_LE16(mac_seid);
+	cmd->connected_seid = CPU_TO_LE16(vsi_seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+	if (!status && ret_seid)
+		*ret_seid = LE16_TO_CPU(resp->pv_seid);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_tag - Add an S/E-tag
+ * @hw: pointer to the hw struct
+ * @direct_to_queue: whether the S-tag should direct the flow to a specific queue
+ * @vsi_seid: VSI SEID to use this tag
+ * @tag: value of the tag
+ * @queue_num: queue number, only valid if direct_to_queue is TRUE
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates an S- or E-tag to a VSI in the switch complex.  It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+				u16 vsi_seid, u16 tag, u16 queue_num,
+				u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_tag *cmd =
+		(struct i40e_aqc_add_tag *)&desc.params.raw;
+	struct i40e_aqc_add_remove_tag_completion *resp =
+		(struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (vsi_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_tag);
+
+	cmd->seid = CPU_TO_LE16(vsi_seid);
+	cmd->tag = CPU_TO_LE16(tag);
+	if (direct_to_queue) {
+		cmd->flags = CPU_TO_LE16(I40E_AQC_ADD_TAG_FLAG_TO_QUEUE);
+		cmd->queue_number = CPU_TO_LE16(queue_num);
+	}
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status) {
+		if (tags_used != NULL)
+			*tags_used = LE16_TO_CPU(resp->tags_used);
+		if (tags_free != NULL)
+			*tags_free = LE16_TO_CPU(resp->tags_free);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_tag - Remove an S- or E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID this tag is associated with
+ * @tag: value of the S-tag to delete
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an S- or E-tag from a VSI in the switch complex.  It returns
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+				u16 tag, u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_remove_tag *cmd =
+		(struct i40e_aqc_remove_tag *)&desc.params.raw;
+	struct i40e_aqc_add_remove_tag_completion *resp =
+		(struct i40e_aqc_add_remove_tag_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (vsi_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_tag);
+
+	cmd->seid = CPU_TO_LE16(vsi_seid);
+	cmd->tag = CPU_TO_LE16(tag);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status) {
+		if (tags_used != NULL)
+			*tags_used = LE16_TO_CPU(resp->tags_used);
+		if (tags_free != NULL)
+			*tags_free = LE16_TO_CPU(resp->tags_free);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_mcast_etag - Add a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID to associate the E-tag with
+ * @etag: value of E-tag to add
+ * @num_tags_in_buf: number of unicast E-tags in indirect buffer
+ * @buf: address of indirect buffer
+ * @tags_used: return value, number of E-tags in use by this port
+ * @tags_free: return value, number of unallocated multicast E-tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This associates a multicast E-tag to a port virtualizer.  It will return
+ * the number of tags allocated by the PF, and the number of unallocated
+ * tags available.
+ *
+ * The indirect buffer pointed to by buf is a list of 2-byte E-tags,
+ * num_tags_in_buf long.
+ **/
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+				u16 etag, u8 num_tags_in_buf, void *buf,
+				u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_mcast_etag *cmd =
+		(struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+	struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+	   (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 length = sizeof(u16) * num_tags_in_buf;
+
+	if ((pv_seid == 0) || (buf == NULL) || (num_tags_in_buf == 0))
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_multicast_etag);
+
+	cmd->pv_seid = CPU_TO_LE16(pv_seid);
+	cmd->etag = CPU_TO_LE16(etag);
+	cmd->num_unicast_etags = num_tags_in_buf;
+
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	if (length > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
+
+	if (!status) {
+		if (tags_used != NULL)
+			*tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+		if (tags_free != NULL)
+			*tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_mcast_etag - Remove a multicast E-tag
+ * @hw: pointer to the hw struct
+ * @pv_seid: Port Virtualizer SEID this E-tag is associated with
+ * @etag: value of the E-tag to remove
+ * @tags_used: return value, number of tags in use by this port
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This deletes an E-tag from the port virtualizer.  It will return
+ * the number of tags allocated by the port, and the number of unallocated
+ * tags available.
+ **/
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
+				u16 etag, u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_mcast_etag *cmd =
+		(struct i40e_aqc_add_remove_mcast_etag *)&desc.params.raw;
+	struct i40e_aqc_add_remove_mcast_etag_completion *resp =
+	   (struct i40e_aqc_add_remove_mcast_etag_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (pv_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_remove_multicast_etag);
+
+	cmd->pv_seid = CPU_TO_LE16(pv_seid);
+	cmd->etag = CPU_TO_LE16(etag);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status) {
+		if (tags_used != NULL)
+			*tags_used = LE16_TO_CPU(resp->mcast_etags_used);
+		if (tags_free != NULL)
+			*tags_free = LE16_TO_CPU(resp->mcast_etags_free);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_update_tag - Update an S/E-tag
+ * @hw: pointer to the hw struct
+ * @vsi_seid: VSI SEID using this S-tag
+ * @old_tag: old tag value
+ * @new_tag: new tag value
+ * @tags_used: return value, number of tags in use by this PF
+ * @tags_free: return value, number of unallocated tags
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This updates the value of the tag currently attached to this VSI
+ * in the switch complex.  It will return the number of tags allocated
+ * by the PF, and the number of unallocated tags available.
+ **/
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+				u16 old_tag, u16 new_tag, u16 *tags_used,
+				u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_update_tag *cmd =
+		(struct i40e_aqc_update_tag *)&desc.params.raw;
+	struct i40e_aqc_update_tag_completion *resp =
+		(struct i40e_aqc_update_tag_completion *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (vsi_seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_update_tag);
+
+	cmd->seid = CPU_TO_LE16(vsi_seid);
+	cmd->old_tag = CPU_TO_LE16(old_tag);
+	cmd->new_tag = CPU_TO_LE16(new_tag);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status) {
+		if (tags_used != NULL)
+			*tags_used = LE16_TO_CPU(resp->tags_used);
+		if (tags_free != NULL)
+			*tags_free = LE16_TO_CPU(resp->tags_free);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_dcb_ignore_pfc - Ignore PFC for given TCs
+ * @hw: pointer to the hw struct
+ * @tcmap: TC bitmap for which to request/release the ignore-PFC condition
+ * @request: request or release ignore PFC condition
+ * @tcmap_ret: return TCs for which PFC is currently ignored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This sends out request/release to ignore PFC condition for a TC.
+ * It will return the TCs for which PFC is currently ignored.
+ **/
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap,
+				bool request, u8 *tcmap_ret,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_pfc_ignore *cmd_resp =
+		(struct i40e_aqc_pfc_ignore *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_ignore_pfc);
+
+	if (request)
+		cmd_resp->command_flags = I40E_AQC_PFC_IGNORE_SET;
+
+	cmd_resp->tc_bitmap = tcmap;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status) {
+		if (tcmap_ret != NULL)
+			*tcmap_ret = cmd_resp->tc_bitmap;
+	}
+
+	return status;
+}
+
+/**
+ * i40e_aq_dcb_updated - DCB Updated Command
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * When LLDP is handled in PF this command is used by the PF
+ * to notify EMP that a DCB setting is modified.
+ * When LLDP is handled in EMP this command is used by the PF
+ * to notify EMP whenever one of the following parameters get
+ * modified:
+ *   - PFCLinkDelayAllowance in PRTDCB_GENC.PFCLDA
+ *   - PCIRTT in PRTDCB_GENC.PCIRTT
+ *   - Maximum Frame Size for non-FCoE TCs set by PRTDCB_TDPUC.MAX_TXFRAME.
+ * EMP will return when the shared RPB settings have been
+ * recomputed and modified. The retval field in the descriptor
+ * will be set to 0 when RPB is modified.
+ **/
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_add_statistics - Add a statistics block to a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * allocate a set of smonVlanStats counters to a specific VLAN in a
+ * specific switch.
+ **/
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+				u16 vlan_id, u16 *stat_index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_statistics *cmd_resp =
+		(struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if ((seid == 0) || (stat_index == NULL))
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_statistics);
+
+	cmd_resp->seid = CPU_TO_LE16(seid);
+	cmd_resp->vlan = CPU_TO_LE16(vlan_id);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && stat_index)
+		*stat_index = LE16_TO_CPU(cmd_resp->stat_index);
+
+	return status;
+}
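+
+/*
+ * Usage sketch (illustrative only): attaching a VLAN statistics block and
+ * detaching it with the index firmware handed back.
+ *
+ *	u16 idx = 0;
+ *
+ *	if (i40e_aq_add_statistics(hw, sw_seid, vlan, &idx, NULL) ==
+ *	    I40E_SUCCESS)
+ *		... later: i40e_aq_remove_statistics(hw, sw_seid, vlan,
+ *		    idx, NULL) ...
+ */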
+
+/**
+ * i40e_aq_remove_statistics - Remove a statistics block from a VLAN in a switch.
+ * @hw: pointer to the hw struct
+ * @seid: defines the SEID of the switch for which the stats are requested
+ * @vlan_id: the VLAN ID for which the statistics are requested
+ * @stat_index: index of the statistics counters block assigned to this VLAN
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * XL710 supports 128 smonVlanStats counters. This command is used to
+ * deallocate a set of smonVlanStats counters from a specific VLAN in a
+ * specific switch.
+ **/
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+				u16 vlan_id, u16 stat_index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_statistics *cmd =
+		(struct i40e_aqc_add_remove_statistics *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (seid == 0)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_remove_statistics);
+
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->vlan = CPU_TO_LE16(vlan_id);
+	cmd->stat_index = CPU_TO_LE16(stat_index);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_set_port_parameters - set physical port parameters.
+ * @hw: pointer to the hw struct
+ * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
+ * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
+ * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
+ * @double_vlan: if set, double VLAN is enabled
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+				u16 bad_frame_vsi, bool save_bad_pac,
+				bool pad_short_pac, bool double_vlan,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aqc_set_port_parameters *cmd;
+	enum i40e_status_code status;
+	struct i40e_aq_desc desc;
+	u16 command_flags = 0;
+
+	cmd = (struct i40e_aqc_set_port_parameters *)&desc.params.raw;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_set_port_parameters);
+
+	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
+	if (save_bad_pac)
+		command_flags |= I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS;
+	if (pad_short_pac)
+		command_flags |= I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS;
+	if (double_vlan)
+		command_flags |= I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA;
+	cmd->command_flags = CPU_TO_LE16(command_flags);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
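+
+/*
+ * Usage sketch (illustrative only): enabling double VLAN (QinQ) handling
+ * while leaving bad-frame forwarding and short-packet padding disabled;
+ * a bad_frame_vsi of 0 is an assumption for the example.
+ *
+ *	(void)i40e_aq_set_port_parameters(hw, 0, FALSE, FALSE, TRUE, NULL);
+ */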
+
+/**
+ * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler
+ * @hw: pointer to the hw struct
+ * @seid: seid for the physical port/switching component/vsi
+ * @buff: Indirect buffer to hold data parameters and response
+ * @buff_size: Indirect buffer size
+ * @opcode: Tx scheduler AQ command opcode
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Generic command handler for Tx scheduler AQ commands
+ **/
+static enum i40e_status_code i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+				void *buff, u16 buff_size,
+				enum i40e_admin_queue_opc opcode,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_tx_sched_ind *cmd =
+		(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+	enum i40e_status_code status;
+	bool cmd_param_flag = FALSE;
+
+	switch (opcode) {
+	case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit:
+	case i40e_aqc_opc_configure_vsi_tc_bw:
+	case i40e_aqc_opc_enable_switching_comp_ets:
+	case i40e_aqc_opc_modify_switching_comp_ets:
+	case i40e_aqc_opc_disable_switching_comp_ets:
+	case i40e_aqc_opc_configure_switching_comp_ets_bw_limit:
+	case i40e_aqc_opc_configure_switching_comp_bw_config:
+		cmd_param_flag = TRUE;
+		break;
+	case i40e_aqc_opc_query_vsi_bw_config:
+	case i40e_aqc_opc_query_vsi_ets_sla_config:
+	case i40e_aqc_opc_query_switching_comp_ets_config:
+	case i40e_aqc_opc_query_port_ets_config:
+	case i40e_aqc_opc_query_switching_comp_bw_config:
+		cmd_param_flag = FALSE;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	i40e_fill_default_direct_cmd_desc(&desc, opcode);
+
+	/* Indirect command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (cmd_param_flag)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	cmd->vsi_seid = CPU_TO_LE16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+				u16 seid, u16 credit, u8 max_credit,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_configure_vsi_bw_limit *cmd =
+		(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_configure_vsi_bw_limit);
+
+	cmd->vsi_seid = CPU_TO_LE16(seid);
+	cmd->credit = CPU_TO_LE16(credit);
+	cmd->max_credit = max_credit;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_config_switch_comp_bw_limit - Configure Switching component BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: switching component seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_bw: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+				u16 seid, u16 credit, u8 max_bw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_configure_switching_comp_bw_limit *cmd =
+	  (struct i40e_aqc_configure_switching_comp_bw_limit *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_configure_switching_comp_bw_limit);
+
+	cmd->seid = CPU_TO_LE16(seid);
+	cmd->credit = CPU_TO_LE16(credit);
+	cmd->max_bw = max_bw;
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_config_vsi_ets_sla_bw_limit - Config VSI BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_configure_vsi_ets_sla_bw_limit,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_configure_vsi_tc_bw,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_config_switch_comp_ets_bw_limit - Config Switch comp BW Limit per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer holding enabled TCs, per TC BW limit/credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+	struct i40e_hw *hw, u16 seid,
+	struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+	struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+			    i40e_aqc_opc_configure_switching_comp_ets_bw_limit,
+			    cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_bw_config - Query VSI BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_vsi_bw_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI
+ * @bw_data: Buffer to hold VSI BW configuration per TC
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_vsi_ets_sla_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's per TC BW config
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				   i40e_aqc_opc_query_switching_comp_ets_config,
+				   cmd_details);
+}
+
+/**
+ * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the VSI or switching component connected to Physical Port
+ * @bw_data: Buffer to hold current ETS configuration for the Physical Port
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_port_ets_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_port_ets_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration
+ * @hw: pointer to the hw struct
+ * @seid: seid of the switching component
+ * @bw_data: Buffer to hold switching component's BW configuration
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details)
+{
+	return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
+				    i40e_aqc_opc_query_switching_comp_bw_config,
+				    cmd_details);
+}
+
+/**
+ * i40e_validate_filter_settings
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Checks and validates the filter control settings passed in,
+ * specifically the filter and context sizes requested for FCoE
+ * and PE.
+ *
+ * Returns I40E_SUCCESS if the values passed are valid and within
+ * range, else returns an error.
+ **/
+static enum i40e_status_code i40e_validate_filter_settings(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings)
+{
+	u32 fcoe_cntx_size, fcoe_filt_size;
+	u32 pe_cntx_size, pe_filt_size;
+	u32 fcoe_fmax;
+	u32 val;
+
+	/* Validate FCoE settings passed */
+	switch (settings->fcoe_filt_num) {
+	case I40E_HASH_FILTER_SIZE_1K:
+	case I40E_HASH_FILTER_SIZE_2K:
+	case I40E_HASH_FILTER_SIZE_4K:
+	case I40E_HASH_FILTER_SIZE_8K:
+	case I40E_HASH_FILTER_SIZE_16K:
+	case I40E_HASH_FILTER_SIZE_32K:
+		fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+		fcoe_filt_size <<= (u32)settings->fcoe_filt_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	switch (settings->fcoe_cntx_num) {
+	case I40E_DMA_CNTX_SIZE_512:
+	case I40E_DMA_CNTX_SIZE_1K:
+	case I40E_DMA_CNTX_SIZE_2K:
+	case I40E_DMA_CNTX_SIZE_4K:
+		fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+		fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	/* Validate PE settings passed */
+	switch (settings->pe_filt_num) {
+	case I40E_HASH_FILTER_SIZE_1K:
+	case I40E_HASH_FILTER_SIZE_2K:
+	case I40E_HASH_FILTER_SIZE_4K:
+	case I40E_HASH_FILTER_SIZE_8K:
+	case I40E_HASH_FILTER_SIZE_16K:
+	case I40E_HASH_FILTER_SIZE_32K:
+	case I40E_HASH_FILTER_SIZE_64K:
+	case I40E_HASH_FILTER_SIZE_128K:
+	case I40E_HASH_FILTER_SIZE_256K:
+	case I40E_HASH_FILTER_SIZE_512K:
+	case I40E_HASH_FILTER_SIZE_1M:
+		pe_filt_size = I40E_HASH_FILTER_BASE_SIZE;
+		pe_filt_size <<= (u32)settings->pe_filt_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	switch (settings->pe_cntx_num) {
+	case I40E_DMA_CNTX_SIZE_512:
+	case I40E_DMA_CNTX_SIZE_1K:
+	case I40E_DMA_CNTX_SIZE_2K:
+	case I40E_DMA_CNTX_SIZE_4K:
+	case I40E_DMA_CNTX_SIZE_8K:
+	case I40E_DMA_CNTX_SIZE_16K:
+	case I40E_DMA_CNTX_SIZE_32K:
+	case I40E_DMA_CNTX_SIZE_64K:
+	case I40E_DMA_CNTX_SIZE_128K:
+	case I40E_DMA_CNTX_SIZE_256K:
+		pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE;
+		pe_cntx_size <<= (u32)settings->pe_cntx_num;
+		break;
+	default:
+		return I40E_ERR_PARAM;
+	}
+
+	/* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */
+	val = rd32(hw, I40E_GLHMC_FCOEFMAX);
+	fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK)
+		     >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT;
+	if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
+		return I40E_ERR_INVALID_SIZE;
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_filter_control
+ * @hw: pointer to the hardware structure
+ * @settings: Filter control settings
+ *
+ * Set the Queue Filters for PE/FCoE and enable filters required
+ * for a single PF. It is expected that these settings are programmed
+ * at the driver initialization time.
+ **/
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings)
+{
+	enum i40e_status_code ret = I40E_SUCCESS;
+	u32 hash_lut_size = 0;
+	u32 val;
+
+	if (!settings)
+		return I40E_ERR_PARAM;
+
+	/* Validate the input settings */
+	ret = i40e_validate_filter_settings(hw, settings);
+	if (ret)
+		return ret;
+
+	/* Read the PF Queue Filter control register */
+	val = rd32(hw, I40E_PFQF_CTL_0);
+
+	/* Program required PE hash buckets for the PF */
+	val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
+	val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PEHSIZE_MASK;
+	/* Program required PE contexts for the PF */
+	val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK;
+	val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PEDSIZE_MASK;
+
+	/* Program required FCoE hash buckets for the PF */
+	val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+	val |= ((u32)settings->fcoe_filt_num <<
+			I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PFFCHSIZE_MASK;
+	/* Program required FCoE DDP contexts for the PF */
+	val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+	val |= ((u32)settings->fcoe_cntx_num <<
+			I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_PFFCDSIZE_MASK;
+
+	/* Program Hash LUT size for the PF */
+	val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+	if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512)
+		hash_lut_size = 1;
+	val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) &
+		I40E_PFQF_CTL_0_HASHLUTSIZE_MASK;
+
+	/* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */
+	if (settings->enable_fdir)
+		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+	if (settings->enable_ethtype)
+		val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK;
+	if (settings->enable_macvlan)
+		val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
+
+	wr32(hw, I40E_PFQF_CTL_0, val);
+
+	return I40E_SUCCESS;
+}
+
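+/*
+ * Illustrative usage sketch (not part of the shared code): a PF driver
+ * would typically program the queue filters once at attach time, roughly
+ * as below.  The size selections are assumptions made for the example,
+ * not recommended values.
+ *
+ *	struct i40e_filter_control_settings s;
+ *
+ *	memset(&s, 0, sizeof(s));
+ *	s.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	s.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	s.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
+ *	s.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
+ *	s.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ *	s.enable_fdir = TRUE;
+ *	s.enable_ethtype = TRUE;
+ *	s.enable_macvlan = TRUE;
+ *	if (i40e_set_filter_control(hw, &s) != I40E_SUCCESS)
+ *		return (EIO);
+ */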
+/**
+ * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter
+ * @hw: pointer to the hw struct
+ * @mac_addr: MAC address to use in the filter
+ * @ethtype: Ethertype to use in the filter
+ * @flags: Flags that need to be applied to the filter
+ * @vsi_seid: seid of the control VSI
+ * @queue: VSI queue number to send the packet to
+ * @is_add: TRUE to add the control packet filter, FALSE to remove it
+ * @stats: Structure to hold information on control filter counts
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * This command adds or removes a control packet filter for a control VSI.
+ * On completion it updates the perfect filter counts in the stats member.
+ **/
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+				u8 *mac_addr, u16 ethtype, u16 flags,
+				u16 vsi_seid, u16 queue, bool is_add,
+				struct i40e_control_filter_stats *stats,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_control_packet_filter *cmd =
+		(struct i40e_aqc_add_remove_control_packet_filter *)
+		&desc.params.raw;
+	struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
+		(struct i40e_aqc_add_remove_control_packet_filter_completion *)
+		&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (vsi_seid == 0)
+		return I40E_ERR_PARAM;
+
+	if (is_add) {
+		i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_add_control_packet_filter);
+		cmd->queue = CPU_TO_LE16(queue);
+	} else {
+		i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_remove_control_packet_filter);
+	}
+
+	if (mac_addr)
+		i40e_memcpy(cmd->mac, mac_addr, I40E_ETH_LENGTH_OF_ADDRESS,
+			    I40E_NONDMA_TO_NONDMA);
+
+	cmd->etype = CPU_TO_LE16(ethtype);
+	cmd->flags = CPU_TO_LE16(flags);
+	cmd->seid = CPU_TO_LE16(vsi_seid);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	if (!status && stats) {
+		stats->mac_etype_used = LE16_TO_CPU(resp->mac_etype_used);
+		stats->etype_used = LE16_TO_CPU(resp->etype_used);
+		stats->mac_etype_free = LE16_TO_CPU(resp->mac_etype_free);
+		stats->etype_free = LE16_TO_CPU(resp->etype_free);
+	}
+
+	return status;
+}
+
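+/*
+ * Illustrative usage sketch (not part of the shared code): steering LLDP
+ * frames (ethertype 0x88CC) arriving on a control VSI to queue 0.  The
+ * flag name comes from i40e_adminq_cmd.h and is assumed here for the
+ * example; passing a NULL mac_addr leaves the filter's MAC field zeroed.
+ *
+ *	struct i40e_control_filter_stats stats;
+ *	enum i40e_status_code st;
+ *
+ *	st = i40e_aq_add_rem_control_packet_filter(hw, NULL, 0x88CC,
+ *	    I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE,
+ *	    vsi_seid, 0, TRUE, &stats, NULL);
+ */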
+/**
+ * i40e_aq_add_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters to
+ * @filters: Buffer which contains the filters to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+	u16 seid,
+	struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+	u8 filter_count)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+	u16 buff_len;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_add_cloud_filters);
+
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_remove_cloud_filters
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI.  The contents of the
+ * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * in by the caller of the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+		u8 filter_count)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_add_remove_cloud_filters *cmd =
+	(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+	enum i40e_status_code status;
+	u16 buff_len;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_remove_cloud_filters);
+
+	buff_len = filter_count * sizeof(*filters);
+	desc.datalen = CPU_TO_LE16(buff_len);
+	desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+	cmd->num_filters = filter_count;
+	cmd->seid = CPU_TO_LE16(seid);
+
+	status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+	return status;
+}
+
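+/*
+ * Illustrative usage sketch (not part of the shared code): the add/remove
+ * pair is symmetric, so the element buffer handed to
+ * i40e_aq_add_cloud_filters() can be replayed to tear the same filters
+ * down.  Note that filter_count is a u8, so callers batch at most 255
+ * elements per command.
+ *
+ *	st = i40e_aq_add_cloud_filters(hw, vsi_seid, elems, nelems);
+ *	...
+ *	st = i40e_aq_remove_cloud_filters(hw, vsi_seid, elems, nelems);
+ */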
+/**
+ * i40e_aq_alternate_write
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written at 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written at 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+				u32 reg_addr0, u32 reg_val0,
+				u32 reg_addr1, u32 reg_val1)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_write *cmd_resp =
+		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_write);
+	cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+	cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+	cmd_resp->data0 = CPU_TO_LE32(reg_val0);
+	cmd_resp->data1 = CPU_TO_LE32(reg_val1);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_alternate_write_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the first register to be modified
+ * @dw_count: number of alternate structure fields to write
+ * @buffer: pointer to the command buffer
+ *
+ * Write 'dw_count' dwords from 'buffer' to alternate structure
+ * starting at 'addr'.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+				u32 addr, u32 dw_count, void *buffer)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_ind_write *cmd_resp =
+		(struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buffer == NULL)
+		return I40E_ERR_PARAM;
+
+	/* Indirect command */
+	i40e_fill_default_direct_cmd_desc(&desc,
+					 i40e_aqc_opc_alternate_write_indirect);
+
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+	if (dw_count > (I40E_AQ_LARGE_BUF/4))
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd_resp->address = CPU_TO_LE32(addr);
+	cmd_resp->length = CPU_TO_LE32(dw_count);
+	cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buffer));
+	cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
+
+	status = i40e_asq_send_command(hw, &desc, buffer,
+				       I40E_LO_DWORD(4*dw_count), NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_alternate_read
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure. Fields are indicated
+ * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
+ * is not passed then only register at 'reg_addr0' is read.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+				u32 reg_addr0, u32 *reg_val0,
+				u32 reg_addr1, u32 *reg_val1)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_write *cmd_resp =
+		(struct i40e_aqc_alternate_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (reg_val0 == NULL)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read);
+	cmd_resp->address0 = CPU_TO_LE32(reg_addr0);
+	cmd_resp->address1 = CPU_TO_LE32(reg_addr1);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	if (status == I40E_SUCCESS) {
+		*reg_val0 = LE32_TO_CPU(cmd_resp->data0);
+
+		if (reg_val1 != NULL)
+			*reg_val1 = LE32_TO_CPU(cmd_resp->data1);
+	}
+
+	return status;
+}
+
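+/*
+ * Illustrative usage sketch (not part of the shared code): a paired write
+ * and read-back of two alternate-RAM dwords.  The addresses are
+ * placeholders for the example.
+ *
+ *	u32 v0, v1;
+ *	enum i40e_status_code st;
+ *
+ *	st = i40e_aq_alternate_write(hw, addr0, 0x1, addr1, 0x2);
+ *	if (st == I40E_SUCCESS)
+ *		st = i40e_aq_alternate_read(hw, addr0, &v0, addr1, &v1);
+ */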
+/**
+ * i40e_aq_alternate_read_indirect
+ * @hw: pointer to the hardware structure
+ * @addr: address of the alternate structure field
+ * @dw_count: number of alternate structure fields to read
+ * @buffer: pointer to the command buffer
+ *
+ * Read 'dw_count' dwords from alternate structure starting at 'addr' and
+ * place them in 'buffer'. The buffer should be allocated by caller.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+				u32 addr, u32 dw_count, void *buffer)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_ind_write *cmd_resp =
+		(struct i40e_aqc_alternate_ind_write *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buffer == NULL)
+		return I40E_ERR_PARAM;
+
+	/* Indirect command */
+	i40e_fill_default_direct_cmd_desc(&desc,
+		i40e_aqc_opc_alternate_read_indirect);
+
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_RD);
+	desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF);
+	if (dw_count > (I40E_AQ_LARGE_BUF/4))
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd_resp->address = CPU_TO_LE32(addr);
+	cmd_resp->length = CPU_TO_LE32(dw_count);
+	cmd_resp->addr_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buffer));
+	cmd_resp->addr_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buffer));
+
+	status = i40e_asq_send_command(hw, &desc, buffer,
+				       I40E_LO_DWORD(4*dw_count), NULL);
+
+	return status;
+}
+
+/**
+ *  i40e_aq_alternate_clear
+ *  @hw: pointer to the HW structure.
+ *
+ *  Clear the alternate structures of the port from which the function
+ *  is called.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_alternate_clear_port);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+/**
+ *  i40e_aq_alternate_write_done
+ *  @hw: pointer to the HW structure.
+ *  @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ *  @reset_needed: set to TRUE if the SW needs to trigger a GLOBAL reset
+ *
+ *  Indicates to the FW that alternate structures have been changed.
+ *
+ **/
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+		u8 bios_mode, bool *reset_needed)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_write_done *cmd =
+		(struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (reset_needed == NULL)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_alternate_write_done);
+
+	cmd->cmd_flags = CPU_TO_LE16(bios_mode);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+	if (!status)
+		*reset_needed = ((LE16_TO_CPU(cmd->cmd_flags) &
+				 I40E_AQ_ALTERNATE_RESET_NEEDED) != 0);
+
+	return status;
+}
+
+/**
+ *  i40e_aq_set_oem_mode
+ *  @hw: pointer to the HW structure.
+ *  @oem_mode: the OEM mode to be used
+ *
+ *  Sets the device to a specific operating mode. Currently the only supported
+ *  mode is no_clp, which causes FW to refrain from using Alternate RAM.
+ *
+ **/
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+		u8 oem_mode)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_alternate_write_done *cmd =
+		(struct i40e_aqc_alternate_write_done *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_alternate_set_mode);
+
+	cmd->cmd_flags = CPU_TO_LE16(oem_mode);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
+
+	return status;
+}
+
+/**
+ * i40e_aq_resume_port_tx
+ * @hw: pointer to the hardware structure
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Resume port's Tx traffic
+ **/
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
+
+	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_set_pci_config_data - store PCI bus info
+ * @hw: pointer to hardware structure
+ * @link_status: the link status word from PCI config space
+ *
+ * Stores the PCI bus info (speed, width, type) within the i40e_hw structure
+ **/
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
+{
+	hw->bus.type = i40e_bus_type_pci_express;
+
+	switch (link_status & I40E_PCI_LINK_WIDTH) {
+	case I40E_PCI_LINK_WIDTH_1:
+		hw->bus.width = i40e_bus_width_pcie_x1;
+		break;
+	case I40E_PCI_LINK_WIDTH_2:
+		hw->bus.width = i40e_bus_width_pcie_x2;
+		break;
+	case I40E_PCI_LINK_WIDTH_4:
+		hw->bus.width = i40e_bus_width_pcie_x4;
+		break;
+	case I40E_PCI_LINK_WIDTH_8:
+		hw->bus.width = i40e_bus_width_pcie_x8;
+		break;
+	default:
+		hw->bus.width = i40e_bus_width_unknown;
+		break;
+	}
+
+	switch (link_status & I40E_PCI_LINK_SPEED) {
+	case I40E_PCI_LINK_SPEED_2500:
+		hw->bus.speed = i40e_bus_speed_2500;
+		break;
+	case I40E_PCI_LINK_SPEED_5000:
+		hw->bus.speed = i40e_bus_speed_5000;
+		break;
+	case I40E_PCI_LINK_SPEED_8000:
+		hw->bus.speed = i40e_bus_speed_8000;
+		break;
+	default:
+		hw->bus.speed = i40e_bus_speed_unknown;
+		break;
+	}
+}
+
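+/*
+ * Illustrative usage sketch (not part of the shared code): the caller
+ * fetches the PCIe Link Status register and hands the raw word in.  On
+ * FreeBSD that would look roughly like the following (capability and
+ * register names assumed for the example):
+ *
+ *	int off;
+ *
+ *	if (pci_find_cap(dev, PCIY_EXPRESS, &off) == 0)
+ *		i40e_set_pci_config_data(hw,
+ *		    pci_read_config(dev, off + PCIER_LINK_STA, 2));
+ */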
+/**
+ * i40e_aq_debug_dump
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table id within cluster
+ * @start_index: index of line in the block to read
+ * @buff_size: dump buffer size
+ * @buff: dump buffer
+ * @ret_buff_size: actual buffer size returned
+ * @ret_next_table: next block to read
+ * @ret_next_index: next index to read
+ *
+ * Dump internal FW/HW data for debug purposes.
+ *
+ **/
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+				u8 table_id, u32 start_index, u16 buff_size,
+				void *buff, u16 *ret_buff_size,
+				u8 *ret_next_table, u32 *ret_next_index,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_aqc_debug_dump_internals *cmd =
+		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	struct i40e_aqc_debug_dump_internals *resp =
+		(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
+	enum i40e_status_code status;
+
+	if (buff_size == 0 || !buff)
+		return I40E_ERR_PARAM;
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+					  i40e_aqc_opc_debug_dump_internals);
+	/* Indirect Command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	if (buff_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	cmd->cluster_id = cluster_id;
+	cmd->table_id = table_id;
+	cmd->idx = CPU_TO_LE32(start_index);
+
+	desc.datalen = CPU_TO_LE16(buff_size);
+
+	status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+	if (!status) {
+		if (ret_buff_size != NULL)
+			*ret_buff_size = LE16_TO_CPU(desc.datalen);
+		if (ret_next_table != NULL)
+			*ret_next_table = resp->table_id;
+		if (ret_next_index != NULL)
+			*ret_next_index = LE32_TO_CPU(resp->idx);
+	}
+
+	return status;
+}
+
+/**
+ * i40e_read_bw_from_alt_ram
+ * @hw: pointer to the hardware structure
+ * @max_bw: pointer for max_bw read
+ * @min_bw: pointer for min_bw read
+ * @min_valid: pointer for bool that is TRUE if min_bw is a valid value
+ * @max_valid: pointer for bool that is TRUE if max_bw is a valid value
+ *
+ * Read bw from the alternate ram for the given pf
+ **/
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+					u32 *max_bw, u32 *min_bw,
+					bool *min_valid, bool *max_valid)
+{
+	enum i40e_status_code status;
+	u32 max_bw_addr, min_bw_addr;
+
+	/* Calculate the address of the min/max bw registers */
+	max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+		      I40E_ALT_STRUCT_MAX_BW_OFFSET +
+		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+	min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
+		      I40E_ALT_STRUCT_MIN_BW_OFFSET +
+		      (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id);
+
+	/* Read the bandwidths from alt ram */
+	status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw,
+					min_bw_addr, min_bw);
+
+	if (status)
+		return status;
+
+	if (*min_bw & I40E_ALT_BW_VALID_MASK)
+		*min_valid = TRUE;
+	else
+		*min_valid = FALSE;
+
+	if (*max_bw & I40E_ALT_BW_VALID_MASK)
+		*max_valid = TRUE;
+	else
+		*max_valid = FALSE;
+
+	return status;
+}
+
+/**
+ * i40e_aq_configure_partition_bw
+ * @hw: pointer to the hardware structure
+ * @bw_data: Buffer holding valid pfs and bw limits
+ * @cmd_details: pointer to command details
+ *
+ * Configure partitions guaranteed/max bw
+ **/
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+			struct i40e_aqc_configure_partition_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details)
+{
+	enum i40e_status_code status;
+	struct i40e_aq_desc desc;
+	u16 bwd_size = sizeof(*bw_data);
+
+	i40e_fill_default_direct_cmd_desc(&desc,
+				i40e_aqc_opc_configure_partition_bw);
+
+	/* Indirect command */
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+	if (bwd_size > I40E_AQ_LARGE_BUF)
+		desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+	desc.datalen = CPU_TO_LE16(bwd_size);
+
+	status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size,
+				       cmd_details);
+
+	return status;
+}
+
+/**
+ * i40e_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cmd_details: pointer to command details
+ *
+ * Send message to PF driver using admin queue. By default, this message
+ * is sent asynchronously, i.e. i40e_asq_send_command() does not wait for
+ * completion before returning.
+ **/
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				enum i40e_virtchnl_ops v_opcode,
+				enum i40e_status_code v_retval,
+				u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details)
+{
+	struct i40e_aq_desc desc;
+	struct i40e_asq_cmd_details details;
+	enum i40e_status_code status;
+
+	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
+	desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_SI);
+	desc.cookie_high = CPU_TO_LE32(v_opcode);
+	desc.cookie_low = CPU_TO_LE32(v_retval);
+	if (msglen) {
+		desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF
+						| I40E_AQ_FLAG_RD));
+		if (msglen > I40E_AQ_LARGE_BUF)
+			desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+		desc.datalen = CPU_TO_LE16(msglen);
+	}
+	if (!cmd_details) {
+		i40e_memset(&details, 0, sizeof(details), I40E_NONDMA_MEM);
+		details.async = TRUE;
+		cmd_details = &details;
+	}
+	status = i40e_asq_send_command(hw, (struct i40e_aq_desc *)&desc, msg,
+				       msglen, cmd_details);
+	return status;
+}
+
+/**
+ * i40e_vf_parse_hw_config
+ * @hw: pointer to the hardware structure
+ * @msg: pointer to the virtual channel VF resource structure
+ *
+ * Given a VF resource message from the PF, populate the hw struct
+ * with appropriate information.
+ **/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct i40e_virtchnl_vf_resource *msg)
+{
+	struct i40e_virtchnl_vsi_resource *vsi_res;
+	int i;
+
+	vsi_res = &msg->vsi_res[0];
+
+	hw->dev_caps.num_vsis = msg->num_vsis;
+	hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
+	hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
+	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
+	hw->dev_caps.dcb = msg->vf_offload_flags &
+			   I40E_VIRTCHNL_VF_OFFLOAD_L2;
+	hw->dev_caps.fcoe = (msg->vf_offload_flags &
+			     I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
+	hw->dev_caps.iwarp = (msg->vf_offload_flags &
+			      I40E_VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0;
+	for (i = 0; i < msg->num_vsis; i++) {
+		if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
+			i40e_memcpy(hw->mac.perm_addr,
+				    vsi_res->default_mac_addr,
+				    I40E_ETH_LENGTH_OF_ADDRESS,
+				    I40E_NONDMA_TO_NONDMA);
+			i40e_memcpy(hw->mac.addr, vsi_res->default_mac_addr,
+				    I40E_ETH_LENGTH_OF_ADDRESS,
+				    I40E_NONDMA_TO_NONDMA);
+		}
+		vsi_res++;
+	}
+}
+
+/**
+ * i40e_vf_reset
+ * @hw: pointer to the hardware structure
+ *
+ * Send a VF_RESET message to the PF. Does not wait for response from PF
+ * as none will be forthcoming. Immediately after calling this function,
+ * the admin queue should be shut down and (optionally) reinitialized.
+ **/
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
+{
+	return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
+				      I40E_SUCCESS, NULL, 0, NULL);
+}


Property changes on: trunk/sys/dev/ixl/i40e_common.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_devids.h
===================================================================
--- trunk/sys/dev/ixl/i40e_devids.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_devids.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,69 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_devids.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_DEVIDS_H_
+#define _I40E_DEVIDS_H_
+
+/* Vendor ID */
+#define I40E_INTEL_VENDOR_ID		0x8086
+
+/* Device IDs */
+#define I40E_DEV_ID_SFP_XL710		0x1572
+#define I40E_DEV_ID_QEMU		0x1574
+#define I40E_DEV_ID_KX_A		0x157F
+#define I40E_DEV_ID_KX_B		0x1580
+#define I40E_DEV_ID_KX_C		0x1581
+#define I40E_DEV_ID_QSFP_A		0x1583
+#define I40E_DEV_ID_QSFP_B		0x1584
+#define I40E_DEV_ID_QSFP_C		0x1585
+#define I40E_DEV_ID_10G_BASE_T		0x1586
+#define I40E_DEV_ID_20G_KR2		0x1587
+#define I40E_DEV_ID_20G_KR2_A		0x1588
+#define I40E_DEV_ID_10G_BASE_T4		0x1589
+#define I40E_DEV_ID_VF			0x154C
+#define I40E_DEV_ID_VF_HV		0x1571
+#ifdef X722_SUPPORT
+#define I40E_DEV_ID_SFP_X722		0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722	0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722	0x37D2
+#define I40E_DEV_ID_X722_VF		0x37CD
+#define I40E_DEV_ID_X722_VF_HV		0x37D9
+#endif /* X722_SUPPORT */
+
+#define i40e_is_40G_device(d)		((d) == I40E_DEV_ID_QSFP_A  || \
+					 (d) == I40E_DEV_ID_QSFP_B  || \
+					 (d) == I40E_DEV_ID_QSFP_C)
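+/* Usage note: probe code can key 40G-specific behavior off this macro,
+ * e.g. "if (i40e_is_40G_device(hw->device_id)) ...".
+ */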
+
+#endif /* _I40E_DEVIDS_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_devids.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_hmc.c
===================================================================
--- trunk/sys/dev/ixl/i40e_hmc.c	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_hmc.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,374 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_hmc.c 292100 2015-12-11 13:08:38Z smh $*/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_status.h"
+#include "i40e_alloc.h"
+#include "i40e_hmc.h"
+#ifndef I40E_NO_TYPE_HEADER
+#include "i40e_type.h"
+#endif
+
+/**
+ * i40e_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ **/
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 sd_index,
+					      enum i40e_sd_entry_type type,
+					      u64 direct_mode_sz)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_sd_entry *sd_entry;
+	enum   i40e_memory_type mem_type;
+	bool dma_mem_alloc_done = FALSE;
+	struct i40e_dma_mem mem;
+	u64 alloc_len;
+
+	if (NULL == hmc_info->sd_table.sd_entry) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_add_sd_table_entry: bad sd_entry\n");
+		goto exit;
+	}
+
+	if (sd_index >= hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		DEBUGOUT("i40e_add_sd_table_entry: bad sd_index\n");
+		goto exit;
+	}
+
+	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+	if (!sd_entry->valid) {
+		if (I40E_SD_TYPE_PAGED == type) {
+			mem_type = i40e_mem_pd;
+			alloc_len = I40E_HMC_PAGED_BP_SIZE;
+		} else {
+			mem_type = i40e_mem_bp_jumbo;
+			alloc_len = direct_mode_sz;
+		}
+
+		/* allocate a 4K pd page or 2M backing page */
+		ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+						 I40E_HMC_PD_BP_BUF_ALIGNMENT);
+		if (ret_code)
+			goto exit;
+		dma_mem_alloc_done = TRUE;
+		if (I40E_SD_TYPE_PAGED == type) {
+			ret_code = i40e_allocate_virt_mem(hw,
+					&sd_entry->u.pd_table.pd_entry_virt_mem,
+					sizeof(struct i40e_hmc_pd_entry) * 512);
+			if (ret_code)
+				goto exit;
+			sd_entry->u.pd_table.pd_entry =
+				(struct i40e_hmc_pd_entry *)
+				sd_entry->u.pd_table.pd_entry_virt_mem.va;
+			i40e_memcpy(&sd_entry->u.pd_table.pd_page_addr,
+				    &mem, sizeof(struct i40e_dma_mem),
+				    I40E_NONDMA_TO_NONDMA);
+		} else {
+			i40e_memcpy(&sd_entry->u.bp.addr,
+				    &mem, sizeof(struct i40e_dma_mem),
+				    I40E_NONDMA_TO_NONDMA);
+			sd_entry->u.bp.sd_pd_index = sd_index;
+		}
+		/* initialize the sd entry */
+		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+		/* increment the ref count */
+		I40E_INC_SD_REFCNT(&hmc_info->sd_table);
+	}
+	/* Increment backing page reference count */
+	if (I40E_SD_TYPE_DIRECT == sd_entry->entry_type)
+		I40E_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+	if (I40E_SUCCESS != ret_code)
+		if (dma_mem_alloc_done)
+			i40e_free_dma_mem(hw, &mem);
+
+	return ret_code;
+}
+
+/**
+ * i40e_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use a preallocated page instead of allocating a
+ *	     new one.
+ *
+ * This function:
+ *	1. Initializes the pd entry
+ *	2. Adds the pd_entry to the pd_table
+ *	3. Marks the entry valid in the i40e_hmc_pd_entry structure
+ *	4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ *	1. The memory for the pd should be pinned down, physically contiguous,
+ *	   aligned on a 4K boundary, and zeroed.
+ *	2. It should be 4K in size.
+ **/
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_pd_table *pd_table;
+	struct i40e_hmc_pd_entry *pd_entry;
+	struct i40e_dma_mem mem;
+	struct i40e_dma_mem *page = &mem;
+	u32 sd_idx, rel_pd_idx;
+	u64 *pd_addr;
+	u64 page_desc;
+
+	if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+		DEBUGOUT("i40e_add_pd_table_entry: bad pd_index\n");
+		goto exit;
+	}
+
+	/* find corresponding sd */
+	sd_idx = (pd_index / I40E_HMC_PD_CNT_IN_SD);
+	if (I40E_SD_TYPE_PAGED !=
+	    hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+		goto exit;
+
+	rel_pd_idx = (pd_index % I40E_HMC_PD_CNT_IN_SD);
+	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+	pd_entry = &pd_table->pd_entry[rel_pd_idx];
+	if (!pd_entry->valid) {
+		if (rsrc_pg) {
+			pd_entry->rsrc_pg = TRUE;
+			page = rsrc_pg;
+		} else {
+			/* allocate a 4K backing page */
+			ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+						I40E_HMC_PAGED_BP_SIZE,
+						I40E_HMC_PD_BP_BUF_ALIGNMENT);
+			if (ret_code)
+				goto exit;
+			pd_entry->rsrc_pg = FALSE;
+		}
+
+		i40e_memcpy(&pd_entry->bp.addr, page,
+			    sizeof(struct i40e_dma_mem), I40E_NONDMA_TO_NONDMA);
+		pd_entry->bp.sd_pd_index = pd_index;
+		pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
+		/* Set page address and valid bit */
+		page_desc = page->pa | 0x1;
+
+		pd_addr = (u64 *)pd_table->pd_page_addr.va;
+		pd_addr += rel_pd_idx;
+
+		/* Add the backing page physical address in the pd entry */
+		i40e_memcpy(pd_addr, &page_desc, sizeof(u64),
+			    I40E_NONDMA_TO_DMA);
+
+		pd_entry->sd_index = sd_idx;
+		pd_entry->valid = TRUE;
+		I40E_INC_PD_REFCNT(pd_table);
+	}
+	I40E_INC_BP_REFCNT(&pd_entry->bp);
+exit:
+	return ret_code;
+}
+
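+/*
+ * Worked example (editorial sketch): the descriptor written into the PD
+ * page above is the 4K-aligned physical address of the backing page with
+ * bit 0 serving as the valid bit, so a page at pa = 0x12345000 is
+ * recorded as page_desc = 0x12345001.
+ */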
+/**
+ * i40e_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ *	1. Marks the entry in the pd table (for paged address mode) or in
+ *	   the sd table (for direct address mode) as invalid.
+ *	2. Writes to register PMPDINV to invalidate the backing page in FV
+ *	   cache.
+ *	3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *	1. Caller can deallocate the memory used by backing storage after this
+ *	   function returns.
+ **/
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+					struct i40e_hmc_info *hmc_info,
+					u32 idx)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_pd_entry *pd_entry;
+	struct i40e_hmc_pd_table *pd_table;
+	struct i40e_hmc_sd_entry *sd_entry;
+	u32 sd_idx, rel_pd_idx;
+	u64 *pd_addr;
+
+	/* calculate index */
+	sd_idx = idx / I40E_HMC_PD_CNT_IN_SD;
+	rel_pd_idx = idx % I40E_HMC_PD_CNT_IN_SD;
+	if (sd_idx >= hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
+		DEBUGOUT("i40e_remove_pd_bp: bad idx\n");
+		goto exit;
+	}
+	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+	if (I40E_SD_TYPE_PAGED != sd_entry->entry_type) {
+		ret_code = I40E_ERR_INVALID_SD_TYPE;
+		DEBUGOUT("i40e_remove_pd_bp: wrong sd_entry type\n");
+		goto exit;
+	}
+	/* get the entry and decrease its ref counter */
+	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+	pd_entry = &pd_table->pd_entry[rel_pd_idx];
+	I40E_DEC_BP_REFCNT(&pd_entry->bp);
+	if (pd_entry->bp.ref_cnt)
+		goto exit;
+
+	/* mark the entry invalid */
+	pd_entry->valid = FALSE;
+	I40E_DEC_PD_REFCNT(pd_table);
+	pd_addr = (u64 *)pd_table->pd_page_addr.va;
+	pd_addr += rel_pd_idx;
+	i40e_memset(pd_addr, 0, sizeof(u64), I40E_DMA_MEM);
+	I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+
+	/* free memory here */
+	if (!pd_entry->rsrc_pg)
+		ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+	if (I40E_SUCCESS != ret_code)
+		goto exit;
+	if (!pd_table->ref_cnt)
+		i40e_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ **/
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+					     u32 idx)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	/* get the entry and decrease its ref counter */
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+	I40E_DEC_BP_REFCNT(&sd_entry->u.bp);
+	if (sd_entry->u.bp.ref_cnt) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto exit;
+	}
+	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+	/* mark the entry invalid */
+	sd_entry->valid = FALSE;
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp_new - Removes a backing page from a segment descriptor
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+					    struct i40e_hmc_info *hmc_info,
+					    u32 idx, bool is_pf)
+{
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
+
+	/* get the entry and decrease its ref counter */
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+	return i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
+}
+
+/**
+ * i40e_prep_remove_pd_page - Prepares to remove a PD page from sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ **/
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+					       u32 idx)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+	if (sd_entry->u.pd_table.ref_cnt) {
+		ret_code = I40E_ERR_NOT_READY;
+		goto exit;
+	}
+
+	/* mark the entry invalid */
+	sd_entry->valid = FALSE;
+
+	I40E_DEC_SD_REFCNT(&hmc_info->sd_table);
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_remove_pd_page_new - Removes a PD page from sd entry.
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ * @is_pf: used to distinguish between VF and PF
+ **/
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 idx, bool is_pf)
+{
+	struct i40e_hmc_sd_entry *sd_entry;
+
+	if (!is_pf)
+		return I40E_NOT_SUPPORTED;
+
+	sd_entry = &hmc_info->sd_table.sd_entry[idx];
+	I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+	return i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
+}


Property changes on: trunk/sys/dev/ixl/i40e_hmc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_hmc.h
===================================================================
--- trunk/sys/dev/ixl/i40e_hmc.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_hmc.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,247 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_hmc.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_HMC_H_
+#define _I40E_HMC_H_
+
+#define I40E_HMC_MAX_BP_COUNT 512
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+#define I40E_HMC_INFO_SIGNATURE		0x484D5347 /* HMSG */
+#define I40E_HMC_PD_CNT_IN_SD		512
+#define I40E_HMC_DIRECT_BP_SIZE		0x200000 /* 2M */
+#define I40E_HMC_PAGED_BP_SIZE		4096
+#define I40E_HMC_PD_BP_BUF_ALIGNMENT	4096
+#define I40E_FIRST_VF_FPM_ID		16
+
+struct i40e_hmc_obj_info {
+	u64 base;	/* base addr in FPM */
+	u32 max_cnt;	/* max count available for this hmc func */
+	u32 cnt;	/* count of objects driver actually wants to create */
+	u64 size;	/* size in bytes of one object */
+};
+
+enum i40e_sd_entry_type {
+	I40E_SD_TYPE_INVALID = 0,
+	I40E_SD_TYPE_PAGED   = 1,
+	I40E_SD_TYPE_DIRECT  = 2
+};
+
+struct i40e_hmc_bp {
+	enum i40e_sd_entry_type entry_type;
+	struct i40e_dma_mem addr; /* populate to be used by hw */
+	u32 sd_pd_index;
+	u32 ref_cnt;
+};
+
+struct i40e_hmc_pd_entry {
+	struct i40e_hmc_bp bp;
+	u32 sd_index;
+	bool rsrc_pg;
+	bool valid;
+};
+
+struct i40e_hmc_pd_table {
+	struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */
+	struct i40e_hmc_pd_entry  *pd_entry; /* [512] for sw bookkeeping */
+	struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */
+
+	u32 ref_cnt;
+	u32 sd_index;
+};
+
+struct i40e_hmc_sd_entry {
+	enum i40e_sd_entry_type entry_type;
+	bool valid;
+
+	union {
+		struct i40e_hmc_pd_table pd_table;
+		struct i40e_hmc_bp bp;
+	} u;
+};
+
+struct i40e_hmc_sd_table {
+	struct i40e_virt_mem addr; /* used to track sd_entry allocations */
+	u32 sd_cnt;
+	u32 ref_cnt;
+	struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */
+};
+
+struct i40e_hmc_info {
+	u32 signature;
+	/* equal to the pci func num for a PF; dynamically allocated for VFs */
+	u8 hmc_fn_id;
+	u16 first_sd_index; /* index of the first available SD */
+
+	/* hmc objects */
+	struct i40e_hmc_obj_info *hmc_obj;
+	struct i40e_virt_mem hmc_obj_virt_mem;
+	struct i40e_hmc_sd_table sd_table;
+};
+
+#define I40E_INC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt++)
+#define I40E_INC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt++)
+#define I40E_INC_BP_REFCNT(bp)		((bp)->ref_cnt++)
+
+#define I40E_DEC_SD_REFCNT(sd_table)	((sd_table)->ref_cnt--)
+#define I40E_DEC_PD_REFCNT(pd_table)	((pd_table)->ref_cnt--)
+#define I40E_DEC_BP_REFCNT(bp)		((bp)->ref_cnt--)
+
+/**
+ * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware
+ * @hw: pointer to our hw struct
+ * @pa: pointer to physical address
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type)			\
+{									\
+	u32 val1, val2, val3;						\
+	val1 = (u32)(I40E_HI_DWORD(pa));				\
+	val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT <<			\
+		 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
+		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
+		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |			\
+		BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);		\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);			\
+	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
+	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
+}
+
+/**
+ * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_index: segment descriptor index
+ * @type: if sd entry is direct or paged
+ **/
+#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type)			\
+{									\
+	u32 val2, val3;							\
+	val2 = (I40E_HMC_MAX_BP_COUNT <<				\
+		I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |		\
+		((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<		\
+		I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);			\
+	val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);	\
+	wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);				\
+	wr32((hw), I40E_PFHMC_SDDATALOW, val2);				\
+	wr32((hw), I40E_PFHMC_SDCMD, val3);				\
+}
+
+/**
+ * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ **/
+#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)			\
+	wr32((hw), I40E_PFHMC_PDINV,					\
+	    (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |		\
+	     ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @index: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\
+{									\
+	u64 fpm_addr, fpm_limit;					\
+	fpm_addr = (hmc_info)->hmc_obj[(type)].base +			\
+		   (hmc_info)->hmc_obj[(type)].size * (index);		\
+	fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\
+	*(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE);		\
+	*(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE);	\
+	/* add one more to the limit to correct our range */		\
+	*(sd_limit) += 1;						\
+}
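+/*
+ * Worked example (editorial sketch): with I40E_HMC_DIRECT_BP_SIZE =
+ * 0x200000 (2M), an object base of 0x1F0000, an object size of 0x80 and
+ * index/cnt of 0/4096, fpm_addr = 0x1F0000 and fpm_limit = 0x270000,
+ * giving *sd_idx = 0 and *sd_limit = 2 (one past the last descriptor
+ * touched).
+ */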
+
+/**
+ * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_index: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40e_hmc_rsrc_type.
+ **/
+#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\
+{									\
+	u64 fpm_adr, fpm_limit;						\
+	fpm_adr = (hmc_info)->hmc_obj[(type)].base +			\
+		  (hmc_info)->hmc_obj[(type)].size * (idx);		\
+	fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);	\
+	*(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE);		\
+	*(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE);	\
+	/* add one more to the limit to correct our range */		\
+	*(pd_limit) += 1;						\
+}
+
+enum i40e_status_code i40e_add_sd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 sd_index,
+					      enum i40e_sd_entry_type type,
+					      u64 direct_mode_sz);
+
+enum i40e_status_code i40e_add_pd_table_entry(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 pd_index,
+					      struct i40e_dma_mem *rsrc_pg);
+enum i40e_status_code i40e_remove_pd_bp(struct i40e_hw *hw,
+					struct i40e_hmc_info *hmc_info,
+					u32 idx);
+enum i40e_status_code i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+					     u32 idx);
+enum i40e_status_code i40e_remove_sd_bp_new(struct i40e_hw *hw,
+					    struct i40e_hmc_info *hmc_info,
+					    u32 idx, bool is_pf);
+enum i40e_status_code i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+					       u32 idx);
+enum i40e_status_code i40e_remove_pd_page_new(struct i40e_hw *hw,
+					      struct i40e_hmc_info *hmc_info,
+					      u32 idx, bool is_pf);
+
+#endif /* _I40E_HMC_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_hmc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_lan_hmc.c
===================================================================
--- trunk/sys/dev/ixl/i40e_lan_hmc.c	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_lan_hmc.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,1413 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_lan_hmc.c 292100 2015-12-11 13:08:38Z smh $*/
+
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_type.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_prototype.h"
+
+/* lan specific interface functions */
+
+/**
+ * i40e_align_l2obj_base - aligns base object pointer to 512 bytes
+ * @offset: base address offset needing alignment
+ *
+ * Aligns the layer 2 function private memory so it's 512-byte aligned.
+ **/
+static u64 i40e_align_l2obj_base(u64 offset)
+{
+	u64 aligned_offset = offset;
+
+	if ((offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT) > 0)
+		aligned_offset += (I40E_HMC_L2OBJ_BASE_ALIGNMENT -
+				   (offset % I40E_HMC_L2OBJ_BASE_ALIGNMENT));
+
+	return aligned_offset;
+}
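
Since I40E_HMC_L2OBJ_BASE_ALIGNMENT is a power of two, the modulo-based
round-up above is equivalent to the usual mask idiom. A minimal,
self-contained check of the arithmetic (standalone sketch, not driver code):

    #include <assert.h>
    #include <stdint.h>

    #define ALIGNMENT 512ULL  /* I40E_HMC_L2OBJ_BASE_ALIGNMENT */

    /* Round up to the next 512-byte boundary; same result as
     * i40e_align_l2obj_base() for any offset. */
    static uint64_t align_up(uint64_t offset)
    {
        return (offset + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
    }

    int main(void)
    {
        assert(align_up(0) == 0);
        assert(align_up(1) == 512);
        assert(align_up(512) == 512);
        assert(align_up(513) == 1024);
        return 0;
    }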
+
+/**
+ * i40e_calculate_l2fpm_size - calculates layer 2 FPM memory size
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * Calculates the maximum amount of memory for the function required, based
+ * on the number of resources it must provide context for.
+ **/
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+			      u32 fcoe_cntx_num, u32 fcoe_filt_num)
+{
+	u64 fpm_size = 0;
+
+	fpm_size = txq_num * I40E_HMC_OBJ_SIZE_TXQ;
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	fpm_size += (rxq_num * I40E_HMC_OBJ_SIZE_RXQ);
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	fpm_size += (fcoe_cntx_num * I40E_HMC_OBJ_SIZE_FCOE_CNTX);
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	fpm_size += (fcoe_filt_num * I40E_HMC_OBJ_SIZE_FCOE_FILT);
+	fpm_size = i40e_align_l2obj_base(fpm_size);
+
+	return fpm_size;
+}
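
As a worked example of the running-sum-plus-alignment calculation, using the
object sizes defined in i40e_lan_hmc.h and hypothetical queue counts:

    #include <assert.h>
    #include <stdint.h>

    #define OBJ_SIZE_TXQ 128u  /* I40E_HMC_OBJ_SIZE_TXQ */
    #define OBJ_SIZE_RXQ 32u   /* I40E_HMC_OBJ_SIZE_RXQ */
    #define BASE_ALIGN   512u  /* I40E_HMC_L2OBJ_BASE_ALIGNMENT */

    static uint64_t align_up(uint64_t v)
    {
        return (v + BASE_ALIGN - 1) & ~(uint64_t)(BASE_ALIGN - 1);
    }

    int main(void)
    {
        /* 64 Tx and 64 Rx queues, no FCoE objects */
        uint64_t fpm = align_up(64 * OBJ_SIZE_TXQ);  /* 8192, aligned */
        fpm = align_up(fpm + 64 * OBJ_SIZE_RXQ);     /* 8192 + 2048 */
        assert(fpm == 10240);
        return 0;
    }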
+
+/**
+ * i40e_init_lan_hmc - initialize i40e_hmc_info struct
+ * @hw: pointer to the HW structure
+ * @txq_num: number of Tx queues needing backing context
+ * @rxq_num: number of Rx queues needing backing context
+ * @fcoe_cntx_num: number of FCoE stateful contexts needing backing context
+ * @fcoe_filt_num: number of FCoE filters needing backing context
+ *
+ * This function will be called once per physical function initialization.
+ * It will fill out the i40e_hmc_obj_info structure for LAN objects based on
+ * the driver's provided input, as well as information from the HMC itself
+ * loaded from NVRAM.
+ *
+ * Assumptions:
+ *   - HMC Resource Profile has been selected before calling this function.
+ **/
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+					u32 rxq_num, u32 fcoe_cntx_num,
+					u32 fcoe_filt_num)
+{
+	struct i40e_hmc_obj_info *obj, *full_obj;
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u64 l2fpm_size;
+	u32 size_exp;
+
+	hw->hmc.signature = I40E_HMC_INFO_SIGNATURE;
+	hw->hmc.hmc_fn_id = hw->pf_id;
+
+	/* allocate memory for hmc_obj */
+	ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem,
+			sizeof(struct i40e_hmc_obj_info) * I40E_HMC_LAN_MAX);
+	if (ret_code)
+		goto init_lan_hmc_out;
+	hw->hmc.hmc_obj = (struct i40e_hmc_obj_info *)
+			  hw->hmc.hmc_obj_virt_mem.va;
+
+	/* The full object will be used to create the LAN HMC SD */
+	full_obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_FULL];
+	full_obj->max_cnt = 0;
+	full_obj->cnt = 0;
+	full_obj->base = 0;
+	full_obj->size = 0;
+
+	/* Tx queue context information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+	obj->cnt = txq_num;
+	obj->base = 0;
+	size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
+	obj->size = BIT_ULL(size_exp);
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (txq_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  txq_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	/* Rx queue context information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_LANQMAX);
+	obj->cnt = rxq_num;
+	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_TX].base +
+		    (hw->hmc.hmc_obj[I40E_HMC_LAN_TX].cnt *
+		     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
+	obj->base = i40e_align_l2obj_base(obj->base);
+	size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
+	obj->size = BIT_ULL(size_exp);
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (rxq_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  rxq_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	/* FCoE context information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEMAX);
+	obj->cnt = fcoe_cntx_num;
+	obj->base = hw->hmc.hmc_obj[I40E_HMC_LAN_RX].base +
+		    (hw->hmc.hmc_obj[I40E_HMC_LAN_RX].cnt *
+		     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
+	obj->base = i40e_align_l2obj_base(obj->base);
+	size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
+	obj->size = BIT_ULL(size_exp);
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (fcoe_cntx_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  fcoe_cntx_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	/* FCoE filter information */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+	obj->max_cnt = rd32(hw, I40E_GLHMC_FCOEFMAX);
+	obj->cnt = fcoe_filt_num;
+	obj->base = hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].base +
+		    (hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].cnt *
+		     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
+	obj->base = i40e_align_l2obj_base(obj->base);
+	size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
+	obj->size = BIT_ULL(size_exp);
+
+	/* validate values requested by driver don't exceed HMC capacity */
+	if (fcoe_filt_num > obj->max_cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
+			  fcoe_filt_num, obj->max_cnt, ret_code);
+		goto init_lan_hmc_out;
+	}
+
+	/* aggregate values into the full LAN object for later */
+	full_obj->max_cnt += obj->max_cnt;
+	full_obj->cnt += obj->cnt;
+
+	hw->hmc.first_sd_index = 0;
+	hw->hmc.sd_table.ref_cnt = 0;
+	l2fpm_size = i40e_calculate_l2fpm_size(txq_num, rxq_num, fcoe_cntx_num,
+					       fcoe_filt_num);
+	if (NULL == hw->hmc.sd_table.sd_entry) {
+		hw->hmc.sd_table.sd_cnt = (u32)
+				   (l2fpm_size + I40E_HMC_DIRECT_BP_SIZE - 1) /
+				   I40E_HMC_DIRECT_BP_SIZE;
+
+		/* allocate the sd_entry members in the sd_table */
+		ret_code = i40e_allocate_virt_mem(hw, &hw->hmc.sd_table.addr,
+					  (sizeof(struct i40e_hmc_sd_entry) *
+					  hw->hmc.sd_table.sd_cnt));
+		if (ret_code)
+			goto init_lan_hmc_out;
+		hw->hmc.sd_table.sd_entry =
+			(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
+	}
+	/* store in the LAN full object for later */
+	full_obj->size = l2fpm_size;
+
+init_lan_hmc_out:
+	return ret_code;
+}
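
A PF driver typically sizes the HMC from its discovered capabilities and
then commits the layout with i40e_configure_lan_hmc() (defined further
down). A sketch of that call order, with assumed parameter names rather
than a verbatim excerpt from if_ixl.c:

    /* Hypothetical attach-path helper; num_tx/num_rx would come from
     * the function capabilities discovered earlier in bring-up. */
    static enum i40e_status_code
    example_hmc_setup(struct i40e_hw *hw, u32 num_tx, u32 num_rx)
    {
        enum i40e_status_code status;

        status = i40e_init_lan_hmc(hw, num_tx, num_rx, 0, 0);
        if (status != I40E_SUCCESS)
            return status;
        return i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
    }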
+
+/**
+ * i40e_remove_pd_page - Remove a page from the page descriptor table
+ * @hw: pointer to the HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ *
+ * This function:
+ *	1. Marks the entry in the pd table (for paged address mode) invalid
+ *	2. Writes to the PMPDINV register to invalidate the backing page
+ *	   in the FV cache
+ *	3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *	1. caller can deallocate the memory used by pd after this function
+ *	   returns.
+ **/
+static enum i40e_status_code i40e_remove_pd_page(struct i40e_hw *hw,
+						 struct i40e_hmc_info *hmc_info,
+						 u32 idx)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (i40e_prep_remove_pd_page(hmc_info, idx) == I40E_SUCCESS)
+		ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, TRUE);
+
+	return ret_code;
+}
+
+/**
+ * i40e_remove_sd_bp - remove a backing page from a segment descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ *	1. Marks the entry in the sd table (for direct address mode) invalid
+ *	2. Writes to the PMSDCMD, PMSDDATALOW (with PMSDDATALOW.PMSDVALID
+ *	   set to 0), and PMSDDATAHIGH registers to invalidate the sd page
+ *	3. Decrements the ref count for the sd_entry
+ * assumptions:
+ *	1. caller can deallocate the memory used by backing storage after this
+ *	   function returns.
+ **/
+static enum i40e_status_code i40e_remove_sd_bp(struct i40e_hw *hw,
+					       struct i40e_hmc_info *hmc_info,
+					       u32 idx)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	if (i40e_prep_remove_sd_bp(hmc_info, idx) == I40E_SUCCESS)
+		ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, TRUE);
+
+	return ret_code;
+}
+
+/**
+ * i40e_create_lan_hmc_object - allocate backing store for hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ **/
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+				struct i40e_hmc_lan_create_obj_info *info)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_sd_entry *sd_entry;
+	u32 pd_idx1 = 0, pd_lmt1 = 0;
+	u32 pd_idx = 0, pd_lmt = 0;
+	bool pd_error = FALSE;
+	u32 sd_idx, sd_lmt;
+	u64 sd_size;
+	u32 i, j;
+
+	if (NULL == info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_create_lan_hmc_object: bad info ptr\n");
+		goto exit;
+	}
+	if (NULL == info->hmc_info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_create_lan_hmc_object: bad hmc_info ptr\n");
+		goto exit;
+	}
+	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_create_lan_hmc_object: bad signature\n");
+		goto exit;
+	}
+
+	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+	if ((info->start_idx + info->count) >
+	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		DEBUGOUT1("i40e_create_lan_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+
+	/* find sd index and limit */
+	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count,
+				 &sd_idx, &sd_lmt);
+	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		goto exit;
+	}
+	/* find pd index */
+	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count, &pd_idx,
+				 &pd_lmt);
+
+	/* This covers cases where the SD should be smaller than the full
+	 * 2M of memory. If no size is filled out, the SD size defaults to 2M.
+	 */
+	if (info->direct_mode_sz == 0)
+		sd_size = I40E_HMC_DIRECT_BP_SIZE;
+	else
+		sd_size = info->direct_mode_sz;
+
+	/* check if all the sds are valid. If not, allocate a page and
+	 * initialize it.
+	 */
+	for (j = sd_idx; j < sd_lmt; j++) {
+		/* update the sd table entry */
+		ret_code = i40e_add_sd_table_entry(hw, info->hmc_info, j,
+						   info->entry_type,
+						   sd_size);
+		if (I40E_SUCCESS != ret_code)
+			goto exit_sd_error;
+		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+		if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+			/* check if all the pds in this sd are valid. If not,
+			 * allocate a page and initialize it.
+			 */
+
+			/* find pd_idx and pd_lmt in this sd */
+			pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));
+			pd_lmt1 = min(pd_lmt,
+				      ((j + 1) * I40E_HMC_MAX_BP_COUNT));
+			for (i = pd_idx1; i < pd_lmt1; i++) {
+				/* update the pd table entry */
+				ret_code = i40e_add_pd_table_entry(hw,
+								info->hmc_info,
+								i, NULL);
+				if (I40E_SUCCESS != ret_code) {
+					pd_error = TRUE;
+					break;
+				}
+			}
+			if (pd_error) {
+				/* remove the backing pages from pd_idx1 to i */
+				while (i && (i > pd_idx1)) {
+					i40e_remove_pd_bp(hw, info->hmc_info,
+							  (i - 1));
+					i--;
+				}
+			}
+		}
+		if (!sd_entry->valid) {
+			sd_entry->valid = TRUE;
+			switch (sd_entry->entry_type) {
+			case I40E_SD_TYPE_PAGED:
+				I40E_SET_PF_SD_ENTRY(hw,
+					sd_entry->u.pd_table.pd_page_addr.pa,
+					j, sd_entry->entry_type);
+				break;
+			case I40E_SD_TYPE_DIRECT:
+				I40E_SET_PF_SD_ENTRY(hw, sd_entry->u.bp.addr.pa,
+						     j, sd_entry->entry_type);
+				break;
+			default:
+				ret_code = I40E_ERR_INVALID_SD_TYPE;
+				goto exit;
+			}
+		}
+	}
+	goto exit;
+
+exit_sd_error:
+	/* cleanup for sd entries from j to sd_idx */
+	while (j && (j > sd_idx)) {
+		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+		switch (sd_entry->entry_type) {
+		case I40E_SD_TYPE_PAGED:
+			pd_idx1 = max(pd_idx,
+				      ((j - 1) * I40E_HMC_MAX_BP_COUNT));
+			pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
+			for (i = pd_idx1; i < pd_lmt1; i++)
+				i40e_remove_pd_bp(hw, info->hmc_info, i);
+			i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
+			break;
+		case I40E_SD_TYPE_DIRECT:
+			i40e_remove_sd_bp(hw, info->hmc_info, (j - 1));
+			break;
+		default:
+			ret_code = I40E_ERR_INVALID_SD_TYPE;
+			break;
+		}
+		j--;
+	}
+exit:
+	return ret_code;
+}
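
The I40E_FIND_SD_INDEX_LIMIT/I40E_FIND_PD_INDEX_LIMIT macros used above
reduce an object's FPM byte range to half-open [index, limit) ranges over
the backing pages (2MB direct SDs, 4KB paged PDs). A self-contained
illustration of the same arithmetic with hypothetical object geometry:

    #include <stdint.h>
    #include <stdio.h>

    #define DIRECT_BP_SIZE (2ULL * 1024 * 1024) /* I40E_HMC_DIRECT_BP_SIZE */
    #define PAGED_BP_SIZE  4096ULL              /* I40E_HMC_PAGED_BP_SIZE */

    int main(void)
    {
        /* hypothetical object: base 0, 128 bytes each, entries 1000..1999 */
        uint64_t start = 0 + 128ULL * 1000;     /* first byte used */
        uint64_t limit = start + 128ULL * 1000; /* one past the last byte */

        printf("sd range: [%llu, %llu)\n",
            (unsigned long long)(start / DIRECT_BP_SIZE),
            (unsigned long long)((limit - 1) / DIRECT_BP_SIZE + 1));
        printf("pd range: [%llu, %llu)\n",
            (unsigned long long)(start / PAGED_BP_SIZE),
            (unsigned long long)((limit - 1) / PAGED_BP_SIZE + 1));
        return 0;
    }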
+
+/**
+ * i40e_configure_lan_hmc - prepare the HMC backing store
+ * @hw: pointer to the hw structure
+ * @model: the model for the layout of the SD/PD tables
+ *
+ * - This function will be called once per physical function initialization.
+ * - This function will be called after i40e_init_lan_hmc() and before
+ *   any LAN/FCoE HMC objects can be created.
+ **/
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+					     enum i40e_hmc_model model)
+{
+	struct i40e_hmc_lan_create_obj_info info;
+	u8 hmc_fn_id = hw->hmc.hmc_fn_id;
+	struct i40e_hmc_obj_info *obj;
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+
+	/* Initialize part of the create object info struct */
+	info.hmc_info = &hw->hmc;
+	info.rsrc_type = I40E_HMC_LAN_FULL;
+	info.start_idx = 0;
+	info.direct_mode_sz = hw->hmc.hmc_obj[I40E_HMC_LAN_FULL].size;
+
+	/* Build the SD entry for the LAN objects */
+	switch (model) {
+	case I40E_HMC_MODEL_DIRECT_PREFERRED:
+	case I40E_HMC_MODEL_DIRECT_ONLY:
+		info.entry_type = I40E_SD_TYPE_DIRECT;
+		/* Make one big object, a single SD */
+		info.count = 1;
+		ret_code = i40e_create_lan_hmc_object(hw, &info);
+		if ((ret_code != I40E_SUCCESS) && (model == I40E_HMC_MODEL_DIRECT_PREFERRED))
+			goto try_type_paged;
+		else if (ret_code != I40E_SUCCESS)
+			goto configure_lan_hmc_out;
+		/* on success, fall through to the break */
+		break;
+	case I40E_HMC_MODEL_PAGED_ONLY:
+try_type_paged:
+		info.entry_type = I40E_SD_TYPE_PAGED;
+		/* Make one big object in the PD table */
+		info.count = 1;
+		ret_code = i40e_create_lan_hmc_object(hw, &info);
+		if (ret_code != I40E_SUCCESS)
+			goto configure_lan_hmc_out;
+		break;
+	default:
+		/* unsupported type */
+		ret_code = I40E_ERR_INVALID_SD_TYPE;
+		DEBUGOUT1("i40e_configure_lan_hmc: Unknown SD type: %d\n",
+			  ret_code);
+		goto configure_lan_hmc_out;
+	}
+
+	/* Configure and program the FPM registers so objects can be created */
+
+	/* Tx contexts */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_TX];
+	wr32(hw, I40E_GLHMC_LANTXBASE(hmc_fn_id),
+	     (u32)((obj->base & I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_LANTXCNT(hmc_fn_id), obj->cnt);
+
+	/* Rx contexts */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_LAN_RX];
+	wr32(hw, I40E_GLHMC_LANRXBASE(hmc_fn_id),
+	     (u32)((obj->base & I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_LANRXCNT(hmc_fn_id), obj->cnt);
+
+	/* FCoE contexts */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX];
+	wr32(hw, I40E_GLHMC_FCOEDDPBASE(hmc_fn_id),
+	 (u32)((obj->base & I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_FCOEDDPCNT(hmc_fn_id), obj->cnt);
+
+	/* FCoE filters */
+	obj = &hw->hmc.hmc_obj[I40E_HMC_FCOE_FILT];
+	wr32(hw, I40E_GLHMC_FCOEFBASE(hmc_fn_id),
+	     (u32)((obj->base & I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK) / 512));
+	wr32(hw, I40E_GLHMC_FCOEFCNT(hmc_fn_id), obj->cnt);
+
+configure_lan_hmc_out:
+	return ret_code;
+}
+
+/**
+ * i40e_delete_hmc_object - remove hmc objects
+ * @hw: pointer to the HW structure
+ * @info: pointer to i40e_hmc_delete_obj_info struct
+ *
+ * This will de-populate the SDs and PDs.  It frees the memory for the
+ * PDs and backing storage.  After this function returns, the caller
+ * should deallocate the memory previously allocated for book-keeping
+ * information about PDs and backing storage.
+ **/
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+				struct i40e_hmc_lan_delete_obj_info *info)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_hmc_pd_table *pd_table;
+	u32 pd_idx, pd_lmt, rel_pd_idx;
+	u32 sd_idx, sd_lmt;
+	u32 i, j;
+
+	if (NULL == info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_delete_hmc_object: bad info ptr\n");
+		goto exit;
+	}
+	if (NULL == info->hmc_info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_delete_hmc_object: bad info->hmc_info ptr\n");
+		goto exit;
+	}
+	if (I40E_HMC_INFO_SIGNATURE != info->hmc_info->signature) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->signature\n");
+		goto exit;
+	}
+
+	if (NULL == info->hmc_info->sd_table.sd_entry) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_delete_hmc_object: bad sd_entry\n");
+		goto exit;
+	}
+
+	if (NULL == info->hmc_info->hmc_obj) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_delete_hmc_object: bad hmc_info->hmc_obj\n");
+		goto exit;
+	}
+	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+
+	if ((info->start_idx + info->count) >
+	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
+		DEBUGOUT1("i40e_delete_hmc_object: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+
+	I40E_FIND_PD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count, &pd_idx,
+				 &pd_lmt);
+
+	for (j = pd_idx; j < pd_lmt; j++) {
+		sd_idx = j / I40E_HMC_PD_CNT_IN_SD;
+
+		if (I40E_SD_TYPE_PAGED !=
+		    info->hmc_info->sd_table.sd_entry[sd_idx].entry_type)
+			continue;
+
+		rel_pd_idx = j % I40E_HMC_PD_CNT_IN_SD;
+
+		pd_table =
+			&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+		if (pd_table->pd_entry[rel_pd_idx].valid) {
+			ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
+			if (I40E_SUCCESS != ret_code)
+				goto exit;
+		}
+	}
+
+	/* find sd index and limit */
+	I40E_FIND_SD_INDEX_LIMIT(info->hmc_info, info->rsrc_type,
+				 info->start_idx, info->count,
+				 &sd_idx, &sd_lmt);
+	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+		ret_code = I40E_ERR_INVALID_SD_INDEX;
+		goto exit;
+	}
+
+	for (i = sd_idx; i < sd_lmt; i++) {
+		if (!info->hmc_info->sd_table.sd_entry[i].valid)
+			continue;
+		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+		case I40E_SD_TYPE_DIRECT:
+			ret_code = i40e_remove_sd_bp(hw, info->hmc_info, i);
+			if (I40E_SUCCESS != ret_code)
+				goto exit;
+			break;
+		case I40E_SD_TYPE_PAGED:
+			ret_code = i40e_remove_pd_page(hw, info->hmc_info, i);
+			if (I40E_SUCCESS != ret_code)
+				goto exit;
+			break;
+		default:
+			break;
+		}
+	}
+exit:
+	return ret_code;
+}
+
+/**
+ * i40e_shutdown_lan_hmc - Remove HMC backing store, free allocated memory
+ * @hw: pointer to the hw structure
+ *
+ * This must be called by drivers as they are shutting down and being
+ * removed from the OS.
+ **/
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+{
+	struct i40e_hmc_lan_delete_obj_info info;
+	enum i40e_status_code ret_code;
+
+	info.hmc_info = &hw->hmc;
+	info.rsrc_type = I40E_HMC_LAN_FULL;
+	info.start_idx = 0;
+	info.count = 1;
+
+	/* delete the object */
+	ret_code = i40e_delete_lan_hmc_object(hw, &info);
+
+	/* free the SD table entry for LAN */
+	i40e_free_virt_mem(hw, &hw->hmc.sd_table.addr);
+	hw->hmc.sd_table.sd_cnt = 0;
+	hw->hmc.sd_table.sd_entry = NULL;
+
+	/* free memory used for hmc_obj */
+	i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+	hw->hmc.hmc_obj = NULL;
+
+	return ret_code;
+}
+
+#define I40E_HMC_STORE(_struct, _ele)		\
+	offsetof(struct _struct, _ele),		\
+	FIELD_SIZEOF(struct _struct, _ele)
+
+struct i40e_context_ele {
+	u16 offset;
+	u16 size_of;
+	u16 width;
+	u16 lsb;
+};
+
+/* LAN Tx Queue Context */
+static struct i40e_context_ele i40e_hmc_txq_ce_info[] = {
+					     /* Field      Width    LSB */
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, head),           13,      0 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, new_context),     1,     30 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, base),           57,     32 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, fc_ena),          1,     89 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, timesync_ena),    1,     90 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, fd_ena),          1,     91 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, alt_vlan_ena),    1,     92 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, cpuid),           8,     96 },
+/* line 1 */
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, thead_wb),       13,  0 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_ena),     1, 32 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, qlen),           13, 33 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrdesc_ena),    1, 46 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphrpacket_ena),  1, 47 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, tphwdesc_ena),    1, 48 + 128 },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, head_wb_addr),   64, 64 + 128 },
+/* line 7 */
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, crc),            32,  0 + (7 * 128) },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist),        10, 84 + (7 * 128) },
+	{I40E_HMC_STORE(i40e_hmc_obj_txq, rdylist_act),     1, 94 + (7 * 128) },
+	{ 0 }
+};
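
Each I40E_HMC_STORE entry expands to an offset/size pair, so every table
row fully describes where a field lives in the host struct and where its
bits land in the context. A standalone sketch of the same pattern
(FIELD_SIZEOF is assumed to be the usual sizeof-a-member helper from the
osdep header):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))  /* assumed helper */
    #define HMC_STORE(t, f)    offsetof(t, f), FIELD_SIZEOF(t, f)

    struct ctx_ele { uint16_t offset, size_of, width, lsb; };
    struct demo_txq { uint16_t head; uint64_t base; };

    static const struct ctx_ele demo_info[] = {
        { HMC_STORE(struct demo_txq, head), 13,  0 },
        { HMC_STORE(struct demo_txq, base), 57, 32 },
        { 0 }
    };

    int main(void)
    {
        /* head lives at byte 0 of the struct and occupies 2 bytes */
        printf("head: offset %u, size %u\n",
            demo_info[0].offset, demo_info[0].size_of);
        return 0;
    }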
+
+/* LAN Rx Queue Context */
+static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
+					 /* Field      Width    LSB */
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, head),        13,	0   },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, cpuid),        8,	13  },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, base),        57,	32  },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, qlen),        13,	89  },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dbuff),        7,	102 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hbuff),        5,	109 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dtype),        2,	114 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, dsize),        1,	116 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, crcstrip),     1,	117 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, fc_ena),       1,	118 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, l2tsel),       1,	119 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_0),     4,	120 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, hsplit_1),     2,	124 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, showiv),       1,	127 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, rxmax),       14,	174 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphrdesc_ena), 1,	193 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphwdesc_ena), 1,	194 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena),  1,	195 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena),  1,	196 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh),   3,	198 },
+	{ I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena),      1,	201 },
+	{ 0 }
+};
+
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+			    struct i40e_context_ele *ce_info,
+			    u8 *src)
+{
+	u8 src_byte, dest_byte, mask;
+	u8 *from, *dest;
+	u16 shift_width;
+
+	/* copy from the next struct field */
+	from = src + ce_info->offset;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+	mask = BIT(ce_info->width) - 1;
+
+	src_byte = *from;
+	src_byte &= mask;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+	src_byte <<= shift_width;
+
+	/* get the current bits from the target bit string */
+	dest = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&dest_byte, dest, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+	dest_byte &= ~mask;	/* get the bits not changing */
+	dest_byte |= src_byte;	/* add in the new bits */
+
+	/* put it all back */
+	i40e_memcpy(dest, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+			    struct i40e_context_ele *ce_info,
+			    u8 *src)
+{
+	u16 src_word, mask;
+	u8 *from, *dest;
+	u16 shift_width;
+	__le16 dest_word;
+
+	/* copy from the next struct field */
+	from = src + ce_info->offset;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+	mask = BIT(ce_info->width) - 1;
+
+	/* don't swizzle the bits until after the mask because the mask bits
+	 * will be in a different bit position on big endian machines
+	 */
+	src_word = *(u16 *)from;
+	src_word &= mask;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+	src_word <<= shift_width;
+
+	/* get the current bits from the target bit string */
+	dest = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&dest_word, dest, sizeof(dest_word), I40E_DMA_TO_NONDMA);
+
+	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
+	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */
+
+	/* put it all back */
+	i40e_memcpy(dest, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
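
The byte/word/dword/qword write helpers all follow the same
read-modify-write pattern: mask the source, shift both into position,
clear the destination field, OR in the new bits. A self-contained
demonstration of inserting a 3-bit field at bit 5 of a byte:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t  dest  = 0xFF;           /* existing context bits */
        uint8_t  src   = 0x5;            /* 3-bit field value (101b) */
        uint8_t  mask  = (1u << 3) - 1;  /* width = 3 */
        unsigned shift = 5;              /* lsb % 8 */

        mask = (uint8_t)(mask << shift);
        dest = (uint8_t)((dest & ~mask) | ((uint8_t)(src << shift) & mask));
        assert(dest == 0xBF);            /* 101b placed in bits 7..5 */
        return 0;
    }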
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+			     struct i40e_context_ele *ce_info,
+			     u8 *src)
+{
+	u32 src_dword, mask;
+	u8 *from, *dest;
+	u16 shift_width;
+	__le32 dest_dword;
+
+	/* copy from the next struct field */
+	from = src + ce_info->offset;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 32 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 5 bits, so the shift would do nothing
+	 */
+	if (ce_info->width < 32)
+		mask = BIT(ce_info->width) - 1;
+	else
+		mask = ~(u32)0;
+
+	/* don't swizzle the bits until after the mask because the mask bits
+	 * will be in a different bit position on big endian machines
+	 */
+	src_dword = *(u32 *)from;
+	src_dword &= mask;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+	src_dword <<= shift_width;
+
+	/* get the current bits from the target bit string */
+	dest = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&dest_dword, dest, sizeof(dest_dword), I40E_DMA_TO_NONDMA);
+
+	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
+	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */
+
+	/* put it all back */
+	i40e_memcpy(dest, &dest_dword, sizeof(dest_dword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be read from
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+			     struct i40e_context_ele *ce_info,
+			     u8 *src)
+{
+	u64 src_qword, mask;
+	u8 *from, *dest;
+	u16 shift_width;
+	__le64 dest_qword;
+
+	/* copy from the next struct field */
+	from = src + ce_info->offset;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 64 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 6 bits, so the shift would do nothing
+	 */
+	if (ce_info->width < 64)
+		mask = BIT_ULL(ce_info->width) - 1;
+	else
+		mask = ~(u64)0;
+
+	/* don't swizzle the bits until after the mask because the mask bits
+	 * will be in a different bit position on big endian machines
+	 */
+	src_qword = *(u64 *)from;
+	src_qword &= mask;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+	src_qword <<= shift_width;
+
+	/* get the current bits from the target bit string */
+	dest = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&dest_qword, dest, sizeof(dest_qword), I40E_DMA_TO_NONDMA);
+
+	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
+	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */
+
+	/* put it all back */
+	i40e_memcpy(dest, &dest_qword, sizeof(dest_qword), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_byte - read HMC context byte into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_byte(u8 *hmc_bits,
+			   struct i40e_context_ele *ce_info,
+			   u8 *dest)
+{
+	u8 dest_byte, mask;
+	u8 *src, *target;
+	u16 shift_width;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+	mask = BIT(ce_info->width) - 1;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+
+	/* get the current bits from the src bit string */
+	src = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&dest_byte, src, sizeof(dest_byte), I40E_DMA_TO_NONDMA);
+
+	dest_byte &= mask;	/* keep only the field bits */
+
+	dest_byte >>= shift_width;
+
+	/* get the address from the struct field */
+	target = dest + ce_info->offset;
+
+	/* put it back in the struct */
+	i40e_memcpy(target, &dest_byte, sizeof(dest_byte), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_word - read HMC context word into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_word(u8 *hmc_bits,
+			   struct i40e_context_ele *ce_info,
+			   u8 *dest)
+{
+	u16 dest_word, mask;
+	u8 *src, *target;
+	u16 shift_width;
+	__le16 src_word;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+	mask = BIT(ce_info->width) - 1;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+
+	/* get the current bits from the src bit string */
+	src = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&src_word, src, sizeof(src_word), I40E_DMA_TO_NONDMA);
+
+	/* the data in the memory is stored as little endian so mask it
+	 * correctly
+	 */
+	src_word &= CPU_TO_LE16(mask);
+
+	/* get the data back into host order before shifting */
+	dest_word = LE16_TO_CPU(src_word);
+
+	dest_word >>= shift_width;
+
+	/* get the address from the struct field */
+	target = dest + ce_info->offset;
+
+	/* put it back in the struct */
+	i40e_memcpy(target, &dest_word, sizeof(dest_word), I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_dword - read HMC context dword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_dword(u8 *hmc_bits,
+			    struct i40e_context_ele *ce_info,
+			    u8 *dest)
+{
+	u32 dest_dword, mask;
+	u8 *src, *target;
+	u16 shift_width;
+	__le32 src_dword;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 32 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 5 bits, so the shift would do nothing
+	 */
+	if (ce_info->width < 32)
+		mask = BIT(ce_info->width) - 1;
+	else
+		mask = ~(u32)0;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+
+	/* get the current bits from the src bit string */
+	src = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&src_dword, src, sizeof(src_dword), I40E_DMA_TO_NONDMA);
+
+	/* the data in the memory is stored as little endian so mask it
+	 * correctly
+	 */
+	src_dword &= CPU_TO_LE32(mask);
+
+	/* get the data back into host order before shifting */
+	dest_dword = LE32_TO_CPU(src_dword);
+
+	dest_dword >>= shift_width;
+
+	/* get the address from the struct field */
+	target = dest + ce_info->offset;
+
+	/* put it back in the struct */
+	i40e_memcpy(target, &dest_dword, sizeof(dest_dword),
+		    I40E_NONDMA_TO_DMA);
+}
+
+/**
+ * i40e_read_qword - read HMC context qword into struct
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static void i40e_read_qword(u8 *hmc_bits,
+			    struct i40e_context_ele *ce_info,
+			    u8 *dest)
+{
+	u64 dest_qword, mask;
+	u8 *src, *target;
+	u16 shift_width;
+	__le64 src_qword;
+
+	/* prepare the bits and mask */
+	shift_width = ce_info->lsb % 8;
+
+	/* if the field width is exactly 64 on an x86 machine, then the shift
+	 * operation will not work because the SHL instruction's count is
+	 * masked to 6 bits, so the shift would do nothing
+	 */
+	if (ce_info->width < 64)
+		mask = BIT_ULL(ce_info->width) - 1;
+	else
+		mask = ~(u64)0;
+
+	/* shift to correct alignment */
+	mask <<= shift_width;
+
+	/* get the current bits from the src bit string */
+	src = hmc_bits + (ce_info->lsb / 8);
+
+	i40e_memcpy(&src_qword, src, sizeof(src_qword), I40E_DMA_TO_NONDMA);
+
+	/* the data in the memory is stored as little endian so mask it
+	 * correctly
+	 */
+	src_qword &= CPU_TO_LE64(mask);
+
+	/* get the data back into host order before shifting */
+	dest_qword = LE64_TO_CPU(src_qword);
+
+	dest_qword >>= shift_width;
+
+	/* get the address from the struct field */
+	target = dest + ce_info->offset;
+
+	/* put it back in the struct */
+	i40e_memcpy(target, &dest_qword, sizeof(dest_qword),
+		    I40E_NONDMA_TO_DMA);
+}
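
Extraction is the mirror image of the write path: apply the (non-inverted)
field mask in place, then shift down. A self-contained check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t src  = 0xBF;                  /* 101b stored in bits 7..5 */
        uint8_t mask = (uint8_t)(((1u << 3) - 1) << 5);

        assert(((src & mask) >> 5) == 0x5);   /* field reads back as 5 */
        return 0;
    }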
+
+/**
+ * i40e_get_hmc_context - extract HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info: a description of the struct to be filled
+ * @dest: the struct to be filled
+ **/
+static enum i40e_status_code i40e_get_hmc_context(u8 *context_bytes,
+					struct i40e_context_ele *ce_info,
+					u8 *dest)
+{
+	int f;
+
+	for (f = 0; ce_info[f].width != 0; f++) {
+		switch (ce_info[f].size_of) {
+		case 1:
+			i40e_read_byte(context_bytes, &ce_info[f], dest);
+			break;
+		case 2:
+			i40e_read_word(context_bytes, &ce_info[f], dest);
+			break;
+		case 4:
+			i40e_read_dword(context_bytes, &ce_info[f], dest);
+			break;
+		case 8:
+			i40e_read_qword(context_bytes, &ce_info[f], dest);
+			break;
+		default:
+			/* nothing to do, just keep going */
+			break;
+		}
+	}
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_clear_hmc_context - zero out the HMC context bits
+ * @hw:       the hardware struct
+ * @context_bytes: pointer to the context bit array (DMA memory)
+ * @hmc_type: the type of HMC resource
+ **/
+static enum i40e_status_code i40e_clear_hmc_context(struct i40e_hw *hw,
+					u8 *context_bytes,
+					enum i40e_hmc_lan_rsrc_type hmc_type)
+{
+	/* clean the bit array */
+	i40e_memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size,
+		    I40E_DMA_MEM);
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_set_hmc_context - replace HMC context bits
+ * @context_bytes: pointer to the context bit array
+ * @ce_info:  a description of the struct to be read from
+ * @dest:     the struct holding the values to write into the context
+ **/
+static enum i40e_status_code i40e_set_hmc_context(u8 *context_bytes,
+					struct i40e_context_ele *ce_info,
+					u8 *dest)
+{
+	int f;
+
+	for (f = 0; ce_info[f].width != 0; f++) {
+
+		/* we have to deal with each element of the HMC using the
+		 * correct size so that we are correct regardless of the
+		 * endianness of the machine
+		 */
+		switch (ce_info[f].size_of) {
+		case 1:
+			i40e_write_byte(context_bytes, &ce_info[f], dest);
+			break;
+		case 2:
+			i40e_write_word(context_bytes, &ce_info[f], dest);
+			break;
+		case 4:
+			i40e_write_dword(context_bytes, &ce_info[f], dest);
+			break;
+		case 8:
+			i40e_write_qword(context_bytes, &ce_info[f], dest);
+			break;
+		}
+	}
+
+	return I40E_SUCCESS;
+}
+
+/**
+ * i40e_hmc_get_object_va - retrieves an object's virtual address
+ * @hw: pointer to the hw structure
+ * @object_base: pointer to u64 to get the va
+ * @rsrc_type: the hmc resource type
+ * @obj_idx: hmc object index
+ *
+ * This function retrieves the object's virtual address from the object
+ * base pointer.  This function is used for LAN Queue contexts.
+ **/
+static
+enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
+					u8 **object_base,
+					enum i40e_hmc_lan_rsrc_type rsrc_type,
+					u32 obj_idx)
+{
+	u32 obj_offset_in_sd, obj_offset_in_pd;
+	struct i40e_hmc_info     *hmc_info = &hw->hmc;
+	struct i40e_hmc_sd_entry *sd_entry;
+	struct i40e_hmc_pd_entry *pd_entry;
+	u32 pd_idx, pd_lmt, rel_pd_idx;
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u64 obj_offset_in_fpm;
+	u32 sd_idx, sd_lmt;
+
+	if (NULL == hmc_info) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
+		goto exit;
+	}
+	if (NULL == hmc_info->hmc_obj) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
+		goto exit;
+	}
+	if (NULL == object_base) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_hmc_get_object_va: bad object_base ptr\n");
+		goto exit;
+	}
+	if (I40E_HMC_INFO_SIGNATURE != hmc_info->signature) {
+		ret_code = I40E_ERR_BAD_PTR;
+		DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->signature\n");
+		goto exit;
+	}
+	if (obj_idx >= hmc_info->hmc_obj[rsrc_type].cnt) {
+		ret_code = I40E_ERR_INVALID_HMC_OBJ_INDEX;
+		DEBUGOUT1("i40e_hmc_get_object_va: returns error %d\n",
+			  ret_code);
+		goto exit;
+	}
+	/* find sd index and limit */
+	I40E_FIND_SD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+				 &sd_idx, &sd_lmt);
+
+	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+	obj_offset_in_fpm = hmc_info->hmc_obj[rsrc_type].base +
+			    hmc_info->hmc_obj[rsrc_type].size * obj_idx;
+
+	if (I40E_SD_TYPE_PAGED == sd_entry->entry_type) {
+		I40E_FIND_PD_INDEX_LIMIT(hmc_info, rsrc_type, obj_idx, 1,
+					 &pd_idx, &pd_lmt);
+		rel_pd_idx = pd_idx % I40E_HMC_PD_CNT_IN_SD;
+		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx];
+		obj_offset_in_pd = (u32)(obj_offset_in_fpm %
+					 I40E_HMC_PAGED_BP_SIZE);
+		*object_base = (u8 *)pd_entry->bp.addr.va + obj_offset_in_pd;
+	} else {
+		obj_offset_in_sd = (u32)(obj_offset_in_fpm %
+					 I40E_HMC_DIRECT_BP_SIZE);
+		*object_base = (u8 *)sd_entry->u.bp.addr.va + obj_offset_in_sd;
+	}
+exit:
+	return ret_code;
+}
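
Once the FPM offset of an object is known, the paged-case lookup reduces
to one division and one modulo against the 4KB backing-page size. A
self-contained illustration with hypothetical object geometry:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGED_BP_SIZE 4096ULL  /* I40E_HMC_PAGED_BP_SIZE */

    int main(void)
    {
        /* hypothetical Rx object: base 8192, 32 bytes each, index 200 */
        uint64_t off_in_fpm = 8192 + 32ULL * 200;  /* 14592 */

        printf("pd page %llu, offset %llu\n",
            (unsigned long long)(off_in_fpm / PAGED_BP_SIZE),   /* 3 */
            (unsigned long long)(off_in_fpm % PAGED_BP_SIZE));  /* 2304 */
        return 0;
    }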
+
+/**
+ * i40e_get_lan_tx_queue_context - return the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s)
+{
+	enum i40e_status_code err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_get_hmc_context(context_bytes,
+				    i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_tx_queue_context - clear the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+						      u16 queue)
+{
+	enum i40e_status_code err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_TX);
+}
+
+/**
+ * i40e_set_lan_tx_queue_context - set the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct holding the context to be set
+ **/
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s)
+{
+	enum i40e_status_code err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_set_hmc_context(context_bytes,
+				    i40e_hmc_txq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_get_lan_rx_queue_context - return the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct to be filled
+ **/
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s)
+{
+	enum i40e_status_code err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_get_hmc_context(context_bytes,
+				    i40e_hmc_rxq_ce_info, (u8 *)s);
+}
+
+/**
+ * i40e_clear_lan_rx_queue_context - clear the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ **/
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+						      u16 queue)
+{
+	enum i40e_status_code err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_clear_hmc_context(hw, context_bytes, I40E_HMC_LAN_RX);
+}
+
+/**
+ * i40e_set_lan_rx_queue_context - set the HMC context for the queue
+ * @hw:    the hardware struct
+ * @queue: the queue we care about
+ * @s:     the struct holding the context to be set
+ **/
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s)
+{
+	enum i40e_status_code err;
+	u8 *context_bytes;
+
+	err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
+	if (err < 0)
+		return err;
+
+	return i40e_set_hmc_context(context_bytes,
+				    i40e_hmc_rxq_ce_info, (u8 *)s);
+}
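
Putting the queue-context helpers together, a PF driver clears any stale
context and then writes a fresh one before enabling a queue. A sketch
under stated assumptions (hypothetical helper; the Tx ring base is
assumed to be programmed in 128-byte units, as the ixl PF code does):

    /* Hypothetical queue-initialization fragment; ring_base is the
     * ring's DMA address. */
    static enum i40e_status_code
    example_init_txq(struct i40e_hw *hw, u16 queue, u64 ring_base, u16 qlen)
    {
        struct i40e_hmc_obj_txq ctx = { 0 };
        enum i40e_status_code err;

        err = i40e_clear_lan_tx_queue_context(hw, queue);
        if (err != I40E_SUCCESS)
            return err;

        ctx.new_context = 1;
        ctx.base = ring_base / 128;  /* assumed 128-byte units */
        ctx.qlen = qlen;
        return i40e_set_lan_tx_queue_context(hw, queue, &ctx);
    }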


Property changes on: trunk/sys/dev/ixl/i40e_lan_hmc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_lan_hmc.h
===================================================================
--- trunk/sys/dev/ixl/i40e_lan_hmc.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_lan_hmc.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,202 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2014, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_lan_hmc.h 292099 2015-12-11 13:05:18Z smh $*/
+
+#ifndef _I40E_LAN_HMC_H_
+#define _I40E_LAN_HMC_H_
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
+
+/* HMC element context information */
+
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
+struct i40e_hmc_obj_rxq {
+	u16 head;
+	u16 cpuid; /* bigger than needed, see above for reason */
+	u64 base;
+	u16 qlen;
+#define I40E_RXQ_CTX_DBUFF_SHIFT 7
+	u16 dbuff; /* bigger than needed, see above for reason */
+#define I40E_RXQ_CTX_HBUFF_SHIFT 6
+	u16 hbuff; /* bigger than needed, see above for reason */
+	u8  dtype;
+	u8  dsize;
+	u8  crcstrip;
+	u8  fc_ena;
+	u8  l2tsel;
+	u8  hsplit_0;
+	u8  hsplit_1;
+	u8  showiv;
+	u32 rxmax; /* bigger than needed, see above for reason */
+	u8  tphrdesc_ena;
+	u8  tphwdesc_ena;
+	u8  tphdata_ena;
+	u8  tphhead_ena;
+	u16 lrxqthresh; /* bigger than needed, see above for reason */
+	u8  prefena;	/* NOTE: normally must be set to 1 at init */
+};
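
The dbuff and hbuff fields carry buffer sizes pre-shifted by the _SHIFT
constants above, i.e. in 128-byte and 64-byte units respectively. A short
fragment with hypothetical buffer sizes:

    struct i40e_hmc_obj_rxq rxq = { 0 };

    /* hypothetical 2048-byte data buffer and 256-byte header buffer */
    rxq.dbuff = 2048 >> I40E_RXQ_CTX_DBUFF_SHIFT;  /* 16 */
    rxq.hbuff = 256 >> I40E_RXQ_CTX_HBUFF_SHIFT;   /* 4  */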
+
+/* Tx queue context data
+*
+* The sizes of the variables may be larger than needed due to crossing byte
+* boundaries. If we do not have the width of the variable set to the correct
+* size then we could end up shifting bits off the top of the variable when the
+* variable is at the top of a byte and crosses over into the next byte.
+*/
+struct i40e_hmc_obj_txq {
+	u16 head;
+	u8  new_context;
+	u64 base;
+	u8  fc_ena;
+	u8  timesync_ena;
+	u8  fd_ena;
+	u8  alt_vlan_ena;
+	u16 thead_wb;
+	u8  cpuid;
+	u8  head_wb_ena;
+	u16 qlen;
+	u8  tphrdesc_ena;
+	u8  tphrpacket_ena;
+	u8  tphwdesc_ena;
+	u64 head_wb_addr;
+	u32 crc;
+	u16 rdylist;
+	u8  rdylist_act;
+};
+
+/* for hsplit_0 field of Rx HMC context */
+enum i40e_hmc_obj_rx_hsplit_0 {
+	I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT      = 0,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2      = 1,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP      = 2,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4,
+	I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP    = 8,
+};
+
+/* fcoe_cntx and fcoe_filt are for debugging purpose only */
+struct i40e_hmc_obj_fcoe_cntx {
+	u32 rsv[32];
+};
+
+struct i40e_hmc_obj_fcoe_filt {
+	u32 rsv[8];
+};
+
+/* Context sizes for LAN objects */
+enum i40e_hmc_lan_object_size {
+	I40E_HMC_LAN_OBJ_SZ_8   = 0x3,
+	I40E_HMC_LAN_OBJ_SZ_16  = 0x4,
+	I40E_HMC_LAN_OBJ_SZ_32  = 0x5,
+	I40E_HMC_LAN_OBJ_SZ_64  = 0x6,
+	I40E_HMC_LAN_OBJ_SZ_128 = 0x7,
+	I40E_HMC_LAN_OBJ_SZ_256 = 0x8,
+	I40E_HMC_LAN_OBJ_SZ_512 = 0x9,
+};
+
+#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512
+#define I40E_HMC_OBJ_SIZE_TXQ         128
+#define I40E_HMC_OBJ_SIZE_RXQ         32
+#define I40E_HMC_OBJ_SIZE_FCOE_CNTX   64
+#define I40E_HMC_OBJ_SIZE_FCOE_FILT   64
+
+enum i40e_hmc_lan_rsrc_type {
+	I40E_HMC_LAN_FULL  = 0,
+	I40E_HMC_LAN_TX    = 1,
+	I40E_HMC_LAN_RX    = 2,
+	I40E_HMC_FCOE_CTX  = 3,
+	I40E_HMC_FCOE_FILT = 4,
+	I40E_HMC_LAN_MAX   = 5
+};
+
+enum i40e_hmc_model {
+	I40E_HMC_MODEL_DIRECT_PREFERRED = 0,
+	I40E_HMC_MODEL_DIRECT_ONLY      = 1,
+	I40E_HMC_MODEL_PAGED_ONLY       = 2,
+	I40E_HMC_MODEL_UNKNOWN,
+};
+
+struct i40e_hmc_lan_create_obj_info {
+	struct i40e_hmc_info *hmc_info;
+	u32 rsrc_type;
+	u32 start_idx;
+	u32 count;
+	enum i40e_sd_entry_type entry_type;
+	u64 direct_mode_sz;
+};
+
+struct i40e_hmc_lan_delete_obj_info {
+	struct i40e_hmc_info *hmc_info;
+	u32 rsrc_type;
+	u32 start_idx;
+	u32 count;
+};
+
+enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+					u32 rxq_num, u32 fcoe_cntx_num,
+					u32 fcoe_filt_num);
+enum i40e_status_code i40e_configure_lan_hmc(struct i40e_hw *hw,
+					     enum i40e_hmc_model model);
+enum i40e_status_code i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
+			      u32 fcoe_cntx_num, u32 fcoe_filt_num);
+enum i40e_status_code i40e_get_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+						      u16 queue);
+enum i40e_status_code i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_txq *s);
+enum i40e_status_code i40e_get_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+						      u16 queue);
+enum i40e_status_code i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+						    u16 queue,
+						    struct i40e_hmc_obj_rxq *s);
+enum i40e_status_code i40e_create_lan_hmc_object(struct i40e_hw *hw,
+				struct i40e_hmc_lan_create_obj_info *info);
+enum i40e_status_code i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+				struct i40e_hmc_lan_delete_obj_info *info);
+
+#endif /* _I40E_LAN_HMC_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_lan_hmc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_nvm.c
===================================================================
--- trunk/sys/dev/ixl/i40e_nvm.c	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_nvm.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,713 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_nvm.c 292100 2015-12-11 13:08:38Z smh $*/
+
+#include "i40e_prototype.h"
+
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+					       u16 *data);
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+					    u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+						 u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+					      u16 *words, u16 *data);
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+				       u32 offset, u16 words, void *data,
+				       bool last_command);
+
+/**
+ * i40e_init_nvm_ops - Initialize NVM function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Sets up the function pointers and the NVM info structure. Should be
+ * called once per NVM initialization, e.g. inside i40e_init_shared_code().
+ * Note that the term NVM is used here (and in all methods covered in this
+ * file) as an equivalent of the flash part mapped into the SR; the flash
+ * is always accessed through the Shadow RAM.
+ **/
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
+{
+	struct i40e_nvm_info *nvm = &hw->nvm;
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u32 fla, gens;
+	u8 sr_size;
+
+	DEBUGFUNC("i40e_init_nvm");
+
+	/* The SR size is stored regardless of the nvm programming mode
+	 * as the blank mode may be used in the factory line.
+	 */
+	gens = rd32(hw, I40E_GLNVM_GENS);
+	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
+			   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
+	/* Convert to words (sr_size encodes the size as a power of two, in KB) */
+	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
+
+	/* Check if we are in the normal or blank NVM programming mode */
+	fla = rd32(hw, I40E_GLNVM_FLA);
+	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
+		/* Max NVM timeout */
+		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
+		nvm->blank_nvm_mode = FALSE;
+	} else { /* Blank programming mode */
+		nvm->blank_nvm_mode = TRUE;
+		ret_code = I40E_ERR_NVM_BLANK_MODE;
+		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
+	}
+
+	return ret_code;
+}
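
The SR_SIZE field of GLNVM_GENS encodes the Shadow RAM size as a power of
two in KB, and 1KB holds 512 16-bit words. Decoding it outside the driver
looks like this (hypothetical register field value):

    #include <stdint.h>
    #include <stdio.h>

    #define SR_WORDS_IN_1KB 512u  /* I40E_SR_WORDS_IN_1KB */

    int main(void)
    {
        uint32_t sr_size_field = 5;  /* hypothetical: 2^5 = 32KB part */
        uint32_t words = (1u << sr_size_field) * SR_WORDS_IN_1KB;

        printf("Shadow RAM: %u words (%u KB)\n", words, words / 512u);
        return 0;
    }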
+
+/**
+ * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * This function will request NVM ownership for reading
+ * via the proper Admin Command.
+ **/
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+				       enum i40e_aq_resource_access_type access)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u64 gtime, timeout;
+	u64 time_left = 0;
+
+	DEBUGFUNC("i40e_acquire_nvm");
+
+	if (hw->nvm.blank_nvm_mode)
+		goto i40e_i40e_acquire_nvm_exit;
+
+	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
+					    0, &time_left, NULL);
+	/* Reading the Global Device Timer */
+	gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+
+	/* Store the timeout */
+	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
+
+	if (ret_code)
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
+			   access, time_left, ret_code, hw->aq.asq_last_status);
+
+	if (ret_code && time_left) {
+		/* Poll until the current NVM owner times out */
+		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
+		while ((gtime < timeout) && time_left) {
+			i40e_msec_delay(10);
+			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+			ret_code = i40e_aq_request_resource(hw,
+							I40E_NVM_RESOURCE_ID,
+							access, 0, &time_left,
+							NULL);
+			if (ret_code == I40E_SUCCESS) {
+				hw->nvm.hw_semaphore_timeout =
+					    I40E_MS_TO_GTIME(time_left) + gtime;
+				break;
+			}
+		}
+		if (ret_code != I40E_SUCCESS) {
+			hw->nvm.hw_semaphore_timeout = 0;
+			i40e_debug(hw, I40E_DEBUG_NVM,
+				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
+				   time_left, ret_code, hw->aq.asq_last_status);
+		}
+	}
+
+i40e_i40e_acquire_nvm_exit:
+	return ret_code;
+}
+
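+/* Minimal usage sketch (hypothetical caller, error handling elided):
+ * Shadow RAM accesses are bracketed by acquire/release of the NVM resource:
+ *
+ *	u16 sum;
+ *	if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
+ *		i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &sum);
+ *		i40e_release_nvm(hw);
+ *	}
+ */
+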
+/**
+ * i40e_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * This function will release NVM resource via the proper Admin Command.
+ **/
+void i40e_release_nvm(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u32 total_delay = 0;
+
+	DEBUGFUNC("i40e_release_nvm");
+
+	if (hw->nvm.blank_nvm_mode)
+		return;
+
+	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+
+	/* In some rare cases, trying to release the resource results in an
+	 * Admin Queue timeout; retry until the release succeeds or the ASQ
+	 * command timeout expires.
+	 */
+	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
+	       (total_delay < hw->aq.asq_cmd_timeout)) {
+		i40e_msec_delay(1);
+		ret_code = i40e_aq_release_resource(hw,
+					I40E_NVM_RESOURCE_ID, 0, NULL);
+		total_delay++;
+	}
+}
+
+/**
+ * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
+ * @hw: pointer to the HW structure
+ *
+ * Polls the SRCTL Shadow RAM register done bit.
+ **/
+static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+	u32 srctl, wait_cnt;
+
+	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
+
+	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
+	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
+		srctl = rd32(hw, I40E_GLNVM_SRCTL);
+		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
+			ret_code = I40E_SUCCESS;
+			break;
+		}
+		i40e_usec_delay(5);
+	}
+	if (ret_code == I40E_ERR_TIMEOUT)
+		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set\n");
+	return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word - Reads Shadow RAM
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM, via the AdminQ on X722 devices
+ * or the GLNVM_SRCTL register otherwise.
+ **/
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+					 u16 *data)
+{
+#ifdef X722_SUPPORT
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_word_aq(hw, offset, data);
+#endif
+	return i40e_read_nvm_word_srctl(hw, offset, data);
+}
+
+/**
+ * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
+ **/
+enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+					       u16 *data)
+{
+	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+	u32 sr_reg;
+
+	DEBUGFUNC("i40e_read_nvm_word_srctl");
+
+	if (offset >= hw->nvm.sr_size) {
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
+			   offset, hw->nvm.sr_size);
+		ret_code = I40E_ERR_PARAM;
+		goto read_nvm_exit;
+	}
+
+	/* Poll the done bit first */
+	ret_code = i40e_poll_sr_srctl_done_bit(hw);
+	if (ret_code == I40E_SUCCESS) {
+		/* Write the address and start reading */
+		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
+		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
+
+		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
+		ret_code = i40e_poll_sr_srctl_done_bit(hw);
+		if (ret_code == I40E_SUCCESS) {
+			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
+			*data = (u16)((sr_reg &
+				       I40E_GLNVM_SRDATA_RDDATA_MASK)
+				    >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
+		}
+	}
+	if (ret_code != I40E_SUCCESS)
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
+			   offset);
+
+read_nvm_exit:
+	return ret_code;
+}
+
+/**
+ * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using an AdminQ read command.
+ **/
+enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+					    u16 *data)
+{
+	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
+
+	DEBUGFUNC("i40e_read_nvm_word_aq");
+
+	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, TRUE);
+	*data = LE16_TO_CPU(*(__le16 *)data);
+
+	return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer - Reads Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR, via the AdminQ on X722
+ * devices or the GLNVM_SRCTL register otherwise. Any required NVM ownership
+ * must be handled by the caller.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data)
+{
+#ifdef X722_SUPPORT
+	if (hw->mac.type == I40E_MAC_X722)
+		return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+#endif
+	return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+}
+
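+/* Usage sketch (hypothetical caller): the words argument is in/out, so the
+ * caller learns how many words were actually transferred:
+ *
+ *	u16 buf[16];
+ *	u16 nwords = 16;
+ *	ret_code = i40e_read_nvm_buffer(hw, 0x100, &nwords, buf);
+ *	(on return, nwords <= 16 is the number of words actually read)
+ */
+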
+/**
+ * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR by calling
+ * i40e_read_nvm_word_srctl() once per word. On return, @words holds the
+ * number of words actually read.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+						 u16 *words, u16 *data)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u16 index, word;
+
+	DEBUGFUNC("i40e_read_nvm_buffer_srctl");
+
+	/* Loop thru the selected region */
+	for (word = 0; word < *words; word++) {
+		index = offset + word;
+		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
+		if (ret_code != I40E_SUCCESS)
+			break;
+	}
+
+	/* Update the number of words read from the Shadow RAM */
+	*words = word;
+
+	return ret_code;
+}
+
+/**
+ * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
+ * method, one sector-bounded chunk at a time. On return, @words holds the
+ * number of words actually read.
+ **/
+enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+					      u16 *words, u16 *data)
+{
+	enum i40e_status_code ret_code;
+	u16 read_size = *words;
+	bool last_cmd = FALSE;
+	u16 words_read = 0;
+	u16 i = 0;
+
+	DEBUGFUNC("i40e_read_nvm_buffer_aq");
+
+	do {
+		/* Calculate the number of words to read in this step.
+		 * The FVL AdminQ does not allow reading more than one
+		 * sector at a time or crossing sector boundaries.
+		 */
+		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
+			read_size = min(*words,
+					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
+				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
+		else
+			read_size = min((*words - words_read),
+					I40E_SR_SECTOR_SIZE_IN_WORDS);
+
+		/* If this is the last command, set the flag accordingly */
+		if ((words_read + read_size) >= *words)
+			last_cmd = TRUE;
+
+		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
+					    data + words_read, last_cmd);
+		if (ret_code != I40E_SUCCESS)
+			goto read_nvm_buffer_aq_exit;
+
+		/* Increment counter for words already read and move offset to
+		 * new read location
+		 */
+		words_read += read_size;
+		offset += read_size;
+	} while (words_read < *words);
+
+	for (i = 0; i < *words; i++)
+		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
+
+read_nvm_buffer_aq_exit:
+	*words = words_read;
+	return ret_code;
+}
+
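+/* Worked example of the chunking above (hypothetical request), with
+ * I40E_SR_SECTOR_SIZE_IN_WORDS = 0x800 (one 4KB sector): asking for 0x30
+ * words at offset 0x7F0 is split into two AQ reads, first 0x10 words up to
+ * the sector boundary at 0x800, then the remaining 0x20 words with
+ * last_cmd set.
+ */
+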
+/**
+ * i40e_read_nvm_aq - Reads Shadow RAM via AQ
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to read
+ * @data: buffer to store the words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Reads a 16 bit words buffer from the Shadow RAM using the AdminQ command.
+ **/
+enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+				       u32 offset, u16 words, void *data,
+				       bool last_command)
+{
+	enum i40e_status_code ret_code = I40E_ERR_NVM;
+	struct i40e_asq_cmd_details cmd_details;
+
+	DEBUGFUNC("i40e_read_nvm_aq");
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+	/* Here we are checking the SR limit only for the flat memory model.
+	 * We cannot do it for the module-based model, as we did not acquire
+	 * the NVM resource yet (we cannot get the module pointer value).
+	 * Firmware will check the module-based model.
+	 */
+	if ((offset + words) > hw->nvm.sr_size)
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+			   (offset + words), hw->nvm.sr_size);
+	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can read only up to 4KB (one sector) in one AQ read */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read fail error: tried to read %d words, limit is %d.\n",
+			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single read cannot spread over two sectors */
+		i40e_debug(hw, I40E_DEBUG_NVM,
+			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+			   offset, words);
+	else
+		ret_code = i40e_aq_read_nvm(hw, module_pointer,
+					    2 * offset,  /*bytes*/
+					    2 * words,   /*bytes*/
+					    data, last_command, &cmd_details);
+
+	return ret_code;
+}
+
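+/* Note on units: the AdminQ NVM read command works in bytes while this API
+ * works in 16-bit words, hence the 2 * offset and 2 * words conversions
+ * above; e.g. a request for 4 words at word offset 0x10 becomes an 8 byte
+ * read at byte offset 0x20.
+ */
+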
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+					u32 offset, u16 words, void *data,
+					bool last_command)
+{
+	enum i40e_status_code ret_code = I40E_ERR_NVM;
+	struct i40e_asq_cmd_details cmd_details;
+
+	DEBUGFUNC("i40e_write_nvm_aq");
+
+	memset(&cmd_details, 0, sizeof(cmd_details));
+	cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+	/* Here we are checking the SR limit only for the flat memory model.
+	 * We cannot do it for the module-based model, as we did not acquire
+	 * the NVM resource yet (we cannot get the module pointer value).
+	 * Firmware will check the module-based model.
+	 */
+	if ((offset + words) > hw->nvm.sr_size)
+		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
+	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+		/* We can write only up to 4KB (one sector) in one AQ write */
+		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
+	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+		/* A single write cannot spread over two sectors */
+		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
+	else
+		ret_code = i40e_aq_update_nvm(hw, module_pointer,
+					      2 * offset,  /*bytes*/
+					      2 * words,   /*bytes*/
+					      data, last_command, &cmd_details);
+
+	return ret_code;
+}
+
+/**
+ * i40e_write_nvm_word - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
+ * NVM ownership has to be acquired and released (on ARQ completion event
+ * reception) by the caller. To commit the SR to NVM, the update-checksum
+ * function should be called.
+ **/
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+					  void *data)
+{
+	DEBUGFUNC("i40e_write_nvm_word");
+
+	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
+
+	/* The 0x00 module pointer below means we treat the SR as flat memory */
+	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, FALSE);
+}
+
+/**
+ * i40e_write_nvm_buffer - Writes Shadow RAM buffer
+ * @hw: pointer to the HW structure
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on ARQ completion event reception. To commit the SR to NVM,
+ * the update-checksum function should be called.
+ **/
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
+					    u8 module_pointer, u32 offset,
+					    u16 words, void *data)
+{
+	__le16 *le_word_ptr = (__le16 *)data;
+	u16 *word_ptr = (u16 *)data;
+	u32 i = 0;
+
+	DEBUGFUNC("i40e_write_nvm_buffer");
+
+	for (i = 0; i < words; i++)
+		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
+
+	/* Here we will only write one buffer as the size of the modules
+	 * mirrored in the Shadow RAM is always less than 4K.
+	 */
+	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
+				 data, FALSE);
+}
+
+/**
+ * i40e_calc_nvm_checksum - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ * @checksum: pointer to the checksum
+ *
+ * This function calculates a SW checksum that covers the whole 64kB shadow
+ * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
+ * of the VPD area are customer specific and unknown, so this function skips
+ * the maximum possible size of the VPD area (1kB).
+ **/
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	struct i40e_virt_mem vmem;
+	u16 pcie_alt_module = 0;
+	u16 checksum_local = 0;
+	u16 vpd_module = 0;
+	u16 *data;
+	u16 i = 0;
+
+	DEBUGFUNC("i40e_calc_nvm_checksum");
+
+	ret_code = i40e_allocate_virt_mem(hw, &vmem,
+				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
+	if (ret_code)
+		goto i40e_calc_nvm_checksum_exit;
+	data = (u16 *)vmem.va;
+
+	/* read pointer to VPD area */
+	ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
+	if (ret_code != I40E_SUCCESS) {
+		ret_code = I40E_ERR_NVM_CHECKSUM;
+		goto i40e_calc_nvm_checksum_exit;
+	}
+
+	/* read pointer to PCIe Alt Auto-load module */
+	ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
+				      &pcie_alt_module);
+	if (ret_code != I40E_SUCCESS) {
+		ret_code = I40E_ERR_NVM_CHECKSUM;
+		goto i40e_calc_nvm_checksum_exit;
+	}
+
+	/* Calculate SW checksum that covers the whole 64kB shadow RAM
+	 * except the VPD and PCIe ALT Auto-load modules
+	 */
+	for (i = 0; i < hw->nvm.sr_size; i++) {
+		/* Read SR page */
+		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
+
+			ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
+			if (ret_code != I40E_SUCCESS) {
+				ret_code = I40E_ERR_NVM_CHECKSUM;
+				goto i40e_calc_nvm_checksum_exit;
+			}
+		}
+
+		/* Skip Checksum word */
+		if (i == I40E_SR_SW_CHECKSUM_WORD)
+			continue;
+		/* Skip VPD module (convert byte size to word count) */
+		if ((i >= (u32)vpd_module) &&
+		    (i < ((u32)vpd_module +
+		     (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
+			continue;
+		}
+		/* Skip PCIe ALT module (convert byte size to word count) */
+		if ((i >= (u32)pcie_alt_module) &&
+		    (i < ((u32)pcie_alt_module +
+		     (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
+			continue;
+		}
+
+		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
+	}
+
+	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
+
+i40e_calc_nvm_checksum_exit:
+	i40e_free_virt_mem(hw, &vmem);
+	return ret_code;
+}
+
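+/* Invariant implied by the calculation above: since the checksum word itself
+ * is skipped, a correctly programmed image satisfies (modulo 2^16)
+ *
+ *	stored_checksum + sum_of_other_covered_words == I40E_SR_SW_CHECKSUM_BASE
+ *
+ * which is exactly what i40e_validate_nvm_checksum() verifies by recomputing
+ * the sum and comparing against the stored word.
+ */
+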
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * on ARQ completion event reception by caller.
+ * This function will commit SR to NVM.
+ **/
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u16 checksum;
+	__le16 le_sum;
+
+	DEBUGFUNC("i40e_update_nvm_checksum");
+
+	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+	le_sum = CPU_TO_LE16(checksum);
+	if (ret_code == I40E_SUCCESS)
+		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+					     1, &le_sum, TRUE);
+
+	return ret_code;
+}
+
+/**
+ * i40e_validate_nvm_checksum - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum: calculated checksum
+ *
+ * Performs checksum calculation and validates the NVM SW checksum. If the
+ * caller does not need the checksum, @checksum can be NULL.
+ **/
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+						 u16 *checksum)
+{
+	enum i40e_status_code ret_code = I40E_SUCCESS;
+	u16 checksum_sr = 0;
+	u16 checksum_local = 0;
+
+	DEBUGFUNC("i40e_validate_nvm_checksum");
+
+	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
+	if (ret_code != I40E_SUCCESS)
+		goto i40e_validate_nvm_checksum_exit;
+
+	/* Read the stored checksum word directly from the Shadow RAM; the
+	 * synchronization semaphore is expected to be held by the caller,
+	 * so it is not taken again here.
+	 */
+	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
+
+	/* Verify that the checksum read from the Shadow RAM matches the
+	 * calculated checksum
+	 */
+	if (checksum_local != checksum_sr)
+		ret_code = I40E_ERR_NVM_CHECKSUM;
+
+	/* If the user cares, return the calculated checksum */
+	if (checksum)
+		*checksum = checksum_local;
+
+i40e_validate_nvm_checksum_exit:
+	return ret_code;
+}


Property changes on: trunk/sys/dev/ixl/i40e_nvm.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_osdep.c
===================================================================
--- trunk/sys/dev/ixl/i40e_osdep.c	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_osdep.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,200 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2014, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_osdep.c 292099 2015-12-11 13:05:18Z smh $*/
+
+#include <machine/stdarg.h>
+
+#include "ixl.h"
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+i40e_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	if (error)
+		return;
+	*(bus_addr_t *)arg = segs->ds_addr;
+}
+
+i40e_status
+i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
+{
+	mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+	return (mem->va == NULL);
+}
+
+i40e_status
+i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+{
+	free(mem->va, M_DEVBUF);
+	return (0);
+}
+
+i40e_status
+i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+	enum i40e_memory_type type __unused, u64 size, u32 alignment)
+{
+	device_t	dev = ((struct i40e_osdep *)hw->back)->dev;
+	int		err;
+
+	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
+			       alignment, 0,	/* alignment, bounds */
+			       BUS_SPACE_MAXADDR,	/* lowaddr */
+			       BUS_SPACE_MAXADDR,	/* highaddr */
+			       NULL, NULL,	/* filter, filterarg */
+			       size,	/* maxsize */
+			       1,	/* nsegments */
+			       size,	/* maxsegsize */
+			       BUS_DMA_ALLOCNOW, /* flags */
+			       NULL,	/* lockfunc */
+			       NULL,	/* lockfuncarg */
+			       &mem->tag);
+	if (err != 0) {
+		device_printf(dev,
+		    "i40e_allocate_dma: bus_dma_tag_create failed, "
+		    "error %u\n", err);
+		goto fail_0;
+	}
+	err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
+			     BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
+	if (err != 0) {
+		device_printf(dev,
+		    "i40e_allocate_dma: bus_dmamem_alloc failed, "
+		    "error %u\n", err);
+		goto fail_1;
+	}
+	err = bus_dmamap_load(mem->tag, mem->map, mem->va,
+			    size,
+			    i40e_dmamap_cb,
+			    &mem->pa,
+			    BUS_DMA_NOWAIT);
+	if (err != 0) {
+		device_printf(dev,
+		    "i40e_allocate_dma: bus_dmamap_load failed, "
+		    "error %u\n", err);
+		goto fail_2;
+	}
+	mem->nseg = 1;
+	mem->size = size;
+	bus_dmamap_sync(mem->tag, mem->map,
+	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+	return (0);
+fail_2:
+	bus_dmamem_free(mem->tag, mem->va, mem->map);
+fail_1:
+	bus_dma_tag_destroy(mem->tag);
+fail_0:
+	mem->map = NULL;
+	mem->tag = NULL;
+	return (err);
+}
+
+i40e_status
+i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+{
+	bus_dmamap_sync(mem->tag, mem->map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+	bus_dmamap_unload(mem->tag, mem->map);
+	bus_dmamem_free(mem->tag, mem->va, mem->map);
+	bus_dma_tag_destroy(mem->tag);
+	return (0);
+}
+
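+/* Usage sketch (hypothetical caller): the shared code pairs these helpers
+ * for descriptor rings and AQ buffers, e.g. a 4KB, 4KB-aligned region:
+ *
+ *	struct i40e_dma_mem mem;
+ *	if (i40e_allocate_dma_mem(hw, &mem, i40e_mem_reserved, 4096, 4096) == 0) {
+ *		(use mem.va for CPU access and mem.pa as the bus address)
+ *		i40e_free_dma_mem(hw, &mem);
+ *	}
+ */
+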
+void
+i40e_init_spinlock(struct i40e_spinlock *lock)
+{
+	mtx_init(&lock->mutex, "mutex",
+	    MTX_NETWORK_LOCK, MTX_DEF | MTX_DUPOK);
+}
+
+void
+i40e_acquire_spinlock(struct i40e_spinlock *lock)
+{
+	mtx_lock(&lock->mutex);
+}
+
+void
+i40e_release_spinlock(struct i40e_spinlock *lock)
+{
+	mtx_unlock(&lock->mutex);
+}
+
+void
+i40e_destroy_spinlock(struct i40e_spinlock *lock)
+{
+	mtx_destroy(&lock->mutex);
+}
+
+/*
+** i40e_debug_d - OS dependent version of shared code debug printing
+*/
+void i40e_debug_d(void *hw, u32 mask, char *fmt, ...)
+{
+	char buf[512];
+	va_list args;
+
+	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+		return;
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, args);
+	va_end(args);
+
+	/* the debug string is already formatted with a newline */
+	printf("%s", buf);
+}
+
+u16
+i40e_read_pci_cfg(struct i40e_hw *hw, u32 reg)
+{
+	u16 value;
+
+	value = pci_read_config(((struct i40e_osdep *)hw->back)->dev,
+	    reg, 2);
+
+	return (value);
+}
+
+void
+i40e_write_pci_cfg(struct i40e_hw *hw, u32 reg, u16 value)
+{
+	pci_write_config(((struct i40e_osdep *)hw->back)->dev,
+	    reg, value, 2);
+}
+


Property changes on: trunk/sys/dev/ixl/i40e_osdep.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_osdep.h
===================================================================
--- trunk/sys/dev/ixl/i40e_osdep.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_osdep.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,236 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_osdep.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_OSDEP_H_
+#define _I40E_OSDEP_H_
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#define ASSERT(x) do { if (!(x)) panic("IXL: %s", #x); } while (0)
+
+#define i40e_usec_delay(x) DELAY(x)
+#define i40e_msec_delay(x) DELAY(1000*(x))
+
+#define DBG 0 
+#define MSGOUT(S, A, B)     printf(S "\n", A, B)
+#define DEBUGFUNC(F)        DEBUGOUT(F);
+#if DBG
+	#define DEBUGOUT(S)         printf(S "\n")
+	#define DEBUGOUT1(S,A)      printf(S "\n",A)
+	#define DEBUGOUT2(S,A,B)    printf(S "\n",A,B)
+	#define DEBUGOUT3(S,A,B,C)  printf(S "\n",A,B,C)
+	#define DEBUGOUT6(S,A,B,C,D,E,F)    printf(S "\n",A,B,C,D,E,F)
+	#define DEBUGOUT7(S,A,B,C,D,E,F,G)  printf(S "\n",A,B,C,D,E,F,G)
+#else
+	#define DEBUGOUT(S)
+	#define DEBUGOUT1(S,A)
+	#define DEBUGOUT2(S,A,B)
+	#define DEBUGOUT3(S,A,B,C)
+	#define DEBUGOUT6(S,A,B,C,D,E,F)
+	#define DEBUGOUT7(S,A,B,C,D,E,F,G)
+#endif
+
+#define UNREFERENCED_XPARAMETER
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+
+#define STATIC	static
+#define INLINE  inline
+
+#define FALSE               0
+#define false               0 /* shared code requires this */
+#define TRUE                1
+#define true                1
+#define CMD_MEM_WRT_INVALIDATE          0x0010  /* BIT_4 */
+#define PCI_COMMAND_REGISTER            PCIR_COMMAND
+#define ARRAY_SIZE(a)		(sizeof(a) / sizeof((a)[0]))
+
+#define i40e_memset(a, b, c, d)  memset((a), (b), (c))
+#define i40e_memcpy(a, b, c, d)  memcpy((a), (b), (c))
+
+#define CPU_TO_LE16(o)	htole16(o)
+#define CPU_TO_LE32(s)	htole32(s)
+#define CPU_TO_LE64(h)	htole64(h)
+#define LE16_TO_CPU(a)	le16toh(a)
+#define LE32_TO_CPU(c)	le32toh(c)
+#define LE64_TO_CPU(k)	le64toh(k)
+
+#define I40E_NTOHS(a)	ntohs(a)
+#define I40E_NTOHL(a)	ntohl(a)
+#define I40E_HTONS(a)	htons(a)
+#define I40E_HTONL(a)	htonl(a)
+
+#define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y))
+
+#define BIT(a) 		(1UL << (a))
+#define BIT_ULL(a) 	(1ULL << (a))
+
+typedef uint8_t		u8;
+typedef int8_t		s8;
+typedef uint16_t	u16;
+typedef int16_t		s16;
+typedef uint32_t	u32;
+typedef int32_t		s32;
+typedef uint64_t	u64;
+
+/* long string relief */
+typedef enum i40e_status_code i40e_status;
+
+#define __le16  u16
+#define __le32  u32
+#define __le64  u64
+#define __be16  u16
+#define __be32  u32
+#define __be64  u64
+
+/* SW spinlock */
+struct i40e_spinlock {
+        struct mtx mutex;
+};
+
+#define le16_to_cpu	/* expands to nothing; le16_to_cpu(x) is the identity */
+
+#if defined(__amd64__) || defined(i386)
+static __inline
+void prefetch(void *x)
+{
+	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
+}
+#else
+#define	prefetch(x)
+#endif
+
+struct i40e_osdep {
+	bus_space_tag_t		mem_bus_space_tag;
+	bus_space_handle_t	mem_bus_space_handle;
+	bus_size_t		mem_bus_space_size;
+	uint32_t		flush_reg;
+	struct device		*dev;
+};
+
+struct i40e_dma_mem {
+	void			*va;
+	u64			pa;
+	bus_dma_tag_t		tag;
+	bus_dmamap_t		map;
+	bus_dma_segment_t	seg;
+	bus_size_t              size;
+	int			nseg;
+	int                     flags;
+};
+
+struct i40e_hw; /* forward decl */
+u16	i40e_read_pci_cfg(struct i40e_hw *, u32);
+void	i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
+
+#define i40e_debug(h, m, s, ...)  i40e_debug_d(h, m, s, ##__VA_ARGS__)
+extern void i40e_debug_d(void *hw, u32 mask, char *fmt_str, ...);
+
+struct i40e_virt_mem {
+	void *va;
+	u32 size;
+};
+
+/*
+** This hardware supports either 16 or 32 byte rx descriptors;
+** we default here to the larger size.
+*/
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+static __inline uint32_t
+rd32_osdep(struct i40e_osdep *osdep, uint32_t reg)
+{
+
+	KASSERT(reg < osdep->mem_bus_space_size,
+	    ("ixl: register offset %#jx too large (max is %#jx)",
+	    (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
+
+	return (bus_space_read_4(osdep->mem_bus_space_tag,
+	    osdep->mem_bus_space_handle, reg));
+}
+
+static __inline void
+wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value)
+{
+
+	KASSERT(reg < osdep->mem_bus_space_size,
+	    ("ixl: register offset %#jx too large (max is %#jx)",
+	    (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));
+
+	bus_space_write_4(osdep->mem_bus_space_tag,
+	    osdep->mem_bus_space_handle, reg, value);
+}
+
+static __inline void
+ixl_flush_osdep(struct i40e_osdep *osdep)
+{
+	rd32_osdep(osdep, osdep->flush_reg);
+}
+
+#define rd32(a, reg)		rd32_osdep((a)->back, (reg))
+#define wr32(a, reg, value)	wr32_osdep((a)->back, (reg), (value))
+
+#define rd64(a, reg) (\
+   bus_space_read_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
+                     ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
+                     reg))
+
+#define wr64(a, reg, value) (\
+   bus_space_write_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \
+                     ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \
+                     reg, value))
+
+#define ixl_flush(a)		ixl_flush_osdep((a)->back)
+
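+/* Usage sketch (hypothetical): since PCI memory writes are posted, a write
+ * that must reach the device before the driver proceeds is followed by a
+ * flushing read through the same BAR:
+ *
+ *	wr32(hw, I40E_GLGEN_RTRIG, mask);
+ *	ixl_flush(hw);
+ */
+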
+#endif /* _I40E_OSDEP_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_osdep.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_prototype.h
===================================================================
--- trunk/sys/dev/ixl/i40e_prototype.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_prototype.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,479 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_prototype.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_PROTOTYPE_H_
+#define _I40E_PROTOTYPE_H_
+
+#include "i40e_type.h"
+#include "i40e_alloc.h"
+#include "i40e_virtchnl.h"
+
+/* Prototypes for shared code functions that are not in
+ * the standard function pointer structures. These exist
+ * mostly because they are needed even before init has
+ * happened, and they assist in the early SW and FW setup.
+ */
+
+/* adminq functions */
+enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw);
+enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw);
+u16 i40e_clean_asq(struct i40e_hw *hw);
+void i40e_free_adminq_asq(struct i40e_hw *hw);
+void i40e_free_adminq_arq(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_adminq_init_ring_data(struct i40e_hw *hw);
+enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
+					     struct i40e_arq_event_info *e,
+					     u16 *events_pending);
+enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
+				struct i40e_aq_desc *desc,
+				void *buff, /* can be NULL */
+				u16  buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+bool i40e_asq_done(struct i40e_hw *hw);
+
+/* debug function for adminq */
+void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
+		   void *desc, void *buffer, u16 buf_len);
+
+void i40e_idle_aq(struct i40e_hw *hw);
+void i40e_resume_aq(struct i40e_hw *hw);
+bool i40e_check_asq_alive(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+#ifdef X722_SUPPORT
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+					  bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+					  bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+				     u16 seid,
+				     struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+				     u16 seid,
+				     struct i40e_aqc_get_set_rss_key_data *key);
+#endif
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
+
+
+u32 i40e_led_get(struct i40e_hw *hw);
+void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+
+/* admin send queue commands */
+
+enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
+				u16 *fw_major_version, u16 *fw_minor_version,
+				u32 *fw_build,
+				u16 *api_major_version, u16 *api_minor_version,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
+				u32 reg_addr, u64 reg_val,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_read_register(struct i40e_hw *hw,
+				u32  reg_addr, u64 *reg_val,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+			bool qualified_modules, bool report_init,
+			struct i40e_aq_get_phy_abilities_resp *abilities,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+				struct i40e_aq_set_phy_config *config,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+				  bool atomic_reset);
+enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
+				u16 max_frame_size, bool crc_en, u16 pacing,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
+				u64 *advt_reg,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_partner_advt(struct i40e_hw *hw,
+				u64 *advt_reg,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lb_modes(struct i40e_hw *hw, u16 lb_modes,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+		bool enable_link, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
+				bool enable_lse, struct i40e_link_status *link,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+				u64 advt_reg,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_driver_version(struct i40e_hw *hw,
+				struct i40e_driver_version *dv,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+				u16 vsi_id, bool set_filter,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+		u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+				u16 seid, bool enable, u16 vid,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
+				struct i40e_vsi_context *vsi_ctx,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+				u16 downlink_seid, u8 enabled_tc,
+				bool default_port, bool enable_l2_filtering,
+				u16 *pveb_seid,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+				u16 veb_seid, u16 *switch_id, bool *floating,
+				u16 *statistic_index, u16 *vebs_used,
+				u16 *vebs_free,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_add_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_remove_macvlan_element_data *mv_list,
+			u16 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_vlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_vlan(struct i40e_hw *hw, u16 vsi_id,
+			struct i40e_aqc_add_remove_vlan_element_data *v_list,
+			u8 count, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+				u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_config(struct i40e_hw *hw,
+				struct i40e_aqc_get_switch_config_resp *buf,
+				u16 buf_size, u16 *start_seid,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_request_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				enum i40e_aq_resource_access_type access,
+				u8 sdp_number, u64 *timeout,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_release_resource(struct i40e_hw *hw,
+				enum i40e_aq_resources_ids resource,
+				u8 sdp_number,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, bool last_command,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_read_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, u32 field_id, void *data,
+				u16 buf_size, u16 *element_count,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw,
+				u8 cmd_flags, void *data, u16 buf_size,
+				u16 element_count,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw,
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw,
+				void *buff, u16 buff_size, u16 *data_size,
+				enum i40e_admin_queue_opc list_type_opc,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+				u32 offset, u16 length, void *data,
+				bool last_command,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+				u8 mib_type, void *buff, u16 buff_size,
+				u16 *local_len, u16 *remote_len,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+				u8 mib_type, void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+				bool enable_update,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
+				void *buff, u16 buff_size, u16 tlv_len,
+				u16 *mib_len,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
+				u8 bridge_type, void *buff, u16 buff_size,
+				u16 old_len, u16 new_len, u16 offset,
+				u16 *mib_len,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
+				u8 bridge_type, void *buff, u16 buff_size,
+				u16 tlv_len, u16 *mib_len,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+				void *buff, u16 buff_size,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
+				bool start_agent,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+				u16 udp_port, u8 protocol_index,
+				u8 *filter_index,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_switch_resource_alloc(struct i40e_hw *hw,
+			u8 *num_entries,
+			struct i40e_aqc_switch_resource_alloc_element_resp *buf,
+			u16 count,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_pvirt(struct i40e_hw *hw, u16 flags,
+				       u16 mac_seid, u16 vsi_seid,
+				       u16 *ret_seid);
+enum i40e_status_code i40e_aq_add_tag(struct i40e_hw *hw, bool direct_to_queue,
+				u16 vsi_seid, u16 tag, u16 queue_num,
+				u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_tag(struct i40e_hw *hw, u16 vsi_seid,
+				u16 tag, u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+				u16 etag, u8 num_tags_in_buf, void *buf,
+				u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_mcast_etag(struct i40e_hw *hw, u16 pe_seid,
+				u16 etag, u16 *tags_used, u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_update_tag(struct i40e_hw *hw, u16 vsi_seid,
+				u16 old_tag, u16 new_tag, u16 *tags_used,
+				u16 *tags_free,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_add_statistics(struct i40e_hw *hw, u16 seid,
+				u16 vlan_id, u16 *stat_index,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_remove_statistics(struct i40e_hw *hw, u16 seid,
+				u16 vlan_id, u16 stat_index,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_port_parameters(struct i40e_hw *hw,
+				u16 bad_frame_vsi, bool save_bad_pac,
+				bool pad_short_pac, bool double_vlan,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_mac_address_write(struct i40e_hw *hw,
+				    u16 flags, u8 *mac_addr,
+				    struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+				u16 seid, u16 credit, u8 max_credit,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
+				u8 tcmap, bool request, u8 *tcmap_ret,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
+				enum i40e_aq_hmc_profile *profile,
+				u8 *pe_vf_enabled_count,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
+	struct i40e_hw *hw, u16 seid,
+	struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
+	struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_configure_vsi_ets_sla_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
+				enum i40e_aq_hmc_profile profile,
+				u8 pe_vf_enabled_count,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+				u16 seid, u16 credit, u8 max_bw,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+			struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+			u16 seid,
+			struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_port_ets_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+		u16 seid,
+		struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+		struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
+					struct i40e_lldp_variables *lldp_cfg);
+enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
+		u16 vsi,
+		struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+		u8 filter_count);
+
+enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
+		u16 vsi,
+		struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+		u8 filter_count);
+
+enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
+				u32 reg_addr0, u32 *reg_val0,
+				u32 reg_addr1, u32 *reg_val1);
+enum i40e_status_code i40e_aq_alternate_read_indirect(struct i40e_hw *hw,
+				u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_write(struct i40e_hw *hw,
+				u32 reg_addr0, u32 reg_val0,
+				u32 reg_addr1, u32 reg_val1);
+enum i40e_status_code i40e_aq_alternate_write_indirect(struct i40e_hw *hw,
+				u32 addr, u32 dw_count, void *buffer);
+enum i40e_status_code i40e_aq_alternate_clear(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_alternate_write_done(struct i40e_hw *hw,
+				u8 bios_mode, bool *reset_needed);
+enum i40e_status_code i40e_aq_set_oem_mode(struct i40e_hw *hw,
+				u8 oem_mode);
+
+/* i40e_common */
+enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw);
+enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
+void i40e_clear_pxe_mode(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw);
+enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+		u32 *max_bw, u32 *min_bw, bool *min_valid, bool *max_valid);
+enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+			struct i40e_aqc_configure_partition_bw_data *bw_data,
+			struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+					    u32 pba_num_size);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
+/* prototype for functions used for NVM access */
+enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
+				      enum i40e_aq_resource_access_type access);
+void i40e_release_nvm(struct i40e_hw *hw);
+enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+					 u16 *data);
+enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+					   u16 *words, u16 *data);
+enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
+					u32 offset, u16 words, void *data,
+					bool last_command);
+enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
+					  void *data);
+enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw, u8 module,
+					    u32 offset, u16 words, void *data);
+enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum);
+enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw);
+enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
+						 u16 *checksum);
+enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
+					  struct i40e_nvm_access *cmd,
+					  u8 *bytes, int *);
+void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
+
+enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
+
+extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
+
+static INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
+{
+	return i40e_ptype_lookup[ptype];
+}
+
+/* prototype for functions used for SW spinlocks */
+void i40e_init_spinlock(struct i40e_spinlock *sp);
+void i40e_acquire_spinlock(struct i40e_spinlock *sp);
+void i40e_release_spinlock(struct i40e_spinlock *sp);
+void i40e_destroy_spinlock(struct i40e_spinlock *sp);
+
+/* i40e_common for VF drivers*/
+void i40e_vf_parse_hw_config(struct i40e_hw *hw,
+			     struct i40e_virtchnl_vf_resource *msg);
+enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw);
+enum i40e_status_code i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+				enum i40e_virtchnl_ops v_opcode,
+				enum i40e_status_code v_retval,
+				u8 *msg, u16 msglen,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
+				struct i40e_filter_control_settings *settings);
+enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+				u8 *mac_addr, u16 ethtype, u16 flags,
+				u16 vsi_seid, u16 queue, bool is_add,
+				struct i40e_control_filter_stats *stats,
+				struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+				u8 table_id, u32 start_index, u16 buff_size,
+				void *buff, u16 *ret_buff_size,
+				u8 *ret_next_table, u32 *ret_next_index,
+				struct i40e_asq_cmd_details *cmd_details);
+#endif /* _I40E_PROTOTYPE_H_ */
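
For context, the NVM helpers declared above are used as an
acquire/read/release sequence. A minimal sketch, assuming the
I40E_RESOURCE_READ access type and the I40E_SUCCESS status code from the
shared-code headers; read_nvm_word_safe is a hypothetical wrapper name:

/*
 * Hedged sketch: read one 16-bit NVM word via the shared-code helpers.
 * The NVM resource must be released even when the read fails.
 */
static enum i40e_status_code
read_nvm_word_safe(struct i40e_hw *hw, u16 offset, u16 *word)
{
	enum i40e_status_code status;

	status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (status != I40E_SUCCESS)
		return (status);
	status = i40e_read_nvm_word(hw, offset, word);
	i40e_release_nvm(hw);
	return (status);
}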


Property changes on: trunk/sys/dev/ixl/i40e_prototype.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_register.h
===================================================================
--- trunk/sys/dev/ixl/i40e_register.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_register.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,5318 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_register.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_REGISTER_H_
+#define _I40E_REGISTER_H_
+
+
+#define I40E_GL_ARQBAH              0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL              0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH            0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK  I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT            0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK  I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH              0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL              0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH            0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK  I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN                 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT    0
+#define I40E_GL_ATQLEN_ATQLEN_MASK     I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT    28
+#define I40E_GL_ATQLEN_ATQVFE_MASK     I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT   29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK    I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT   30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK    I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK  I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT            0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK  I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH              0x00080180 /* Reset: EMPR */
+#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_PF_ARQBAH_ARQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL              0x00080080 /* Reset: EMPR */
+#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_PF_ARQBAL_ARQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH            0x00080380 /* Reset: EMPR */
+#define I40E_PF_ARQH_ARQH_SHIFT 0
+#define I40E_PF_ARQH_ARQH_MASK  I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN                 0x00080280 /* Reset: EMPR */
+#define I40E_PF_ARQLEN_ARQLEN_SHIFT    0
+#define I40E_PF_ARQLEN_ARQLEN_MASK     I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_SHIFT    28
+#define I40E_PF_ARQLEN_ARQVFE_MASK     I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_SHIFT   29
+#define I40E_PF_ARQLEN_ARQOVFL_MASK    I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_SHIFT   30
+#define I40E_PF_ARQLEN_ARQCRIT_MASK    I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_PF_ARQLEN_ARQENABLE_MASK  I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT            0x00080480 /* Reset: EMPR */
+#define I40E_PF_ARQT_ARQT_SHIFT 0
+#define I40E_PF_ARQT_ARQT_MASK  I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH              0x00080100 /* Reset: EMPR */
+#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_PF_ATQBAH_ATQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL              0x00080000 /* Reset: EMPR */
+#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_PF_ATQBAL_ATQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH            0x00080300 /* Reset: EMPR */
+#define I40E_PF_ATQH_ATQH_SHIFT 0
+#define I40E_PF_ATQH_ATQH_MASK  I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN                 0x00080200 /* Reset: EMPR */
+#define I40E_PF_ATQLEN_ATQLEN_SHIFT    0
+#define I40E_PF_ATQLEN_ATQLEN_MASK     I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_SHIFT    28
+#define I40E_PF_ATQLEN_ATQVFE_MASK     I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_SHIFT   29
+#define I40E_PF_ATQLEN_ATQOVFL_MASK    I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_SHIFT   30
+#define I40E_PF_ATQLEN_ATQCRIT_MASK    I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_PF_ATQLEN_ATQENABLE_MASK  I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT            0x00080400 /* Reset: EMPR */
+#define I40E_PF_ATQT_ATQT_SHIFT 0
+#define I40E_PF_ATQT_ATQT_MASK  I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
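Every register above follows the same pattern: an address, a field shift,
and a field mask built with I40E_MASK (which expands to mask << shift in
i40e_type.h). Reading a field is therefore a mask-and-shift on the raw
register value. A sketch, assuming the rd32() accessor from i40e_osdep.h;
pf_arq_len is a hypothetical helper name:

/*
 * Sketch: extract the queue-length field from the PF admin receive
 * queue length register using the mask/shift pair defined above.
 */
static u32
pf_arq_len(struct i40e_hw *hw)
{
	u32 reg = rd32(hw, I40E_PF_ARQLEN);

	return ((reg & I40E_PF_ARQLEN_ARQLEN_MASK) >>
	    I40E_PF_ARQLEN_ARQLEN_SHIFT);
}
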
+#define I40E_VF_ARQBAH(_VF)         (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAH_MAX_INDEX    127
+#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH_ARQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF)         (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQBAL_MAX_INDEX    127
+#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL_ARQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF)       (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQH_MAX_INDEX  127
+#define I40E_VF_ARQH_ARQH_SHIFT 0
+#define I40E_VF_ARQH_ARQH_MASK  I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF)            (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQLEN_MAX_INDEX       127
+#define I40E_VF_ARQLEN_ARQLEN_SHIFT    0
+#define I40E_VF_ARQLEN_ARQLEN_MASK     I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_SHIFT    28
+#define I40E_VF_ARQLEN_ARQVFE_MASK     I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_SHIFT   29
+#define I40E_VF_ARQLEN_ARQOVFL_MASK    I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_SHIFT   30
+#define I40E_VF_ARQLEN_ARQCRIT_MASK    I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN_ARQENABLE_MASK  I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF)       (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ARQT_MAX_INDEX  127
+#define I40E_VF_ARQT_ARQT_SHIFT 0
+#define I40E_VF_ARQT_ARQT_MASK  I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF)         (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAH_MAX_INDEX    127
+#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH_ATQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF)         (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQBAL_MAX_INDEX    127
+#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL_ATQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF)       (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQH_MAX_INDEX  127
+#define I40E_VF_ATQH_ATQH_SHIFT 0
+#define I40E_VF_ATQH_ATQH_MASK  I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF)            (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQLEN_MAX_INDEX       127
+#define I40E_VF_ATQLEN_ATQLEN_SHIFT    0
+#define I40E_VF_ATQLEN_ATQLEN_MASK     I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_SHIFT    28
+#define I40E_VF_ATQLEN_ATQVFE_MASK     I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_SHIFT   29
+#define I40E_VF_ATQLEN_ATQOVFL_MASK    I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_SHIFT   30
+#define I40E_VF_ATQLEN_ATQCRIT_MASK    I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN_ATQENABLE_MASK  I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF)       (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_VF_ATQT_MAX_INDEX  127
+#define I40E_VF_ATQT_ATQT_SHIFT 0
+#define I40E_VF_ATQT_ATQT_MASK  I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
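The VF variants are register arrays: the macro argument selects the VF
slot at a 4-byte stride, bounded by the matching *_MAX_INDEX define. A
sketch of per-VF addressing with the same accessor; vf_arq_head is a
hypothetical helper name:

/*
 * Sketch: read one VF's admin receive queue head.  The parameterized
 * macro computes the per-VF offset; vf_id must stay within
 * I40E_VF_ARQH_MAX_INDEX.
 */
static u32
vf_arq_head(struct i40e_hw *hw, u16 vf_id)
{
	if (vf_id > I40E_VF_ARQH_MAX_INDEX)
		return (0);
	return ((rd32(hw, I40E_VF_ARQH(vf_id)) & I40E_VF_ARQH_ARQH_MASK) >>
	    I40E_VF_ARQH_ARQH_SHIFT);
}
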
+#define I40E_PRT_L2TAGSEN              0x001C0B20 /* Reset: CORER */
+#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK  I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA                  0x0010C080 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK  I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT     4
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK      I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT      8
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK       I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO                     0x0010C000 /* Reset: PFR */
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT   0
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK    I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT    4
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK     I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL                  0x0010C300 /* Reset: CORER */
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT  0
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK   I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT   12
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK    I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK  I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT    17
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK     I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i)        (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PFCM_LANCTXDATA_MAX_INDEX  3
+#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
+#define I40E_PFCM_LANCTXDATA_DATA_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT                0x0010C380 /* Reset: CORER */
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK  I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK  I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF)             (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX        127
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK  I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT     4
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK      I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT      8
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK       I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF)                (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX           127
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT   0
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK    I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT    4
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK     I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC              0x00083044 /* Reset: CORER */
+#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
+#define I40E_GLDCB_GENC_PCIRTT_MASK  I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI                     0x00122618 /* Reset: CORER */
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG            0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK  I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV                     0x001E4600 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK  I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i)             (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_FCTTVN_MAX_INDEX       3
+#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT    0
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK     I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK  I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC                    0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT   0
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK    I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT        2
+#define I40E_PRTDCB_GENC_NUMTC_MASK         I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_SHIFT       6
+#define I40E_PRTDCB_GENC_FCOEUP_MASK        I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK  I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_PFCLDA_SHIFT       16
+#define I40E_PRTDCB_GENC_PFCLDA_MASK        I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS                   0x00083020 /* Reset: CORER */
+#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK  I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN             0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT  0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK   I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT   1
+#define I40E_PRTDCB_MFLCN_DPF_MASK    I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK  I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT  3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK   I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK  I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC                    0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT     0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK      I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK  I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT  2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK   I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT         8
+#define I40E_PRTDCB_RETSC_LLTC_MASK          I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i)               (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX         7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT     0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK      I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK  I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT       31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK        I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC                    0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT      0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK       I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT     8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK      I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK  I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP                0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC             0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK  I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i)          (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX    7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK  I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC              0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK  I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i)        (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX  7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK  I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC                 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT       0
+#define I40E_PRTDCB_TCPMC_CPM_MASK        I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT      13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK       I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK  I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i)        (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX  7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK  I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC                 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT       0
+#define I40E_PRTDCB_TDPMC_DPM_MASK        I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK  I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB                             0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK  I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT                  8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK                   I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB                             0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK  I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT                  8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK                   I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS              0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT  0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK   I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK  I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i)            (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX      7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK  I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL                0x00269B94 /* Reset: CORER */
+#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT  0
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK   I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT   4
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK    I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_SHIFT     5
+#define I40E_GLFCOE_RCTL_ICRC_MASK      I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK  I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS             0x00083048 /* Reset: POR */
+#define I40E_GL_FWSTS_FWS0B_SHIFT 0
+#define I40E_GL_FWSTS_FWS0B_MASK  I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWRI_SHIFT  9
+#define I40E_GL_FWSTS_FWRI_MASK   I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_SHIFT 16
+#define I40E_GL_FWSTS_FWS1B_MASK  I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT                    0x000B8184 /* Reset: POR */
+#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT      0
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK       I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT  4
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK   I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK  I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK  I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK  I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK  I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i)                (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
+#define I40E_GLGEN_GPIO_CTL_MAX_INDEX          29
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT      0
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK       I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT   3
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK    I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT      4
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK       I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT      5
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK       I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT      6
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK       I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT     7
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK      I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT    10
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK     I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT    11
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK     I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT     12
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK      I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT     17
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK      I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT  19
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK   I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK  I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT  26
+#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK   I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
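Multi-bit fields such as LED_MODE above are updated with the usual
read-modify-write idiom: clear the field with its mask, then OR in the
new value shifted into place. A sketch, assuming the rd32()/wr32()
accessors from i40e_osdep.h; set_gpio_led_mode is a hypothetical helper
name:

/*
 * Sketch: set the LED mode field of one GPIO control register via
 * read-modify-write, leaving the other fields untouched.
 */
static void
set_gpio_led_mode(struct i40e_hw *hw, u32 idx, u32 mode)
{
	u32 reg = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));

	reg &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
	reg |= (mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
	    I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
	wr32(hw, I40E_GLGEN_GPIO_CTL(idx), reg);
}
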
+#define I40E_GLGEN_GPIO_SET                 0x00088184 /* Reset: POR */
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK  I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT  5
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK   I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK  I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT                  0x0008817C /* Reset: POR */
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK  I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT                       0x00088180 /* Reset: POR */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK  I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i)          (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CCMD_MAX_INDEX    3
+#define I40E_GLGEN_I2CCMD_DATA_SHIFT   0
+#define I40E_GLGEN_I2CCMD_DATA_MASK    I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
+#define I40E_GLGEN_I2CCMD_REGADD_MASK  I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK  I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_SHIFT     27
+#define I40E_GLGEN_I2CCMD_OP_MASK      I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_SHIFT  28
+#define I40E_GLGEN_I2CCMD_RESET_MASK   I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_SHIFT      29
+#define I40E_GLGEN_I2CCMD_R_MASK       I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_E_SHIFT      31
+#define I40E_GLGEN_I2CCMD_E_MASK       I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i)                   (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_I2CPARAMS_MAX_INDEX             3
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT      0
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK       I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT       5
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK        I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT        8
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK         I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT             9
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK              I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT        10
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK         I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT       11
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK        I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT         12
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK          I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT        13
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK         I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT          14
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK           I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK  I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT  31
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK   I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL                          0x00088178 /* Reset: POR */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT  0
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK   I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i)                (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX          3
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK  I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT      17
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK       I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK  I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK  I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i)                (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX          3
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK  I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK  I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK  I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK  I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK  I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK  I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK  I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK  I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i)               (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSCA_MAX_INDEX         3
+#define I40E_GLGEN_MSCA_MDIADD_SHIFT      0
+#define I40E_GLGEN_MSCA_MDIADD_MASK       I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_SHIFT      16
+#define I40E_GLGEN_MSCA_DEVADD_MASK       I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_SHIFT      21
+#define I40E_GLGEN_MSCA_PHYADD_MASK       I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_SHIFT      26
+#define I40E_GLGEN_MSCA_OPCODE_MASK       I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_SHIFT      28
+#define I40E_GLGEN_MSCA_STCODE_MASK       I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_SHIFT      30
+#define I40E_GLGEN_MSCA_MDICMD_MASK       I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK  I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i)             (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_GLGEN_MSRWD_MAX_INDEX       3
+#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK  I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK  I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT                0x001C0AB4 /* Reset: PCIR */
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK  I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK  I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT                   0x000B8188 /* Reset: POR */
+#define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT    0
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK     I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT  2
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK   I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT    4
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK     I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT    6
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK     I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT     8
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK      I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK  I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL                   0x000B8180 /* Reset: POR */
+#define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT     0
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK      I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK  I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG              0x000B8190 /* Reset: CORER */
+#define I40E_GLGEN_RTRIG_CORER_SHIFT  0
+#define I40E_GLGEN_RTRIG_CORER_MASK   I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_SHIFT  1
+#define I40E_GLGEN_RTRIG_GLOBR_MASK   I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK  I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT               0x000B612C /* Reset: POR */
+#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
+#define I40E_GLGEN_STAT_HWRSVD0_MASK  I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_SHIFT   2
+#define I40E_GLGEN_STAT_DCBEN_MASK    I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_SHIFT    3
+#define I40E_GLGEN_STAT_VTEN_MASK     I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_SHIFT   4
+#define I40E_GLGEN_STAT_FCOEN_MASK    I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_SHIFT   5
+#define I40E_GLGEN_STAT_EVBEN_MASK    I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
+#define I40E_GLGEN_STAT_HWRSVD1_MASK  I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i)         (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLGEN_VFLRSTAT_MAX_INDEX   3
+#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER             0x000881BC /* Reset: CORER */
+#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
+#define I40E_GLVFGEN_TIMER_GTIME_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL             0x00092400 /* Reset: PFR */
+#define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
+#define I40E_PFGEN_CTRL_PFSWR_MASK  I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN               0x00092500 /* Reset: CORER */
+#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK  I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM                0x001C0480 /* Reset: CORER */
+#define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK  I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE                  0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK  I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_SHIFT     1
+#define I40E_PFGEN_STATE_PFFCEN_MASK      I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_SHIFT   2
+#define I40E_PFGEN_STATE_PFLINKEN_MASK    I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFSCEN_SHIFT     3
+#define I40E_PFGEN_STATE_PFSCEN_MASK      I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF                      0x000B8120 /* Reset: POR */
+#define I40E_PRTGEN_CNF_PORT_DIS_SHIFT       0
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK        I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK  I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT   2
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK    I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2                          0x000B8160 /* Reset: POR */
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK  I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS                   0x000B8100 /* Reset: POR */
+#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT  0
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK   I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK  I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF)            (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFGEN_RSTAT1_MAX_INDEX       127
+#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK  I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF)       (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRSTAT_MAX_INDEX  127
+#define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK  I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF)        (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_VPGEN_VFRTRIG_MAX_INDEX   127
+#define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK  I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI)      (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RSTAT_MAX_INDEX  383
+#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
+#define I40E_VSIGEN_RSTAT_VMRD_MASK  I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI)       (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_VSIGEN_RTRIG_MAX_INDEX   383
+#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK  I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i)                  (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX            15
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i)                 (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX           15
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK  I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ                      0x000C2010 /* Reset: CORER */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i)                (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFBASE_MAX_INDEX          15
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i)               (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FCOEFCNT_MAX_INDEX         15
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK  I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX                  0x000C20D0 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK  I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ                    0x000C2018 /* Reset: CORER */
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX                 0x000C2014 /* Reset: CORER */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK  I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i)                (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVBASE_MAX_INDEX          15
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i)               (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIAVCNT_MAX_INDEX         15
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT        29
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK         I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX                  0x000C2068 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK  I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ                    0x000C2064 /* Reset: CORER */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i)                (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCBASE_MAX_INDEX          15
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i)              (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_FSIMCCNT_MAX_INDEX        15
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX                  0x000C2060 /* Reset: CORER */
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK  I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ                    0x000C205c /* Reset: CORER */
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX                 0x000C2008 /* Reset: CORER */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK  I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i)                (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXBASE_MAX_INDEX          15
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i)               (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANRXCNT_MAX_INDEX         15
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK  I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ                    0x000C200c /* Reset: CORER */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i)                (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXBASE_MAX_INDEX          15
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT         24
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK          I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i)               (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_LANTXCNT_MAX_INDEX         15
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK  I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ                    0x000C2004 /* Reset: CORER */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i)                 (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFASSIGN_MAX_INDEX           15
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK  I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i)            (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_SDPART_MAX_INDEX      15
+#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK  I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK  I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA                      0x000C0500 /* Reset: PFR */
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK  I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO                       0x000C0400 /* Reset: PFR */
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT       0
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK        I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT        7
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK         I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT  8
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK   I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK  I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT  31
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK   I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV               0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK  I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK  I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD               0x000C0000 /* Reset: PFR */
+#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK  I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT  31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK   I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH                    0x000C0200 /* Reset: PFR */
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW                   0x000C0100 /* Reset: PFR */
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT   0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK    I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT    1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK     I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK  I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK  I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i)              (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX        28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE                        0x00094008 /* Reset: POR */
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK  I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_SHIFT           2
+#define I40E_GL_UFUSE_NIC_ID_MASK            I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT      10
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK       I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT      11
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK       I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA                  0x00088188 /* Reset: POR */
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT  0
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT  1
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT  2
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT  3
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT  4
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT  5
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT  6
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT  7
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT  8
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT  9
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK   I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK  I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM                       0x0003F100 /* Reset: CORER */
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT        0
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK         I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK  I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL                  0x00038700 /* Reset: CORER */
+#define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT  0
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK   I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT   11
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK    I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK  I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT  30
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT   31
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK    I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF)          (0x00036800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX        511
+#define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT  0
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK   I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT   11
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK    I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK  I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK  I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT  30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT   31
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK    I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_GLINT_CTL				0x0003F800 /* Reset: CORER */
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT	0
+#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK	I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT	1
+#define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK	I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT	2
+#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK	I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
+#define I40E_PFINT_DYN_CTL0                       0x00038480 /* Reset: PFR */
+#define I40E_PFINT_DYN_CTL0_INTENA_SHIFT          0
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK           I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT        1
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK         I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT      2
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK       I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT        3
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK         I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT        5
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK         I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT     25
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK      I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT      31
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK       I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF)               (0x00034800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_MAX_INDEX             511
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT          0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK           I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT        1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK         I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT      2
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK       I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT        3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK         I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT        5
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK         I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT     25
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK      I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT      31
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK       I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
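+/*
+ * Usage sketch: a per-vector interrupt is re-armed by writing its
+ * PFINT_DYN_CTLN register with INTENA plus CLEARPBA, using ITR index 3
+ * ("no ITR") so the write does not disturb the throttling timers.
+ * Illustrative only; wr32() is assumed from i40e_osdep.h, and how vector
+ * numbers map to the register index is a driver-level convention.
+ */
+#if 0	/* example only */
+static void
+example_enable_vector(struct i40e_hw *hw, int index)
+{
+	u32 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+	    (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); /* 3 = no ITR */
+
+	wr32(hw, I40E_PFINT_DYN_CTLN(index), reg);
+}
+#endif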
+#define I40E_PFINT_GPIO_ENA                  0x00088080 /* Reset: CORER */
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT  0
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT  1
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT  2
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT  3
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT  4
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT  5
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT  6
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT  7
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT  8
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT  9
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK   I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0                        0x00038780 /* Reset: CORER */
+#define I40E_PFINT_ICR0_INTEVENT_SHIFT         0
+#define I40E_PFINT_ICR0_INTEVENT_MASK          I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_SHIFT          1
+#define I40E_PFINT_ICR0_QUEUE_0_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_SHIFT          2
+#define I40E_PFINT_ICR0_QUEUE_1_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_SHIFT          3
+#define I40E_PFINT_ICR0_QUEUE_2_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_SHIFT          4
+#define I40E_PFINT_ICR0_QUEUE_3_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_SHIFT          5
+#define I40E_PFINT_ICR0_QUEUE_4_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_SHIFT          6
+#define I40E_PFINT_ICR0_QUEUE_5_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_SHIFT          7
+#define I40E_PFINT_ICR0_QUEUE_6_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_SHIFT          8
+#define I40E_PFINT_ICR0_QUEUE_7_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_SHIFT          16
+#define I40E_PFINT_ICR0_ECC_ERR_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT       19
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK        I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_GRST_SHIFT             20
+#define I40E_PFINT_ICR0_GRST_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT    21
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK     I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_SHIFT             22
+#define I40E_PFINT_ICR0_GPIO_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_SHIFT         23
+#define I40E_PFINT_ICR0_TIMESYNC_MASK          I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT     24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK      I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK  I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_SHIFT          26
+#define I40E_PFINT_ICR0_HMC_ERR_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT       28
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK        I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_SHIFT             29
+#define I40E_PFINT_ICR0_VFLR_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_SHIFT           30
+#define I40E_PFINT_ICR0_ADMINQ_MASK            I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_SWINT_SHIFT            31
+#define I40E_PFINT_ICR0_SWINT_MASK             I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA                        0x00038800 /* Reset: CORER */
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT          16
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT       19
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK        I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_SHIFT             20
+#define I40E_PFINT_ICR0_ENA_GRST_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT    21
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK     I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_SHIFT             22
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT         23
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK          I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT     24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK      I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK  I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT          26
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK           I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT       28
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK        I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_SHIFT             29
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT           30
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK            I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT             31
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK              I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
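+/*
+ * Usage sketch: the "other cause" handler reads PFINT_ICR0 (the latched
+ * causes clear on read), acts on the bits of interest, then rewrites
+ * PFINT_ICR0_ENA to re-enable the causes it wants to keep seeing.
+ * Illustrative only, assuming rd32()/wr32() from i40e_osdep.h.
+ */
+#if 0	/* example only */
+static void
+example_service_icr0(struct i40e_hw *hw)
+{
+	u32 icr0 = rd32(hw, I40E_PFINT_ICR0);	/* read clears causes */
+
+	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+		/* process admin queue events */
+	}
+	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
+		/* a VF function-level reset completed */
+	}
+
+	/* re-enable the causes this sketch cares about */
+	wr32(hw, I40E_PFINT_ICR0_ENA,
+	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
+	    I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK);
+}
+#endif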
+#define I40E_PFINT_ITR0(_i)            (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
+#define I40E_PFINT_ITR0_MAX_INDEX      2
+#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITR0_INTERVAL_MASK  I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF)     (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_ITRN_MAX_INDEX      2
+#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_PFINT_ITRN_INTERVAL_MASK  I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
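+/*
+ * Usage sketch: ITR intervals are programmed per (ITR index, vector)
+ * pair; on this family the INTERVAL field counts in 2-microsecond
+ * units, so a 50 us throttle is written as 25. Illustrative only.
+ */
+#if 0	/* example only */
+static void
+example_set_itr(struct i40e_hw *hw, int itr_indx, int vector, u16 usecs)
+{
+	wr32(hw, I40E_PFINT_ITRN(itr_indx, vector),
+	    (usecs / 2) & I40E_PFINT_ITRN_INTERVAL_MASK);
+}
+#endif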
+#define I40E_PFINT_LNKLST0                   0x00038500 /* Reset: PFR */
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK  I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF)           (0x00035000 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX         511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK  I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0                 0x00038580 /* Reset: PFR */
+#define I40E_PFINT_RATE0_INTERVAL_SHIFT  0
+#define I40E_PFINT_RATE0_INTERVAL_MASK   I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF)         (0x00035800 + ((_INTPF) * 4)) /* _INTPF=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_RATEN_MAX_INDEX       511
+#define I40E_PFINT_RATEN_INTERVAL_SHIFT  0
+#define I40E_PFINT_RATEN_INTERVAL_MASK   I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK  I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0                      0x00038400 /* Reset: CORER */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK  I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q)              (0x0003A000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_RQCTL_MAX_INDEX        1535
+#define I40E_QINT_RQCTL_MSIX_INDX_SHIFT  0
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK   I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_SHIFT   11
+#define I40E_QINT_RQCTL_ITR_INDX_MASK    I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK  I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK  I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT  30
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK   I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_INTEVENT_SHIFT   31
+#define I40E_QINT_RQCTL_INTEVENT_MASK    I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q)              (0x0003C000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QINT_TQCTL_MAX_INDEX        1535
+#define I40E_QINT_TQCTL_MSIX_INDX_SHIFT  0
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK   I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_SHIFT   11
+#define I40E_QINT_TQCTL_ITR_INDX_MASK    I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK  I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK  I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT  30
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK   I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_INTEVENT_SHIFT   31
+#define I40E_QINT_TQCTL_INTEVENT_MASK    I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
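+/*
+ * Usage sketch: queue interrupts are organized as linked lists. The head
+ * of a vector's list goes in PFINT_LNKLSTN (PFINT_LNKLST0 for the misc
+ * vector), and each QINT_RQCTL/QINT_TQCTL entry names the MSI-X index,
+ * enables the cause, and points at the next queue; 0x7FF in NEXTQ_INDX
+ * terminates the list. Illustrative only: the queue-type encoding used
+ * here (0 = RX, 1 = TX) and the vector-1 register indexing (vector 0
+ * being the misc cause) are assumptions to verify against the datasheet.
+ */
+#if 0	/* example only */
+static void
+example_map_queue_to_vector(struct i40e_hw *hw, u16 q, u16 vector)
+{
+	u32 reg;
+
+	/* vector's list starts at RX queue q (FIRSTQ type 0 = RX) */
+	wr32(hw, I40E_PFINT_LNKLSTN(vector - 1),
+	    (u32)q << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
+
+	/* RX cause: fire this vector, chain to TX queue q (type 1) */
+	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+	    ((u32)vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+	    ((u32)q << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+	    (1 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+	wr32(hw, I40E_QINT_RQCTL(q), reg);
+
+	/* TX cause: same vector; 0x7FF ends the list */
+	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+	    ((u32)vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+	    (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+	wr32(hw, I40E_QINT_TQCTL(q), reg);
+}
+#endif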
+#define I40E_VFINT_DYN_CTL0(_VF)                  (0x0002A400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL0_MAX_INDEX             127
+#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT          0
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK           I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT        1
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK         I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT      2
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT        3
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK         I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT        5
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK         I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK  I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT     25
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK      I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT      31
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF)               (0x00024800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN_MAX_INDEX             511
+#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT          0
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK           I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT        1
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK         I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT      2
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT        3
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK         I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT        5
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK         I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK  I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT     25
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK      I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT      31
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF)                   (0x0002BC00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_MAX_INDEX              127
+#define I40E_VFINT_ICR0_INTEVENT_SHIFT         0
+#define I40E_VFINT_ICR0_INTEVENT_MASK          I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_SHIFT          1
+#define I40E_VFINT_ICR0_QUEUE_0_MASK           I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_SHIFT          2
+#define I40E_VFINT_ICR0_QUEUE_1_MASK           I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_SHIFT          3
+#define I40E_VFINT_ICR0_QUEUE_2_MASK           I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_SHIFT          4
+#define I40E_VFINT_ICR0_QUEUE_3_MASK           I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK  I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT           30
+#define I40E_VFINT_ICR0_ADMINQ_MASK            I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_SWINT_SHIFT            31
+#define I40E_VFINT_ICR0_SWINT_MASK             I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF)                   (0x0002C000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA_MAX_INDEX              127
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK  I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT           30
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK            I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT             31
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK              I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF)        (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFINT_ITR0_MAX_INDEX      2
+#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR0_INTERVAL_MASK  I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF)     (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN_MAX_INDEX      2
+#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN_INTERVAL_MASK  I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF)                 (0x0002A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL0_MAX_INDEX            127
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK  I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF)             (0x0002B800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VPINT_AEQCTL_MAX_INDEX        127
+#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT  0
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK   I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT   11
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK    I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK  I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT  30
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK   I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT   31
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK    I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF)          (0x00026800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: CORER */
+#define I40E_VPINT_CEQCTL_MAX_INDEX        511
+#define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT  0
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK   I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT   11
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK    I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK  I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK  I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT  30
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK   I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT   31
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK    I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF)              (0x0002A800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLST0_MAX_INDEX         127
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK  I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF)           (0x00025000 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_LNKLSTN_MAX_INDEX         511
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK  I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK  I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF)            (0x0002AC00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPINT_RATE0_MAX_INDEX       127
+#define I40E_VPINT_RATE0_INTERVAL_SHIFT  0
+#define I40E_VPINT_RATE0_INTERVAL_MASK   I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK  I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF)         (0x00025800 + ((_INTVF) * 4)) /* _INTVF=0...511 */ /* Reset: VFR */
+#define I40E_VPINT_RATEN_MAX_INDEX       511
+#define I40E_VPINT_RATEN_INTERVAL_SHIFT  0
+#define I40E_VPINT_RATEN_INTERVAL_MASK   I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK  I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL                 0x00051060 /* Reset: CORER */
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK  I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_ECO_SHIFT       1
+#define I40E_GL_RDPU_CNTRL_ECO_MASK        I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0                0x0012A500 /* Reset: CORER */
+#define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK  I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F               0x000442D8 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK  I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L               0x000442E0 /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK  I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M               0x000442DC /* Reset: CORER */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK  I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i)              (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX        11
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT      0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK       I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT  16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK   I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT   30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK    I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK  I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
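+/*
+ * Usage sketch: before a TX queue is disabled, the datapath is told to
+ * stop prefetching for it via GLLAN_TXPRE_QDIS; each of the 12 registers
+ * covers 128 queues, so queue q lives in register q / 128 with q % 128
+ * in the QINDX field. Illustrative only (this mirrors the shape of
+ * i40e_pre_tx_queue_cfg() in i40e_common.c).
+ */
+#if 0	/* example only */
+static void
+example_tx_pre_queue_disable(struct i40e_hw *hw, u32 q)
+{
+	u32 reg = ((q % 128) << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) |
+	    I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+	wr32(hw, I40E_GLLAN_TXPRE_QDIS(q / 128), reg);
+}
+#endif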
+#define I40E_PFLAN_QALLOC              0x001C0400 /* Reset: CORER */
+#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK  I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_SHIFT  16
+#define I40E_PFLAN_QALLOC_LASTQ_MASK   I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_SHIFT  31
+#define I40E_PFLAN_QALLOC_VALID_MASK   I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
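+/*
+ * Usage sketch: PFLAN_QALLOC reports the absolute queue range assigned
+ * to this PF; FIRSTQ/LASTQ are only meaningful when the VALID bit is
+ * set. Illustrative decode, assuming rd32() from i40e_osdep.h.
+ */
+#if 0	/* example only */
+static int
+example_pf_queue_range(struct i40e_hw *hw, u16 *firstq, u16 *lastq)
+{
+	u32 reg = rd32(hw, I40E_PFLAN_QALLOC);
+
+	if (!(reg & I40E_PFLAN_QALLOC_VALID_MASK))
+		return (EINVAL);
+	*firstq = (reg & I40E_PFLAN_QALLOC_FIRSTQ_MASK)
+	    >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+	*lastq = (reg & I40E_PFLAN_QALLOC_LASTQ_MASK)
+	    >> I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+	return (0);
+}
+#endif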
+#define I40E_QRX_ENA(_Q)             (0x00120000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QRX_ENA_MAX_INDEX       1535
+#define I40E_QRX_ENA_QENA_REQ_SHIFT  0
+#define I40E_QRX_ENA_QENA_REQ_MASK   I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QRX_ENA_FAST_QDIS_MASK  I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QRX_ENA_QENA_STAT_MASK  I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q)        (0x00128000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QRX_TAIL_MAX_INDEX  1535
+#define I40E_QRX_TAIL_TAIL_SHIFT 0
+#define I40E_QRX_TAIL_TAIL_MASK  I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q)             (0x00104000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_CTL_MAX_INDEX       1535
+#define I40E_QTX_CTL_PFVF_Q_SHIFT    0
+#define I40E_QTX_CTL_PFVF_Q_MASK     I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_SHIFT   2
+#define I40E_QTX_CTL_PF_INDX_MASK    I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
+#define I40E_QTX_CTL_VFVM_INDX_MASK  I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q)             (0x00100000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_ENA_MAX_INDEX       1535
+#define I40E_QTX_ENA_QENA_REQ_SHIFT  0
+#define I40E_QTX_ENA_QENA_REQ_MASK   I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
+#define I40E_QTX_ENA_FAST_QDIS_MASK  I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_QENA_STAT_SHIFT 2
+#define I40E_QTX_ENA_QENA_STAT_MASK  I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q)              (0x000E4000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: CORER */
+#define I40E_QTX_HEAD_MAX_INDEX        1535
+#define I40E_QTX_HEAD_HEAD_SHIFT       0
+#define I40E_QTX_HEAD_HEAD_MASK        I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
+#define I40E_QTX_HEAD_RS_PENDING_MASK  I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q)        (0x00108000 + ((_Q) * 4)) /* _Q=0...1535 */ /* Reset: PFR */
+#define I40E_QTX_TAIL_MAX_INDEX  1535
+#define I40E_QTX_TAIL_TAIL_SHIFT 0
+#define I40E_QTX_TAIL_TAIL_MASK  I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
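+/*
+ * Usage sketch: a TX queue is started by setting QENA_REQ in QTX_ENA and
+ * polling until hardware reflects QENA_STAT; descriptors are then handed
+ * to hardware by bumping QTX_TAIL (QRX_ENA/QRX_TAIL behave the same way
+ * on the RX side). Illustrative only; i40e_msec_delay() is the delay
+ * helper assumed from i40e_osdep.h.
+ */
+#if 0	/* example only */
+static int
+example_start_tx_queue(struct i40e_hw *hw, u32 q)
+{
+	int i;
+	u32 reg = rd32(hw, I40E_QTX_ENA(q));
+
+	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+	wr32(hw, I40E_QTX_ENA(q), reg);
+
+	for (i = 0; i < 10; i++) {
+		if (rd32(hw, I40E_QTX_ENA(q)) & I40E_QTX_ENA_QENA_STAT_MASK)
+			break;
+		i40e_msec_delay(1);
+	}
+	if (i == 10)
+		return (ETIMEDOUT);
+
+	wr32(hw, I40E_QTX_TAIL(q), 0);	/* tail starts at descriptor 0 */
+	return (0);
+}
+#endif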
+#define I40E_VPLAN_MAPENA(_VF)           (0x00074000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_MAPENA_MAX_INDEX      127
+#define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK  I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF)      (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QTABLE_MAX_INDEX    15
+#define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
+#define I40E_VPLAN_QTABLE_QINDEX_MASK  I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI)               (0x0020C800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QBASE_MAX_INDEX           383
+#define I40E_VSILAN_QBASE_VSIBASE_SHIFT       0
+#define I40E_VSILAN_QBASE_VSIBASE_MASK        I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK  I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI)       (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSILAN_QTABLE_MAX_INDEX      7
+#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK  I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK  I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
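+/*
+ * Usage sketch: when VSIQTABLE_ENA is set in VSILAN_QBASE, the VSI's
+ * queue mapping is non-contiguous and comes from VSILAN_QTABLE, which
+ * packs two 11-bit global queue indices per register (VSI-relative
+ * entries 2i and 2i+1). Illustrative only.
+ */
+#if 0	/* example only */
+static void
+example_set_vsi_qpair(struct i40e_hw *hw, u16 vsi, u16 i,
+    u16 glob_q_even, u16 glob_q_odd)
+{
+	wr32(hw, I40E_VSILAN_QTABLE(i, vsi),
+	    ((u32)glob_q_even << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) |
+	    ((u32)glob_q_odd << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT));
+}
+#endif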
+#define I40E_PRTGL_SAH              0x001E2140 /* Reset: GLOBR */
+#define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
+#define I40E_PRTGL_SAH_FC_SAH_MASK  I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_MFS_SHIFT    16
+#define I40E_PRTGL_SAH_MFS_MASK     I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL              0x001E2120 /* Reset: GLOBR */
+#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
+#define I40E_PRTGL_SAL_FC_SAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
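+/*
+ * Usage sketch: PRTGL_SAL/PRTGL_SAH together hold the port's 48-bit
+ * flow-control station address (low 32 bits in SAL, high 16 in SAH),
+ * and the upper half of SAH doubles as the port's max frame size.
+ * Illustrative MFS update that preserves the address bits.
+ */
+#if 0	/* example only */
+static void
+example_set_max_frame_size(struct i40e_hw *hw, u16 mfs)
+{
+	u32 reg = rd32(hw, I40E_PRTGL_SAH);
+
+	reg &= ~I40E_PRTGL_SAH_MFS_MASK;
+	reg |= (u32)mfs << I40E_PRTGL_SAH_MFS_SHIFT;
+	wr32(hw, I40E_PRTGL_SAH, reg);
+}
+#endif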
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP                              0x001E30E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK  I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP                              0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK  I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP                              0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK  I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL                                   0x001E3360 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK  I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1                                        0x001E3110 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2                                        0x001E3120 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE                                0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK  I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1                                  0x001E3140 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2                                  0x001E3150 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE                                0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK  I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i)                            (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX                      8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i)                                   (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX                             8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1                            0x001E34B0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2                            0x001E34C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A                     0x0008C480 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B                     0x0008C484 /* Reset: GLOBR */
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK  I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT                  0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM                              0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT               0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK                I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT         10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK          I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT       11
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK        I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT        15
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK         I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT              16
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK               I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT            19
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK             I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK  I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK  I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK  I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK  I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL                   0x000B6130 /* Reset: POR */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK  I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i)         (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX   31
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH              0x00085260 /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK  I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i)        (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX  7
+#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK  I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC                            0x00256A20 /* Reset: POR */
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT         1
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK          I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT           17
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK            I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT              19
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK               I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT       25
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK        I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT             26
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK              I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT            28
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK             I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT           29
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK            I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i)       (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
+#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
+#define I40E_PRT_MNG_MAVTV_VID_MASK  I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i)                             (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_MAX_INDEX                       7
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT             0
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK              I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT             4
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK              I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT                  5
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK                   I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT          13
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK           I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT          17
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK           I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT              21
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK               I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT              25
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK               I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT             26
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK              I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT            27
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK             I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT           28
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK            I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT             30
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK              I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT             31
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK              I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i)                             (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX                       7
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT          0
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK           I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT           4
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK            I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT              8
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK               I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT                  24
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK                   I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT                   28
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK                    I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT                       29
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK                        I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT  30
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK   I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT     31
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK      I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i)                (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX          3
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT   0
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK    I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK  I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i)            (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_METF_MAX_INDEX      3
+#define I40E_PRT_MNG_METF_ETYPE_SHIFT    0
+#define I40E_PRT_MNG_METF_ETYPE_MASK     I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
+#define I40E_PRT_MNG_METF_POLARITY_MASK  I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i)                      (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MFUTP_MAX_INDEX                15
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT            0
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK             I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_SHIFT                16
+#define I40E_PRT_MNG_MFUTP_UDP_MASK                 I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_SHIFT                17
+#define I40E_PRT_MNG_MFUTP_TCP_MASK                 I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i)         (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF4_MAX_INDEX   3
+#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i)         (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
+#define I40E_PRT_MNG_MIPAF6_MAX_INDEX   15
+#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i)        (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAH_MAX_INDEX  3
+#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
+#define I40E_PRT_MNG_MMAH_MMAH_MASK  I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i)        (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
+#define I40E_PRT_MNG_MMAL_MAX_INDEX  3
+#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
+#define I40E_PRT_MNG_MMAL_MMAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY                                  0x00256A60 /* Reset: POR */
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK  I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM                    0x00256AA0 /* Reset: POR */
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK  I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT  4
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK   I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT  5
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK   I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT  6
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK   I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT  7
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK   I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i)          (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
+#define I40E_MSIX_PBA_MAX_INDEX    5
+#define I40E_MSIX_PBA_PENBIT_SHIFT 0
+#define I40E_MSIX_PBA_PENBIT_MASK  I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i)              (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TADD_MAX_INDEX        128
+#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_MSIX_TADD_MSIXTADD10_MASK  I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD_SHIFT   2
+#define I40E_MSIX_TADD_MSIXTADD_MASK    I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i)            (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TMSG_MAX_INDEX      128
+#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK  I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i)             (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TUADD_MAX_INDEX       128
+#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK  I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i)        (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
+#define I40E_MSIX_TVCTRL_MAX_INDEX  128
+#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_MSIX_TVCTRL_MASK_MASK  I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i)          (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
+#define I40E_VFMSIX_PBA1_MAX_INDEX    19
+#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA1_PENBIT_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i)              (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD1_MAX_INDEX        639
+#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK  I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT   2
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK    I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i)            (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG1_MAX_INDEX      639
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i)             (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD1_MAX_INDEX       639
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i)        (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL1_MAX_INDEX  639
+#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK  I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA                0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_FL_SCK_SHIFT   0
+#define I40E_GLNVM_FLA_FL_SCK_MASK    I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_SHIFT    1
+#define I40E_GLNVM_FLA_FL_CE_MASK     I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_SHIFT    2
+#define I40E_GLNVM_FLA_FL_SI_MASK     I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_SHIFT    3
+#define I40E_GLNVM_FLA_FL_SO_MASK     I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_SHIFT   4
+#define I40E_GLNVM_FLA_FL_REQ_MASK    I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_SHIFT   5
+#define I40E_GLNVM_FLA_FL_GNT_MASK    I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_SHIFT   6
+#define I40E_GLNVM_FLA_LOCKED_MASK    I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
+#define I40E_GLNVM_FLA_FL_SADDR_MASK  I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_SHIFT  30
+#define I40E_GLNVM_FLA_FL_BUSY_MASK   I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_DER_SHIFT   31
+#define I40E_GLNVM_FLA_FL_DER_MASK    I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID                  0x000B6104 /* Reset: POR */
+#define I40E_GLNVM_FLASHID_FLASHID_SHIFT    0
+#define I40E_GLNVM_FLASHID_FLASHID_MASK     I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK  I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS                  0x000B6100 /* Reset: POR */
+#define I40E_GLNVM_GENS_NVM_PRES_SHIFT   0
+#define I40E_GLNVM_GENS_NVM_PRES_MASK    I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_SHIFT    5
+#define I40E_GLNVM_GENS_SR_SIZE_MASK     I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_SHIFT   8
+#define I40E_GLNVM_GENS_BANK1VAL_MASK    I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_SHIFT   23
+#define I40E_GLNVM_GENS_ALT_PRST_MASK    I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK  I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i)              (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
+#define I40E_GLNVM_PROTCSR_MAX_INDEX        59
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK  I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL              0x000B6110 /* Reset: POR */
+#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK  I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_SHIFT   14
+#define I40E_GLNVM_SRCTL_ADDR_MASK    I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_SHIFT  29
+#define I40E_GLNVM_SRCTL_WRITE_MASK   I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_START_SHIFT  30
+#define I40E_GLNVM_SRCTL_START_MASK   I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_SHIFT   31
+#define I40E_GLNVM_SRCTL_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA              0x000B6114 /* Reset: POR */
+#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
+#define I40E_GLNVM_SRDATA_WRDATA_MASK  I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
+#define I40E_GLNVM_SRDATA_RDDATA_MASK  I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD                          0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT     0
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK      I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT   1
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT      2
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK       I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT     3
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK      I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT   4
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT      5
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK       I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK  I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT  7
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK   I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT      8
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK       I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT   9
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH                        0x0009C484 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL                        0x0009C488 /* Reset: PCIR */
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL              0x000BE4A4 /* Reset: PCIR */
+#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK  I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP                      0x000BE4A8 /* Reset: PCIR */
+#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT       0
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK        I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT         2
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT         3
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT         4
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT         5
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT         6
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT         7
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT    16
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK     I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT    17
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK     I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT         18
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK          I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT       19
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK        I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT    20
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK     I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK  I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT    31
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK     I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF                   0x000BE4C0 /* Reset: POR */
+#define I40E_GLPCI_CNF_FLEX10_SHIFT      1
+#define I40E_GLPCI_CNF_FLEX10_MASK       I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK  I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2                      0x000BE494 /* Reset: PCIR */
+#define I40E_GLPCI_CNF2_RO_DIS_SHIFT         0
+#define I40E_GLPCI_CNF2_RO_DIS_MASK          I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK  I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT     2
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK      I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT     13
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK      I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID                     0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK  I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1                        0x0009C48C /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT   0
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK    I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT   1
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK    I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT   2
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK    I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT   3
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK    I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT     4
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK      I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT     5
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK      I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT     6
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK      I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT     7
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK      I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK  I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK  I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT  14
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK   I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT  15
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK   I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT    28
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK     I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT  29
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK   I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT   30
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK    I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT  31
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK   I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2                       0x0009C490 /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK  I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK  I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK  I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK  I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i)                   (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCL_5_8_MAX_INDEX             3
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT     16
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK      I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i)                 (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
+#define I40E_GLPCI_GSCN_0_3_MAX_INDEX           3
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LBARCTRL                    0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT      0
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK       I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT        1
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK         I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK  I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT       4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK        I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT      6
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK       I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT      10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK       I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT   11
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK    I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP                          0x000BE4AC /* Reset: PCIR */
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK  I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT        6
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK         I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT     9
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK      I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR                    0x000BE4FC /* Reset: PCIR */
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT                        0x0009C4BC /* Reset: PCIR */
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ                        0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK  I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT    16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK     I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB                      0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT   0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK    I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK  I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP                    0x000BE4B0 /* Reset: PCIR */
+#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT     0
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK      I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK  I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT  5
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK   I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT  8
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK   I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT   11
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK    I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT     14
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK      I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT     15
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK      I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC                                0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK  I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK  I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA                  0x000BE490 /* Reset: PCIR */
+#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT   0
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK    I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK  I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT   16
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK    I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK  I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID                 0x000BE4B4 /* Reset: PCIR */
+#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
+#define I40E_GLPCI_REVID_NVM_REVID_MASK  I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH                 0x000BE49C /* Reset: PCIR */
+#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL                 0x000BE498 /* Reset: PCIR */
+#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0                  0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1                  0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID                  0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD               0x000BE4F8 /* Reset: PCIR */
+#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
+#define I40E_GLPCI_UPADD_ADDRESS_MASK  I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID                0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP                   0x000BE4B8 /* Reset: PCIR */
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK  I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK  I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_GLTPH_CTRL                         0x000BE480 /* Reset: PCIR */
+#define I40E_GLTPH_CTRL_DESC_PH_SHIFT           9
+#define I40E_GLTPH_CTRL_DESC_PH_MASK            I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
+#define I40E_GLTPH_CTRL_DATA_PH_SHIFT           11
+#define I40E_GLTPH_CTRL_DATA_PH_MASK            I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
+#define I40E_PF_FUNC_RID                       0x0009C000 /* Reset: PCIR */
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK  I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT   3
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK    I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT      8
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK       I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
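+
+/*
+ * Minimal sketch (hypothetical helper), assuming the rd32() accessor
+ * from i40e_osdep.h: split the PF requester ID into its bus, device
+ * and function fields using the shift/mask pairs above.
+ */
+static inline void
+ixl_decode_pf_rid(struct i40e_hw *hw, u8 *bus, u8 *dev, u8 *func)
+{
+	u32 rid = rd32(hw, I40E_PF_FUNC_RID);
+
+	*func = (rid & I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK) >>
+	    I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT;
+	*dev = (rid & I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK) >>
+	    I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT;
+	*bus = (rid & I40E_PF_FUNC_RID_BUS_NUMBER_MASK) >>
+	    I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT;
+}
+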
+#define I40E_PF_PCI_CIAA               0x0009C080 /* Reset: FLR */
+#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK  I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_VF_NUM_SHIFT  12
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK   I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD            0x0009C100 /* Reset: FLR */
+#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
+#define I40E_PF_PCI_CIAD_DATA_MASK  I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS                     0x000BE400 /* Reset: PCIR */
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK  I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT    1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK     I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT     2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK      I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF                 0x000BE000 /* Reset: PCIR */
+#define I40E_PFPCI_CNF_MSI_EN_SHIFT    2
+#define I40E_PFPCI_CNF_MSI_EN_MASK     I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK  I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_SHIFT    4
+#define I40E_PFPCI_CNF_IO_BAR_MASK     I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_INT_PIN_SHIFT   5
+#define I40E_PFPCI_CNF_INT_PIN_MASK    I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID                 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK  I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK  I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS                        0x0009C180 /* Reset: FLR */
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK  I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT      3
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK       I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC                            0x000BE200 /* Reset: POR */
+#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT             0
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK              I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT       1
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK        I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK  I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2                    0x000BE180 /* Reset: PCIR */
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK  I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE                      0x0009C200 /* Reset: PFR */
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA                   0x0009C280 /* Reset: PFR */
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE                  0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK  I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM              0x000BE300 /* Reset: POR */
+#define I40E_PFPCI_PM_PME_EN_SHIFT 0
+#define I40E_PFPCI_PM_PME_EN_MASK  I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1                  0x000BE280 /* Reset: POR */
+#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK  I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID                    0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK  I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK  I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE                  0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK  I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF)             (0x0009C600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX        127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK  I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE                  0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK  I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX               0x0009C300 /* Reset: PCIR */
+#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK  I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND               0x0009C380 /* Reset: PCIR */
+#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
+#define I40E_PFPCI_VMPEND_PENDING_MASK  I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT                     0x001E4320 /* Reset: GLOBR */
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT       29
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK        I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK  I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK  I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC                     0x001E4380 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT   16
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK    I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK  I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT      26
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK       I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD                          0x001E4400 /* Reset: GLOBR */
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK  I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER                 0x001E4360 /* Reset: GLOBR */
+#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK  I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK  I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC              0x001E43E0 /* Reset: GLOBR */
+#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK  I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC                     0x000B8140 /* Reset: POR */
+#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT   0
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK    I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_SHIFT      1
+#define I40E_PRTPM_GC_MNG_VETO_MASK       I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_RATD_SHIFT          2
+#define I40E_PRTPM_GC_RATD_MASK           I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_SHIFT         3
+#define I40E_PRTPM_GC_LCDMP_MASK          I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK  I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC              0x001E43A0 /* Reset: GLOBR */
+#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC              0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS               0x000AC828 /* Reset: CORER */
+#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK  I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW           0x000AC830 /* Reset: CORER */
+#define I40E_GLRPB_GHW_GHW_SHIFT 0
+#define I40E_GLRPB_GHW_GHW_MASK  I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW           0x000AC834 /* Reset: CORER */
+#define I40E_GLRPB_GLW_GLW_SHIFT 0
+#define I40E_GLRPB_GLW_GLW_MASK  I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW           0x000AC844 /* Reset: CORER */
+#define I40E_GLRPB_PHW_PHW_SHIFT 0
+#define I40E_GLRPB_PHW_PHW_MASK  I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW           0x000AC848 /* Reset: CORER */
+#define I40E_GLRPB_PLW_PLW_SHIFT 0
+#define I40E_GLRPB_PLW_PLW_MASK  I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i)           (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_MAX_INDEX     7
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i)           (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_MAX_INDEX     7
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i)           (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_MAX_INDEX     7
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i)           (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_MAX_INDEX     7
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW           0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i)           (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_MAX_INDEX     7
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW           0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS           0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK  I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL                      0x00269BA4 /* Reset: CORER */
+#define I40E_GLQF_CTL_HTOEP_SHIFT          1
+#define I40E_GLQF_CTL_HTOEP_MASK           I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT     2
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK      I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT     3
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK      I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK  I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
+#define I40E_GLQF_CTL_RSVD_SHIFT           7
+#define I40E_GLQF_CTL_RSVD_MASK            I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT      8
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK       I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT      11
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK       I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT      14
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK       I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_SHIFT         17
+#define I40E_GLQF_CTL_FDBEST_MASK          I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_SHIFT       25
+#define I40E_GLQF_CTL_PROGPRIO_MASK        I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_SHIFT      26
+#define I40E_GLQF_CTL_INVALPRIO_MASK       I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_IGNORE_IP_SHIFT      27
+#define I40E_GLQF_CTL_IGNORE_IP_MASK       I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0                   0x00269BAC /* Reset: CORER */
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK  I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT     13
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK      I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i)         (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX   12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK  I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK  I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK  I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK  I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i)            (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HSYM_MAX_INDEX      63
+#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK  I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i)        (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_GLQF_PCNT_MAX_INDEX  511
+#define I40E_GLQF_PCNT_PCNT_SHIFT 0
+#define I40E_GLQF_PCNT_PCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j)          (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_SWAP_MAX_INDEX       1
+#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK  I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK  I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_SHIFT     12
+#define I40E_GLQF_SWAP_FLEN0_MASK      I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK  I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK  I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_FLEN1_SHIFT     28
+#define I40E_GLQF_SWAP_FLEN1_MASK      I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0                   0x001C0AC0 /* Reset: CORER */
+#define I40E_PFQF_CTL_0_PEHSIZE_SHIFT     0
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK      I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_SHIFT     5
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK      I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT   10
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK    I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT   14
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK    I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK  I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_SHIFT      17
+#define I40E_PFQF_CTL_0_FD_ENA_MASK       I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT   18
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK    I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK  I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT   20
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK    I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT   24
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK    I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1                    0x00245D80 /* Reset: CORER */
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK  I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC               0x00246280 /* Reset: CORER */
+#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK  I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDBEST_SHIFT  8
+#define I40E_PFQF_FDALLOC_FDBEST_MASK   I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT                   0x00246380 /* Reset: CORER */
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK  I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT    16
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK     I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
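+
+/*
+ * Minimal sketch (hypothetical helper), assuming rd32() from
+ * i40e_osdep.h: report how many guaranteed and best-effort Flow
+ * Director filters this PF currently has programmed.
+ */
+static inline void
+ixl_fd_filter_usage(struct i40e_hw *hw, u32 *guaranteed, u32 *best)
+{
+	u32 stat = rd32(hw, I40E_PFQF_FDSTAT);
+
+	*guaranteed = (stat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+	    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT;
+	*best = (stat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+	    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT;
+}
+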
+#define I40E_PFQF_HENA(_i)             (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_PFQF_HENA_MAX_INDEX       1
+#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i)         (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_PFQF_HKEY_MAX_INDEX   12
+#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_PFQF_HKEY_KEY_0_MASK  I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_PFQF_HKEY_KEY_1_MASK  I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_PFQF_HKEY_KEY_2_MASK  I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_PFQF_HKEY_KEY_3_MASK  I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i)        (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_PFQF_HLUT_MAX_INDEX  127
+#define I40E_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_PFQF_HLUT_LUT0_MASK  I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_PFQF_HLUT_LUT1_MASK  I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_PFQF_HLUT_LUT2_MASK  I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_PFQF_HLUT_LUT3_MASK  I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
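+
+/*
+ * Minimal sketch (hypothetical helper), assuming wr32() from
+ * i40e_osdep.h: fill the PF RSS lookup table round-robin, packing
+ * four queue indices (LUT0..LUT3) into each 32-bit HLUT register.
+ * Assumes 1 <= nqueues <= 64 so every index fits the 0x3F field mask.
+ */
+static inline void
+ixl_fill_rss_lut(struct i40e_hw *hw, u32 nqueues)
+{
+	u32 i, lut;
+
+	for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
+		lut = (((4 * i + 0) % nqueues) << I40E_PFQF_HLUT_LUT0_SHIFT) |
+		    (((4 * i + 1) % nqueues) << I40E_PFQF_HLUT_LUT1_SHIFT) |
+		    (((4 * i + 2) % nqueues) << I40E_PFQF_HLUT_LUT2_SHIFT) |
+		    (((4 * i + 3) % nqueues) << I40E_PFQF_HLUT_LUT3_SHIFT);
+		wr32(hw, I40E_PFQF_HLUT(i), lut);
+	}
+}
+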
+#define I40E_PRTQF_CTL_0                0x00256E60 /* Reset: CORER */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK  I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i)         (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX   63
+#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK  I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j)       (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_MSK_MAX_INDEX    63
+#define I40E_PRTQF_FD_MSK_MASK_SHIFT   0
+#define I40E_PRTQF_FD_MSK_MASK_MASK    I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK  I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i)              (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
+#define I40E_PRTQF_FLX_PIT_MAX_INDEX        8
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK  I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT      5
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK       I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT   10
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK    I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF)         (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HENA1_MAX_INDEX       1
+#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF)     (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY1_MAX_INDEX   12
+#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY1_KEY_0_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY1_KEY_1_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY1_KEY_2_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY1_KEY_3_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF)    (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT1_MAX_INDEX  15
+#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT1_LUT0_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT1_LUT1_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT1_LUT2_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT1_LUT3_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF)              (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION1_MAX_INDEX            7
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_SHIFT       1
+#define I40E_VFQF_HREGION1_REGION_0_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_SHIFT       5
+#define I40E_VFQF_HREGION1_REGION_1_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_SHIFT       9
+#define I40E_VFQF_HREGION1_REGION_2_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_SHIFT       13
+#define I40E_VFQF_HREGION1_REGION_3_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_SHIFT       17
+#define I40E_VFQF_HREGION1_REGION_4_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_SHIFT       21
+#define I40E_VFQF_HREGION1_REGION_5_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_SHIFT       25
+#define I40E_VFQF_HREGION1_REGION_6_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_7_SHIFT       29
+#define I40E_VFQF_HREGION1_REGION_7_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF)          (0x001C0000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPQF_CTL_MAX_INDEX     127
+#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
+#define I40E_VPQF_CTL_PEHSIZE_MASK  I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
+#define I40E_VPQF_CTL_PEDSIZE_MASK  I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
+#define I40E_VPQF_CTL_FCHSIZE_MASK  I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
+#define I40E_VPQF_CTL_FCDSIZE_MASK  I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI)             (0x0020D800 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_CTL_MAX_INDEX         383
+#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT    0
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK     I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT   1
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK    I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT  2
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK   I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT  3
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK   I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK  I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK  I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI)         (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
+#define I40E_VSIQF_TCREGION_MAX_INDEX        3
+#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT  0
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK   I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT    9
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK     I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK  I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT   25
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK    I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
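+
+/*
+ * Convention note (illustrative, not part of the hardware register map):
+ * each field below is described by a _SHIFT/_MASK pair, where the mask is
+ * already shifted into position by I40E_MASK().  A minimal sketch of
+ * reading one field, assuming the rd32() register-read helper declared in
+ * i40e_osdep.h:
+ *
+ *     u32 ctl = rd32(hw, I40E_VPQF_CTL(vf_num));
+ *     u32 pehsize = (ctl & I40E_VPQF_CTL_PEHSIZE_MASK) >>
+ *         I40E_VPQF_CTL_PEHSIZE_SHIFT;
+ */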
+#define I40E_GL_FCOECRC(_i)           (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOECRC_MAX_INDEX     143
+#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
+#define I40E_GL_FCOECRC_FCOECRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i)            (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDDPC_MAX_INDEX      143
+#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i)             (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFEC_MAX_INDEX       143
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i)             (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIFTCL_MAX_INDEX       143
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i)             (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXEC_MAX_INDEX       143
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i)             (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDIXVC_MAX_INDEX       143
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i)             (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCH_MAX_INDEX       143
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK  I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i)             (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWRCL_MAX_INDEX       143
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i)             (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCH_MAX_INDEX       143
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK  I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i)             (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEDWTCL_MAX_INDEX       143
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i)            (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOELAST_MAX_INDEX      143
+#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
+#define I40E_GL_FCOELAST_FCOELAST_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i)           (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPRC_MAX_INDEX     143
+#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i)           (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOEPTC_MAX_INDEX     143
+#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i)            (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_FCOERPDC_MAX_INDEX      143
+#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i)             (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX       143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i)             (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX       143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i)         (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCH_MAX_INDEX   3
+#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLPRT_BPRCH_BPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i)         (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPRCL_MAX_INDEX   3
+#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLPRT_BPRCL_BPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
+#define I40E_GLPRT_BPTCH(_i)         (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCH_MAX_INDEX   3
+#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLPRT_BPTCH_BPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i)         (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_BPTCL_MAX_INDEX   3
+#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLPRT_BPTCL_BPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i)           (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_CRCERRS_MAX_INDEX     3
+#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i)         (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCH_MAX_INDEX   3
+#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
+#define I40E_GLPRT_GORCH_GORCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i)         (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GORCL_MAX_INDEX   3
+#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
+#define I40E_GLPRT_GORCL_GORCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i)         (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCH_MAX_INDEX   3
+#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLPRT_GOTCH_GOTCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i)         (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_GOTCL_MAX_INDEX   3
+#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLPRT_GOTCL_GOTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i)           (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ILLERRC_MAX_INDEX     3
+#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i)        (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LDPC_MAX_INDEX  3
+#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
+#define I40E_GLPRT_LDPC_LDPC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i)              (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFRXC_MAX_INDEX        3
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i)            (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXOFFTXC_MAX_INDEX      3
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i)             (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONRXC_MAX_INDEX       3
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i)           (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_LXONTXC_MAX_INDEX     3
+#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i)        (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MLFC_MAX_INDEX  3
+#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
+#define I40E_GLPRT_MLFC_MLFC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i)         (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCH_MAX_INDEX   3
+#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLPRT_MPRCH_MPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i)         (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPRCL_MAX_INDEX   3
+#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLPRT_MPRCL_MPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i)         (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCH_MAX_INDEX   3
+#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLPRT_MPTCH_MPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i)         (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MPTCL_MAX_INDEX   3
+#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLPRT_MPTCL_MPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i)        (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_MRFC_MAX_INDEX  3
+#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
+#define I40E_GLPRT_MRFC_MRFC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i)            (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023H_MAX_INDEX      3
+#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i)            (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1023L_MAX_INDEX      3
+#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i)           (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127H_MAX_INDEX     3
+#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
+#define I40E_GLPRT_PRC127H_PRC127H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i)           (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC127L_MAX_INDEX     3
+#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
+#define I40E_GLPRT_PRC127L_PRC127L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i)            (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522H_MAX_INDEX      3
+#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i)            (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC1522L_MAX_INDEX      3
+#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i)              (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255H_MAX_INDEX        3
+#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i)           (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC255L_MAX_INDEX     3
+#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
+#define I40E_GLPRT_PRC255L_PRC255L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i)           (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511H_MAX_INDEX     3
+#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
+#define I40E_GLPRT_PRC511H_PRC511H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i)           (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC511L_MAX_INDEX     3
+#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
+#define I40E_GLPRT_PRC511L_PRC511L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i)          (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64H_MAX_INDEX    3
+#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
+#define I40E_GLPRT_PRC64H_PRC64H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i)          (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC64L_MAX_INDEX    3
+#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
+#define I40E_GLPRT_PRC64L_PRC64L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i)            (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522H_MAX_INDEX      3
+#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i)            (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PRC9522L_MAX_INDEX      3
+#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i)            (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023H_MAX_INDEX      3
+#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i)            (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1023L_MAX_INDEX      3
+#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i)           (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127H_MAX_INDEX     3
+#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
+#define I40E_GLPRT_PTC127H_PTC127H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i)           (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC127L_MAX_INDEX     3
+#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
+#define I40E_GLPRT_PTC127L_PTC127L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i)            (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522H_MAX_INDEX      3
+#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i)            (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC1522L_MAX_INDEX      3
+#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i)           (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255H_MAX_INDEX     3
+#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
+#define I40E_GLPRT_PTC255H_PTC255H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i)           (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC255L_MAX_INDEX     3
+#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
+#define I40E_GLPRT_PTC255L_PTC255L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i)           (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511H_MAX_INDEX     3
+#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
+#define I40E_GLPRT_PTC511H_PTC511H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i)           (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC511L_MAX_INDEX     3
+#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
+#define I40E_GLPRT_PTC511L_PTC511L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i)          (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64H_MAX_INDEX    3
+#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
+#define I40E_GLPRT_PTC64H_PTC64H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i)          (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC64L_MAX_INDEX    3
+#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
+#define I40E_GLPRT_PTC64L_PTC64L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i)            (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522H_MAX_INDEX      3
+#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i)            (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_PTC9522L_MAX_INDEX      3
+#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j)             (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFRXC_MAX_INDEX          3
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j)             (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXOFFTXC_MAX_INDEX          3
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j)            (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONRXC_MAX_INDEX         3
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j)          (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_PXONTXC_MAX_INDEX       3
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i)        (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RDPC_MAX_INDEX  3
+#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
+#define I40E_GLPRT_RDPC_RDPC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i)       (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RFC_MAX_INDEX 3
+#define I40E_GLPRT_RFC_RFC_SHIFT 0
+#define I40E_GLPRT_RFC_RFC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i)       (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RJC_MAX_INDEX 3
+#define I40E_GLPRT_RJC_RJC_SHIFT 0
+#define I40E_GLPRT_RJC_RJC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i)        (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RLEC_MAX_INDEX  3
+#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
+#define I40E_GLPRT_RLEC_RLEC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i)       (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_ROC_MAX_INDEX 3
+#define I40E_GLPRT_ROC_ROC_SHIFT 0
+#define I40E_GLPRT_ROC_ROC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i)       (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUC_MAX_INDEX 3
+#define I40E_GLPRT_RUC_RUC_SHIFT 0
+#define I40E_GLPRT_RUC_RUC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i)        (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_RUPP_MAX_INDEX  3
+#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
+#define I40E_GLPRT_RUPP_RUPP_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j)              (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
+#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX           3
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i)               (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_TDOLD_MAX_INDEX         3
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_UPRCH(_i)         (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCH_MAX_INDEX   3
+#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLPRT_UPRCH_UPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i)         (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPRCL_MAX_INDEX   3
+#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLPRT_UPRCL_UPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i)         (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCH_MAX_INDEX   3
+#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCH_UPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i)          (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GLPRT_UPTCL_MAX_INDEX    3
+#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
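+
+/*
+ * Usage note (illustrative): the GLPRT_* octet counters are 48 bits wide,
+ * split across a 32-bit low (L) register and a 16-bit high (H) register,
+ * as the 0xFFFF high-half masks above indicate.  A minimal sketch of
+ * assembling one value, assuming the rd32() helper from i40e_osdep.h and
+ * the port number in hw->port:
+ *
+ *     u64 gorc;
+ *     gorc  = (u64)rd32(hw, I40E_GLPRT_GORCL(hw->port));
+ *     gorc |= ((u64)rd32(hw, I40E_GLPRT_GORCH(hw->port)) &
+ *         I40E_GLPRT_GORCH_GORCH_MASK) << 32;
+ */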
+#define I40E_GLSW_BPRCH(_i)         (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCH_MAX_INDEX   15
+#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLSW_BPRCH_BPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i)         (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPRCL_MAX_INDEX   15
+#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLSW_BPRCL_BPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i)         (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCH_MAX_INDEX   15
+#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLSW_BPTCH_BPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i)         (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_BPTCL_MAX_INDEX   15
+#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLSW_BPTCL_BPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i)         (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCH_MAX_INDEX   15
+#define I40E_GLSW_GORCH_GORCH_SHIFT 0
+#define I40E_GLSW_GORCH_GORCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i)         (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GORCL_MAX_INDEX   15
+#define I40E_GLSW_GORCL_GORCL_SHIFT 0
+#define I40E_GLSW_GORCL_GORCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i)         (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCH_MAX_INDEX   15
+#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLSW_GOTCH_GOTCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i)         (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_GOTCL_MAX_INDEX   15
+#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLSW_GOTCL_GOTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i)         (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCH_MAX_INDEX   15
+#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLSW_MPRCH_MPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i)         (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPRCL_MAX_INDEX   15
+#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLSW_MPRCL_MPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i)         (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCH_MAX_INDEX   15
+#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLSW_MPTCH_MPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i)         (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_MPTCL_MAX_INDEX   15
+#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLSW_MPTCL_MPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i)        (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_RUPP_MAX_INDEX  15
+#define I40E_GLSW_RUPP_RUPP_SHIFT 0
+#define I40E_GLSW_RUPP_RUPP_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i)        (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_TDPC_MAX_INDEX  15
+#define I40E_GLSW_TDPC_TDPC_SHIFT 0
+#define I40E_GLSW_TDPC_TDPC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i)         (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCH_MAX_INDEX   15
+#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLSW_UPRCH_UPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i)         (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPRCL_MAX_INDEX   15
+#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLSW_UPRCL_UPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i)         (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCH_MAX_INDEX   15
+#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
+#define I40E_GLSW_UPTCH_UPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i)         (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLSW_UPTCL_MAX_INDEX   15
+#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLSW_UPTCL_UPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i)         (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCH_MAX_INDEX   383
+#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
+#define I40E_GLV_BPRCH_BPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i)         (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPRCL_MAX_INDEX   383
+#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
+#define I40E_GLV_BPRCL_BPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i)         (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCH_MAX_INDEX   383
+#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
+#define I40E_GLV_BPTCH_BPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i)         (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_BPTCL_MAX_INDEX   383
+#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
+#define I40E_GLV_BPTCL_BPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i)         (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCH_MAX_INDEX   383
+#define I40E_GLV_GORCH_GORCH_SHIFT 0
+#define I40E_GLV_GORCH_GORCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i)         (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GORCL_MAX_INDEX   383
+#define I40E_GLV_GORCL_GORCL_SHIFT 0
+#define I40E_GLV_GORCL_GORCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i)         (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCH_MAX_INDEX   383
+#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
+#define I40E_GLV_GOTCH_GOTCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i)         (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_GOTCL_MAX_INDEX   383
+#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
+#define I40E_GLV_GOTCL_GOTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i)         (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCH_MAX_INDEX   383
+#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
+#define I40E_GLV_MPRCH_MPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i)         (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPRCL_MAX_INDEX   383
+#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
+#define I40E_GLV_MPRCL_MPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i)         (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCH_MAX_INDEX   383
+#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
+#define I40E_GLV_MPTCH_MPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i)         (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_MPTCL_MAX_INDEX   383
+#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
+#define I40E_GLV_MPTCL_MPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i)        (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RDPC_MAX_INDEX  383
+#define I40E_GLV_RDPC_RDPC_SHIFT 0
+#define I40E_GLV_RDPC_RDPC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i)        (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_RUPP_MAX_INDEX  383
+#define I40E_GLV_RUPP_RUPP_SHIFT 0
+#define I40E_GLV_RUPP_RUPP_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI)      (0x00344000 + ((_VSI) * 4)) /* _VSI=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC_MAX_INDEX  383
+#define I40E_GLV_TEPC_TEPC_SHIFT 0
+#define I40E_GLV_TEPC_TEPC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i)         (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCH_MAX_INDEX   383
+#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
+#define I40E_GLV_UPRCH_UPRCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i)         (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPRCL_MAX_INDEX   383
+#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
+#define I40E_GLV_UPRCL_UPRCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i)            (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCH_MAX_INDEX      383
+#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK  I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i)         (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_UPTCL_MAX_INDEX   383
+#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
+#define I40E_GLV_UPTCL_UPTCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j)      (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCH_MAX_INDEX   7
+#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j)      (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RBCL_MAX_INDEX   7
+#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j)      (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCH_MAX_INDEX   7
+#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j)      (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_RPCL_MAX_INDEX   7
+#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j)      (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCH_MAX_INDEX   7
+#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j)      (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TBCL_MAX_INDEX   7
+#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j)      (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCH_MAX_INDEX   7
+#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j)      (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
+#define I40E_GLVEBTC_TPCL_MAX_INDEX   7
+#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i)          (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCH_MAX_INDEX    127
+#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i)          (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_BPCL_MAX_INDEX    127
+#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i)         (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCH_MAX_INDEX   127
+#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i)         (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GORCL_MAX_INDEX   127
+#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i)         (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCH_MAX_INDEX   127
+#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i)         (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_GOTCL_MAX_INDEX   127
+#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i)          (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCH_MAX_INDEX    127
+#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i)          (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_MPCL_MAX_INDEX    127
+#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i)          (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCH_MAX_INDEX    127
+#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK  I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i)          (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_GLVEBVL_UPCL_MAX_INDEX    127
+#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H                 0x00269F4C /* Reset: CORER */
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK  I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i)              (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX        35
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i)                     (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX               1
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ               0x001E4280 /* Reset: GLOBR */
+#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK  I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_SIGN_SHIFT    31
+#define I40E_PRTTSYN_ADJ_SIGN_MASK     I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i)           (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_0_MAX_INDEX     1
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK  I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT  1
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK   I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT  3
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK   I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT  8
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK   I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK  I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i)               (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_AUX_1_MAX_INDEX         1
+#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT      0
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK       I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK  I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i)            (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_CLKO_MAX_INDEX      1
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0                       0x001E4200 /* Reset: GLOBR */
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK  I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT  1
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK   I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT   2
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK    I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT     3
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK      I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_SHIFT           8
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK            I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT         12
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK          I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT         31
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK          I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1                   0x00085020 /* Reset: CORER */
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK  I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK  I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK  I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK  I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT    24
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK     I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT     26
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK      I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT     31
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK      I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i)              (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_H_MAX_INDEX        1
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i)              (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_EVNT_L_MAX_INDEX        1
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H                 0x001E4060 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK  I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L                 0x001E4040 /* Reset: GLOBR */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i)            (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX      3
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i)            (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX      3
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0              0x001E4220 /* Reset: GLOBR */
+#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT   2
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK    I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT   3
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK    I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1            0x00085140 /* Reset: CORER */
+#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK  I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i)              (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_H_MAX_INDEX        1
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i)              (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
+#define I40E_PRTTSYN_TGT_L_MAX_INDEX        1
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H                  0x001E4120 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L                  0x001E4100 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H                0x001E41E0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L                0x001E41C0 /* Reset: GLOBR */
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
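+
+/*
+ * Usage note (illustrative): PRTTSYN_TIME_L/_H hold the 64-bit IEEE 1588
+ * clock; drivers conventionally read the low half first so the pair is
+ * sampled consistently.  A minimal sketch, assuming the rd32() helper from
+ * i40e_osdep.h:
+ *
+ *     u64 now;
+ *     now  = (u64)rd32(hw, I40E_PRTTSYN_TIME_L);
+ *     now |= (u64)rd32(hw, I40E_PRTTSYN_TIME_H) << 32;
+ */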
+#define I40E_GL_MDET_RX                0x0012A510 /* Reset: CORER */
+#define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
+#define I40E_GL_MDET_RX_FUNCTION_MASK  I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_SHIFT    8
+#define I40E_GL_MDET_RX_EVENT_MASK     I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_SHIFT    17
+#define I40E_GL_MDET_RX_QUEUE_MASK     I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_SHIFT    31
+#define I40E_GL_MDET_RX_VALID_MASK     I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX              0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT  0
+#define I40E_GL_MDET_TX_QUEUE_MASK   I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK  I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK  I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT  25
+#define I40E_GL_MDET_TX_EVENT_MASK   I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
+#define I40E_GL_MDET_TX_VALID_SHIFT  31
+#define I40E_GL_MDET_TX_VALID_MASK   I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX             0x0012A400 /* Reset: CORER */
+#define I40E_PF_MDET_RX_VALID_SHIFT 0
+#define I40E_PF_MDET_RX_VALID_MASK  I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX             0x000E6400 /* Reset: CORER */
+#define I40E_PF_MDET_TX_VALID_SHIFT 0
+#define I40E_PF_MDET_TX_VALID_MASK  I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC               0x001C0500 /* Reset: CORER */
+#define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK  I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT  8
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK   I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_SHIFT   31
+#define I40E_PF_VT_PFALLOC_VALID_MASK    I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF)        (0x0012A000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_RX_MAX_INDEX   127
+#define I40E_VP_MDET_RX_VALID_SHIFT 0
+#define I40E_VP_MDET_RX_VALID_MASK  I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF)        (0x000E6000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: CORER */
+#define I40E_VP_MDET_TX_MAX_INDEX   127
+#define I40E_VP_MDET_TX_VALID_SHIFT 0
+#define I40E_VP_MDET_TX_VALID_MASK  I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
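+
+/*
+ * Usage note (illustrative): a malicious-driver-detection handler typically
+ * reads GL_MDET_TX/GL_MDET_RX, decodes the event with the field masks above
+ * when the VALID bit is set, and then writes all-ones back to clear the
+ * register.  A minimal sketch, assuming the rd32()/wr32() helpers from
+ * i40e_osdep.h:
+ *
+ *     u32 reg = rd32(hw, I40E_GL_MDET_TX);
+ *     if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ *         u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ *             I40E_GL_MDET_TX_QUEUE_SHIFT;
+ *         u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ *             I40E_GL_MDET_TX_EVENT_SHIFT;
+ *         wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ *     }
+ */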
+#define I40E_GLPM_WUMC                    0x0006C800 /* Reset: POR */
+#define I40E_GLPM_WUMC_NOTCO_SHIFT        0
+#define I40E_GLPM_WUMC_NOTCO_MASK         I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK  I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_SHIFT     2
+#define I40E_GLPM_WUMC_ROL_MODE_MASK      I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_SHIFT   3
+#define I40E_GLPM_WUMC_RESERVED_4_MASK    I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT    16
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK     I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM            0x000B8080 /* Reset: POR */
+#define I40E_PFPM_APM_APME_SHIFT 0
+#define I40E_PFPM_APM_APME_MASK  I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i)          (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
+#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX    7
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK  I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC                 0x0006B200 /* Reset: POR */
+#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
+#define I40E_PFPM_WUC_EN_APM_D0_MASK  I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC                 0x0006B400 /* Reset: POR */
+#define I40E_PFPM_WUFC_LNKC_SHIFT      0
+#define I40E_PFPM_WUFC_LNKC_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_MAG_SHIFT       1
+#define I40E_PFPM_WUFC_MAG_MASK        I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_SHIFT       3
+#define I40E_PFPM_WUFC_MNG_MASK        I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT  4
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT  5
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT  6
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT  7
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT  8
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT  9
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT  10
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT  11
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK   I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_SHIFT      16
+#define I40E_PFPM_WUFC_FLX0_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_SHIFT      17
+#define I40E_PFPM_WUFC_FLX1_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_SHIFT      18
+#define I40E_PFPM_WUFC_FLX2_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_SHIFT      19
+#define I40E_PFPM_WUFC_FLX3_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_SHIFT      20
+#define I40E_PFPM_WUFC_FLX4_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_SHIFT      21
+#define I40E_PFPM_WUFC_FLX5_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_SHIFT      22
+#define I40E_PFPM_WUFC_FLX6_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_SHIFT      23
+#define I40E_PFPM_WUFC_FLX7_MASK       I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK  I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS                  0x0006B600 /* Reset: POR */
+#define I40E_PFPM_WUS_LNKC_SHIFT       0
+#define I40E_PFPM_WUS_LNKC_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_MAG_SHIFT        1
+#define I40E_PFPM_WUS_MAG_MASK         I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
+#define I40E_PFPM_WUS_PME_STATUS_MASK  I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_MNG_SHIFT        3
+#define I40E_PFPM_WUS_MNG_MASK         I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_FLX0_SHIFT       16
+#define I40E_PFPM_WUS_FLX0_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX1_SHIFT       17
+#define I40E_PFPM_WUS_FLX1_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX2_SHIFT       18
+#define I40E_PFPM_WUS_FLX2_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX3_SHIFT       19
+#define I40E_PFPM_WUS_FLX3_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX4_SHIFT       20
+#define I40E_PFPM_WUS_FLX4_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX5_SHIFT       21
+#define I40E_PFPM_WUS_FLX5_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX6_SHIFT       22
+#define I40E_PFPM_WUS_FLX6_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX7_SHIFT       23
+#define I40E_PFPM_WUS_FLX7_MASK        I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FW_RST_WK_SHIFT  31
+#define I40E_PFPM_WUS_FW_RST_WK_MASK   I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
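+/*
+ * Usage sketch: wake-up status bits are tested with the paired
+ * *_MASK/*_SHIFT defines (I40E_MASK(m, s) shifts m left by s; see
+ * i40e_type.h). Assuming the rd32() read wrapper from i40e_osdep.h:
+ *
+ *	u32 wus = rd32(hw, I40E_PFPM_WUS);
+ *	if (wus & I40E_PFPM_WUS_MAG_MASK)
+ *		device_printf(dev, "woke on magic packet\n");
+ */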
+#define I40E_PRTPM_FHFHR                 0x0006C000 /* Reset: POR */
+#define I40E_PRTPM_FHFHR_UNICAST_SHIFT   0
+#define I40E_PRTPM_FHFHR_UNICAST_MASK    I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK  I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i)             (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAH_MAX_INDEX       3
+#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT  0
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK   I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_SHIFT    26
+#define I40E_PRTPM_SAH_PF_NUM_MASK     I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK  I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_AV_SHIFT        31
+#define I40E_PRTPM_SAH_AV_MASK         I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i)            (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
+#define I40E_PRTPM_SAL_MAX_INDEX      3
+#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1              0x00006000 /* Reset: EMPR */
+#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
+#define I40E_VF_ARQBAH1_ARQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1              0x00006C00 /* Reset: EMPR */
+#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
+#define I40E_VF_ARQBAL1_ARQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1            0x00007400 /* Reset: EMPR */
+#define I40E_VF_ARQH1_ARQH_SHIFT 0
+#define I40E_VF_ARQH1_ARQH_MASK  I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1                 0x00008000 /* Reset: EMPR */
+#define I40E_VF_ARQLEN1_ARQLEN_SHIFT    0
+#define I40E_VF_ARQLEN1_ARQLEN_MASK     I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_SHIFT    28
+#define I40E_VF_ARQLEN1_ARQVFE_MASK     I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT   29
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK    I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT   30
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK    I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK  I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1            0x00007000 /* Reset: EMPR */
+#define I40E_VF_ARQT1_ARQT_SHIFT 0
+#define I40E_VF_ARQT1_ARQT_MASK  I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1              0x00007800 /* Reset: EMPR */
+#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
+#define I40E_VF_ATQBAH1_ATQBAH_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1              0x00007C00 /* Reset: EMPR */
+#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
+#define I40E_VF_ATQBAL1_ATQBAL_MASK  I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1            0x00006400 /* Reset: EMPR */
+#define I40E_VF_ATQH1_ATQH_SHIFT 0
+#define I40E_VF_ATQH1_ATQH_MASK  I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1                 0x00006800 /* Reset: EMPR */
+#define I40E_VF_ATQLEN1_ATQLEN_SHIFT    0
+#define I40E_VF_ATQLEN1_ATQLEN_MASK     I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_SHIFT    28
+#define I40E_VF_ATQLEN1_ATQVFE_MASK     I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT   29
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK    I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT   30
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK    I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK  I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1            0x00008400 /* Reset: EMPR */
+#define I40E_VF_ATQT1_ATQT_SHIFT 0
+#define I40E_VF_ATQT1_ATQT_MASK  I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
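+/*
+ * Usage sketch: the VF admin receive queue is sized and enabled through
+ * ARQLEN1. Assuming the wr32() write wrapper from i40e_osdep.h and an
+ * illustrative descriptor count num_arq_entries:
+ *
+ *	wr32(hw, I40E_VF_ARQLEN1,
+ *	    (num_arq_entries & I40E_VF_ARQLEN1_ARQLEN_MASK) |
+ *	    I40E_VF_ARQLEN1_ARQENABLE_MASK);
+ */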
+#define I40E_VFGEN_RSTAT                 0x00008800 /* Reset: VFR */
+#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK  I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01                       0x00005C00 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT          0
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK           I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT        1
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK         I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT      2
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT        3
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK         I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT        5
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK         I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK  I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT     25
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK      I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT      31
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
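+/*
+ * Usage sketch: a typical interrupt re-enable write composes INTENA,
+ * CLEARPBA and an ITR index. itr_none below is an illustrative value;
+ * the driver's actual "no ITR update" index is defined elsewhere:
+ *
+ *	u32 itr_none = 3;
+ *	wr32(hw, I40E_VFINT_DYN_CTL01,
+ *	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
+ *	    I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
+ *	    (itr_none << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT));
+ */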
+#define I40E_VFINT_DYN_CTLN1(_INTVF)               (0x00003800 + ((_INTVF) * 4)) /* _INTVF=0...15 */ /* Reset: VFR */

+#define I40E_VFINT_DYN_CTLN1_MAX_INDEX             15
+#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT          0
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK           I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT        1
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK         I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT      2
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT        3
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK         I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT        5
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK         I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK  I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT     25
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK      I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT      31
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK       I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1                        0x00005000 /* Reset: CORER */
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK  I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT           30
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK            I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT             31
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK              I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01                        0x00004800 /* Reset: CORER */
+#define I40E_VFINT_ICR01_INTEVENT_SHIFT         0
+#define I40E_VFINT_ICR01_INTEVENT_MASK          I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_SHIFT          1
+#define I40E_VFINT_ICR01_QUEUE_0_MASK           I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_SHIFT          2
+#define I40E_VFINT_ICR01_QUEUE_1_MASK           I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_SHIFT          3
+#define I40E_VFINT_ICR01_QUEUE_2_MASK           I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_SHIFT          4
+#define I40E_VFINT_ICR01_QUEUE_3_MASK           I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK  I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_SHIFT           30
+#define I40E_VFINT_ICR01_ADMINQ_MASK            I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_SWINT_SHIFT            31
+#define I40E_VFINT_ICR01_SWINT_MASK             I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i)            (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
+#define I40E_VFINT_ITR01_MAX_INDEX      2
+#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITR01_INTERVAL_MASK  I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF)     (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
+#define I40E_VFINT_ITRN1_MAX_INDEX      2
+#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
+#define I40E_VFINT_ITRN1_INTERVAL_MASK  I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01                      0x00005400 /* Reset: CORER */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK  I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q)        (0x00002000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: CORER */
+#define I40E_QRX_TAIL1_MAX_INDEX  15
+#define I40E_QRX_TAIL1_TAIL_SHIFT 0
+#define I40E_QRX_TAIL1_TAIL_MASK  I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q)        (0x00000000 + ((_Q) * 4)) /* _Q=0...15 */ /* Reset: PFR */
+#define I40E_QTX_TAIL1_MAX_INDEX  15
+#define I40E_QTX_TAIL1_TAIL_SHIFT 0
+#define I40E_QTX_TAIL1_TAIL_MASK  I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
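+/*
+ * Usage sketch: on the VF these per-queue tail registers are the queue
+ * doorbells; after posting descriptors the driver bumps the tail.
+ * queue_index and next_avail are illustrative names:
+ *
+ *	wr32(hw, I40E_QTX_TAIL1(queue_index), next_avail);
+ */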
+#define I40E_VFMSIX_PBA              0x00002000 /* Reset: VFLR */
+#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
+#define I40E_VFMSIX_PBA_PENBIT_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i)              (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TADD_MAX_INDEX        16
+#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK  I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT   2
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK    I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i)            (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TMSG_MAX_INDEX      16
+#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i)             (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TUADD_MAX_INDEX       16
+#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i)        (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
+#define I40E_VFMSIX_TVCTRL_MAX_INDEX  16
+#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
+#define I40E_VFMSIX_TVCTRL_MASK_MASK  I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA                  0x0000DC00 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK  I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT     4
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK      I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT      8
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK       I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO                     0x0000D800 /* Reset: VFR */
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT   0
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK    I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT    4
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK     I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i)             (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_VFQF_HENA_MAX_INDEX       1
+#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i)         (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_VFQF_HKEY_MAX_INDEX   12
+#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VFQF_HKEY_KEY_0_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VFQF_HKEY_KEY_1_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VFQF_HKEY_KEY_2_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VFQF_HKEY_KEY_3_MASK  I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i)        (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_VFQF_HLUT_MAX_INDEX  15
+#define I40E_VFQF_HLUT_LUT0_SHIFT 0
+#define I40E_VFQF_HLUT_LUT0_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_SHIFT 8
+#define I40E_VFQF_HLUT_LUT1_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_SHIFT 16
+#define I40E_VFQF_HLUT_LUT2_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT3_SHIFT 24
+#define I40E_VFQF_HLUT_LUT3_MASK  I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
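+/*
+ * Usage sketch: each VFQF_HLUT register packs four 4-bit RSS LUT entries,
+ * one per byte lane. Filling the table from an illustrative lut[] array
+ * of queue indices:
+ *
+ *	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ *		wr32(hw, I40E_VFQF_HLUT(i),
+ *		    (lut[4 * i + 0] << I40E_VFQF_HLUT_LUT0_SHIFT) |
+ *		    (lut[4 * i + 1] << I40E_VFQF_HLUT_LUT1_SHIFT) |
+ *		    (lut[4 * i + 2] << I40E_VFQF_HLUT_LUT2_SHIFT) |
+ *		    (lut[4 * i + 3] << I40E_VFQF_HLUT_LUT3_SHIFT));
+ */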
+#define I40E_VFQF_HREGION(_i)                  (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_VFQF_HREGION_MAX_INDEX            7
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_SHIFT       1
+#define I40E_VFQF_HREGION_REGION_0_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_SHIFT       5
+#define I40E_VFQF_HREGION_REGION_1_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_SHIFT       9
+#define I40E_VFQF_HREGION_REGION_2_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_SHIFT       13
+#define I40E_VFQF_HREGION_REGION_3_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_SHIFT       17
+#define I40E_VFQF_HREGION_REGION_4_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_SHIFT       21
+#define I40E_VFQF_HREGION_REGION_5_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_SHIFT       25
+#define I40E_VFQF_HREGION_REGION_6_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK  I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_SHIFT       29
+#define I40E_VFQF_HREGION_REGION_7_MASK        I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_MNGSB_FDCRC               0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK  I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS                   0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT    2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK     I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK  I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT  4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK   I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT    8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK     I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS                0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK  I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT  16
+#define I40E_MNGSB_FDS_LAST_BC_MASK   I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF)           (0x00083600 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX      127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK  I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF)           (0x00083400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX      127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK  I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE                 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK  I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT      12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK       I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT      16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK       I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE                 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK  I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT      12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK       I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT      16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK       I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA                  0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK  I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT     4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK      I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT      8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK       I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO                     0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT   0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK    I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT    4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK     I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK  I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i)        (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX  7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK  I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK  I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE                 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK  I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT      8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK       I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT      20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK       I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i)                   (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX             15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i)             (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX       15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK  I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK  I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX                     0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK  I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i)              (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX        15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK  I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK  I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX                     0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK  I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i)              (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX        15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK  I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK  I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i)                (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX          15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i)               (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX         15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX                  0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK  I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ                    0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK  I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i)               (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX         15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i)              (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX        15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ                   0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i)              (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX        15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i)                (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX          15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ                    0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX                 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK  I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i)               (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX         15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i)             (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX       15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX                 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK  I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ                   0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i)                (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX          15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i)               (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX         15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX                  0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD                         0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK  I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i)               (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX         15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i)              (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX        15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i)                 (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX           15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX                   0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK  I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX                 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK  I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ                   0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i)               (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX         15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i)              (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX        15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ                   0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i)                (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX          15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i)               (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX         15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX                  0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK  I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ                    0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i)                  (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX            15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i)                 (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX           15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX                    0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ                      0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i)               (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX         15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i)              (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX        15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i)                 (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX           15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX                   0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK  I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX                 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK  I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ                   0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK  I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i)            (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX      15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK  I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK  I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i)                   (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX             31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i)             (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX       31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK  I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK  I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i)              (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX        31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK  I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK  I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i)              (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX        31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK  I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK  I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i)                (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX          31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i)               (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX         31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i)               (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX         31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT     0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK      I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK  I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT     16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK      I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i)                (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX          31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i)               (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX         31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i)               (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX         31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i)              (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX        31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i)              (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX        31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i)                (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX          31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i)               (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX         31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i)             (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX       31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i)                (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX          31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i)               (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX         31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i)               (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX         31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i)              (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX        31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i)                 (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX           31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i)               (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX         31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i)              (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX        31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i)                (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX          31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i)               (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX         31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i)                  (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX            31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i)                 (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX           31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i)               (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX         31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i)              (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX        31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK  I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i)                 (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX           31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK  I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i)            (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX      31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK  I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK  I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE                 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK  I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT      8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK       I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT      20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK       I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE                 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK  I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT      8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK       I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT      20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK       I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE                 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK  I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT      8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK       I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT      20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK       I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK  I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK  I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE                     0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE                     0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE                     0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK  I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG                                 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT     0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK      I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT       1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK        I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT             2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK              I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT  3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK   I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT             4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK              I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK  I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK  I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT   7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK    I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK  I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT       9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK        I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT    10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK     I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT    11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK     I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT    12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK     I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT           13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK            I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT       14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK        I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE                        0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC                   0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT   0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK    I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT      2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK       I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK  I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT       30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK        I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT       30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK        I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT       30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK        I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT       30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK        I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF)               (0x00074800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX          127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT     0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK      I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT       11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK        I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK  I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER                         0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK  I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ                        0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT              0
+#define I40E_GLNVM_AL_REQ_POR_MASK               I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT        1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK         I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT            2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK             I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT            3
+#define I40E_GLNVM_AL_REQ_CORER_MASK             I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT               4
+#define I40E_GLNVM_AL_REQ_PE_MASK                I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK  I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS                   0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK  I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK  I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA              0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK  I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD                    0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT   0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK  I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT   3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT   4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT     5
+#define I40E_GLNVM_ULD_POR_DONE_MASK      I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT   8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK    I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK  I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT      10
+#define I40E_GLNVM_ULD_PE_DONE_MASK       I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
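
The *_DONE bits above report which reset domains have finished reloading
from NVM, so drivers poll GLNVM_ULD after issuing a reset. A hedged
sketch, assuming the rd32() and i40e_msec_delay() wrappers from
i40e_osdep.h and the status codes from i40e_status.h (the helper name and
the 10 ms x 100 polling budget are invented for illustration):

static enum i40e_status_code
ixl_wait_reset_done(struct i40e_hw *hw)
{
	u32 done = I40E_GLNVM_ULD_CORER_DONE_MASK |
	    I40E_GLNVM_ULD_GLOBR_DONE_MASK;
	int i;

	for (i = 0; i < 100; i++) {
		/* Both core and global reset must report done. */
		if ((rd32(hw, I40E_GLNVM_ULD) & done) == done)
			return (I40E_SUCCESS);
		i40e_msec_delay(10);
	}
	return (I40E_ERR_RESET_FAILED);
}
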
+#define I40E_GLNVM_ULT                      0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT   0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK  I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT     2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK      I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT   3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK    I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK  I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT    5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK     I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT     6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK      I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT     7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK      I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT    8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK     I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK  I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT     10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK      I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT                           0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK  I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT  1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT   2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT  3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT  4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT  5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT  6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT  7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT   8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT   9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT   10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT  11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT   12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT   13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT   14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT  15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK   I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT   16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK    I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD            0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK  I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT                0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL                  0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT    0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK     I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT    8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK     I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT   26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK    I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK  I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT     30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK      I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT       31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK        I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA            0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK  I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0                   0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK  I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT      8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK       I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT      16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK       I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT         24
+#define I40E_MNGSB_RHDR0_TAG_MASK          I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT    27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK     I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT          31
+#define I40E_MNGSB_RHDR0_EH_MASK           I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL                      0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK  I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT       26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK        I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT    30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK     I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT        31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK         I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA            0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK  I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0                  0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT   0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK    I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT   12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK    I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK  I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT        24
+#define I40E_MNGSB_WHDR0_TAG_MASK         I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1            0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK  I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2              0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK  I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT       21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK        I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON                  0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON                  0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD                  0x0009C514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD                  0x0009C594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD                  0x0009C510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD                  0x0009C590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD                  0x0009C500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD                  0x0009C580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD                  0x0009C508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD                  0x0009C588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD                  0x0009C518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD                  0x0009C598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD                  0x0009C504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD                  0x0009C584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD                  0x0009C50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD                  0x0009C58C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON                  0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT        16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT    4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK     I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK  I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG                    0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT    0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK     I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT     1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK      I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT   2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK    I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT    6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK     I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK  I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON                  0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD                  0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD                  0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD                  0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD                  0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD                  0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD                  0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD                  0x0009CB0C /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK  I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT        16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK         I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0                    0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1                    0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2                    0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0                   0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT  0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK   I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK  I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK  I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM                     0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK  I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i)               (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX         15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i)               (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX         15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i)              (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX        15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL                0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK  I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL                0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK  I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL                   0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT    0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK     I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK  I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK  I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK  I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK  I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT    30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK     I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT   31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK    I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL                0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK  I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL                 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK  I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN                 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK  I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i)               (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX         31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i)               (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX         31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i)              (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX        31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i)                  (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX            31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK  I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT   8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK    I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i)               (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX         31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i)               (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX         31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK  I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i)                    (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX              31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT  0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT  1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT  2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT  3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK  I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i)         (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX   31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT   0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK    I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK  I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC               0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH                  0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW                 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS                   0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT   0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK    I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK  I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK  I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT    31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK     I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK              0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK  I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM              0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK  I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB              0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK  I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES                      0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK  I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK  I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL                  0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT     0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK      I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK  I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR                   0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK  I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR                   0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK  I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0                        0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT           0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK            I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK  I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK                       0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK  I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR                        0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK  I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER               0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK  I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL                        0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT  0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT  1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT  2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT  3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK   I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK  I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN             0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT   0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK    I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK  I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC                      0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT         0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK          I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK  I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
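
WQEALLOC packs two fields into a single doorbell write, the inverse of
the extraction pattern: shift each value up and clamp it with its mask
before OR-ing. Purely illustrative (the PE/RDMA block is not driven by
this driver); wr32() is the register-write wrapper from i40e_osdep.h and
the helper name is invented:

static void
ixl_pe_wqe_doorbell(struct i40e_hw *hw, u32 qpid, u32 desc_idx)
{
	u32 reg;

	/* Compose both fields, each clamped to its defined width. */
	reg = ((qpid << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) &
	    I40E_PFPE_WQEALLOC_PEQPID_MASK) |
	    ((desc_idx << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
	    I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK);
	wr32(hw, I40E_PFPE_WQEALLOC, reg);
}
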
+#define I40E_PRTDCB_RLPMC              0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK  I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i)        (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX  7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK  I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM                 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT       0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK        I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT      13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK       I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK  I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03                0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK  I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK  I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK  I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK  I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR             0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL                 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT      13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK       I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK  I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL              0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC                 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC                 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT   0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK    I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK  I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47                0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03                0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47                0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES                     0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT      8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK       I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK  I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF)          (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX     127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF)             (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX        127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF)            (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX       127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF)              (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX         127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT   0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK    I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK  I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK  I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT    31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK     I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF)         (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX    127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK  I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF)         (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX    127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK  I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF)         (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX    127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK  I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF)                 (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX            127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK  I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK  I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF)             (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX        127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT     0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK      I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK  I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF)                   (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX              127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT           0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK            I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK  I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF)                  (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX             127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK  I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF)                   (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX              127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK  I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF)          (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX     127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF)                 (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX            127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT         0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK          I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK  I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+#define I40E_GLPES_PFIP4RXDISCARD(_i)                (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX          15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i)                (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i)                (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i)                 (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i)                 (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i)                 (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i)                 (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i)               (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i)               (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i)               (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i)               (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i)              (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX        15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i)                (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i)                (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i)                 (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i)                 (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i)                 (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i)                 (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i)                (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX          15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i)               (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i)               (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i)               (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i)               (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
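
The GLPES statistics here are 48-bit counters split across register
pairs: the LO half holds the low 32 bits and the HI half the upper 16
(note the 0xFFFF HI masks). A minimal sketch of reassembling one,
assuming rd32() from i40e_osdep.h; the helper name is invented:

static u64
ixl_read_pf_ip4_tx_pkts(struct i40e_hw *hw, int pf)
{
	u64 lo, hi;

	/* 48-bit counter: 32-bit LO half plus 16-bit HI half. */
	lo = rd32(hw, I40E_GLPES_PFIP4TXPKTSLO(pf));
	hi = rd32(hw, I40E_GLPES_PFIP4TXPKTSHI(pf)) &
	    I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK;
	return ((hi << 32) | lo);
}
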
+#define I40E_GLPES_PFIP6RXDISCARD(_i)                (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX          15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i)                (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i)                (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i)                 (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i)                 (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i)                 (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i)                 (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i)               (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i)               (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i)               (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i)               (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i)              (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX        15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i)                (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i)                (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i)                 (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i)                 (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i)                 (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i)                 (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i)                (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX          15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i)               (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i)               (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i)               (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i)               (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i)               (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i)               (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i)                (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX          15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i)                (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX          15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i)               (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i)               (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i)               (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i)               (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i)                (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX          15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i)                (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX          15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i)               (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i)               (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i)              (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i)              (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i)              (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i)              (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i)             (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX       15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i)             (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX       15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i)               (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX         15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i)                 (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX           15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i)               (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX         15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i)               (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX         15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i)              (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX        15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i)              (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX        15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i)               (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i)               (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i)               (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i)               (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI                         0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO                         0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI                      0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO                      0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK                     0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN                     0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI                       0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO                       0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI                      0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO                      0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI                       0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO                      0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI                        0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO                        0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI                      0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO                      0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI                          0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO                          0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI                        0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO                        0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI                    0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO                    0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i)                (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX          31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i)                (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i)                (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i)                 (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i)                 (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i)                 (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i)                 (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i)               (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i)               (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i)               (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i)               (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i)              (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX        31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i)                (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i)                (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i)                 (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i)                 (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i)                 (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i)                 (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i)                (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX          31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i)               (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i)               (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i)               (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i)               (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i)                (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX          31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i)                (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i)                (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i)                 (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i)                 (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i)                 (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i)                 (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i)               (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i)               (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i)               (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i)               (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i)              (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX        31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i)                (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i)                (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i)                 (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i)                 (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i)                 (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i)                 (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i)                (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX          31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i)               (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i)               (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i)               (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i)               (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i)               (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i)               (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i)                (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX          31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i)                (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX          31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i)               (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i)               (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i)               (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i)               (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i)                (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX          31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i)                (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX          31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i)               (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i)               (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i)              (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i)              (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i)              (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i)              (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i)             (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX       31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i)             (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX       31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i)               (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX         31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i)                 (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX           31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK  I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i)               (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX         31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i)               (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX         31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i)              (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX        31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i)              (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX        31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i)               (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i)               (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i)               (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK  I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i)               (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO                     0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK  I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i)         (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX   2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i)             (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX       63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK  I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i)                   (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX             1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK  I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG                0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK  I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK  I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2               0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK  I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK  I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i)        (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX  127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK  I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK  I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK  I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK  I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i)                  (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX            7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT       1
+#define I40E_PFQF_HREGION_REGION_0_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT       5
+#define I40E_PFQF_HREGION_REGION_1_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT       9
+#define I40E_PFQF_HREGION_REGION_2_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT       13
+#define I40E_PFQF_HREGION_REGION_3_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT       17
+#define I40E_PFQF_HREGION_REGION_4_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT       21
+#define I40E_PFQF_HREGION_REGION_5_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT       25
+#define I40E_PFQF_HREGION_REGION_6_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK  I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT       29
+#define I40E_PFQF_HREGION_REGION_7_MASK        I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK  I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI)    (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX   12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK  I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK  I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK  I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK  I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI)   (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX  15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK  I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK  I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK  I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK  I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR                        0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK  I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT                  0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK  I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT       30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK        I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT       30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK        I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1               0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1                  0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1                 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1                   0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT   0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK    I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK  I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK  I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT    31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK     I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1              0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK  I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1              0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK  I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1              0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK  I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1                      0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK  I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK  I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1                  0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT     0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK      I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK  I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01                        0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT           0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK            I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK  I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1                       0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK  I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1                        0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK  I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1               0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK  I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1                      0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT         0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK          I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK  I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
+#endif /* X722_SUPPORT */
+#endif /* _I40E_REGISTER_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_register.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_status.h
===================================================================
--- trunk/sys/dev/ixl/i40e_status.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_status.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,109 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2014, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_status.h 292099 2015-12-11 13:05:18Z smh $*/
+
+#ifndef _I40E_STATUS_H_
+#define _I40E_STATUS_H_
+
+/* Error Codes */
+enum i40e_status_code {
+	I40E_SUCCESS				= 0,
+	I40E_ERR_NVM				= -1,
+	I40E_ERR_NVM_CHECKSUM			= -2,
+	I40E_ERR_PHY				= -3,
+	I40E_ERR_CONFIG				= -4,
+	I40E_ERR_PARAM				= -5,
+	I40E_ERR_MAC_TYPE			= -6,
+	I40E_ERR_UNKNOWN_PHY			= -7,
+	I40E_ERR_LINK_SETUP			= -8,
+	I40E_ERR_ADAPTER_STOPPED		= -9,
+	I40E_ERR_INVALID_MAC_ADDR		= -10,
+	I40E_ERR_DEVICE_NOT_SUPPORTED		= -11,
+	I40E_ERR_MASTER_REQUESTS_PENDING	= -12,
+	I40E_ERR_INVALID_LINK_SETTINGS		= -13,
+	I40E_ERR_AUTONEG_NOT_COMPLETE		= -14,
+	I40E_ERR_RESET_FAILED			= -15,
+	I40E_ERR_SWFW_SYNC			= -16,
+	I40E_ERR_NO_AVAILABLE_VSI		= -17,
+	I40E_ERR_NO_MEMORY			= -18,
+	I40E_ERR_BAD_PTR			= -19,
+	I40E_ERR_RING_FULL			= -20,
+	I40E_ERR_INVALID_PD_ID			= -21,
+	I40E_ERR_INVALID_QP_ID			= -22,
+	I40E_ERR_INVALID_CQ_ID			= -23,
+	I40E_ERR_INVALID_CEQ_ID			= -24,
+	I40E_ERR_INVALID_AEQ_ID			= -25,
+	I40E_ERR_INVALID_SIZE			= -26,
+	I40E_ERR_INVALID_ARP_INDEX		= -27,
+	I40E_ERR_INVALID_FPM_FUNC_ID		= -28,
+	I40E_ERR_QP_INVALID_MSG_SIZE		= -29,
+	I40E_ERR_QP_TOOMANY_WRS_POSTED		= -30,
+	I40E_ERR_INVALID_FRAG_COUNT		= -31,
+	I40E_ERR_QUEUE_EMPTY			= -32,
+	I40E_ERR_INVALID_ALIGNMENT		= -33,
+	I40E_ERR_FLUSHED_QUEUE			= -34,
+	I40E_ERR_INVALID_PUSH_PAGE_INDEX	= -35,
+	I40E_ERR_INVALID_IMM_DATA_SIZE		= -36,
+	I40E_ERR_TIMEOUT			= -37,
+	I40E_ERR_OPCODE_MISMATCH		= -38,
+	I40E_ERR_CQP_COMPL_ERROR		= -39,
+	I40E_ERR_INVALID_VF_ID			= -40,
+	I40E_ERR_INVALID_HMCFN_ID		= -41,
+	I40E_ERR_BACKING_PAGE_ERROR		= -42,
+	I40E_ERR_NO_PBLCHUNKS_AVAILABLE		= -43,
+	I40E_ERR_INVALID_PBLE_INDEX		= -44,
+	I40E_ERR_INVALID_SD_INDEX		= -45,
+	I40E_ERR_INVALID_PAGE_DESC_INDEX	= -46,
+	I40E_ERR_INVALID_SD_TYPE		= -47,
+	I40E_ERR_MEMCPY_FAILED			= -48,
+	I40E_ERR_INVALID_HMC_OBJ_INDEX		= -49,
+	I40E_ERR_INVALID_HMC_OBJ_COUNT		= -50,
+	I40E_ERR_INVALID_SRQ_ARM_LIMIT		= -51,
+	I40E_ERR_SRQ_ENABLED			= -52,
+	I40E_ERR_ADMIN_QUEUE_ERROR		= -53,
+	I40E_ERR_ADMIN_QUEUE_TIMEOUT		= -54,
+	I40E_ERR_BUF_TOO_SHORT			= -55,
+	I40E_ERR_ADMIN_QUEUE_FULL		= -56,
+	I40E_ERR_ADMIN_QUEUE_NO_WORK		= -57,
+	I40E_ERR_BAD_IWARP_CQE			= -58,
+	I40E_ERR_NVM_BLANK_MODE			= -59,
+	I40E_ERR_NOT_IMPLEMENTED		= -60,
+	I40E_ERR_PE_DOORBELL_NOT_ENABLED	= -61,
+	I40E_ERR_DIAG_TEST_FAILED		= -62,
+	I40E_ERR_NOT_READY			= -63,
+	I40E_NOT_SUPPORTED			= -64,
+	I40E_ERR_FIRMWARE_API_VERSION		= -65,
+};
+
+#endif /* _I40E_STATUS_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_status.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_type.h
===================================================================
--- trunk/sys/dev/ixl/i40e_type.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_type.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,1573 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_type.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_TYPE_H_
+#define _I40E_TYPE_H_
+
+#include "i40e_status.h"
+#include "i40e_osdep.h"
+#include "i40e_register.h"
+#include "i40e_adminq.h"
+#include "i40e_hmc.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_devids.h"
+
+#define UNREFERENCED_XPARAMETER
+
+#define BIT(a) (1UL << (a))
+#define BIT_ULL(a) (1ULL << (a))
+
+#ifndef I40E_MASK
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+#endif
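+/* Illustrative expansions (not part of the original sources):
+ *   I40E_MASK(0xFF, 8) -> 0x0000FF00
+ *   BIT(5)             -> 0x20
+ * This (mask << shift) form is why the register definitions below always
+ * pair a _SHIFT constant with a _MASK constant built from it.
+ */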
+
+#define I40E_MAX_PF			16
+#define I40E_MAX_PF_VSI			64
+#define I40E_MAX_PF_QP			128
+#define I40E_MAX_VSI_QP			16
+#define I40E_MAX_VF_VSI			3
+#define I40E_MAX_CHAINED_RX_BUFFERS	5
+#define I40E_MAX_PF_UDP_OFFLOAD_PORTS	16
+
+/* something less than 1 minute */
+#define I40E_HEARTBEAT_TIMEOUT		(HZ * 50)
+
+/* Max default timeout in ms */
+#define I40E_MAX_NVM_TIMEOUT		18000
+
+/* Check whether address is multicast. */
+#define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define I40E_IS_BROADCAST(address)	\
+	((((u8 *)(address))[0] == ((u8)0xff)) && \
+	(((u8 *)(address))[1] == ((u8)0xff)))
+
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time)		((time) * 1000)
+
+/* forward declaration */
+struct i40e_hw;
+typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
+
+#define I40E_ETH_LENGTH_OF_ADDRESS	6
+/* Data type manipulation macros. */
+#define I40E_HI_DWORD(x)	((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define I40E_LO_DWORD(x)	((u32)((x) & 0xFFFFFFFF))
+
+#define I40E_HI_WORD(x)		((u16)(((x) >> 16) & 0xFFFF))
+#define I40E_LO_WORD(x)		((u16)((x) & 0xFFFF))
+
+#define I40E_HI_BYTE(x)		((u8)(((x) >> 8) & 0xFF))
+#define I40E_LO_BYTE(x)		((u8)((x) & 0xFF))
+
+/* Number of Transmit Descriptors must be a multiple of 8. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE	8
+/* Number of Receive Descriptors must be a multiple of 32 if
+ * the number of descriptors is greater than 32.
+ */
+#define I40E_REQ_RX_DESCRIPTOR_MULTIPLE	32
+
+#define I40E_DESC_UNUSED(R)	\
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
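+/* Worked example (illustrative only): on a ring with count = 512,
+ * next_to_clean = 10 and next_to_use = 500 give
+ *   512 + 10 - 500 - 1 = 21 unused descriptors,
+ * while next_to_clean = 500 and next_to_use = 10 give
+ *   0 + 500 - 10 - 1 = 489.
+ * The trailing "- 1" keeps one slot unused so a completely full ring
+ * remains distinguishable from an empty one.
+ */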
+
+/* bitfields for Tx queue mapping in QTX_CTL */
+#define I40E_QTX_CTL_VF_QUEUE	0x0
+#define I40E_QTX_CTL_VM_QUEUE	0x1
+#define I40E_QTX_CTL_PF_QUEUE	0x2
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+	I40E_DEBUG_INIT			= 0x00000001,
+	I40E_DEBUG_RELEASE		= 0x00000002,
+
+	I40E_DEBUG_LINK			= 0x00000010,
+	I40E_DEBUG_PHY			= 0x00000020,
+	I40E_DEBUG_HMC			= 0x00000040,
+	I40E_DEBUG_NVM			= 0x00000080,
+	I40E_DEBUG_LAN			= 0x00000100,
+	I40E_DEBUG_FLOW			= 0x00000200,
+	I40E_DEBUG_DCB			= 0x00000400,
+	I40E_DEBUG_DIAG			= 0x00000800,
+	I40E_DEBUG_FD			= 0x00001000,
+
+	I40E_DEBUG_AQ_MESSAGE		= 0x01000000,
+	I40E_DEBUG_AQ_DESCRIPTOR	= 0x02000000,
+	I40E_DEBUG_AQ_DESC_BUFFER	= 0x04000000,
+	I40E_DEBUG_AQ_COMMAND		= 0x06000000,
+	I40E_DEBUG_AQ			= 0x0F000000,
+
+	I40E_DEBUG_USER			= 0xF0000000,
+
+	I40E_DEBUG_ALL			= 0xFFFFFFFF
+};
+
+/* PCI Bus Info */
+#define I40E_PCI_LINK_STATUS		0xB2
+#define I40E_PCI_LINK_WIDTH		0x3F0
+#define I40E_PCI_LINK_WIDTH_1		0x10
+#define I40E_PCI_LINK_WIDTH_2		0x20
+#define I40E_PCI_LINK_WIDTH_4		0x40
+#define I40E_PCI_LINK_WIDTH_8		0x80
+#define I40E_PCI_LINK_SPEED		0xF
+#define I40E_PCI_LINK_SPEED_2500	0x1
+#define I40E_PCI_LINK_SPEED_5000	0x2
+#define I40E_PCI_LINK_SPEED_8000	0x3
+
+/* Memory types */
+enum i40e_memset_type {
+	I40E_NONDMA_MEM = 0,
+	I40E_DMA_MEM
+};
+
+/* Memcpy types */
+enum i40e_memcpy_type {
+	I40E_NONDMA_TO_NONDMA = 0,
+	I40E_NONDMA_TO_DMA,
+	I40E_DMA_TO_DMA,
+	I40E_DMA_TO_NONDMA
+};
+
+/* These are structs for managing the hardware information and the operations.
+ * The structures of function pointers are filled out at init time when we
+ * know for sure exactly which hardware we're working with.  This gives us the
+ * flexibility of using the same main driver code but adapting to slightly
+ * different hardware needs as new parts are developed.  For this architecture,
+ * the Firmware and AdminQ are intended to insulate the driver from most of the
+ * future changes, but these structures will also do part of the job.
+ */
+enum i40e_mac_type {
+	I40E_MAC_UNKNOWN = 0,
+	I40E_MAC_X710,
+	I40E_MAC_XL710,
+	I40E_MAC_VF,
+#ifdef X722_SUPPORT
+	I40E_MAC_X722,
+	I40E_MAC_X722_VF,
+#endif
+	I40E_MAC_GENERIC,
+};
+
+enum i40e_media_type {
+	I40E_MEDIA_TYPE_UNKNOWN = 0,
+	I40E_MEDIA_TYPE_FIBER,
+	I40E_MEDIA_TYPE_BASET,
+	I40E_MEDIA_TYPE_BACKPLANE,
+	I40E_MEDIA_TYPE_CX4,
+	I40E_MEDIA_TYPE_DA,
+	I40E_MEDIA_TYPE_VIRTUAL
+};
+
+enum i40e_fc_mode {
+	I40E_FC_NONE = 0,
+	I40E_FC_RX_PAUSE,
+	I40E_FC_TX_PAUSE,
+	I40E_FC_FULL,
+	I40E_FC_PFC,
+	I40E_FC_DEFAULT
+};
+
+enum i40e_set_fc_aq_failures {
+	I40E_SET_FC_AQ_FAIL_NONE = 0,
+	I40E_SET_FC_AQ_FAIL_GET = 1,
+	I40E_SET_FC_AQ_FAIL_SET = 2,
+	I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+	I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
+enum i40e_vsi_type {
+	I40E_VSI_MAIN	= 0,
+	I40E_VSI_VMDQ1	= 1,
+	I40E_VSI_VMDQ2	= 2,
+	I40E_VSI_CTRL	= 3,
+	I40E_VSI_FCOE	= 4,
+	I40E_VSI_MIRROR	= 5,
+	I40E_VSI_SRIOV	= 6,
+	I40E_VSI_FDIR	= 7,
+	I40E_VSI_TYPE_UNKNOWN
+};
+
+enum i40e_queue_type {
+	I40E_QUEUE_TYPE_RX = 0,
+	I40E_QUEUE_TYPE_TX,
+	I40E_QUEUE_TYPE_PE_CEQ,
+	I40E_QUEUE_TYPE_UNKNOWN
+};
+
+struct i40e_link_status {
+	enum i40e_aq_phy_type phy_type;
+	enum i40e_aq_link_speed link_speed;
+	u8 link_info;
+	u8 an_info;
+	u8 ext_info;
+	u8 loopback;
+	/* is Link Status Event notification to SW enabled */
+	bool lse_enable;
+	u16 max_frame_size;
+	bool crc_enable;
+	u8 pacing;
+	u8 requested_speeds;
+	u8 module_type[3];
+	/* 1st byte: module identifier */
+#define I40E_MODULE_TYPE_SFP		0x03
+#define I40E_MODULE_TYPE_QSFP		0x0D
+	/* 2nd byte: ethernet compliance codes for 10/40G */
+#define I40E_MODULE_TYPE_40G_ACTIVE	0x01
+#define I40E_MODULE_TYPE_40G_LR4	0x02
+#define I40E_MODULE_TYPE_40G_SR4	0x04
+#define I40E_MODULE_TYPE_40G_CR4	0x08
+#define I40E_MODULE_TYPE_10G_BASE_SR	0x10
+#define I40E_MODULE_TYPE_10G_BASE_LR	0x20
+#define I40E_MODULE_TYPE_10G_BASE_LRM	0x40
+#define I40E_MODULE_TYPE_10G_BASE_ER	0x80
+	/* 3rd byte: ethernet compliance codes for 1G */
+#define I40E_MODULE_TYPE_1000BASE_SX	0x01
+#define I40E_MODULE_TYPE_1000BASE_LX	0x02
+#define I40E_MODULE_TYPE_1000BASE_CX	0x04
+#define I40E_MODULE_TYPE_1000BASE_T	0x08
+};
+
+enum i40e_aq_capabilities_phy_type {
+	I40E_CAP_PHY_TYPE_SGMII			= BIT(I40E_PHY_TYPE_SGMII),
+	I40E_CAP_PHY_TYPE_1000BASE_KX		= BIT(I40E_PHY_TYPE_1000BASE_KX),
+	I40E_CAP_PHY_TYPE_10GBASE_KX4		= BIT(I40E_PHY_TYPE_10GBASE_KX4),
+	I40E_CAP_PHY_TYPE_10GBASE_KR		= BIT(I40E_PHY_TYPE_10GBASE_KR),
+	I40E_CAP_PHY_TYPE_40GBASE_KR4		= BIT(I40E_PHY_TYPE_40GBASE_KR4),
+	I40E_CAP_PHY_TYPE_XAUI			= BIT(I40E_PHY_TYPE_XAUI),
+	I40E_CAP_PHY_TYPE_XFI			= BIT(I40E_PHY_TYPE_XFI),
+	I40E_CAP_PHY_TYPE_SFI			= BIT(I40E_PHY_TYPE_SFI),
+	I40E_CAP_PHY_TYPE_XLAUI			= BIT(I40E_PHY_TYPE_XLAUI),
+	I40E_CAP_PHY_TYPE_XLPPI			= BIT(I40E_PHY_TYPE_XLPPI),
+	I40E_CAP_PHY_TYPE_40GBASE_CR4_CU	= BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_CR1_CU	= BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_AOC		= BIT(I40E_PHY_TYPE_10GBASE_AOC),
+	I40E_CAP_PHY_TYPE_40GBASE_AOC		= BIT(I40E_PHY_TYPE_40GBASE_AOC),
+	I40E_CAP_PHY_TYPE_100BASE_TX		= BIT(I40E_PHY_TYPE_100BASE_TX),
+	I40E_CAP_PHY_TYPE_1000BASE_T		= BIT(I40E_PHY_TYPE_1000BASE_T),
+	I40E_CAP_PHY_TYPE_10GBASE_T		= BIT(I40E_PHY_TYPE_10GBASE_T),
+	I40E_CAP_PHY_TYPE_10GBASE_SR		= BIT(I40E_PHY_TYPE_10GBASE_SR),
+	I40E_CAP_PHY_TYPE_10GBASE_LR		= BIT(I40E_PHY_TYPE_10GBASE_LR),
+	I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU	= BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
+	I40E_CAP_PHY_TYPE_10GBASE_CR1		= BIT(I40E_PHY_TYPE_10GBASE_CR1),
+	I40E_CAP_PHY_TYPE_40GBASE_CR4		= BIT(I40E_PHY_TYPE_40GBASE_CR4),
+	I40E_CAP_PHY_TYPE_40GBASE_SR4		= BIT(I40E_PHY_TYPE_40GBASE_SR4),
+	I40E_CAP_PHY_TYPE_40GBASE_LR4		= BIT(I40E_PHY_TYPE_40GBASE_LR4),
+	I40E_CAP_PHY_TYPE_1000BASE_SX		= BIT(I40E_PHY_TYPE_1000BASE_SX),
+	I40E_CAP_PHY_TYPE_1000BASE_LX		= BIT(I40E_PHY_TYPE_1000BASE_LX),
+	I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL	= BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
+	I40E_CAP_PHY_TYPE_20GBASE_KR2		= BIT(I40E_PHY_TYPE_20GBASE_KR2)
+};
+
+struct i40e_phy_info {
+	struct i40e_link_status link_info;
+	struct i40e_link_status link_info_old;
+	bool get_link_info;
+	enum i40e_media_type media_type;
+	/* all the phy types the NVM is capable of */
+	enum i40e_aq_capabilities_phy_type phy_types;
+};
+
+#define I40E_HW_CAP_MAX_GPIO			30
+#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO		0
+#define I40E_HW_CAP_MDIO_PORT_MODE_I2C		1
+
+/* Capabilities of a PF or a VF or the whole device */
+struct i40e_hw_capabilities {
+	u32  switch_mode;
+#define I40E_NVM_IMAGE_TYPE_EVB		0x0
+#define I40E_NVM_IMAGE_TYPE_CLOUD	0x2
+#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD	0x3
+
+	u32  management_mode;
+	u32  npar_enable;
+	u32  os2bmc;
+	u32  valid_functions;
+	bool sr_iov_1_1;
+	bool vmdq;
+	bool evb_802_1_qbg; /* Edge Virtual Bridging */
+	bool evb_802_1_qbh; /* Bridge Port Extension */
+	bool dcb;
+	bool fcoe;
+	bool iscsi; /* Indicates iSCSI enabled */
+	bool flex10_enable;
+	bool flex10_capable;
+	u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN	0x0
+#define I40E_FLEX10_MODE_DCC		0x1
+#define I40E_FLEX10_MODE_DCI		0x2
+
+	u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR	0x1
+#define I40E_FLEX10_STATUS_VC_MODE	0x2
+
+	bool mgmt_cem;
+	bool ieee_1588;
+	bool iwarp;
+	bool fd;
+	u32 fd_filters_guaranteed;
+	u32 fd_filters_best_effort;
+	bool rss;
+	u32 rss_table_size;
+	u32 rss_table_entry_width;
+	bool led[I40E_HW_CAP_MAX_GPIO];
+	bool sdp[I40E_HW_CAP_MAX_GPIO];
+	u32 nvm_image_type;
+	u32 num_flow_director_filters;
+	u32 num_vfs;
+	u32 vf_base_id;
+	u32 num_vsis;
+	u32 num_rx_qp;
+	u32 num_tx_qp;
+	u32 base_queue;
+	u32 num_msix_vectors;
+	u32 num_msix_vectors_vf;
+	u32 led_pin_num;
+	u32 sdp_pin_num;
+	u32 mdio_port_num;
+	u32 mdio_port_mode;
+	u8 rx_buf_chain_len;
+	u32 enabled_tcmap;
+	u32 maxtc;
+	u64 wr_csr_prot;
+};
+
+struct i40e_mac_info {
+	enum i40e_mac_type type;
+	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 perm_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 san_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 port_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u16 max_fcoeq;
+};
+
+enum i40e_aq_resources_ids {
+	I40E_NVM_RESOURCE_ID = 1
+};
+
+enum i40e_aq_resource_access_type {
+	I40E_RESOURCE_READ = 1,
+	I40E_RESOURCE_WRITE
+};
+
+struct i40e_nvm_info {
+	u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */
+	u32 timeout;              /* [ms] */
+	u16 sr_size;              /* Shadow RAM size in words */
+	bool blank_nvm_mode;      /* is NVM empty (no FW present) */
+	u16 version;              /* NVM package version */
+	u32 eetrack;              /* NVM data version */
+	u32 oem_ver;              /* OEM version info */
+};
+
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+	I40E_NVMUPD_INVALID,
+	I40E_NVMUPD_READ_CON,
+	I40E_NVMUPD_READ_SNT,
+	I40E_NVMUPD_READ_LCB,
+	I40E_NVMUPD_READ_SA,
+	I40E_NVMUPD_WRITE_ERA,
+	I40E_NVMUPD_WRITE_CON,
+	I40E_NVMUPD_WRITE_SNT,
+	I40E_NVMUPD_WRITE_LCB,
+	I40E_NVMUPD_WRITE_SA,
+	I40E_NVMUPD_CSUM_CON,
+	I40E_NVMUPD_CSUM_SA,
+	I40E_NVMUPD_CSUM_LCB,
+	I40E_NVMUPD_STATUS,
+	I40E_NVMUPD_EXEC_AQ,
+	I40E_NVMUPD_GET_AQ_RESULT,
+};
+
+enum i40e_nvmupd_state {
+	I40E_NVMUPD_STATE_INIT,
+	I40E_NVMUPD_STATE_READING,
+	I40E_NVMUPD_STATE_WRITING,
+	I40E_NVMUPD_STATE_INIT_WAIT,
+	I40E_NVMUPD_STATE_WRITE_WAIT,
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code.  Where is the right file?
+ */
+#define I40E_NVM_READ	0xB
+#define I40E_NVM_WRITE	0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT	8
+#define I40E_NVM_TRANS_MASK	(0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON		0x0
+#define I40E_NVM_SNT		0x1
+#define I40E_NVM_LCB		0x2
+#define I40E_NVM_SA		(I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA		0x4
+#define I40E_NVM_CSUM		0x8
+#define I40E_NVM_EXEC		0xf
+
+#define I40E_NVM_ADAPT_SHIFT	16
+#define I40E_NVM_ADAPT_MASK	(0xffffULL << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA	4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
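+/* Sketch of how the masks above decode the config word of an
+ * i40e_nvm_access request (illustrative, derived only from the defines):
+ *   module = config & I40E_NVM_MOD_PNT_MASK;                          bits 0-7
+ *   trans  = (config & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT;  bits 8-11
+ * where trans is one of I40E_NVM_CON/SNT/LCB/SA/ERA/CSUM/EXEC.
+ */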
+struct i40e_nvm_access {
+	u32 command;
+	u32 config;
+	u32 offset;	/* in bytes */
+	u32 data_size;	/* in bytes */
+	u8 data[1];
+};
+
+/* PCI bus types */
+enum i40e_bus_type {
+	i40e_bus_type_unknown = 0,
+	i40e_bus_type_pci,
+	i40e_bus_type_pcix,
+	i40e_bus_type_pci_express,
+	i40e_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum i40e_bus_speed {
+	i40e_bus_speed_unknown	= 0,
+	i40e_bus_speed_33	= 33,
+	i40e_bus_speed_66	= 66,
+	i40e_bus_speed_100	= 100,
+	i40e_bus_speed_120	= 120,
+	i40e_bus_speed_133	= 133,
+	i40e_bus_speed_2500	= 2500,
+	i40e_bus_speed_5000	= 5000,
+	i40e_bus_speed_8000	= 8000,
+	i40e_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum i40e_bus_width {
+	i40e_bus_width_unknown	= 0,
+	i40e_bus_width_pcie_x1	= 1,
+	i40e_bus_width_pcie_x2	= 2,
+	i40e_bus_width_pcie_x4	= 4,
+	i40e_bus_width_pcie_x8	= 8,
+	i40e_bus_width_32	= 32,
+	i40e_bus_width_64	= 64,
+	i40e_bus_width_reserved
+};
+
+/* Bus parameters */
+struct i40e_bus_info {
+	enum i40e_bus_speed speed;
+	enum i40e_bus_width width;
+	enum i40e_bus_type type;
+
+	u16 func;
+	u16 device;
+	u16 lan_id;
+};
+
+/* Flow control (FC) parameters */
+struct i40e_fc_info {
+	enum i40e_fc_mode current_mode; /* FC mode in effect */
+	enum i40e_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+#define I40E_MAX_TRAFFIC_CLASS		8
+#define I40E_MAX_USER_PRIORITY		8
+#define I40E_DCBX_MAX_APPS		32
+#define I40E_LLDPDU_SIZE		1500
+#define I40E_TLV_STATUS_OPER		0x1
+#define I40E_TLV_STATUS_SYNC		0x2
+#define I40E_TLV_STATUS_ERR		0x4
+#define I40E_CEE_OPER_MAX_APPS		3
+#define I40E_APP_PROTOID_FCOE		0x8906
+#define I40E_APP_PROTOID_ISCSI		0x0cbc
+#define I40E_APP_PROTOID_FIP		0x8914
+#define I40E_APP_SEL_ETHTYPE		0x1
+#define I40E_APP_SEL_TCPIP		0x2
+#define I40E_CEE_APP_SEL_ETHTYPE	0x0
+#define I40E_CEE_APP_SEL_TCPIP		0x1
+
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct i40e_dcb_ets_config {
+	u8 willing;
+	u8 cbs;
+	u8 maxtcs;
+	u8 prioritytable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS];
+	u8 tsatable[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct i40e_dcb_pfc_config {
+	u8 willing;
+	u8 mbc;
+	u8 pfccap;
+	u8 pfcenable;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct i40e_dcb_app_priority_table {
+	u8  priority;
+	u8  selector;
+	u16 protocolid;
+};
+
+struct i40e_dcbx_config {
+	u8  dcbx_mode;
+#define I40E_DCBX_MODE_CEE	0x1
+#define I40E_DCBX_MODE_IEEE	0x2
+	u32 numapps;
+	u32 tlv_status; /* CEE mode TLV status */
+	struct i40e_dcb_ets_config etscfg;
+	struct i40e_dcb_ets_config etsrec;
+	struct i40e_dcb_pfc_config pfc;
+	struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
+};
+
+/* Port hardware description */
+struct i40e_hw {
+	u8 *hw_addr;
+	void *back;
+
+	/* subsystem structs */
+	struct i40e_phy_info phy;
+	struct i40e_mac_info mac;
+	struct i40e_bus_info bus;
+	struct i40e_nvm_info nvm;
+	struct i40e_fc_info fc;
+
+	/* pci info */
+	u16 device_id;
+	u16 vendor_id;
+	u16 subsystem_device_id;
+	u16 subsystem_vendor_id;
+	u8 revision_id;
+	u8 port;
+	bool adapter_stopped;
+
+	/* capabilities for entire device and PCI func */
+	struct i40e_hw_capabilities dev_caps;
+	struct i40e_hw_capabilities func_caps;
+
+	/* Flow Director shared filter space */
+	u16 fdir_shared_filter_count;
+
+	/* device profile info */
+	u8  pf_id;
+	u16 main_vsi_seid;
+
+	/* for multi-function MACs */
+	u16 partition_id;
+	u16 num_partitions;
+	u16 num_ports;
+
+	/* Closest numa node to the device */
+	u16 numa_node;
+
+	/* Admin Queue info */
+	struct i40e_adminq_info aq;
+
+	/* state of nvm update process */
+	enum i40e_nvmupd_state nvmupd_state;
+	struct i40e_aq_desc nvm_wb_desc;
+	struct i40e_virt_mem nvm_buff;
+
+	/* HMC info */
+	struct i40e_hmc_info hmc; /* HMC info struct */
+
+	/* LLDP/DCBX Status */
+	u16 dcbx_status;
+
+	/* DCBX info */
+	struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
+	struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
+	struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+
+	/* debug mask */
+	u32 debug_mask;
+	char err_str[16];
+};
+
+static INLINE bool i40e_is_vf(struct i40e_hw *hw)
+{
+#ifdef X722_SUPPORT
+	return (hw->mac.type == I40E_MAC_VF ||
+		hw->mac.type == I40E_MAC_X722_VF);
+#else
+	return hw->mac.type == I40E_MAC_VF;
+#endif
+}
+
+struct i40e_driver_version {
+	u8 major_version;
+	u8 minor_version;
+	u8 build_version;
+	u8 subbuild_version;
+	u8 driver_string[32];
+};
+
+/* RX Descriptors */
+union i40e_16byte_rx_desc {
+	struct {
+		__le64 pkt_addr; /* Packet buffer address */
+		__le64 hdr_addr; /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fd_id; /* Flow director filter id */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+			} hi_dword;
+		} qword0;
+		struct {
+			/* ext status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+	} wb;  /* writeback */
+};
+
+union i40e_32byte_rx_desc {
+	struct {
+		__le64  pkt_addr; /* Packet buffer address */
+		__le64  hdr_addr; /* Header buffer address */
+			/* bit 0 of hdr_buffer_addr is DD bit */
+		__le64  rsvd1;
+		__le64  rsvd2;
+	} read;
+	struct {
+		struct {
+			struct {
+				union {
+					__le16 mirroring_status;
+					__le16 fcoe_ctx_id;
+				} mirr_fcoe;
+				__le16 l2tag1;
+			} lo_dword;
+			union {
+				__le32 rss; /* RSS Hash */
+				__le32 fcoe_param; /* FCoE DDP Context id */
+				/* Flow director filter id in case of
+				 * Programming status desc WB
+				 */
+				__le32 fd_id;
+			} hi_dword;
+		} qword0;
+		struct {
+			/* status/error/pktype/length */
+			__le64 status_error_len;
+		} qword1;
+		struct {
+			__le16 ext_status; /* extended status */
+			__le16 rsvd;
+			__le16 l2tag2_1;
+			__le16 l2tag2_2;
+		} qword2;
+		struct {
+			union {
+				__le32 flex_bytes_lo;
+				__le32 pe_status;
+			} lo_dword;
+			union {
+				__le32 flex_bytes_hi;
+				__le32 fd_id;
+			} hi_dword;
+		} qword3;
+	} wb;  /* writeback */
+};
+
+#define I40E_RXD_QW0_MIRROR_STATUS_SHIFT	8
+#define I40E_RXD_QW0_MIRROR_STATUS_MASK	(0x3FUL << \
+					 I40E_RXD_QW0_MIRROR_STATUS_SHIFT)
+#define I40E_RXD_QW0_FCOEINDX_SHIFT	0
+#define I40E_RXD_QW0_FCOEINDX_MASK	(0xFFFUL << \
+					 I40E_RXD_QW0_FCOEINDX_SHIFT)
+
+enum i40e_rx_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_STATUS_DD_SHIFT		= 0,
+	I40E_RX_DESC_STATUS_EOF_SHIFT		= 1,
+	I40E_RX_DESC_STATUS_L2TAG1P_SHIFT	= 2,
+	I40E_RX_DESC_STATUS_L3L4P_SHIFT		= 3,
+	I40E_RX_DESC_STATUS_CRCP_SHIFT		= 4,
+	I40E_RX_DESC_STATUS_TSYNINDX_SHIFT	= 5, /* 2 BITS */
+	I40E_RX_DESC_STATUS_TSYNVALID_SHIFT	= 7,
+#ifdef X722_SUPPORT
+	I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT	= 8,
+#else
+	I40E_RX_DESC_STATUS_RESERVED1_SHIFT	= 8,
+#endif
+
+	I40E_RX_DESC_STATUS_UMBCAST_SHIFT	= 9, /* 2 BITS */
+	I40E_RX_DESC_STATUS_FLM_SHIFT		= 11,
+	I40E_RX_DESC_STATUS_FLTSTAT_SHIFT	= 12, /* 2 BITS */
+	I40E_RX_DESC_STATUS_LPBK_SHIFT		= 14,
+	I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT	= 15,
+	I40E_RX_DESC_STATUS_RESERVED2_SHIFT	= 16, /* 2 BITS */
+#ifdef X722_SUPPORT
+	I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT	= 18,
+#else
+	I40E_RX_DESC_STATUS_UDP_0_SHIFT		= 18,
+#endif
+	I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
+};
+
+#define I40E_RXD_QW1_STATUS_SHIFT	0
+#define I40E_RXD_QW1_STATUS_MASK	((BIT(I40E_RX_DESC_STATUS_LAST) - 1) << \
+					 I40E_RXD_QW1_STATUS_SHIFT)
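+/* Illustrative decode of a writeback qword1 (not from the original
+ * sources); every field below is a plain shift-and-mask extraction:
+ *   status = (qw1 & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
+ *   done   = status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT);
+ */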
+
+#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK	(0x3UL << \
+					     I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+
+#define I40E_RXD_QW1_STATUS_UMBCAST_SHIFT	I40E_RX_DESC_STATUS_UMBCAST_SHIFT
+#define I40E_RXD_QW1_STATUS_UMBCAST_MASK	(0x3UL << \
+					 I40E_RXD_QW1_STATUS_UMBCAST_SHIFT)
+
+enum i40e_rx_desc_fltstat_values {
+	I40E_RX_DESC_FLTSTAT_NO_DATA	= 0,
+	I40E_RX_DESC_FLTSTAT_RSV_FD_ID	= 1, /* 16byte desc? FD_ID : RSV */
+	I40E_RX_DESC_FLTSTAT_RSV	= 2,
+	I40E_RX_DESC_FLTSTAT_RSS_HASH	= 3,
+};
+
+#define I40E_RXD_PACKET_TYPE_UNICAST	0
+#define I40E_RXD_PACKET_TYPE_MULTICAST	1
+#define I40E_RXD_PACKET_TYPE_BROADCAST	2
+#define I40E_RXD_PACKET_TYPE_MIRRORED	3
+
+#define I40E_RXD_QW1_ERROR_SHIFT	19
+#define I40E_RXD_QW1_ERROR_MASK		(0xFFUL << I40E_RXD_QW1_ERROR_SHIFT)
+
+enum i40e_rx_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_ERROR_RXE_SHIFT		= 0,
+	I40E_RX_DESC_ERROR_RECIPE_SHIFT		= 1,
+	I40E_RX_DESC_ERROR_HBO_SHIFT		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_SHIFT		= 3, /* 3 BITS */
+	I40E_RX_DESC_ERROR_IPE_SHIFT		= 3,
+	I40E_RX_DESC_ERROR_L4E_SHIFT		= 4,
+	I40E_RX_DESC_ERROR_EIPE_SHIFT		= 5,
+	I40E_RX_DESC_ERROR_OVERSIZE_SHIFT	= 6,
+	I40E_RX_DESC_ERROR_PPRS_SHIFT		= 7
+};
+
+enum i40e_rx_desc_error_l3l4e_fcoe_masks {
+	I40E_RX_DESC_ERROR_L3L4E_NONE		= 0,
+	I40E_RX_DESC_ERROR_L3L4E_PROT		= 1,
+	I40E_RX_DESC_ERROR_L3L4E_FC		= 2,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR	= 3,
+	I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN	= 4
+};
+
+#define I40E_RXD_QW1_PTYPE_SHIFT	30
+#define I40E_RXD_QW1_PTYPE_MASK		(0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
+
+/* Packet type non-ip values */
+enum i40e_rx_l2_ptype {
+	I40E_RX_PTYPE_L2_RESERVED			= 0,
+	I40E_RX_PTYPE_L2_MAC_PAY2			= 1,
+	I40E_RX_PTYPE_L2_TIMESYNC_PAY2			= 2,
+	I40E_RX_PTYPE_L2_FIP_PAY2			= 3,
+	I40E_RX_PTYPE_L2_OUI_PAY2			= 4,
+	I40E_RX_PTYPE_L2_MACCNTRL_PAY2			= 5,
+	I40E_RX_PTYPE_L2_LLDP_PAY2			= 6,
+	I40E_RX_PTYPE_L2_ECP_PAY2			= 7,
+	I40E_RX_PTYPE_L2_EVB_PAY2			= 8,
+	I40E_RX_PTYPE_L2_QCN_PAY2			= 9,
+	I40E_RX_PTYPE_L2_EAPOL_PAY2			= 10,
+	I40E_RX_PTYPE_L2_ARP				= 11,
+	I40E_RX_PTYPE_L2_FCOE_PAY3			= 12,
+	I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3		= 13,
+	I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3		= 14,
+	I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3		= 15,
+	I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA		= 16,
+	I40E_RX_PTYPE_L2_FCOE_VFT_PAY3			= 17,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA		= 18,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY			= 19,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP			= 20,
+	I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER		= 21,
+	I40E_RX_PTYPE_GRENAT4_MAC_PAY3			= 58,
+	I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4	= 87,
+	I40E_RX_PTYPE_GRENAT6_MAC_PAY3			= 124,
+	I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4	= 153
+};
+
+struct i40e_rx_ptype_decoded {
+	u32 ptype:8;
+	u32 known:1;
+	u32 outer_ip:1;
+	u32 outer_ip_ver:1;
+	u32 outer_frag:1;
+	u32 tunnel_type:3;
+	u32 tunnel_end_prot:2;
+	u32 tunnel_end_frag:1;
+	u32 inner_prot:4;
+	u32 payload_layer:3;
+};
+
+enum i40e_rx_ptype_outer_ip {
+	I40E_RX_PTYPE_OUTER_L2	= 0,
+	I40E_RX_PTYPE_OUTER_IP	= 1
+};
+
+enum i40e_rx_ptype_outer_ip_ver {
+	I40E_RX_PTYPE_OUTER_NONE	= 0,
+	I40E_RX_PTYPE_OUTER_IPV4	= 0,
+	I40E_RX_PTYPE_OUTER_IPV6	= 1
+};
+
+enum i40e_rx_ptype_outer_fragmented {
+	I40E_RX_PTYPE_NOT_FRAG	= 0,
+	I40E_RX_PTYPE_FRAG	= 1
+};
+
+enum i40e_rx_ptype_tunnel_type {
+	I40E_RX_PTYPE_TUNNEL_NONE		= 0,
+	I40E_RX_PTYPE_TUNNEL_IP_IP		= 1,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT		= 2,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC	= 3,
+	I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN	= 4,
+};
+
+enum i40e_rx_ptype_tunnel_end_prot {
+	I40E_RX_PTYPE_TUNNEL_END_NONE	= 0,
+	I40E_RX_PTYPE_TUNNEL_END_IPV4	= 1,
+	I40E_RX_PTYPE_TUNNEL_END_IPV6	= 2,
+};
+
+enum i40e_rx_ptype_inner_prot {
+	I40E_RX_PTYPE_INNER_PROT_NONE		= 0,
+	I40E_RX_PTYPE_INNER_PROT_UDP		= 1,
+	I40E_RX_PTYPE_INNER_PROT_TCP		= 2,
+	I40E_RX_PTYPE_INNER_PROT_SCTP		= 3,
+	I40E_RX_PTYPE_INNER_PROT_ICMP		= 4,
+	I40E_RX_PTYPE_INNER_PROT_TIMESYNC	= 5
+};
+
+enum i40e_rx_ptype_payload_layer {
+	I40E_RX_PTYPE_PAYLOAD_LAYER_NONE	= 0,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2	= 1,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3	= 2,
+	I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4	= 3,
+};
+
+#define I40E_RX_PTYPE_BIT_MASK		0x0FFFFFFF
+#define I40E_RX_PTYPE_SHIFT		56
+
+#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT	38
+#define I40E_RXD_QW1_LENGTH_PBUF_MASK	(0x3FFFULL << \
+					 I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT	52
+#define I40E_RXD_QW1_LENGTH_HBUF_MASK	(0x7FFULL << \
+					 I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
+
+#define I40E_RXD_QW1_LENGTH_SPH_SHIFT	63
+#define I40E_RXD_QW1_LENGTH_SPH_MASK	BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+
+#define I40E_RXD_QW1_NEXTP_SHIFT	38
+#define I40E_RXD_QW1_NEXTP_MASK		(0x1FFFULL << I40E_RXD_QW1_NEXTP_SHIFT)
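+/* The packet length comes out of the same writeback qword1 (illustrative):
+ *   plen = (qw1 & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ *          I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+ */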
+
+#define I40E_RXD_QW2_EXT_STATUS_SHIFT	0
+#define I40E_RXD_QW2_EXT_STATUS_MASK	(0xFFFFFUL << \
+					 I40E_RXD_QW2_EXT_STATUS_SHIFT)
+
+enum i40e_rx_desc_ext_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT	= 0,
+	I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT	= 1,
+	I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT	= 2, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT	= 4, /* 2 BITS */
+	I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT	= 9,
+	I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT	= 10,
+	I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT	= 11,
+};
+
+#define I40E_RXD_QW2_L2TAG2_SHIFT	0
+#define I40E_RXD_QW2_L2TAG2_MASK	(0xFFFFUL << I40E_RXD_QW2_L2TAG2_SHIFT)
+
+#define I40E_RXD_QW2_L2TAG3_SHIFT	16
+#define I40E_RXD_QW2_L2TAG3_MASK	(0xFFFFUL << I40E_RXD_QW2_L2TAG3_SHIFT)
+
+enum i40e_rx_desc_pe_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_DESC_PE_STATUS_QPID_SHIFT	= 0, /* 18 BITS */
+	I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT	= 0, /* 16 BITS */
+	I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT	= 16, /* 8 BITS */
+	I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT	= 24,
+	I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT	= 25,
+	I40E_RX_DESC_PE_STATUS_PORTV_SHIFT	= 26,
+	I40E_RX_DESC_PE_STATUS_URG_SHIFT	= 27,
+	I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT	= 28,
+	I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT	= 29
+};
+
+#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT		38
+#define I40E_RX_PROG_STATUS_DESC_LENGTH			0x2000000
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT	2
+#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK	(0x7UL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT	0
+#define I40E_RX_PROG_STATUS_DESC_QW1_STATUS_MASK	(0x7FFFUL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_STATUS_SHIFT)
+
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT	19
+#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK		(0x3FUL << \
+				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT)
+
+enum i40e_rx_prog_status_desc_status_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_DD_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT	= 2 /* 3 BITS */
+};
+
+enum i40e_rx_prog_status_desc_prog_id_masks {
+	I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS	= 4,
+};
+
+enum i40e_rx_prog_status_desc_error_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT	= 0,
+	I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT	= 1,
+	I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT	= 2,
+	I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT	= 3
+};
+
+#define I40E_TWO_BIT_MASK	0x3
+#define I40E_THREE_BIT_MASK	0x7
+#define I40E_FOUR_BIT_MASK	0xF
+#define I40E_EIGHTEEN_BIT_MASK	0x3FFFF
+
+/* TX Descriptor */
+struct i40e_tx_desc {
+	__le64 buffer_addr; /* Address of descriptor's data buf */
+	__le64 cmd_type_offset_bsz;
+};
+
+#define I40E_TXD_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_QW1_DTYPE_MASK		(0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
+
+enum i40e_tx_desc_dtype_value {
+	I40E_TX_DESC_DTYPE_DATA		= 0x0,
+	I40E_TX_DESC_DTYPE_NOP		= 0x1, /* same as Context desc */
+	I40E_TX_DESC_DTYPE_CONTEXT	= 0x1,
+	I40E_TX_DESC_DTYPE_FCOE_CTX	= 0x2,
+	I40E_TX_DESC_DTYPE_FILTER_PROG	= 0x8,
+	I40E_TX_DESC_DTYPE_DDP_CTX	= 0x9,
+	I40E_TX_DESC_DTYPE_FLEX_DATA	= 0xB,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_1	= 0xC,
+	I40E_TX_DESC_DTYPE_FLEX_CTX_2	= 0xD,
+	I40E_TX_DESC_DTYPE_DESC_DONE	= 0xF
+};
+
+#define I40E_TXD_QW1_CMD_SHIFT	4
+#define I40E_TXD_QW1_CMD_MASK	(0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
+
+enum i40e_tx_desc_cmd_bits {
+	I40E_TX_DESC_CMD_EOP			= 0x0001,
+	I40E_TX_DESC_CMD_RS			= 0x0002,
+	I40E_TX_DESC_CMD_ICRC			= 0x0004,
+	I40E_TX_DESC_CMD_IL2TAG1		= 0x0008,
+	I40E_TX_DESC_CMD_DUMMY			= 0x0010,
+	I40E_TX_DESC_CMD_IIPT_NONIP		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV6		= 0x0020, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4		= 0x0040, /* 2 BITS */
+	I40E_TX_DESC_CMD_IIPT_IPV4_CSUM		= 0x0060, /* 2 BITS */
+	I40E_TX_DESC_CMD_FCOET			= 0x0080,
+	I40E_TX_DESC_CMD_L4T_EOFT_UNK		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_TCP		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_SCTP		= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_UDP		= 0x0300, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_N		= 0x0000, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_T		= 0x0100, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI	= 0x0200, /* 2 BITS */
+	I40E_TX_DESC_CMD_L4T_EOFT_EOF_A		= 0x0300, /* 2 BITS */
+};
+
+#define I40E_TXD_QW1_OFFSET_SHIFT	16
+#define I40E_TXD_QW1_OFFSET_MASK	(0x3FFFFULL << \
+					 I40E_TXD_QW1_OFFSET_SHIFT)
+
+enum i40e_tx_desc_length_fields {
+	/* Note: These are predefined bit offsets */
+	I40E_TX_DESC_LENGTH_MACLEN_SHIFT	= 0, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_IPLEN_SHIFT		= 7, /* 7 BITS */
+	I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT	= 14 /* 4 BITS */
+};
+
+#define I40E_TXD_QW1_MACLEN_MASK (0x7FUL << I40E_TX_DESC_LENGTH_MACLEN_SHIFT)
+#define I40E_TXD_QW1_IPLEN_MASK  (0x7FUL << I40E_TX_DESC_LENGTH_IPLEN_SHIFT)
+#define I40E_TXD_QW1_L4LEN_MASK  (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+#define I40E_TXD_QW1_FCLEN_MASK  (0xFUL << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT)
+
+#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT	34
+#define I40E_TXD_QW1_TX_BUF_SZ_MASK	(0x3FFFULL << \
+					 I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+
+#define I40E_TXD_QW1_L2TAG1_SHIFT	48
+#define I40E_TXD_QW1_L2TAG1_MASK	(0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
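+/* Sketch of composing cmd_type_offset_bsz for a data descriptor from the
+ * fields above (illustrative only; endianness conversion omitted):
+ *   qw1 = I40E_TX_DESC_DTYPE_DATA
+ *       | ((u64)cmd    << I40E_TXD_QW1_CMD_SHIFT)
+ *       | ((u64)offset << I40E_TXD_QW1_OFFSET_SHIFT)
+ *       | ((u64)size   << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+ *       | ((u64)l2tag1 << I40E_TXD_QW1_L2TAG1_SHIFT);
+ */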
+
+/* Context descriptors */
+struct i40e_tx_context_desc {
+	__le32 tunneling_params;
+	__le16 l2tag2;
+	__le16 rsvd;
+	__le64 type_cmd_tso_mss;
+};
+
+#define I40E_TXD_CTX_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_CTX_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_CTX_QW1_CMD_SHIFT	4
+#define I40E_TXD_CTX_QW1_CMD_MASK	(0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
+
+enum i40e_tx_ctx_desc_cmd_bits {
+	I40E_TX_CTX_DESC_TSO		= 0x01,
+	I40E_TX_CTX_DESC_TSYN		= 0x02,
+	I40E_TX_CTX_DESC_IL2TAG2	= 0x04,
+	I40E_TX_CTX_DESC_IL2TAG2_IL2H	= 0x08,
+	I40E_TX_CTX_DESC_SWTCH_NOTAG	= 0x00,
+	I40E_TX_CTX_DESC_SWTCH_UPLINK	= 0x10,
+	I40E_TX_CTX_DESC_SWTCH_LOCAL	= 0x20,
+	I40E_TX_CTX_DESC_SWTCH_VSI	= 0x30,
+	I40E_TX_CTX_DESC_SWPE		= 0x40
+};
+
+#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT	30
+#define I40E_TXD_CTX_QW1_TSO_LEN_MASK	(0x3FFFFULL << \
+					 I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
+
+#define I40E_TXD_CTX_QW1_MSS_SHIFT	50
+#define I40E_TXD_CTX_QW1_MSS_MASK	(0x3FFFULL << \
+					 I40E_TXD_CTX_QW1_MSS_SHIFT)
+
+#define I40E_TXD_CTX_QW1_VSI_SHIFT	50
+#define I40E_TXD_CTX_QW1_VSI_MASK	(0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT	0
+#define I40E_TXD_CTX_QW0_EXT_IP_MASK	(0x3ULL << \
+					 I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
+
+enum i40e_tx_ctx_desc_eipt_offload {
+	I40E_TX_CTX_EXT_IP_NONE		= 0x0,
+	I40E_TX_CTX_EXT_IP_IPV6		= 0x1,
+	I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM	= 0x2,
+	I40E_TX_CTX_EXT_IP_IPV4		= 0x3
+};
+
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT	2
+#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK	(0x3FULL << \
+					 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_NATT_SHIFT	9
+#define I40E_TXD_CTX_QW0_NATT_MASK	(0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_UDP_TUNNELING	BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_GRE_TUNNELING	(0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+
+#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT	11
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK	BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+
+#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST	I40E_TXD_CTX_QW0_EIP_NOINC_MASK
+
+#define I40E_TXD_CTX_QW0_NATLEN_SHIFT	12
+#define I40E_TXD_CTX_QW0_NATLEN_MASK	(0x7FULL << \
+					 I40E_TXD_CTX_QW0_NATLEN_SHIFT)
+
+#define I40E_TXD_CTX_QW0_DECTTL_SHIFT	19
+#define I40E_TXD_CTX_QW0_DECTTL_MASK	(0xFULL << \
+					 I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+
+#ifdef X722_SUPPORT
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT	23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK	BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
+#endif
+struct i40e_nop_desc {
+	__le64 rsvd;
+	__le64 dtype_cmd;
+};
+
+#define I40E_TXD_NOP_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_NOP_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_NOP_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_NOP_QW1_CMD_SHIFT	4
+#define I40E_TXD_NOP_QW1_CMD_MASK	(0x7FUL << I40E_TXD_NOP_QW1_CMD_SHIFT)
+
+enum i40e_tx_nop_desc_cmd_bits {
+	/* Note: These are predefined bit offsets */
+	I40E_TX_NOP_DESC_EOP_SHIFT	= 0,
+	I40E_TX_NOP_DESC_RS_SHIFT	= 1,
+	I40E_TX_NOP_DESC_RSV_SHIFT	= 2 /* 5 bits */
+};
+
+struct i40e_filter_program_desc {
+	__le32 qindex_flex_ptype_vsi;
+	__le32 rsvd;
+	__le32 dtype_cmd_cntindex;
+	__le32 fd_id;
+};
+#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT	0
+#define I40E_TXD_FLTR_QW0_QINDEX_MASK	(0x7FFUL << \
+					 I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
+#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT	11
+#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK	(0x7UL << \
+					 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
+#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT	17
+#define I40E_TXD_FLTR_QW0_PCTYPE_MASK	(0x3FUL << \
+					 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
+
+/* Packet Classifier Types for filters */
+enum i40e_filter_pctype {
+#ifdef X722_SUPPORT
+	/* Note: Values 0-28 are reserved for future use.
+	 * Value 29, 30, 32 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP	= 29,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP	= 30,
+#else
+	/* Note: Values 0-30 are reserved for future use */
+#endif
+	I40E_FILTER_PCTYPE_NONF_IPV4_UDP		= 31,
+#ifdef X722_SUPPORT
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK	= 32,
+#else
+	/* Note: Value 32 is reserved for future use */
+#endif
+	I40E_FILTER_PCTYPE_NONF_IPV4_TCP		= 33,
+	I40E_FILTER_PCTYPE_NONF_IPV4_SCTP		= 34,
+	I40E_FILTER_PCTYPE_NONF_IPV4_OTHER		= 35,
+	I40E_FILTER_PCTYPE_FRAG_IPV4			= 36,
+#ifdef X722_SUPPORT
+	/* Note: Values 37-38 are reserved for future use.
+	 * Value 39, 40, 42 are not supported on XL710 and X710.
+	 */
+	I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP	= 39,
+	I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP	= 40,
+#else
+	/* Note: Values 37-40 are reserved for future use */
+#endif
+	I40E_FILTER_PCTYPE_NONF_IPV6_UDP		= 41,
+#ifdef X722_SUPPORT
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK	= 42,
+#endif
+	I40E_FILTER_PCTYPE_NONF_IPV6_TCP		= 43,
+	I40E_FILTER_PCTYPE_NONF_IPV6_SCTP		= 44,
+	I40E_FILTER_PCTYPE_NONF_IPV6_OTHER		= 45,
+	I40E_FILTER_PCTYPE_FRAG_IPV6			= 46,
+	/* Note: Value 47 is reserved for future use */
+	I40E_FILTER_PCTYPE_FCOE_OX			= 48,
+	I40E_FILTER_PCTYPE_FCOE_RX			= 49,
+	I40E_FILTER_PCTYPE_FCOE_OTHER			= 50,
+	/* Note: Values 51-62 are reserved for future use */
+	I40E_FILTER_PCTYPE_L2_PAYLOAD			= 63,
+};
+
+enum i40e_filter_program_desc_dest {
+	I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET		= 0x0,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER	= 0x2,
+};
+
+enum i40e_filter_program_desc_fd_status {
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE			= 0x0,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID		= 0x1,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES	= 0x2,
+	I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES		= 0x3,
+};
+
+#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT	23
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK	BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_DTYPE_SHIFT	0
+#define I40E_TXD_FLTR_QW1_DTYPE_MASK	(0xFUL << I40E_TXD_FLTR_QW1_DTYPE_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CMD_SHIFT	4
+#define I40E_TXD_FLTR_QW1_CMD_MASK	(0xFFFFULL << \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_PCMD_SHIFT	(0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_PCMD_MASK	(0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
+
+enum i40e_filter_program_desc_pcmd {
+	I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE	= 0x1,
+	I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE		= 0x2,
+};
+
+#define I40E_TXD_FLTR_QW1_DEST_SHIFT	(0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_DEST_MASK	(0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT	(0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+
+#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT	(0x9ULL + \
+						 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
+					  I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#ifdef X722_SUPPORT
+
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT	(0xEULL + \
+					 I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK	BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+#endif
+
+#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
+#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK	(0x1FFUL << \
+					 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
+
+enum i40e_filter_type {
+	I40E_FLOW_DIRECTOR_FLTR = 0,
+	I40E_PE_QUAD_HASH_FLTR = 1,
+	I40E_ETHERTYPE_FLTR,
+	I40E_FCOE_CTX_FLTR,
+	I40E_MAC_VLAN_FLTR,
+	I40E_HASH_FLTR
+};
+
+struct i40e_vsi_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 vsi_number;
+	u16 vsis_allocated;
+	u16 vsis_unallocated;
+	u16 flags;
+	u8 pf_num;
+	u8 vf_num;
+	u8 connection_type;
+	struct i40e_aqc_vsi_properties_data info;
+};
+
+struct i40e_veb_context {
+	u16 seid;
+	u16 uplink_seid;
+	u16 veb_number;
+	u16 vebs_allocated;
+	u16 vebs_unallocated;
+	u16 flags;
+	struct i40e_aqc_get_veb_parameters_completion info;
+};
+
+/* Statistics collected by each port, VSI, VEB, and S-channel */
+struct i40e_eth_stats {
+	u64 rx_bytes;			/* gorc */
+	u64 rx_unicast;			/* uprc */
+	u64 rx_multicast;		/* mprc */
+	u64 rx_broadcast;		/* bprc */
+	u64 rx_discards;		/* rdpc */
+	u64 rx_unknown_protocol;	/* rupp */
+	u64 tx_bytes;			/* gotc */
+	u64 tx_unicast;			/* uptc */
+	u64 tx_multicast;		/* mptc */
+	u64 tx_broadcast;		/* bptc */
+	u64 tx_discards;		/* tdpc */
+	u64 tx_errors;			/* tepc */
+};
+
+/* Statistics collected per VEB per TC */
+struct i40e_veb_tc_stats {
+	u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS];
+	u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
+};
+
+/* Statistics collected by the MAC */
+struct i40e_hw_port_stats {
+	/* eth stats collected by the port */
+	struct i40e_eth_stats eth;
+
+	/* additional port specific stats */
+	u64 tx_dropped_link_down;	/* tdold */
+	u64 crc_errors;			/* crcerrs */
+	u64 illegal_bytes;		/* illerrc */
+	u64 error_bytes;		/* errbc */
+	u64 mac_local_faults;		/* mlfc */
+	u64 mac_remote_faults;		/* mrfc */
+	u64 rx_length_errors;		/* rlec */
+	u64 link_xon_rx;		/* lxonrxc */
+	u64 link_xoff_rx;		/* lxoffrxc */
+	u64 priority_xon_rx[8];		/* pxonrxc[8] */
+	u64 priority_xoff_rx[8];	/* pxoffrxc[8] */
+	u64 link_xon_tx;		/* lxontxc */
+	u64 link_xoff_tx;		/* lxofftxc */
+	u64 priority_xon_tx[8];		/* pxontxc[8] */
+	u64 priority_xoff_tx[8];	/* pxofftxc[8] */
+	u64 priority_xon_2_xoff[8];	/* pxon2offc[8] */
+	u64 rx_size_64;			/* prc64 */
+	u64 rx_size_127;		/* prc127 */
+	u64 rx_size_255;		/* prc255 */
+	u64 rx_size_511;		/* prc511 */
+	u64 rx_size_1023;		/* prc1023 */
+	u64 rx_size_1522;		/* prc1522 */
+	u64 rx_size_big;		/* prc9522 */
+	u64 rx_undersize;		/* ruc */
+	u64 rx_fragments;		/* rfc */
+	u64 rx_oversize;		/* roc */
+	u64 rx_jabber;			/* rjc */
+	u64 tx_size_64;			/* ptc64 */
+	u64 tx_size_127;		/* ptc127 */
+	u64 tx_size_255;		/* ptc255 */
+	u64 tx_size_511;		/* ptc511 */
+	u64 tx_size_1023;		/* ptc1023 */
+	u64 tx_size_1522;		/* ptc1522 */
+	u64 tx_size_big;		/* ptc9522 */
+	u64 mac_short_packet_dropped;	/* mspdc */
+	u64 checksum_error;		/* xec */
+	/* flow director stats */
+	u64 fd_atr_match;
+	u64 fd_sb_match;
+	u64 fd_atr_tunnel_match;
+	u32 fd_atr_status;
+	u32 fd_sb_status;
+	/* EEE LPI */
+	u32 tx_lpi_status;
+	u32 rx_lpi_status;
+	u64 tx_lpi_count;		/* etlpic */
+	u64 rx_lpi_count;		/* erlpic */
+};
+
+/* Checksum and Shadow RAM pointers */
+#define I40E_SR_NVM_CONTROL_WORD		0x00
+#define I40E_SR_PCIE_ANALOG_CONFIG_PTR		0x03
+#define I40E_SR_PHY_ANALOG_CONFIG_PTR		0x04
+#define I40E_SR_OPTION_ROM_PTR			0x05
+#define I40E_SR_RO_PCIR_REGS_AUTO_LOAD_PTR	0x06
+#define I40E_SR_AUTO_GENERATED_POINTERS_PTR	0x07
+#define I40E_SR_PCIR_REGS_AUTO_LOAD_PTR		0x08
+#define I40E_SR_EMP_GLOBAL_MODULE_PTR		0x09
+#define I40E_SR_RO_PCIE_LCB_PTR			0x0A
+#define I40E_SR_EMP_IMAGE_PTR			0x0B
+#define I40E_SR_PE_IMAGE_PTR			0x0C
+#define I40E_SR_CSR_PROTECTED_LIST_PTR		0x0D
+#define I40E_SR_MNG_CONFIG_PTR			0x0E
+#define I40E_SR_EMP_MODULE_PTR			0x0F
+#define I40E_SR_PBA_FLAGS			0x15
+#define I40E_SR_PBA_BLOCK_PTR			0x16
+#define I40E_SR_BOOT_CONFIG_PTR			0x17
+#define I40E_NVM_OEM_VER_OFF			0x83
+#define I40E_SR_NVM_DEV_STARTER_VERSION		0x18
+#define I40E_SR_NVM_WAKE_ON_LAN			0x19
+#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR	0x27
+#define I40E_SR_PERMANENT_SAN_MAC_ADDRESS_PTR	0x28
+#define I40E_SR_NVM_MAP_VERSION			0x29
+#define I40E_SR_NVM_IMAGE_VERSION		0x2A
+#define I40E_SR_NVM_STRUCTURE_VERSION		0x2B
+#define I40E_SR_NVM_EETRACK_LO			0x2D
+#define I40E_SR_NVM_EETRACK_HI			0x2E
+#define I40E_SR_VPD_PTR				0x2F
+#define I40E_SR_PXE_SETUP_PTR			0x30
+#define I40E_SR_PXE_CONFIG_CUST_OPTIONS_PTR	0x31
+#define I40E_SR_NVM_ORIGINAL_EETRACK_LO		0x34
+#define I40E_SR_NVM_ORIGINAL_EETRACK_HI		0x35
+#define I40E_SR_SW_ETHERNET_MAC_ADDRESS_PTR	0x37
+#define I40E_SR_POR_REGS_AUTO_LOAD_PTR		0x38
+#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR		0x3A
+#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR	0x3B
+#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR	0x3C
+#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR		0x3E
+#define I40E_SR_SW_CHECKSUM_WORD		0x3F
+#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR	0x40
+#define I40E_SR_4TH_FREE_PROVISION_AREA_PTR	0x42
+#define I40E_SR_3RD_FREE_PROVISION_AREA_PTR	0x44
+#define I40E_SR_2ND_FREE_PROVISION_AREA_PTR	0x46
+#define I40E_SR_EMP_SR_SETTINGS_PTR		0x48
+#define I40E_SR_FEATURE_CONFIGURATION_PTR	0x49
+#define I40E_SR_CONFIGURATION_METADATA_PTR	0x4D
+#define I40E_SR_IMMEDIATE_VALUES_PTR		0x4E
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define I40E_SR_VPD_MODULE_MAX_SIZE		1024
+#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE	1024
+#define I40E_SR_CONTROL_WORD_1_SHIFT		0x06
+#define I40E_SR_CONTROL_WORD_1_MASK	(0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
+
+/* Shadow RAM related */
+#define I40E_SR_SECTOR_SIZE_IN_WORDS	0x800
+#define I40E_SR_BUF_ALIGNMENT		4096
+#define I40E_SR_WORDS_IN_1KB		512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define I40E_SR_SW_CHECKSUM_BASE	0xBABA
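+/* Equivalently (illustration): the stored checksum word is
+ *   (u16)(I40E_SR_SW_CHECKSUM_BASE - sum_of_all_other_words).
+ */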
+
+#define I40E_SRRD_SRCTL_ATTEMPTS	100000
+
+enum i40e_switch_element_types {
+	I40E_SWITCH_ELEMENT_TYPE_MAC	= 1,
+	I40E_SWITCH_ELEMENT_TYPE_PF	= 2,
+	I40E_SWITCH_ELEMENT_TYPE_VF	= 3,
+	I40E_SWITCH_ELEMENT_TYPE_EMP	= 4,
+	I40E_SWITCH_ELEMENT_TYPE_BMC	= 6,
+	I40E_SWITCH_ELEMENT_TYPE_PE	= 16,
+	I40E_SWITCH_ELEMENT_TYPE_VEB	= 17,
+	I40E_SWITCH_ELEMENT_TYPE_PA	= 18,
+	I40E_SWITCH_ELEMENT_TYPE_VSI	= 19,
+};
+
+/* Supported EtherType filters */
+enum i40e_ether_type_index {
+	I40E_ETHER_TYPE_1588		= 0,
+	I40E_ETHER_TYPE_FIP		= 1,
+	I40E_ETHER_TYPE_OUI_EXTENDED	= 2,
+	I40E_ETHER_TYPE_MAC_CONTROL	= 3,
+	I40E_ETHER_TYPE_LLDP		= 4,
+	I40E_ETHER_TYPE_EVB_PROTOCOL1	= 5,
+	I40E_ETHER_TYPE_EVB_PROTOCOL2	= 6,
+	I40E_ETHER_TYPE_QCN_CNM		= 7,
+	I40E_ETHER_TYPE_8021X		= 8,
+	I40E_ETHER_TYPE_ARP		= 9,
+	I40E_ETHER_TYPE_RSV1		= 10,
+	I40E_ETHER_TYPE_RSV2		= 11,
+};
+
+/* Filter context base size is 1K */
+#define I40E_HASH_FILTER_BASE_SIZE	1024
+/* Supported Hash filter values */
+enum i40e_hash_filter_size {
+	I40E_HASH_FILTER_SIZE_1K	= 0,
+	I40E_HASH_FILTER_SIZE_2K	= 1,
+	I40E_HASH_FILTER_SIZE_4K	= 2,
+	I40E_HASH_FILTER_SIZE_8K	= 3,
+	I40E_HASH_FILTER_SIZE_16K	= 4,
+	I40E_HASH_FILTER_SIZE_32K	= 5,
+	I40E_HASH_FILTER_SIZE_64K	= 6,
+	I40E_HASH_FILTER_SIZE_128K	= 7,
+	I40E_HASH_FILTER_SIZE_256K	= 8,
+	I40E_HASH_FILTER_SIZE_512K	= 9,
+	I40E_HASH_FILTER_SIZE_1M	= 10,
+};
+
+/* DMA context base size is 0.5K */
+#define I40E_DMA_CNTX_BASE_SIZE		512
+/* Supported DMA context values */
+enum i40e_dma_cntx_size {
+	I40E_DMA_CNTX_SIZE_512		= 0,
+	I40E_DMA_CNTX_SIZE_1K		= 1,
+	I40E_DMA_CNTX_SIZE_2K		= 2,
+	I40E_DMA_CNTX_SIZE_4K		= 3,
+	I40E_DMA_CNTX_SIZE_8K		= 4,
+	I40E_DMA_CNTX_SIZE_16K		= 5,
+	I40E_DMA_CNTX_SIZE_32K		= 6,
+	I40E_DMA_CNTX_SIZE_64K		= 7,
+	I40E_DMA_CNTX_SIZE_128K		= 8,
+	I40E_DMA_CNTX_SIZE_256K		= 9,
+};
+
+/* Supported Hash look up table (LUT) sizes */
+enum i40e_hash_lut_size {
+	I40E_HASH_LUT_SIZE_128		= 0,
+	I40E_HASH_LUT_SIZE_512		= 1,
+};
+
+/* Structure to hold a per PF filter control settings */
+struct i40e_filter_control_settings {
+	/* number of PE Quad Hash filter buckets */
+	enum i40e_hash_filter_size pe_filt_num;
+	/* number of PE Quad Hash contexts */
+	enum i40e_dma_cntx_size pe_cntx_num;
+	/* number of FCoE filter buckets */
+	enum i40e_hash_filter_size fcoe_filt_num;
+	/* number of FCoE DDP contexts */
+	enum i40e_dma_cntx_size fcoe_cntx_num;
+	/* size of the Hash LUT */
+	enum i40e_hash_lut_size	hash_lut_size;
+	/* enable FDIR filters for PF and its VFs */
+	bool enable_fdir;
+	/* enable Ethertype filters for PF and its VFs */
+	bool enable_ethtype;
+	/* enable MAC/VLAN filters for PF and its VFs */
+	bool enable_macvlan;
+};
+
+/* Structure to hold device level control filter counts */
+struct i40e_control_filter_stats {
+	u16 mac_etype_used;   /* Used perfect match MAC/EtherType filters */
+	u16 etype_used;       /* Used perfect EtherType filters */
+	u16 mac_etype_free;   /* Un-used perfect match MAC/EtherType filters */
+	u16 etype_free;       /* Un-used perfect EtherType filters */
+};
+
+enum i40e_reset_type {
+	I40E_RESET_POR		= 0,
+	I40E_RESET_CORER	= 1,
+	I40E_RESET_GLOBR	= 2,
+	I40E_RESET_EMPR		= 3,
+};
+
+/* IEEE 802.1AB LLDP Agent Variables from NVM */
+#define I40E_NVM_LLDP_CFG_PTR		0xD
+struct i40e_lldp_variables {
+	u16 length;
+	u16 adminstatus;
+	u16 msgfasttx;
+	u16 msgtxinterval;
+	u16 txparams;
+	u16 timers;
+	u16 crc8;
+};
+
+/* Offsets into Alternate RAM */
+#define I40E_ALT_STRUCT_FIRST_PF_OFFSET		0   /* in dwords */
+#define I40E_ALT_STRUCT_DWORDS_PER_PF		64   /* in dwords */
+#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET	0xD  /* in dwords */
+#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET	0xC  /* in dwords */
+#define I40E_ALT_STRUCT_MIN_BW_OFFSET		0xE  /* in dwords */
+#define I40E_ALT_STRUCT_MAX_BW_OFFSET		0xF  /* in dwords */
+
+/* Alternate RAM Bandwidth Masks */
+#define I40E_ALT_BW_VALUE_MASK		0xFF
+#define I40E_ALT_BW_RELATIVE_MASK	0x40000000
+#define I40E_ALT_BW_VALID_MASK		0x80000000
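+
+/* Editor's note: an illustrative decode of a bandwidth dword using the
+ * masks above (`bw_word' is a hypothetical raw value read from the
+ * alternate RAM):
+ *
+ *	u32  bw       = bw_word & I40E_ALT_BW_VALUE_MASK;
+ *	bool relative = (bw_word & I40E_ALT_BW_RELATIVE_MASK) != 0;
+ *	bool valid    = (bw_word & I40E_ALT_BW_VALID_MASK) != 0;
+ */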
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512	0x00010000
+#endif /* _I40E_TYPE_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_type.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/i40e_virtchnl.h
===================================================================
--- trunk/sys/dev/ixl/i40e_virtchnl.h	                        (rev 0)
+++ trunk/sys/dev/ixl/i40e_virtchnl.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,379 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/i40e_virtchnl.h 292100 2015-12-11 13:08:38Z smh $*/
+
+#ifndef _I40E_VIRTCHNL_H_
+#define _I40E_VIRTCHNL_H_
+
+#include "i40e_type.h"
+
+/* Description:
+ * This header file describes the VF-PF communication protocol used
+ * by the various i40e drivers.
+ *
+ * Admin queue buffer usage:
+ * desc->opcode is always i40e_aqc_opc_send_msg_to_pf
+ * flags, retval, datalen, and data addr are all used normally.
+ * Firmware copies the cookie fields when sending messages between the PF and
+ * VF, but uses all other fields internally. Due to this limitation, we
+ * must send all messages as "indirect", i.e. using an external buffer.
+ *
+ * All the VSI indexes are relative to the VF. Each VF can have a maximum
+ * of three VSIs. All the queue indexes are relative to the VSI. Each VF
+ * can have a maximum of sixteen queues across all of its VSIs.
+ *
+ * The PF is required to return a status code in v_retval for all messages
+ * except RESET_VF, which does not require any response. The return value
+ * is of type i40e_status_code, defined in i40e_type.h.
+ *
+ * In general, VF driver initialization should roughly follow the order of these
+ * opcodes. The VF driver must first validate the API version of the PF driver,
+ * then request a reset, then get resources, then configure queues and
+ * interrupts. After these operations are complete, the VF driver may start
+ * its queues, optionally add MAC and VLAN filters, and process traffic.
+ */
+
+/* Opcodes for VF-PF communication. These are placed in the v_opcode field
+ * of the virtchnl_msg structure.
+ */
+enum i40e_virtchnl_ops {
+/* The PF sends status change events to VFs using
+ * the I40E_VIRTCHNL_OP_EVENT opcode.
+ * VFs send requests to the PF using the other ops.
+ */
+	I40E_VIRTCHNL_OP_UNKNOWN = 0,
+	I40E_VIRTCHNL_OP_VERSION = 1, /* must ALWAYS be 1 */
+	I40E_VIRTCHNL_OP_RESET_VF = 2,
+	I40E_VIRTCHNL_OP_GET_VF_RESOURCES = 3,
+	I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE = 4,
+	I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE = 5,
+	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES = 6,
+	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP = 7,
+	I40E_VIRTCHNL_OP_ENABLE_QUEUES = 8,
+	I40E_VIRTCHNL_OP_DISABLE_QUEUES = 9,
+	I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS = 10,
+	I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS = 11,
+	I40E_VIRTCHNL_OP_ADD_VLAN = 12,
+	I40E_VIRTCHNL_OP_DEL_VLAN = 13,
+	I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
+	I40E_VIRTCHNL_OP_GET_STATS = 15,
+	I40E_VIRTCHNL_OP_FCOE = 16,
+	I40E_VIRTCHNL_OP_EVENT = 17,
+};
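+
+/* Editor's note: a sketch of the initialization order described above,
+ * expressed as the opcode sequence a VF driver would issue (error
+ * handling omitted; the real state machine lives in ixlvc.c and
+ * ixlv_vc_mgr.h):
+ *
+ *	I40E_VIRTCHNL_OP_VERSION           // negotiate the API version
+ *	I40E_VIRTCHNL_OP_RESET_VF          // request a reset
+ *	I40E_VIRTCHNL_OP_GET_VF_RESOURCES  // learn VSIs/queues/vectors
+ *	I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES // program TX/RX rings
+ *	I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP    // map queues to vectors
+ *	I40E_VIRTCHNL_OP_ENABLE_QUEUES     // start traffic
+ */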
+
+/* Virtual channel message descriptor. This overlays the admin queue
+ * descriptor. All other data is passed in external buffers.
+ */
+
+struct i40e_virtchnl_msg {
+	u8 pad[8];			 /* AQ flags/opcode/len/retval fields */
+	enum i40e_virtchnl_ops v_opcode; /* avoid confusion with desc->opcode */
+	enum i40e_status_code v_retval;  /* ditto for desc->retval */
+	u32 vfid;			 /* used by PF when sending to VF */
+};
+
+/* Message descriptions and data structures.*/
+
+/* I40E_VIRTCHNL_OP_VERSION
+ * VF posts its version number to the PF. PF responds with its version number
+ * in the same format, along with a return code.
+ * Reply from PF has its major/minor versions also in param0 and param1.
+ * If there is a major version mismatch, then the VF cannot operate.
+ * If there is a minor version mismatch, then the VF can operate but should
+ * add a warning to the system log.
+ *
+ * This enum element MUST always be specified as == 1, regardless of other
+ * changes in the API. The PF must always respond to this message without
+ * error regardless of version mismatch.
+ */
+#define I40E_VIRTCHNL_VERSION_MAJOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR		1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS	0
+
+struct i40e_virtchnl_version_info {
+	u32 major;
+	u32 minor;
+};
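+
+/* Editor's note: a minimal sketch of the mismatch rule above (`pf_ver'
+ * is a hypothetical reply received from the PF):
+ *
+ *	if (pf_ver.major != I40E_VIRTCHNL_VERSION_MAJOR)
+ *		return (EINVAL);	// VF cannot operate
+ *	if (pf_ver.minor != I40E_VIRTCHNL_VERSION_MINOR)
+ *		log(LOG_WARNING, "virtchnl minor version mismatch\n");
+ */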
+
+/* I40E_VIRTCHNL_OP_RESET_VF
+ * VF sends this request to the PF with no parameters.
+ * The PF does NOT respond; the VF driver must delay, then poll the
+ * VFGEN_RSTAT register until reset completion is indicated. The admin
+ * queue must be reinitialized after this operation.
+ *
+ * When reset is complete, PF must ensure that all queues in all VSIs associated
+ * with the VF are stopped, all queue configurations in the HMC are set to 0,
+ * and all MAC and VLAN filters (except the default MAC address) on all VSIs
+ * are cleared.
+ */
+
+/* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
+ * PF responds with an indirect message containing
+ * i40e_virtchnl_vf_resource and one or more
+ * i40e_virtchnl_vsi_resource structures.
+ */
+
+struct i40e_virtchnl_vsi_resource {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	enum i40e_vsi_type vsi_type;
+	u16 qset_handle;
+	u8 default_mac_addr[I40E_ETH_LENGTH_OF_ADDRESS];
+};
+/* VF offload flags */
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2		0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP		0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE		0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ		0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG	0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN		0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING	0x00020000
+
+struct i40e_virtchnl_vf_resource {
+	u16 num_vsis;
+	u16 num_queue_pairs;
+	u16 max_vectors;
+	u16 max_mtu;
+
+	u32 vf_offload_flags;
+	u32 max_fcoe_contexts;
+	u32 max_fcoe_filters;
+
+	struct i40e_virtchnl_vsi_resource vsi_res[1];
+};
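+
+/* Editor's note: vsi_res[1] is the pre-C99 flexible-array idiom; the
+ * PF reply actually carries num_vsis entries, so the receive buffer is
+ * sized as in this sketch (`n_vsis' assumed):
+ *
+ *	size_t len = sizeof(struct i40e_virtchnl_vf_resource) +
+ *	    (n_vsis - 1) * sizeof(struct i40e_virtchnl_vsi_resource);
+ *
+ * The same pattern applies to the qpair[], vecmap[], list[], and
+ * vlan_id[] trailing arrays in the structures below.
+ */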
+
+/* I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE
+ * VF sends this message to set up parameters for one TX queue.
+ * External data buffer contains one instance of i40e_virtchnl_txq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Tx queue config info */
+struct i40e_virtchnl_txq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u16 ring_len;		/* number of descriptors, multiple of 8 */
+	u16 headwb_enabled;
+	u64 dma_ring_addr;
+	u64 dma_headwb_addr;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE
+ * VF sends this message to set up parameters for one RX queue.
+ * External data buffer contains one instance of i40e_virtchnl_rxq_info.
+ * PF configures requested queue and returns a status code.
+ */
+
+/* Rx queue config info */
+struct i40e_virtchnl_rxq_info {
+	u16 vsi_id;
+	u16 queue_id;
+	u32 ring_len;		/* number of descriptors, multiple of 32 */
+	u16 hdr_size;
+	u16 splithdr_enabled;
+	u32 databuffer_size;
+	u32 max_pkt_size;
+	u64 dma_ring_addr;
+	enum i40e_hmc_obj_rx_hsplit_0 rx_split_pos;
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES
+ * VF sends this message to set parameters for all active TX and RX queues
+ * associated with the specified VSI.
+ * PF configures queues and returns status.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the VSI, an error is returned and no queues are configured.
+ */
+struct i40e_virtchnl_queue_pair_info {
+	/* NOTE: vsi_id and queue_id should be identical for both queues. */
+	struct i40e_virtchnl_txq_info txq;
+	struct i40e_virtchnl_rxq_info rxq;
+};
+
+struct i40e_virtchnl_vsi_queue_config_info {
+	u16 vsi_id;
+	u16 num_queue_pairs;
+	struct i40e_virtchnl_queue_pair_info qpair[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP
+ * VF uses this message to map vectors to queues.
+ * The rxq_map and txq_map fields are bitmaps used to indicate which queues
+ * are to be associated with the specified vector.
+ * The "other" causes are always mapped to vector 0.
+ * PF configures interrupt mapping and returns status.
+ */
+struct i40e_virtchnl_vector_map {
+	u16 vsi_id;
+	u16 vector_id;
+	u16 rxq_map;
+	u16 txq_map;
+	u16 rxitr_idx;
+	u16 txitr_idx;
+};
+
+struct i40e_virtchnl_irq_map_info {
+	u16 num_vectors;
+	struct i40e_virtchnl_vector_map vecmap[1];
+};
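+
+/* Editor's note: illustrative use of the map above -- steering RX and
+ * TX queue 0 of a VSI to MSI-X vector 1 (a sketch; values assumed):
+ *
+ *	struct i40e_virtchnl_vector_map vm = {
+ *		.vsi_id    = vsi_id,
+ *		.vector_id = 1,
+ *		.rxq_map   = (1 << 0),	// bitmap: queue 0
+ *		.txq_map   = (1 << 0),
+ *	};
+ */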
+
+/* I40E_VIRTCHNL_OP_ENABLE_QUEUES
+ * I40E_VIRTCHNL_OP_DISABLE_QUEUES
+ * VF sends these messages to enable or disable TX/RX queue pairs.
+ * The queues fields are bitmaps indicating which queues to act upon.
+ * (Currently, we only support 16 queues per VF, but we make the field
+ * u32 to allow for expansion.)
+ * PF performs requested action and returns status.
+ */
+struct i40e_virtchnl_queue_select {
+	u16 vsi_id;
+	u16 pad;
+	u32 rx_queues;
+	u32 tx_queues;
+};
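+
+/* Editor's note: e.g. enabling the first four queue pairs of a VSI
+ * (a sketch; `vsi_id' assumed):
+ *
+ *	struct i40e_virtchnl_queue_select qs = {
+ *		.vsi_id    = vsi_id,
+ *		.rx_queues = 0xf,	// queues 0-3
+ *		.tx_queues = 0xf,
+ *	};
+ */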
+
+/* I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS
+ * VF sends this message in order to add one or more unicast or multicast
+ * address filters for the specified VSI.
+ * PF adds the filters and returns status.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS
+ * VF sends this message in order to remove one or more unicast or multicast
+ * filters for the specified VSI.
+ * PF removes the filters and returns status.
+ */
+
+struct i40e_virtchnl_ether_addr {
+	u8 addr[I40E_ETH_LENGTH_OF_ADDRESS];
+	u8 pad[2];
+};
+
+struct i40e_virtchnl_ether_addr_list {
+	u16 vsi_id;
+	u16 num_elements;
+	struct i40e_virtchnl_ether_addr list[1];
+};
+
+/* I40E_VIRTCHNL_OP_ADD_VLAN
+ * VF sends this message to add one or more VLAN tag filters for receives.
+ * PF adds the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+/* I40E_VIRTCHNL_OP_DEL_VLAN
+ * VF sends this message to remove one or more VLAN tag filters for receives.
+ * PF removes the filters and returns status.
+ * If a port VLAN is configured by the PF, this operation will return an
+ * error to the VF.
+ */
+
+struct i40e_virtchnl_vlan_filter_list {
+	u16 vsi_id;
+	u16 num_elements;
+	u16 vlan_id[1];
+};
+
+/* I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * VF sends VSI id and flags.
+ * PF returns status code in retval.
+ * Note: we assume that broadcast accept mode is always enabled.
+ */
+struct i40e_virtchnl_promisc_info {
+	u16 vsi_id;
+	u16 flags;
+};
+
+#define I40E_FLAG_VF_UNICAST_PROMISC	0x00000001
+#define I40E_FLAG_VF_MULTICAST_PROMISC	0x00000002
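+
+/* Editor's note: e.g. requesting both promiscuous modes (a sketch;
+ * `vsi_id' assumed):
+ *
+ *	struct i40e_virtchnl_promisc_info pi = {
+ *		.vsi_id = vsi_id,
+ *		.flags  = I40E_FLAG_VF_UNICAST_PROMISC |
+ *		    I40E_FLAG_VF_MULTICAST_PROMISC,
+ *	};
+ */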
+
+/* I40E_VIRTCHNL_OP_GET_STATS
+ * VF sends this message to request stats for the selected VSI. VF uses
+ * the i40e_virtchnl_queue_select struct to specify the VSI. The queue_id
+ * field is ignored by the PF.
+ *
+ * PF replies with struct i40e_eth_stats in an external buffer.
+ */
+
+/* I40E_VIRTCHNL_OP_EVENT
+ * PF sends this message to inform the VF driver of events that may affect it.
+ * No direct response is expected from the VF, though it may generate other
+ * messages in response to this one.
+ */
+enum i40e_virtchnl_event_codes {
+	I40E_VIRTCHNL_EVENT_UNKNOWN = 0,
+	I40E_VIRTCHNL_EVENT_LINK_CHANGE,
+	I40E_VIRTCHNL_EVENT_RESET_IMPENDING,
+	I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+};
+#define I40E_PF_EVENT_SEVERITY_INFO		0
+#define I40E_PF_EVENT_SEVERITY_ATTENTION	1
+#define I40E_PF_EVENT_SEVERITY_ACTION_REQUIRED	2
+#define I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM	255
+
+struct i40e_virtchnl_pf_event {
+	enum i40e_virtchnl_event_codes event;
+	union {
+		struct {
+			enum i40e_aq_link_speed link_speed;
+			bool link_status;
+		} link_event;
+	} event_data;
+
+	int severity;
+};
+
+/* VF reset states - these are written into the RSTAT register:
+ * I40E_VFGEN_RSTAT1 on the PF,
+ * I40E_VFGEN_RSTAT on the VF.
+ * When the PF initiates a reset, it writes 0.
+ * When the reset is complete, it writes 1.
+ * When the PF detects that the VF has recovered, it writes 2.
+ * The VF checks this register periodically to determine if a reset has
+ * occurred, then polls it to know when the reset is complete.
+ * If either the PF or the VF reads the register while the hardware is
+ * in a reset state, it will return 0xDEADBEEF, which, when masked,
+ * yields 3.
+ */
+enum i40e_vfr_states {
+	I40E_VFR_INPROGRESS = 0,
+	I40E_VFR_COMPLETED,
+	I40E_VFR_VFACTIVE,
+	I40E_VFR_UNKNOWN,
+};
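+
+/* Editor's note: a sketch of the VF-side polling loop described above
+ * (register helpers and the RSTAT state mask are assumed to follow
+ * i40e_register.h; masking 0xDEADBEEF with 0x3 gives I40E_VFR_UNKNOWN):
+ *
+ *	u32 rstat;
+ *	do {
+ *		i40e_msec_delay(10);
+ *		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+ *		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ *	} while (rstat == I40E_VFR_INPROGRESS);
+ */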
+
+#endif /* _I40E_VIRTCHNL_H_ */


Property changes on: trunk/sys/dev/ixl/i40e_virtchnl.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/if_ixl.c
===================================================================
--- trunk/sys/dev/ixl/if_ixl.c	                        (rev 0)
+++ trunk/sys/dev/ixl/if_ixl.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,6695 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/if_ixl.c 303107 2016-07-20 18:26:48Z sbruno $*/
+
+#ifndef IXL_STANDALONE_BUILD
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#endif
+
+#include "ixl.h"
+#include "ixl_pf.h"
+
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+/*********************************************************************
+ *  Driver version
+ *********************************************************************/
+char ixl_driver_version[] = "1.4.3";
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select the devices to attach to.
+ *  The last field stores an index into ixl_strings.
+ *  The last entry must be all 0s.
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixl_vendor_info_t ixl_vendor_info_array[] =
+{
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0},
+#ifdef X722_SUPPORT
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
+#endif
+	/* required last entry */
+	{0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings
+ *********************************************************************/
+
+static char    *ixl_strings[] = {
+	"Intel(R) Ethernet Connection XL710 Driver"
+};
+
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int      ixl_probe(device_t);
+static int      ixl_attach(device_t);
+static int      ixl_detach(device_t);
+static int      ixl_shutdown(device_t);
+static int	ixl_get_hw_capabilities(struct ixl_pf *);
+static void	ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
+static int      ixl_ioctl(struct ifnet *, u_long, caddr_t);
+static void	ixl_init(void *);
+static void	ixl_init_locked(struct ixl_pf *);
+static void     ixl_stop(struct ixl_pf *);
+static void     ixl_media_status(struct ifnet *, struct ifmediareq *);
+static int      ixl_media_change(struct ifnet *);
+static void     ixl_update_link_status(struct ixl_pf *);
+static int      ixl_allocate_pci_resources(struct ixl_pf *);
+static u16	ixl_get_bus_info(struct i40e_hw *, device_t);
+static int	ixl_setup_stations(struct ixl_pf *);
+static int	ixl_switch_config(struct ixl_pf *);
+static int	ixl_initialize_vsi(struct ixl_vsi *);
+static int	ixl_assign_vsi_msix(struct ixl_pf *);
+static int	ixl_assign_vsi_legacy(struct ixl_pf *);
+static int	ixl_init_msix(struct ixl_pf *);
+static void	ixl_configure_msix(struct ixl_pf *);
+static void	ixl_configure_itr(struct ixl_pf *);
+static void	ixl_configure_legacy(struct ixl_pf *);
+static void	ixl_free_pci_resources(struct ixl_pf *);
+static void	ixl_local_timer(void *);
+static int	ixl_setup_interface(device_t, struct ixl_vsi *);
+static void	ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
+static void	ixl_config_rss(struct ixl_vsi *);
+static void	ixl_set_queue_rx_itr(struct ixl_queue *);
+static void	ixl_set_queue_tx_itr(struct ixl_queue *);
+static int	ixl_set_advertised_speeds(struct ixl_pf *, int);
+
+static int	ixl_enable_rings(struct ixl_vsi *);
+static int	ixl_disable_rings(struct ixl_vsi *);
+static void	ixl_enable_intr(struct ixl_vsi *);
+static void	ixl_disable_intr(struct ixl_vsi *);
+static void	ixl_disable_rings_intr(struct ixl_vsi *);
+
+static void     ixl_enable_adminq(struct i40e_hw *);
+static void     ixl_disable_adminq(struct i40e_hw *);
+static void     ixl_enable_queue(struct i40e_hw *, int);
+static void     ixl_disable_queue(struct i40e_hw *, int);
+static void     ixl_enable_legacy(struct i40e_hw *);
+static void     ixl_disable_legacy(struct i40e_hw *);
+
+static void     ixl_set_promisc(struct ixl_vsi *);
+static void     ixl_add_multi(struct ixl_vsi *);
+static void     ixl_del_multi(struct ixl_vsi *);
+static void	ixl_register_vlan(void *, struct ifnet *, u16);
+static void	ixl_unregister_vlan(void *, struct ifnet *, u16);
+static void	ixl_setup_vlan_filters(struct ixl_vsi *);
+
+static void	ixl_init_filters(struct ixl_vsi *);
+static void	ixl_reconfigure_filters(struct ixl_vsi *vsi);
+static void	ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
+static void	ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
+static void	ixl_add_hw_filters(struct ixl_vsi *, int, int);
+static void	ixl_del_hw_filters(struct ixl_vsi *, int);
+static struct ixl_mac_filter *
+		ixl_find_filter(struct ixl_vsi *, u8 *, s16);
+static void	ixl_add_mc_filter(struct ixl_vsi *, u8 *);
+static void	ixl_free_mac_filters(struct ixl_vsi *vsi);
+
+
+/* Sysctl debug interface */
+#ifdef IXL_DEBUG_SYSCTL
+static int	ixl_debug_info(SYSCTL_HANDLER_ARGS);
+static void	ixl_print_debug_info(struct ixl_pf *);
+#endif
+
+/* The MSI/X Interrupt handlers */
+static void	ixl_intr(void *);
+static void	ixl_msix_que(void *);
+static void	ixl_msix_adminq(void *);
+static void	ixl_handle_mdd_event(struct ixl_pf *);
+
+/* Deferred interrupt tasklets */
+static void	ixl_do_adminq(void *, int);
+
+/* Sysctl handlers */
+static int	ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int	ixl_set_advertise(SYSCTL_HANDLER_ARGS);
+static int	ixl_current_speed(SYSCTL_HANDLER_ARGS);
+static int	ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
+
+/* Statistics */
+static void     ixl_add_hw_stats(struct ixl_pf *);
+static void	ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
+		    struct sysctl_oid_list *, struct i40e_hw_port_stats *);
+static void	ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
+		    struct sysctl_oid_list *,
+		    struct i40e_eth_stats *);
+static void	ixl_update_stats_counters(struct ixl_pf *);
+static void	ixl_update_eth_stats(struct ixl_vsi *);
+static void	ixl_update_vsi_stats(struct ixl_vsi *);
+static void	ixl_pf_reset_stats(struct ixl_pf *);
+static void	ixl_vsi_reset_stats(struct ixl_vsi *);
+static void	ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
+		    u64 *, u64 *);
+static void	ixl_stat_update32(struct i40e_hw *, u32, bool,
+		    u64 *, u64 *);
+
+#ifdef IXL_DEBUG_SYSCTL
+static int 	ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
+static int	ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
+static int	ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
+static int	ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
+static int	ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
+#endif
+
+#ifdef PCI_IOV
+static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
+
+static int	ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t*);
+static void	ixl_uninit_iov(device_t dev);
+static int	ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
+
+static void	ixl_handle_vf_msg(struct ixl_pf *,
+		    struct i40e_arq_event_info *);
+static void	ixl_handle_vflr(void *arg, int pending);
+
+static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+#endif
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixl_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, ixl_probe),
+	DEVMETHOD(device_attach, ixl_attach),
+	DEVMETHOD(device_detach, ixl_detach),
+	DEVMETHOD(device_shutdown, ixl_shutdown),
+#ifdef PCI_IOV
+	DEVMETHOD(pci_init_iov, ixl_init_iov),
+	DEVMETHOD(pci_uninit_iov, ixl_uninit_iov),
+	DEVMETHOD(pci_add_vf, ixl_add_vf),
+#endif
+	{0, 0}
+};
+
+static driver_t ixl_driver = {
+	"ixl", ixl_methods, sizeof(struct ixl_pf),
+};
+
+devclass_t ixl_devclass;
+DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0);
+
+MODULE_DEPEND(ixl, pci, 1, 1, 1);
+MODULE_DEPEND(ixl, ether, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(ixl, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
+/*
+** Global reset mutex
+*/
+static struct mtx ixl_reset_mtx;
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0,
+                   "IXL driver parameters");
+
+/*
+ * MSI-X should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */
+static int ixl_enable_msix = 1;
+TUNABLE_INT("hw.ixl.enable_msix", &ixl_enable_msix);
+SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
+    "Enable MSI-X interrupts");
+
+/*
+** Number of descriptors per ring:
+**   - TX and RX are the same size
+*/
+static int ixl_ringsz = DEFAULT_RING;
+TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
+SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
+    &ixl_ringsz, 0, "Descriptor Ring Size");
+
+/*
+** This can be set manually; if left at 0, the
+** number of queues will be calculated based on
+** the CPUs and MSI-X vectors available.
+*/
+int ixl_max_queues = 0;
+TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
+SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
+    &ixl_max_queues, 0, "Number of Queues");
+
+/*
+** Controls for Interrupt Throttling 
+**	- true/false for dynamic adjustment
+** 	- default values for static ITR
+*/
+int ixl_dynamic_rx_itr = 0;
+TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
+    &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
+
+int ixl_dynamic_tx_itr = 0;
+TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
+    &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
+
+int ixl_rx_itr = IXL_ITR_8K;
+TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
+    &ixl_rx_itr, 0, "RX Interrupt Rate");
+
+int ixl_tx_itr = IXL_ITR_4K;
+TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
+SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
+    &ixl_tx_itr, 0, "TX Interrupt Rate");
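+
+/*
+** Editor's note: the tunables above are read at boot from
+** /boot/loader.conf, e.g. (illustrative values):
+**
+**	hw.ixl.enable_msix=0
+**	hw.ixl.ringsz=2048
+**	hw.ixl.max_queues=4
+**	hw.ixl.dynamic_rx_itr=1
+*/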
+
+#ifdef IXL_FDIR
+static int ixl_enable_fdir = 1;
+TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
+/* Rate at which we sample */
+int ixl_atr_rate = 20;
+TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
+#endif
+
+#ifdef DEV_NETMAP
+#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
+#include <dev/netmap/if_ixl_netmap.h>
+#endif /* DEV_NETMAP */
+
+static char *ixl_fc_string[6] = {
+	"None",
+	"Rx",
+	"Tx",
+	"Full",
+	"Priority",
+	"Default"
+};
+
+static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
+
+static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
+    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  ixl_probe determines if the driver should be loaded on
+ *  the hardware based on PCI vendor/device id of the device.
+ *
+ *  return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixl_probe(device_t dev)
+{
+	ixl_vendor_info_t *ent;
+
+	u16	pci_vendor_id, pci_device_id;
+	u16	pci_subvendor_id, pci_subdevice_id;
+	char	device_name[256];
+	static bool lock_init = FALSE;
+
+	INIT_DEBUGOUT("ixl_probe: begin");
+
+	pci_vendor_id = pci_get_vendor(dev);
+	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
+		return (ENXIO);
+
+	pci_device_id = pci_get_device(dev);
+	pci_subvendor_id = pci_get_subvendor(dev);
+	pci_subdevice_id = pci_get_subdevice(dev);
+
+	ent = ixl_vendor_info_array;
+	while (ent->vendor_id != 0) {
+		if ((pci_vendor_id == ent->vendor_id) &&
+		    (pci_device_id == ent->device_id) &&
+
+		    ((pci_subvendor_id == ent->subvendor_id) ||
+		     (ent->subvendor_id == 0)) &&
+
+		    ((pci_subdevice_id == ent->subdevice_id) ||
+		     (ent->subdevice_id == 0))) {
+			sprintf(device_name, "%s, Version - %s",
+				ixl_strings[ent->index],
+				ixl_driver_version);
+			device_set_desc_copy(dev, device_name);
+			/* One shot mutex init */
+			if (lock_init == FALSE) {
+				lock_init = TRUE;
+				mtx_init(&ixl_reset_mtx,
+				    "ixl_reset",
+				    "IXL RESET Lock", MTX_DEF);
+			}
+			return (BUS_PROBE_DEFAULT);
+		}
+		ent++;
+	}
+	return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixl_attach(device_t dev)
+{
+	struct ixl_pf	*pf;
+	struct i40e_hw	*hw;
+	struct ixl_vsi *vsi;
+	u16		bus;
+	int             error = 0;
+#ifdef PCI_IOV
+	nvlist_t	*pf_schema, *vf_schema;
+	int		iov_error;
+#endif
+
+	INIT_DEBUGOUT("ixl_attach: begin");
+
+	/* Allocate, clear, and link in our primary soft structure */
+	pf = device_get_softc(dev);
+	pf->dev = pf->osdep.dev = dev;
+	hw = &pf->hw;
+
+	/*
	** Note: this assumes we have a single embedded VSI;
	** this could be enhanced later to allocate multiple VSIs.
+	*/
+	vsi = &pf->vsi;
+	vsi->dev = pf->dev;
+
+	/* Core Lock Init*/
+	IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
+
+	/* Set up the timer callout */
+	callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
+
+	/* Set up sysctls */
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
+	    pf, 0, ixl_set_flowcntl, "I", "Flow Control");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+	    pf, 0, ixl_set_advertise, "I", "Advertised Speed");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_current_speed, "A", "Current Port Speed");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "rx_itr", CTLFLAG_RW,
+	    &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
+	    &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "tx_itr", CTLFLAG_RW,
+	    &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
+
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
+	    &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
+
+#ifdef IXL_DEBUG_SYSCTL
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
+	    ixl_debug_info, "I", "Debug Information");
+
+	/* Debug shared-code message level */
+	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "debug_mask", CTLFLAG_RW,
+	    &pf->hw.debug_mask, 0, "Debug Message Level");
+
+	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
+	    0, "PF/VF Virtual Channel debug level");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_sysctl_link_status, "A", "Current Link Status");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
+
+	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+	    OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
+	    pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
+#endif
+
+	/* Save off the PCI information */
+	hw->vendor_id = pci_get_vendor(dev);
+	hw->device_id = pci_get_device(dev);
+	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+	hw->subsystem_vendor_id =
+	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
+	hw->subsystem_device_id =
+	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+	hw->bus.device = pci_get_slot(dev);
+	hw->bus.func = pci_get_function(dev);
+
+	pf->vc_debug_lvl = 1;
+
+	/* Do PCI setup - map BAR0, etc */
+	if (ixl_allocate_pci_resources(pf)) {
+		device_printf(dev, "Allocation of PCI resources failed\n");
+		error = ENXIO;
+		goto err_out;
+	}
+
+	/* Establish a clean starting point */
+	i40e_clear_hw(hw);
+	error = i40e_pf_reset(hw);
+	if (error) {
+		device_printf(dev,"PF reset failure %x\n", error);
+		error = EIO;
+		goto err_out;
+	}
+
+	/* Set admin queue parameters */
+	hw->aq.num_arq_entries = IXL_AQ_LEN;
+	hw->aq.num_asq_entries = IXL_AQ_LEN;
+	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
+	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+
+	/* Initialize the shared code */
+	error = i40e_init_shared_code(hw);
+	if (error) {
+		device_printf(dev,"Unable to initialize the shared code\n");
+		error = EIO;
+		goto err_out;
+	}
+
+	/* Set up the admin queue */
+	error = i40e_init_adminq(hw);
+	if (error) {
+		device_printf(dev, "The driver for the device stopped "
+		    "because the NVM image is newer than expected.\n"
+		    "You must install the most recent version of "
+		    " the network driver.\n");
+		goto err_out;
+	}
+	device_printf(dev, "%s\n", ixl_fw_version_str(hw));
+
+        if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+		device_printf(dev, "The driver for the device detected "
+		    "a newer version of the NVM image than expected.\n"
+		    "Please install the most recent version of the network driver.\n");
+	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+		device_printf(dev, "The driver for the device detected "
+		    "an older version of the NVM image than expected.\n"
+		    "Please update the NVM image.\n");
+
+	/* Clear PXE mode */
+	i40e_clear_pxe_mode(hw);
+
+	/* Get capabilities from the device */
+	error = ixl_get_hw_capabilities(pf);
+	if (error) {
+		device_printf(dev, "HW capabilities failure!\n");
+		goto err_get_cap;
+	}
+
+	/* Set up host memory cache */
+	error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+	    hw->func_caps.num_rx_qp, 0, 0);
+	if (error) {
+		device_printf(dev, "init_lan_hmc failed: %d\n", error);
+		goto err_get_cap;
+	}
+
+	error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+	if (error) {
+		device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+		goto err_mac_hmc;
+	}
+
+	/* Disable LLDP from the firmware */
+	i40e_aq_stop_lldp(hw, TRUE, NULL);
+
+	i40e_get_mac_addr(hw, hw->mac.addr);
+	error = i40e_validate_mac_addr(hw->mac.addr);
+	if (error) {
+		device_printf(dev, "validate_mac_addr failed: %d\n", error);
+		goto err_mac_hmc;
+	}
+	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
+	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+
+	/* Set up VSI and queues */
+	if (ixl_setup_stations(pf) != 0) { 
+		device_printf(dev, "setup stations failed!\n");
+		error = ENOMEM;
+		goto err_mac_hmc;
+	}
+
+	/* Initialize mac filter list for VSI */
+	SLIST_INIT(&vsi->ftl);
+
+	/* Set up interrupt routing here */
+	if (pf->msix > 1)
+		error = ixl_assign_vsi_msix(pf);
+	else
+		error = ixl_assign_vsi_legacy(pf);
+	if (error) 
+		goto err_late;
+
+	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+	    (hw->aq.fw_maj_ver < 4)) {
+		i40e_msec_delay(75);
+		error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+		if (error)
+			device_printf(dev, "link restart failed, aq_err=%d\n",
+			    pf->hw.aq.asq_last_status);
+	}
+
+	/* Determine link state */
+	i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
+	i40e_get_link_status(hw, &pf->link_up);
+
+	/* Setup OS specific network interface */
+	if (ixl_setup_interface(dev, vsi) != 0) {
+		device_printf(dev, "interface setup failed!\n");
+		error = EIO;
+		goto err_late;
+	}
+
+	error = ixl_switch_config(pf);
+	if (error) {
+		device_printf(dev, "Initial switch config failed: %d\n", error);
+		goto err_mac_hmc;
+	}
+
+	/* Limit phy interrupts to link and modules failure */
+	error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN |
+		I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
+	if (error)
+		device_printf(dev, "set phy mask failed: %d\n", error);
+
+	/* Get the bus configuration and set the shared code */
+	bus = ixl_get_bus_info(hw, dev);
+	i40e_set_pci_config_data(hw, bus);
+
+	/* Initialize statistics */
+	ixl_pf_reset_stats(pf);
+	ixl_update_stats_counters(pf);
+	ixl_add_hw_stats(pf);
+
+	/* Register for VLAN events */
+	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+	    ixl_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+	    ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+
+#ifdef PCI_IOV
+	/* SR-IOV is only supported when MSI-X is in use. */
+	if (pf->msix > 1) {
+		pf_schema = pci_iov_schema_alloc_node();
+		vf_schema = pci_iov_schema_alloc_node();
+		pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+		pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+		    IOV_SCHEMA_HASDEFAULT, TRUE);
+		pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+		    IOV_SCHEMA_HASDEFAULT, FALSE);
+		pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+		    IOV_SCHEMA_HASDEFAULT, FALSE);
+
+		iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
+		if (iov_error != 0)
+			device_printf(dev,
+			    "Failed to initialize SR-IOV (error=%d)\n",
+			    iov_error);
+	}
+#endif
+
+#ifdef DEV_NETMAP
+	ixl_netmap_attach(vsi);
+#endif /* DEV_NETMAP */
+	INIT_DEBUGOUT("ixl_attach: end");
+	return (0);
+
+err_late:
+	if (vsi->ifp != NULL)
+		if_free(vsi->ifp);
+err_mac_hmc:
+	i40e_shutdown_lan_hmc(hw);
+err_get_cap:
+	i40e_shutdown_adminq(hw);
+err_out:
+	ixl_free_pci_resources(pf);
+	ixl_free_vsi(vsi);
+	IXL_PF_LOCK_DESTROY(pf);
+	return (error);
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixl_detach(device_t dev)
+{
+	struct ixl_pf		*pf = device_get_softc(dev);
+	struct i40e_hw		*hw = &pf->hw;
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	i40e_status		status;
+#ifdef PCI_IOV
+	int			error;
+#endif
+
+	INIT_DEBUGOUT("ixl_detach: begin");
+
+	/* Make sure VLANS are not using driver */
+	if (vsi->ifp->if_vlantrunk != NULL) {
+		device_printf(dev,"Vlan in use, detach first\n");
+		return (EBUSY);
+	}
+
+#ifdef PCI_IOV
+	error = pci_iov_detach(dev);
+	if (error != 0) {
+		device_printf(dev, "SR-IOV in use; detach first.\n");
+		return (error);
+	}
+#endif
+
+	ether_ifdetach(vsi->ifp);
+	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		IXL_PF_LOCK(pf);
+		ixl_stop(pf);
+		IXL_PF_UNLOCK(pf);
+	}
+
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		if (que->tq) {
+			taskqueue_drain(que->tq, &que->task);
+			taskqueue_drain(que->tq, &que->tx_task);
+			taskqueue_free(que->tq);
+		}
+	}
+
+	/* Shutdown LAN HMC */
+	status = i40e_shutdown_lan_hmc(hw);
+	if (status)
+		device_printf(dev,
+		    "Shutdown LAN HMC failed with code %d\n", status);
+
+	/* Shutdown admin queue */
+	status = i40e_shutdown_adminq(hw);
+	if (status)
+		device_printf(dev,
+		    "Shutdown Admin queue failed with code %d\n", status);
+
+	/* Unregister VLAN events */
+	if (vsi->vlan_attach != NULL)
+		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
+	if (vsi->vlan_detach != NULL)
+		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
+
+	callout_drain(&pf->timer);
+#ifdef DEV_NETMAP
+	netmap_detach(vsi->ifp);
+#endif /* DEV_NETMAP */
+	ixl_free_pci_resources(pf);
+	bus_generic_detach(dev);
+	if_free(vsi->ifp);
+	ixl_free_vsi(vsi);
+	IXL_PF_LOCK_DESTROY(pf);
+	return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+ixl_shutdown(device_t dev)
+{
+	struct ixl_pf *pf = device_get_softc(dev);
+	IXL_PF_LOCK(pf);
+	ixl_stop(pf);
+	IXL_PF_UNLOCK(pf);
+	return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  Get the hardware capabilities
+ *
+ **********************************************************************/
+
+static int
+ixl_get_hw_capabilities(struct ixl_pf *pf)
+{
+	struct i40e_aqc_list_capabilities_element_resp *buf;
+	struct i40e_hw	*hw = &pf->hw;
+	device_t 	dev = pf->dev;
+	int             error, len;
+	u16		needed;
+	bool		again = TRUE;
+
+	len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+retry:
+	if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
+	    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate cap memory\n");
+                return (ENOMEM);
+	}
+
+	/* This populates the hw struct */
+        error = i40e_aq_discover_capabilities(hw, buf, len,
+	    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
+	free(buf, M_DEVBUF);
+	if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
+	    (again == TRUE)) {
+		/* retry once with a larger buffer */
+		again = FALSE;
+		len = needed;
+		goto retry;
+	} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+		device_printf(dev, "capability discovery failed: %d\n",
+		    pf->hw.aq.asq_last_status);
+		return (ENODEV);
+	}
+
+	/* Capture this PF's starting queue pair */
+	pf->qbase = hw->func_caps.base_queue;
+
+#ifdef IXL_DEBUG
+	device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
+	    "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
+	    hw->pf_id, hw->func_caps.num_vfs,
+	    hw->func_caps.num_msix_vectors,
+	    hw->func_caps.num_msix_vectors_vf,
+	    hw->func_caps.fd_filters_guaranteed,
+	    hw->func_caps.fd_filters_best_effort,
+	    hw->func_caps.num_tx_qp,
+	    hw->func_caps.num_rx_qp,
+	    hw->func_caps.base_queue);
+#endif
+	return (error);
+}
+
+static void
+ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
+{
+	device_t 	dev = vsi->dev;
+
+	/* Enable/disable TXCSUM/TSO4 */
+	if (!(ifp->if_capenable & IFCAP_TXCSUM)
+	    && !(ifp->if_capenable & IFCAP_TSO4)) {
+		if (mask & IFCAP_TXCSUM) {
+			ifp->if_capenable |= IFCAP_TXCSUM;
+			/* enable TXCSUM, restore TSO if previously enabled */
+			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
+				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+				ifp->if_capenable |= IFCAP_TSO4;
+			}
+		}
+		else if (mask & IFCAP_TSO4) {
+			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
+			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+			device_printf(dev,
+			    "TSO4 requires txcsum, enabling both...\n");
+		}
+	} else if((ifp->if_capenable & IFCAP_TXCSUM)
+	    && !(ifp->if_capenable & IFCAP_TSO4)) {
+		if (mask & IFCAP_TXCSUM)
+			ifp->if_capenable &= ~IFCAP_TXCSUM;
+		else if (mask & IFCAP_TSO4)
+			ifp->if_capenable |= IFCAP_TSO4;
+	} else if((ifp->if_capenable & IFCAP_TXCSUM)
+	    && (ifp->if_capenable & IFCAP_TSO4)) {
+		if (mask & IFCAP_TXCSUM) {
+			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
+			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
+			device_printf(dev, 
+			    "TSO4 requires txcsum, disabling both...\n");
+		} else if (mask & IFCAP_TSO4)
+			ifp->if_capenable &= ~IFCAP_TSO4;
+	}
+
+	/* Enable/disable TXCSUM_IPV6/TSO6 */
+	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+	    && !(ifp->if_capenable & IFCAP_TSO6)) {
+		if (mask & IFCAP_TXCSUM_IPV6) {
+			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
+			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
+				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+				ifp->if_capenable |= IFCAP_TSO6;
+			}
+		} else if (mask & IFCAP_TSO6) {
+			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+			device_printf(dev,
+			    "TSO6 requires txcsum6, enabling both...\n");
+		}
+	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+	    && !(ifp->if_capenable & IFCAP_TSO6)) {
+		if (mask & IFCAP_TXCSUM_IPV6)
+			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
+		else if (mask & IFCAP_TSO6)
+			ifp->if_capenable |= IFCAP_TSO6;
+	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+	    && (ifp->if_capenable & IFCAP_TSO6)) {
+		if (mask & IFCAP_TXCSUM_IPV6) {
+			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
+			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+			device_printf(dev,
+			    "TSO6 requires txcsum6, disabling both...\n");
+		} else if (mask & IFCAP_TSO6)
+			ifp->if_capenable &= ~IFCAP_TSO6;
+	}
+}
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  ixl_ioctl is called when the user wants to configure the
+ *  interface.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+	struct ixl_vsi	*vsi = ifp->if_softc;
+	struct ixl_pf	*pf = vsi->back;
+	struct ifreq	*ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+	struct ifaddr *ifa = (struct ifaddr *)data;
+	bool		avoid_reset = FALSE;
+#endif
+	int             error = 0;
+
+	switch (command) {
+
+        case SIOCSIFADDR:
+#ifdef INET
+		if (ifa->ifa_addr->sa_family == AF_INET)
+			avoid_reset = TRUE;
+#endif
+#ifdef INET6
+		if (ifa->ifa_addr->sa_family == AF_INET6)
+			avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+		/*
+		** Calling init results in link renegotiation,
+		** so we avoid doing it when possible.
+		*/
+		if (avoid_reset) {
+			ifp->if_flags |= IFF_UP;
+			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+				ixl_init(pf);
+#ifdef INET
+			if (!(ifp->if_flags & IFF_NOARP))
+				arp_ifinit(ifp, ifa);
+#endif
+		} else
+			error = ether_ioctl(ifp, command, data);
+		break;
+#endif
+	case SIOCSIFMTU:
+		IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+		if (ifr->ifr_mtu > IXL_MAX_FRAME -
+		   ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
+			error = EINVAL;
+		} else {
+			IXL_PF_LOCK(pf);
+			ifp->if_mtu = ifr->ifr_mtu;
+			vsi->max_frame_size =
+				ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+			    + ETHER_VLAN_ENCAP_LEN;
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				ixl_init_locked(pf);
+			IXL_PF_UNLOCK(pf);
+		}
+		break;
+	case SIOCSIFFLAGS:
+		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+		IXL_PF_LOCK(pf);
+		if (ifp->if_flags & IFF_UP) {
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+				if ((ifp->if_flags ^ pf->if_flags) &
+				    (IFF_PROMISC | IFF_ALLMULTI)) {
+					ixl_set_promisc(vsi);
+				}
+			} else
+				ixl_init_locked(pf);
+		} else
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				ixl_stop(pf);
+		pf->if_flags = ifp->if_flags;
+		IXL_PF_UNLOCK(pf);
+		break;
+	case SIOCADDMULTI:
+		IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			IXL_PF_LOCK(pf);
+			ixl_disable_intr(vsi);
+			ixl_add_multi(vsi);
+			ixl_enable_intr(vsi);
+			IXL_PF_UNLOCK(pf);
+		}
+		break;
+	case SIOCDELMULTI:
+		IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			IXL_PF_LOCK(pf);
+			ixl_disable_intr(vsi);
+			ixl_del_multi(vsi);
+			ixl_enable_intr(vsi);
+			IXL_PF_UNLOCK(pf);
+		}
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+#ifdef IFM_ETH_XTYPE
+	case SIOCGIFXMEDIA:
+#endif
+		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+		error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
+		break;
+	case SIOCSIFCAP:
+	{
+		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+		IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+
+		ixl_cap_txcsum_tso(vsi, ifp, mask);
+
+		if (mask & IFCAP_RXCSUM)
+			ifp->if_capenable ^= IFCAP_RXCSUM;
+		if (mask & IFCAP_RXCSUM_IPV6)
+			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+		if (mask & IFCAP_LRO)
+			ifp->if_capenable ^= IFCAP_LRO;
+		if (mask & IFCAP_VLAN_HWTAGGING)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+		if (mask & IFCAP_VLAN_HWFILTER)
+			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+		if (mask & IFCAP_VLAN_HWTSO)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			IXL_PF_LOCK(pf);
+			ixl_init_locked(pf);
+			IXL_PF_UNLOCK(pf);
+		}
+		VLAN_CAPABILITIES(ifp);
+
+		break;
+	}
+
+	default:
+		IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
+		error = ether_ioctl(ifp, command, data);
+		break;
+	}
+
+	return (error);
+}
+
+
+/*********************************************************************
+ *  Init entry point
+ *
+ *  This routine is used in two ways. It is used by the stack as
+ *  the init entry point in the network interface structure. It is
+ *  also used by the driver as a hw/sw initialization routine to get
+ *  to a consistent state. Note that it returns no status; errors are
+ *  reported with device_printf().
+ **********************************************************************/
+
+static void
+ixl_init_locked(struct ixl_pf *pf)
+{
+	struct i40e_hw	*hw = &pf->hw;
+	struct ixl_vsi	*vsi = &pf->vsi;
+	struct ifnet	*ifp = vsi->ifp;
+	device_t 	dev = pf->dev;
+	struct i40e_filter_control_settings	filter;
+	u8		tmpaddr[ETHER_ADDR_LEN];
+	int		ret;
+
+	mtx_assert(&pf->pf_mtx, MA_OWNED);
+	INIT_DEBUGOUT("ixl_init: begin");
+	ixl_stop(pf);
+
+	/* Get the latest mac address... User might use a LAA */
+	bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
+	      I40E_ETH_LENGTH_OF_ADDRESS);
+	if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && 
+	    (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+		ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+		bcopy(tmpaddr, hw->mac.addr,
+		    I40E_ETH_LENGTH_OF_ADDRESS);
+		ret = i40e_aq_mac_address_write(hw,
+		    I40E_AQC_WRITE_TYPE_LAA_ONLY,
+		    hw->mac.addr, NULL);
+		if (ret) {
+			device_printf(dev, "LLA address"
+			 "change failed!!\n");
+			return;
+		} else {
+			ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+		}
+	}
+
+	/* Set the various hardware offload abilities */
+	ifp->if_hwassist = 0;
+	if (ifp->if_capenable & IFCAP_TSO)
+		ifp->if_hwassist |= CSUM_TSO;
+	if (ifp->if_capenable & IFCAP_TXCSUM)
+		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+		ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
+
+	/* Set up the device filtering */
+	bzero(&filter, sizeof(filter));
+	filter.enable_ethtype = TRUE;
+	filter.enable_macvlan = TRUE;
+#ifdef IXL_FDIR
+	filter.enable_fdir = TRUE;
+#endif
+	if (i40e_set_filter_control(hw, &filter))
+		device_printf(dev, "set_filter_control() failed\n");
+
+	/* Set up RSS */
+	ixl_config_rss(vsi);
+
+	/*
+	** Prepare the VSI: rings, hmc contexts, etc...
+	*/
+	if (ixl_initialize_vsi(vsi)) {
+		device_printf(dev, "initialize vsi failed!!\n");
+		return;
+	}
+
+	/* Add protocol filters to list */
+	ixl_init_filters(vsi);
+
+	/* Setup vlan's if needed */
+	ixl_setup_vlan_filters(vsi);
+
+	/* Start the local timer */
+	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+
+	/* Set up MSI/X routing and the ITR settings */
+	if (ixl_enable_msix) {
+		ixl_configure_msix(pf);
+		ixl_configure_itr(pf);
+	} else
+		ixl_configure_legacy(pf);
+
+	ixl_enable_rings(vsi);
+
+	i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
+
+	ixl_reconfigure_filters(vsi);
+
+	/* Set MTU in hardware*/
+	int aq_error = i40e_aq_set_mac_config(hw, vsi->max_frame_size,
+	    TRUE, 0, NULL);
+	if (aq_error)
+		device_printf(vsi->dev,
+			"aq_set_mac_config in init error, code %d\n",
+		    aq_error);
+
+	/* And now turn on interrupts */
+	ixl_enable_intr(vsi);
+
+	/* Now inform the stack we're ready */
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+	return;
+}
+
+static void
+ixl_init(void *arg)
+{
+	struct ixl_pf *pf = arg;
+
+	IXL_PF_LOCK(pf);
+	ixl_init_locked(pf);
+	IXL_PF_UNLOCK(pf);
+	return;
+}
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+static void
+ixl_handle_que(void *context, int pending)
+{
+	struct ixl_queue *que = context;
+	struct ixl_vsi *vsi = que->vsi;
+	struct i40e_hw  *hw = vsi->hw;
+	struct tx_ring  *txr = &que->txr;
+	struct ifnet    *ifp = vsi->ifp;
+	bool		more;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		more = ixl_rxeof(que, IXL_RX_LIMIT);
+		IXL_TX_LOCK(txr);
+		ixl_txeof(que);
+		if (!drbr_empty(ifp, txr->br))
+			ixl_mq_start_locked(ifp, txr);
+		IXL_TX_UNLOCK(txr);
+		if (more) {
+			taskqueue_enqueue(que->tq, &que->task);
+			return;
+		}
+	}
+
	/* Re-enable this queue's interrupt */
+	ixl_enable_queue(hw, que->me);
+	return;
+}
+
+
+/*********************************************************************
+ *
+ *  Legacy Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_intr(void *arg)
+{
+	struct ixl_pf		*pf = arg;
+	struct i40e_hw		*hw =  &pf->hw;
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	struct ifnet		*ifp = vsi->ifp;
+	struct tx_ring		*txr = &que->txr;
+        u32			reg, icr0, mask;
+	bool			more_tx, more_rx;
+
+	++que->irqs;
+
+	/* Protect against spurious interrupts */
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+		return;
+
+	icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+        mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+#ifdef PCI_IOV
+	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
+		taskqueue_enqueue(pf->tq, &pf->vflr_task);
+#endif
+
+	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+		taskqueue_enqueue(pf->tq, &pf->adminq);
+		return;
+	}
+
+	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+	IXL_TX_LOCK(txr);
+	more_tx = ixl_txeof(que);
+	if (!drbr_empty(vsi->ifp, txr->br))
+		more_tx = 1;
+	IXL_TX_UNLOCK(txr);
+
+	/* re-enable other interrupt causes */
+	wr32(hw, I40E_PFINT_ICR0_ENA, mask);
+
+	/* And now the queues */
+	reg = rd32(hw, I40E_QINT_RQCTL(0));
+	reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+	wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+	reg = rd32(hw, I40E_QINT_TQCTL(0));
+	reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+	reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
+	wr32(hw, I40E_QINT_TQCTL(0), reg);
+
+	ixl_enable_legacy(hw);
+
+	return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX VSI Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_msix_que(void *arg)
+{
+	struct ixl_queue	*que = arg;
+	struct ixl_vsi	*vsi = que->vsi;
+	struct i40e_hw	*hw = vsi->hw;
+	struct tx_ring	*txr = &que->txr;
+	bool		more_tx, more_rx;
+
+	/* Protect against spurious interrupts */
+	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	++que->irqs;
+
+	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+	IXL_TX_LOCK(txr);
+	more_tx = ixl_txeof(que);
+	/*
+	** Make certain that if the stack 
+	** has anything queued the task gets
+	** scheduled to handle it.
+	*/
+	if (!drbr_empty(vsi->ifp, txr->br))
+		more_tx = 1;
+	IXL_TX_UNLOCK(txr);
+
+	ixl_set_queue_rx_itr(que);
+	ixl_set_queue_tx_itr(que);
+
+	if (more_tx || more_rx)
+		taskqueue_enqueue(que->tq, &que->task);
+	else
+		ixl_enable_queue(hw, que->me);
+
+	return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX Admin Queue Interrupt Service routine
+ *
+ **********************************************************************/
+static void
+ixl_msix_adminq(void *arg)
+{
+	struct ixl_pf	*pf = arg;
+	struct i40e_hw	*hw = &pf->hw;
+	u32		reg, mask;
+
+	++pf->admin_irq;
+
+	reg = rd32(hw, I40E_PFINT_ICR0);
+	mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+	/* Check on the cause */
+	if (reg & I40E_PFINT_ICR0_ADMINQ_MASK)
+		mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+
+	if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+		ixl_handle_mdd_event(pf);
+		mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+	}
+
+#ifdef PCI_IOV
+	if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
+		mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+		taskqueue_enqueue(pf->tq, &pf->vflr_task);
+	}
+#endif
+
+	reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+	reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+	taskqueue_enqueue(pf->tq, &pf->adminq);
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+	struct ixl_vsi	*vsi = ifp->if_softc;
+	struct ixl_pf	*pf = vsi->back;
+	struct i40e_hw  *hw = &pf->hw;
+
+	INIT_DEBUGOUT("ixl_media_status: begin");
+	IXL_PF_LOCK(pf);
+
+	hw->phy.get_link_info = TRUE;
+	i40e_get_link_status(hw, &pf->link_up);
+	ixl_update_link_status(pf);
+
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (!pf->link_up) {
+		IXL_PF_UNLOCK(pf);
+		return;
+	}
+
+	ifmr->ifm_status |= IFM_ACTIVE;
+	/* Hardware is always full-duplex */
+	ifmr->ifm_active |= IFM_FDX;
+
+	switch (hw->phy.link_info.phy_type) {
+		/* 100 M */
+		case I40E_PHY_TYPE_100BASE_TX:
+			ifmr->ifm_active |= IFM_100_TX;
+			break;
+		/* 1 G */
+		case I40E_PHY_TYPE_1000BASE_T:
+			ifmr->ifm_active |= IFM_1000_T;
+			break;
+		case I40E_PHY_TYPE_1000BASE_SX:
+			ifmr->ifm_active |= IFM_1000_SX;
+			break;
+		case I40E_PHY_TYPE_1000BASE_LX:
+			ifmr->ifm_active |= IFM_1000_LX;
+			break;
+		/* 10 G */
+		case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+			ifmr->ifm_active |= IFM_10G_TWINAX;
+			break;
+		case I40E_PHY_TYPE_10GBASE_SR:
+			ifmr->ifm_active |= IFM_10G_SR;
+			break;
+		case I40E_PHY_TYPE_10GBASE_LR:
+			ifmr->ifm_active |= IFM_10G_LR;
+			break;
+		case I40E_PHY_TYPE_10GBASE_T:
+			ifmr->ifm_active |= IFM_10G_T;
+			break;
+		/* 40 G */
+		case I40E_PHY_TYPE_40GBASE_CR4:
+		case I40E_PHY_TYPE_40GBASE_CR4_CU:
+			ifmr->ifm_active |= IFM_40G_CR4;
+			break;
+		case I40E_PHY_TYPE_40GBASE_SR4:
+			ifmr->ifm_active |= IFM_40G_SR4;
+			break;
+		case I40E_PHY_TYPE_40GBASE_LR4:
+			ifmr->ifm_active |= IFM_40G_LR4;
+			break;
+#ifndef IFM_ETH_XTYPE
+		case I40E_PHY_TYPE_1000BASE_KX:
+			ifmr->ifm_active |= IFM_1000_CX;
+			break;
+		case I40E_PHY_TYPE_10GBASE_CR1_CU:
+		case I40E_PHY_TYPE_10GBASE_CR1:
+			ifmr->ifm_active |= IFM_10G_TWINAX;
+			break;
+		case I40E_PHY_TYPE_10GBASE_KX4:
+			ifmr->ifm_active |= IFM_10G_CX4;
+			break;
+		case I40E_PHY_TYPE_10GBASE_KR:
+			ifmr->ifm_active |= IFM_10G_SR;
+			break;
+		case I40E_PHY_TYPE_40GBASE_KR4:
+		case I40E_PHY_TYPE_XLPPI:
+			ifmr->ifm_active |= IFM_40G_SR4;
+			break;
+#else
+		case I40E_PHY_TYPE_1000BASE_KX:
+			ifmr->ifm_active |= IFM_1000_KX;
+			break;
+		/* ERJ: What's the difference between these? */
+		case I40E_PHY_TYPE_10GBASE_CR1_CU:
+		case I40E_PHY_TYPE_10GBASE_CR1:
+			ifmr->ifm_active |= IFM_10G_CR1;
+			break;
+		case I40E_PHY_TYPE_10GBASE_KX4:
+			ifmr->ifm_active |= IFM_10G_KX4;
+			break;
+		case I40E_PHY_TYPE_10GBASE_KR:
+			ifmr->ifm_active |= IFM_10G_KR;
+			break;
+		case I40E_PHY_TYPE_20GBASE_KR2:
+			ifmr->ifm_active |= IFM_20G_KR2;
+			break;
+		case I40E_PHY_TYPE_40GBASE_KR4:
+			ifmr->ifm_active |= IFM_40G_KR4;
+			break;
+		case I40E_PHY_TYPE_XLPPI:
+			ifmr->ifm_active |= IFM_40G_XLPPI;
+			break;
+#endif
+		default:
+			ifmr->ifm_active |= IFM_UNKNOWN;
+			break;
+	}
+	/* Report flow control status as well */
+	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+		ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+	if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+		ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+
+	IXL_PF_UNLOCK(pf);
+
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  the media/mediaopt options with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixl_media_change(struct ifnet * ifp)
+{
+	struct ixl_vsi *vsi = ifp->if_softc;
+	struct ifmedia *ifm = &vsi->media;
+
+	INIT_DEBUGOUT("ixl_media_change: begin");
+
+	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+		return (EINVAL);
+
+	if_printf(ifp, "Media change is currently not supported.\n");
+
+	return (ENODEV);
+}
+
+
+#ifdef IXL_FDIR
+/*
+** ATR: Application Targeted Receive - creates a filter
+**	based on TX flow info that will keep the receive
+**	portion of the flow on the same queue. In this
+**	implementation it is only available for TCP connections.
+*/
+void
+ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
+{
+	struct ixl_vsi			*vsi = que->vsi;
+	struct tx_ring			*txr = &que->txr;
+	struct i40e_filter_program_desc	*FDIR;
+	u32				ptype, dtype;
+	int				idx;
+
+	/* check if ATR is enabled and sample rate */
+	if ((!ixl_enable_fdir) || (!txr->atr_rate))
+		return;
+	/*
+	** We always sample TCP SYN/FIN packets;
+	** other segments are sampled at the selected rate.
+	*/
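+	/*
+	** Illustrative example (hypothetical rate): with
+	** txr->atr_rate = 20, at most one in every 20 non-SYN/FIN
+	** segments of a flow programs or refreshes the filter,
+	** while SYN and FIN segments always sample, so filters are
+	** installed at connection setup and removed at close.
+	*/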
+	txr->atr_count++;
+	if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
+	    (txr->atr_count < txr->atr_rate))
+                return;
+	txr->atr_count = 0;
+
+	/* Get a descriptor to use */
+	idx = txr->next_avail;
+	FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
+	if (++idx == que->num_desc)
+		idx = 0;
+	txr->avail--;
+	txr->next_avail = idx;
+
+	ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+	    I40E_TXD_FLTR_QW0_QINDEX_MASK;
+
+	ptype |= (etype == ETHERTYPE_IP) ?
+	    (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
+	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
+	    (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
+	    I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
+
+	ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
+
+	dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
+
+	/*
+	** We use the TCP TH_FIN flag as a trigger to remove
+	** the filter; otherwise it's an update.
+	*/
+	dtype |= (th->th_flags & TH_FIN) ?
+	    (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+	    I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
+	    (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+	    I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+
+	dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
+	    I40E_TXD_FLTR_QW1_DEST_SHIFT;
+
+	dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
+	    I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+
+	FDIR->qindex_flex_ptype_vsi = htole32(ptype);
+	FDIR->dtype_cmd_cntindex = htole32(dtype);
+	return;
+}
+#endif
+
+
+static void
+ixl_set_promisc(struct ixl_vsi *vsi)
+{
+	struct ifnet	*ifp = vsi->ifp;
+	struct i40e_hw	*hw = vsi->hw;
+	int		err, mcnt = 0;
+	bool		uni = FALSE, multi = FALSE;
+
+	if (ifp->if_flags & IFF_ALLMULTI)
+                multi = TRUE;
+	else { /* Need to count the multicast addresses */
+		struct  ifmultiaddr *ifma;
+		if_maddr_rlock(ifp);
+		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+                        if (ifma->ifma_addr->sa_family != AF_LINK)
+                                continue;
+                        if (mcnt == MAX_MULTICAST_ADDR)
+                                break;
+                        mcnt++;
+		}
+		if_maddr_runlock(ifp);
+	}
+
+	if (mcnt >= MAX_MULTICAST_ADDR)
+                multi = TRUE;
+        if (ifp->if_flags & IFF_PROMISC)
+		uni = TRUE;
+
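+	/*
+	** Note: when the multicast group count reaches
+	** MAX_MULTICAST_ADDR, multi is forced TRUE above and the
+	** AQ call below puts the VSI into multicast promiscuous
+	** mode rather than relying on individual filters.
+	*/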
+	err = i40e_aq_set_vsi_unicast_promiscuous(hw,
+	    vsi->seid, uni, NULL);
+	err = i40e_aq_set_vsi_multicast_promiscuous(hw,
+	    vsi->seid, multi, NULL);
+	return;
+}
+
+/*********************************************************************
+ * 	Filter Routines
+ *
+ *	Routines for multicast and vlan filter management.
+ *
+ *********************************************************************/
+static void
+ixl_add_multi(struct ixl_vsi *vsi)
+{
+	struct	ifmultiaddr	*ifma;
+	struct ifnet		*ifp = vsi->ifp;
+	struct i40e_hw		*hw = vsi->hw;
+	int			mcnt = 0, flags;
+
+	IOCTL_DEBUGOUT("ixl_add_multi: begin");
+
+	if_maddr_rlock(ifp);
+	/*
+	** First just get a count, to decide if
+	** we should simply use multicast promiscuous.
+	*/
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		mcnt++;
+	}
+	if_maddr_runlock(ifp);
+
+	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
+		/* delete existing MC filters */
+		ixl_del_hw_filters(vsi, mcnt);
+		i40e_aq_set_vsi_multicast_promiscuous(hw,
+		    vsi->seid, TRUE, NULL);
+		return;
+	}
+
+	mcnt = 0;
+	if_maddr_rlock(ifp);
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		ixl_add_mc_filter(vsi,
+		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
+		mcnt++;
+	}
+	if_maddr_runlock(ifp);
+	if (mcnt > 0) {
+		flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
+		ixl_add_hw_filters(vsi, flags, mcnt);
+	}
+
+	IOCTL_DEBUGOUT("ixl_add_multi: end");
+	return;
+}
+
+static void
+ixl_del_multi(struct ixl_vsi *vsi)
+{
+	struct ifnet		*ifp = vsi->ifp;
+	struct ifmultiaddr	*ifma;
+	struct ixl_mac_filter	*f;
+	int			mcnt = 0;
+	bool		match = FALSE;
+
+	IOCTL_DEBUGOUT("ixl_del_multi: begin");
+
+	/* Search for removed multicast addresses */
+	if_maddr_rlock(ifp);
+	SLIST_FOREACH(f, &vsi->ftl, next) {
+		if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
+			match = FALSE;
+			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+				if (ifma->ifma_addr->sa_family != AF_LINK)
+					continue;
+				u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+				if (cmp_etheraddr(f->macaddr, mc_addr)) {
+					match = TRUE;
+					break;
+				}
+			}
+			if (match == FALSE) {
+				f->flags |= IXL_FILTER_DEL;
+				mcnt++;
+			}
+		}
+	}
+	if_maddr_runlock(ifp);
+
+	if (mcnt > 0)
+		ixl_del_hw_filters(vsi, mcnt);
+}
+
+
+/*********************************************************************
+ *  Timer routine
+ *
+ *  This routine checks for link status, updates statistics,
+ *  and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixl_local_timer(void *arg)
+{
+	struct ixl_pf		*pf = arg;
+	struct i40e_hw		*hw = &pf->hw;
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	device_t		dev = pf->dev;
+	int			hung = 0;
+	u32			mask;
+
+	mtx_assert(&pf->pf_mtx, MA_OWNED);
+
+	/* Fire off the adminq task */
+	taskqueue_enqueue(pf->tq, &pf->adminq);
+
+	/* Update stats */
+	ixl_update_stats_counters(pf);
+
+	/*
+	** Check status of the queues
+	*/
+	mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+		I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+ 
+	for (int i = 0; i < vsi->num_queues; i++,que++) {
+		/* Any queues with outstanding work get a sw irq */
+		if (que->busy)
+			wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
+		/*
+		** Each time txeof runs without cleaning while
+		** uncleaned descriptors remain, it increments busy;
+		** once busy reaches IXL_MAX_TX_BUSY the queue is
+		** declared hung.
+		*/
+		if (que->busy == IXL_QUEUE_HUNG) {
+			++hung;
+			/* Mark the queue as inactive */
+			vsi->active_queues &= ~((u64)1 << que->me);
+			continue;
+		} else {
+			/* Check if we've come back from hung */
+			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
+				vsi->active_queues |= ((u64)1 << que->me);
+		}
+		if (que->busy >= IXL_MAX_TX_BUSY) {
+#ifdef IXL_DEBUG
+			device_printf(dev,"Warning queue %d "
+			    "appears to be hung!\n", i);
+#endif
+			que->busy = IXL_QUEUE_HUNG;
+			++hung;
+		}
+	}
+	/* Only reinit if all queues show hung */
+	if (hung == vsi->num_queues)
+		goto hung;
+
+	callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+	return;
+
+hung:
+	device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
+	ixl_init_locked(pf);
+}
+
+/*
+** Note: this routine updates the OS on the link state
+**	the real check of the hardware only happens with
+**	a link interrupt.
+*/
+static void
+ixl_update_link_status(struct ixl_pf *pf)
+{
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct i40e_hw		*hw = &pf->hw;
+	struct ifnet		*ifp = vsi->ifp;
+	device_t		dev = pf->dev;
+
+	if (pf->link_up) {
+		if (vsi->link_active == FALSE) {
+			pf->fc = hw->fc.current_mode;
+			if (bootverbose) {
+				device_printf(dev,"Link is up %d Gbps %s,"
+				    " Flow Control: %s\n",
+				    ((pf->link_speed ==
+				    I40E_LINK_SPEED_40GB)? 40:10),
+				    "Full Duplex", ixl_fc_string[pf->fc]);
+			}
+			vsi->link_active = TRUE;
+			/*
+			** Warn user if link speed on NPAR enabled
+			** partition is not at least 10GB
+			*/
+			if (hw->func_caps.npar_enable &&
+			   (hw->phy.link_info.link_speed ==
+			   I40E_LINK_SPEED_1GB ||
+			   hw->phy.link_info.link_speed ==
+			   I40E_LINK_SPEED_100MB))
+				device_printf(dev, "The partition detected a "
+				    "link speed that is less than 10Gbps\n");
+			if_link_state_change(ifp, LINK_STATE_UP);
+		}
+	} else { /* Link down */
+		if (vsi->link_active == TRUE) {
+			if (bootverbose)
+				device_printf(dev,"Link is Down\n");
+			if_link_state_change(ifp, LINK_STATE_DOWN);
+			vsi->link_active = FALSE;
+		}
+	}
+
+	return;
+}
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixl_stop(struct ixl_pf *pf)
+{
+	struct ixl_vsi	*vsi = &pf->vsi;
+	struct ifnet	*ifp = vsi->ifp;
+
+	mtx_assert(&pf->pf_mtx, MA_OWNED);
+
+	INIT_DEBUGOUT("ixl_stop: begin\n");
+	if (pf->num_vfs == 0)
+		ixl_disable_intr(vsi);
+	else
+		ixl_disable_rings_intr(vsi);
+	ixl_disable_rings(vsi);
+
+	/* Tell the stack that the interface is no longer active */
+	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+	/* Stop the local timer */
+	callout_stop(&pf->timer);
+
+	return;
+}
+
+
+/*********************************************************************
+ *
+ *  Setup the legacy or MSI (single vector) interrupt handler for the VSI
+ *
+ **********************************************************************/
+static int
+ixl_assign_vsi_legacy(struct ixl_pf *pf)
+{
+	device_t        dev = pf->dev;
+	struct 		ixl_vsi *vsi = &pf->vsi;
+	struct		ixl_queue *que = vsi->queues;
+	int 		error, rid = 0;
+
+	if (pf->msix == 1)
+		rid = 1;
+	pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+	    &rid, RF_SHAREABLE | RF_ACTIVE);
+	if (pf->res == NULL) {
+		device_printf(dev,"Unable to allocate"
+		    " bus resource: vsi legacy/msi interrupt\n");
+		return (ENXIO);
+	}
+
+	/* Set the handler function */
+	error = bus_setup_intr(dev, pf->res,
+	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
+	    ixl_intr, pf, &pf->tag);
+	if (error) {
+		pf->res = NULL;
+		device_printf(dev, "Failed to register legacy/msi handler\n");
+		return (error);
+	}
+	bus_describe_intr(dev, pf->res, pf->tag, "irq0");
+	TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+	TASK_INIT(&que->task, 0, ixl_handle_que, que);
+	que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+	    taskqueue_thread_enqueue, &que->tq);
+	taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+	    device_get_nameunit(dev));
+	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+
+#ifdef PCI_IOV
+	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
+#endif
+
+	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+	    taskqueue_thread_enqueue, &pf->tq);
+	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+	    device_get_nameunit(dev));
+
+	return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  Setup MSIX Interrupt resources and handlers for the VSI
+ *
+ **********************************************************************/
+static int
+ixl_assign_vsi_msix(struct ixl_pf *pf)
+{
+	device_t	dev = pf->dev;
+	struct 		ixl_vsi *vsi = &pf->vsi;
+	struct 		ixl_queue *que = vsi->queues;
+	struct		tx_ring	 *txr;
+	int 		error, rid, vector = 0;
+
+	/* Admin Queue is vector 0 */
+	rid = vector + 1;
+	pf->res = bus_alloc_resource_any(dev,
+    	    SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+	if (!pf->res) {
+		device_printf(dev,"Unable to allocate"
+    	    " bus resource: Adminq interrupt [%d]\n", rid);
+		return (ENXIO);
+	}
+	/* Set the adminq vector and handler */
+	error = bus_setup_intr(dev, pf->res,
+	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
+	    ixl_msix_adminq, pf, &pf->tag);
+	if (error) {
+		pf->res = NULL;
+		device_printf(dev, "Failed to register Admin Queue handler\n");
+		return (error);
+	}
+	bus_describe_intr(dev, pf->res, pf->tag, "aq");
+	pf->admvec = vector;
+	/* Tasklet for Admin Queue */
+	TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+
+#ifdef PCI_IOV
+	TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
+#endif
+
+	pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+	    taskqueue_thread_enqueue, &pf->tq);
+	taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+	    device_get_nameunit(pf->dev));
+	++vector;
+
+	/* Now set up the stations */
+	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+		int cpu_id = i;
+		rid = vector + 1;
+		txr = &que->txr;
+		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+		    RF_SHAREABLE | RF_ACTIVE);
+		if (que->res == NULL) {
+			device_printf(dev,"Unable to allocate"
+		    	    " bus resource: que interrupt [%d]\n", vector);
+			return (ENXIO);
+		}
+		/* Set the handler function */
+		error = bus_setup_intr(dev, que->res,
+		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
+		    ixl_msix_que, que, &que->tag);
+		if (error) {
+			que->res = NULL;
+			device_printf(dev, "Failed to register que handler");
+			return (error);
+		}
+		bus_describe_intr(dev, que->res, que->tag, "q%d", i);
+		/* Bind the vector to a CPU */
+#ifdef RSS
+		cpu_id = rss_getcpu(i % rss_getnumbuckets());
+#endif
+		bus_bind_intr(dev, que->res, cpu_id);
+		que->msix = vector;
+		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+		TASK_INIT(&que->task, 0, ixl_handle_que, que);
+		que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+		    taskqueue_thread_enqueue, &que->tq);
+#ifdef RSS
+		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
+		    cpu_id, "%s (bucket %d)",
+		    device_get_nameunit(dev), cpu_id);
+#else
+		taskqueue_start_threads(&que->tq, 1, PI_NET,
+		    "%s que", device_get_nameunit(dev));
+#endif
+	}
+
+	return (0);
+}
+
+
+/*
+ * Allocate MSI/X vectors
+ */
+static int
+ixl_init_msix(struct ixl_pf *pf)
+{
+	device_t dev = pf->dev;
+	int rid, want, vectors, queues, available;
+
+	/* Allow override by tunable */
+	if (ixl_enable_msix == 0)
+		goto msi;
+
+	/*
+	** When used in a virtualized environment the
+	** PCI BUSMASTER capability may not be set,
+	** so explicitly set it here and rewrite the
+	** ENABLE bit in the MSIX control register at
+	** this point so the host can successfully
+	** initialize us.
+	*/
+	{
+		u16 pci_cmd_word;
+		int msix_ctrl;
+		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+		pci_find_cap(dev, PCIY_MSIX, &rid);
+		rid += PCIR_MSIX_CTRL;
+		msix_ctrl = pci_read_config(dev, rid, 2);
+		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+		pci_write_config(dev, rid, msix_ctrl, 2);
+	}
+
+	/* First try MSI/X */
+	rid = PCIR_BAR(IXL_BAR);
+	pf->msix_mem = bus_alloc_resource_any(dev,
+	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       	if (!pf->msix_mem) {
+		/* May not be enabled */
+		device_printf(pf->dev,
+		    "Unable to map MSIX table\n");
+		goto msi;
+	}
+
+	available = pci_msix_count(dev); 
+	if (available == 0) { /* system has msix disabled */
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    rid, pf->msix_mem);
+		pf->msix_mem = NULL;
+		goto msi;
+	}
+
+	/* Figure out a reasonable auto config value */
+	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
+
+	/* Override with hardcoded value if sane */
+	if ((ixl_max_queues != 0) && (ixl_max_queues <= queues)) 
+		queues = ixl_max_queues;
+
+#ifdef  RSS
+	/* If we're doing RSS, clamp at the number of RSS buckets */
+	if (queues > rss_getnumbuckets())
+		queues = rss_getnumbuckets();
+#endif
+
+	/*
+	** Want one vector (RX/TX pair) per queue
+	** plus an additional for the admin queue.
+	*/
+	want = queues + 1;
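+	/*
+	** Worked example (hypothetical counts): on an 8-CPU system
+	** with 16 MSIX vectors available, queues = 8 and want = 9,
+	** so the request fits with one vector left dedicated to
+	** the admin queue.
+	*/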
+	if (want <= available)	/* Have enough */
+		vectors = want;
+	else {
+               	device_printf(pf->dev,
+		    "MSIX Configuration Problem, "
+		    "%d vectors available but %d wanted!\n",
+		    available, want);
+		return (0); /* Will go to Legacy setup */
+	}
+
+	if (pci_alloc_msix(dev, &vectors) == 0) {
+               	device_printf(pf->dev,
+		    "Using MSIX interrupts with %d vectors\n", vectors);
+		pf->msix = vectors;
+		pf->vsi.num_queues = queues;
+#ifdef RSS
+		/*
+		 * If we're doing RSS, the number of queues needs to
+		 * match the number of RSS buckets that are configured.
+		 *
+		 * + If there's more queues than RSS buckets, we'll end
+		 *   up with queues that get no traffic.
+		 *
+		 * + If there's more RSS buckets than queues, we'll end
+		 *   up having multiple RSS buckets map to the same queue,
+		 *   so there'll be some contention.
+		 */
+		if (queues != rss_getnumbuckets()) {
+			device_printf(dev,
+			    "%s: queues (%d) != RSS buckets (%d)"
+			    "; performance will be impacted.\n",
+			    __func__, queues, rss_getnumbuckets());
+		}
+#endif
+		return (vectors);
+	}
+msi:
+       	vectors = pci_msi_count(dev);
+	pf->vsi.num_queues = 1;
+	pf->msix = 1;
+	ixl_max_queues = 1;
+	ixl_enable_msix = 0;
+       	if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
+               	device_printf(pf->dev,"Using an MSI interrupt\n");
+	else {
+		pf->msix = 0;
+               	device_printf(pf->dev,"Using a Legacy interrupt\n");
+	}
+	return (vectors);
+}
+
+
+/*
+ * Plumb MSI/X vectors
+ */
+static void
+ixl_configure_msix(struct ixl_pf *pf)
+{
+	struct i40e_hw	*hw = &pf->hw;
+	struct ixl_vsi *vsi = &pf->vsi;
+	u32		reg;
+	u16		vector = 1;
+
+	/* First set up the adminq - vector 0 */
+	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
+	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */
+
+	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+	    I40E_PFINT_ICR0_ENA_GRST_MASK |
+	    I40E_PFINT_ICR0_HMC_ERR_MASK |
+	    I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+	    I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+	    I40E_PFINT_ICR0_ENA_VFLR_MASK |
+	    I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+	wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+	wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x003E);
+
+	wr32(hw, I40E_PFINT_DYN_CTL0,
+	    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+	    I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
+
+	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+
+	/* Next configure the queues */
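+	/*
+	** The loop below builds the interrupt cause list the
+	** hardware walks for each vector: every RX queue cause
+	** points at its TX partner (NEXTQ type TX), each TX cause
+	** points at the next RX queue, and the last TX cause is
+	** terminated with IXL_QUEUE_EOL.
+	*/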
+	for (int i = 0; i < vsi->num_queues; i++, vector++) {
+		wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
+		wr32(hw, I40E_PFINT_LNKLSTN(i), i);
+
+		reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+		(IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+		(vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+		(i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+		(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+		wr32(hw, I40E_QINT_RQCTL(i), reg);
+
+		reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+		(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+		(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+		((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+		(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+		if (i == (vsi->num_queues - 1))
+			reg |= (IXL_QUEUE_EOL
+			    << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+		wr32(hw, I40E_QINT_TQCTL(i), reg);
+	}
+}
+
+/*
+ * Configure for MSI single vector operation 
+ */
+static void
+ixl_configure_legacy(struct ixl_pf *pf)
+{
+	struct i40e_hw	*hw = &pf->hw;
+	u32		reg;
+
+
+	wr32(hw, I40E_PFINT_ITR0(0), 0);
+	wr32(hw, I40E_PFINT_ITR0(1), 0);
+
+
+	/* Setup "other" causes */
+	reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
+	    | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
+	    | I40E_PFINT_ICR0_ENA_GRST_MASK
+	    | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
+	    | I40E_PFINT_ICR0_ENA_GPIO_MASK
+	    | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
+	    | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
+	    | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
+	    | I40E_PFINT_ICR0_ENA_VFLR_MASK
+	    | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
+	    ;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+	/* SW_ITR_IDX = 0, but don't change INTENA */
+	wr32(hw, I40E_PFINT_DYN_CTL0,
+	    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+	    I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+	/* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
+	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+
+	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+	wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+	/* Associate the queue pair to the vector and enable the q int */
+	reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
+	    | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+	    | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+	wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+	reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
+	    | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+	    | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+	wr32(hw, I40E_QINT_TQCTL(0), reg);
+
+	/* Next enable the queue pair */
+	reg = rd32(hw, I40E_QTX_ENA(0));
+	reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+	wr32(hw, I40E_QTX_ENA(0), reg);
+
+	reg = rd32(hw, I40E_QRX_ENA(0));
+	reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+	wr32(hw, I40E_QRX_ENA(0), reg);
+}
+
+
+/*
+ * Set the Initial ITR state
+ */
+static void
+ixl_configure_itr(struct ixl_pf *pf)
+{
+	struct i40e_hw		*hw = &pf->hw;
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct ixl_queue	*que = vsi->queues;
+
+	vsi->rx_itr_setting = ixl_rx_itr;
+	if (ixl_dynamic_rx_itr)
+		vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
+	vsi->tx_itr_setting = ixl_tx_itr;
+	if (ixl_dynamic_tx_itr)
+		vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
+	
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		struct tx_ring	*txr = &que->txr;
+		struct rx_ring 	*rxr = &que->rxr;
+
+		wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
+		    vsi->rx_itr_setting);
+		rxr->itr = vsi->rx_itr_setting;
+		rxr->latency = IXL_AVE_LATENCY;
+		wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
+		    vsi->tx_itr_setting);
+		txr->itr = vsi->tx_itr_setting;
+		txr->latency = IXL_AVE_LATENCY;
+	}
+}
+
+
+static int
+ixl_allocate_pci_resources(struct ixl_pf *pf)
+{
+	int             rid;
+	device_t        dev = pf->dev;
+
+	rid = PCIR_BAR(0);
+	pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+	    &rid, RF_ACTIVE);
+
+	if (!(pf->pci_mem)) {
+		device_printf(dev,"Unable to allocate bus resource: memory\n");
+		return (ENXIO);
+	}
+
+	pf->osdep.mem_bus_space_tag =
+		rman_get_bustag(pf->pci_mem);
+	pf->osdep.mem_bus_space_handle =
+		rman_get_bushandle(pf->pci_mem);
+	pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
+	pf->osdep.flush_reg = I40E_GLGEN_STAT;
+	pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
+
+	pf->hw.back = &pf->osdep;
+
+	/*
+	** Now set up MSI or MSI/X; this should
+	** return the number of supported
+	** vectors (1 for MSI).
+	*/
+	pf->msix = ixl_init_msix(pf);
+	return (0);
+}
+
+static void
+ixl_free_pci_resources(struct ixl_pf * pf)
+{
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	device_t		dev = pf->dev;
+	int			rid, memrid;
+
+	memrid = PCIR_BAR(IXL_BAR);
+
+	/* We may get here before stations are set up */
+	if ((!ixl_enable_msix) || (que == NULL))
+		goto early;
+
+	/*
+	**  Release all msix VSI resources:
+	*/
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		rid = que->msix + 1;
+		if (que->tag != NULL) {
+			bus_teardown_intr(dev, que->res, que->tag);
+			que->tag = NULL;
+		}
+		if (que->res != NULL)
+			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+	}
+
+early:
+	/* Clean the AdminQ interrupt last */
+	if (pf->admvec) /* we are doing MSIX */
+		rid = pf->admvec + 1;
+	else
+		rid = (pf->msix != 0) ? 1 : 0;
+
+	if (pf->tag != NULL) {
+		bus_teardown_intr(dev, pf->res, pf->tag);
+		pf->tag = NULL;
+	}
+	if (pf->res != NULL)
+		bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
+
+	if (pf->msix)
+		pci_release_msi(dev);
+
+	if (pf->msix_mem != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    memrid, pf->msix_mem);
+
+	if (pf->pci_mem != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    PCIR_BAR(0), pf->pci_mem);
+
+	return;
+}
+
+static void
+ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
+{
+	/* Display supported media types */
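+	/*
+	** phy_type is the abilities bitmask returned by
+	** i40e_aq_get_phy_capabilities(): bit n is set when the
+	** PHY supports I40E_PHY_TYPE value n, hence the
+	** (1 << I40E_PHY_TYPE_*) tests below.
+	*/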
+	if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
+	    phy_type & (1 << I40E_PHY_TYPE_XFI) ||
+	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
+	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
+	    phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
+	    phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+
+#ifndef IFM_ETH_XTYPE
+	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
+	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
+	    phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
+	    phy_type & (1 << I40E_PHY_TYPE_SFI))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+#else
+	if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
+	    || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_SFI))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+
+	if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+	if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+		ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+#endif
+}
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
+{
+	struct ifnet		*ifp;
+	struct i40e_hw		*hw = vsi->hw;
+	struct ixl_queue	*que = vsi->queues;
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	enum i40e_status_code aq_error = 0;
+
+	INIT_DEBUGOUT("ixl_setup_interface: begin");
+
+	ifp = vsi->ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "cannot allocate ifnet structure\n");
+		return (-1);
+	}
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	ifp->if_mtu = ETHERMTU;
+	if_initbaudrate(ifp, IF_Gbps(40));
+	ifp->if_init = ixl_init;
+	ifp->if_softc = vsi;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = ixl_ioctl;
+
+#if __FreeBSD_version >= 1100036
+	if_setgetcounterfn(ifp, ixl_get_counter);
+#endif
+
+	ifp->if_transmit = ixl_mq_start;
+
+	ifp->if_qflush = ixl_qflush;
+
+	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+
+	vsi->max_frame_size =
+	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+	    + ETHER_VLAN_ENCAP_LEN;
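+	/*
+	** With the default ETHERMTU (1500) this works out to
+	** 1500 + 14 (header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes,
+	** the classic maximum VLAN-tagged frame size.
+	*/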
+
+	/*
+	 * Tell the upper layer(s) we support long frames.
+	 */
+	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+	ifp->if_capabilities |= IFCAP_HWCSUM;
+	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+	ifp->if_capabilities |= IFCAP_TSO;
+	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+	ifp->if_capabilities |= IFCAP_LRO;
+
+	/* VLAN capabilities */
+	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+			     |  IFCAP_VLAN_HWTSO
+			     |  IFCAP_VLAN_MTU
+			     |  IFCAP_VLAN_HWCSUM;
+	ifp->if_capenable = ifp->if_capabilities;
+
+	/*
+	** Don't turn this on by default: if VLANs are
+	** created on another pseudo device (e.g. lagg)
+	** then VLAN events are not passed through, breaking
+	** operation, but with HW FILTER off it works. If
+	** you use VLANs directly on the ixl driver you can
+	** enable this and get full hardware tag filtering.
+	*/
+	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+	/*
+	 * Specify the media types supported by this adapter and register
+	 * callbacks to update media and link information
+	 */
+	ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
+		     ixl_media_status);
+
+	aq_error = i40e_aq_get_phy_capabilities(hw,
+	    FALSE, TRUE, &abilities, NULL);
+	/* May need delay to detect fiber correctly */
+	if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+		i40e_msec_delay(200);
+		aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
+		    TRUE, &abilities, NULL);
+	}
+	if (aq_error) {
+		if (aq_error == I40E_ERR_UNKNOWN_PHY)
+			device_printf(dev, "Unknown PHY type detected!\n");
+		else
+			device_printf(dev,
+			    "Error getting supported media types, err %d,"
+			    " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+		return (0);
+	}
+
+	ixl_add_ifmedia(vsi, abilities.phy_type);
+
+	/* Use autoselect media by default */
+	ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
+
+	ether_ifattach(ifp, hw->mac.addr);
+
+	return (0);
+}
+
+/*
+** Run when the Admin Queue gets a
+** link transition interrupt.
+*/
+static void
+ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
+{
+	struct i40e_hw	*hw = &pf->hw; 
+	struct i40e_aqc_get_link_status *status =
+	    (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+	bool check;
+
+	hw->phy.get_link_info = TRUE;
+	i40e_get_link_status(hw, &check);
+	pf->link_up = check;
+#ifdef IXL_DEBUG
+	printf("Link is %s\n", check ? "up":"down");
+#endif
+	/* Report if Unqualified modules are found */
+	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+	    (!(status->link_info & I40E_AQ_LINK_UP)))
+		device_printf(pf->dev, "Link failed because "
+		    "an unqualified module was detected\n");
+
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Get Firmware Switch configuration
+ *	- this will need to be more robust when more complex
+ *	  switch configurations are enabled.
+ *
+ **********************************************************************/
+static int
+ixl_switch_config(struct ixl_pf *pf)
+{
+	struct i40e_hw	*hw = &pf->hw; 
+	struct ixl_vsi	*vsi = &pf->vsi;
+	device_t 	dev = vsi->dev;
+	struct i40e_aqc_get_switch_config_resp *sw_config;
+	u8	aq_buf[I40E_AQ_LARGE_BUF];
+	int	ret;
+	u16	next = 0;
+
+	memset(&aq_buf, 0, sizeof(aq_buf));
+	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+	ret = i40e_aq_get_switch_config(hw, sw_config,
+	    sizeof(aq_buf), &next, NULL);
+	if (ret) {
+		device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
+		    ret);
+		return (ret);
+	}
+#ifdef IXL_DEBUG
+	device_printf(dev,
+	    "Switch config: header reported: %d in structure, %d total\n",
+    	    sw_config->header.num_reported, sw_config->header.num_total);
+	for (int i = 0; i < sw_config->header.num_reported; i++) {
+		device_printf(dev,
+		    "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
+		    sw_config->element[i].element_type,
+		    sw_config->element[i].seid,
+		    sw_config->element[i].uplink_seid,
+		    sw_config->element[i].downlink_seid);
+	}
+#endif
+	/* Simplified due to a single VSI at the moment */
+	vsi->uplink_seid = sw_config->element[0].uplink_seid;
+	vsi->downlink_seid = sw_config->element[0].downlink_seid;
+	vsi->seid = sw_config->element[0].seid;
+	return (ret);
+}
+
+/*********************************************************************
+ *
+ *  Initialize the VSI: this sets up the HMC queue contexts,
+ *			 covering things like the number of descriptors
+ *			 and buffer sizes, and also initializes the
+ *			 TX/RX rings.
+ *
+ **********************************************************************/
+static int
+ixl_initialize_vsi(struct ixl_vsi *vsi)
+{
+	struct ixl_pf		*pf = vsi->back;
+	struct ixl_queue	*que = vsi->queues;
+	device_t		dev = vsi->dev;
+	struct i40e_hw		*hw = vsi->hw;
+	struct i40e_vsi_context	ctxt;
+	int			err = 0;
+
+	memset(&ctxt, 0, sizeof(ctxt));
+	ctxt.seid = vsi->seid;
+	if (pf->veb_seid != 0)
+		ctxt.uplink_seid = pf->veb_seid;
+	ctxt.pf_num = hw->pf_id;
+	err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+	if (err) {
+		device_printf(dev,"get vsi params failed %x!!\n", err);
+		return (err);
+	}
+#ifdef IXL_DEBUG
+	printf("get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
+	    "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
+	    "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
+	    ctxt.uplink_seid, ctxt.vsi_number,
+	    ctxt.vsis_allocated, ctxt.vsis_unallocated,
+	    ctxt.flags, ctxt.pf_num, ctxt.vf_num,
+	    ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
+#endif
+	/*
+	** Set the queue and traffic class bits
+	**  - when multiple traffic classes are supported
+	**    this will need to be more robust.
+	*/
+	ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+	ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
+	ctxt.info.queue_mapping[0] = 0; 
+	ctxt.info.tc_mapping[0] = 0x0800; 
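+	/*
+	** Assumption on the 0x0800 magic value: per the admin queue
+	** VSI context layout it encodes queue offset 0 with a queue
+	** count of 2^4 = 16 for TC0 (the count field sits at the
+	** I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT bit position).
+	*/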
+
+	/* Set VLAN receive stripping mode */
+	ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
+	ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
+	if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+	else
+	    ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+	/* Keep copy of VSI info in VSI for statistic counters */
+	memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
+
+	/* Reset VSI statistics */
+	ixl_vsi_reset_stats(vsi);
+	vsi->hw_filters_add = 0;
+	vsi->hw_filters_del = 0;
+
+	ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
+
+	err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+	if (err) {
+		device_printf(dev,"update vsi params failed %x!!\n",
+		   hw->aq.asq_last_status);
+		return (err);
+	}
+
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		struct tx_ring		*txr = &que->txr;
+		struct rx_ring 		*rxr = &que->rxr;
+		struct i40e_hmc_obj_txq tctx;
+		struct i40e_hmc_obj_rxq rctx;
+		u32			txctl;
+		u16			size;
+
+
+		/* Setup the HMC TX Context  */
+		size = que->num_desc * sizeof(struct i40e_tx_desc);
+		memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
+		tctx.new_context = 1;
+		tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
+		tctx.qlen = que->num_desc;
+		tctx.fc_ena = 0;
+		tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
+		/* Enable HEAD writeback */
+		tctx.head_wb_ena = 1;
+		tctx.head_wb_addr = txr->dma.pa +
+		    (que->num_desc * sizeof(struct i40e_tx_desc));
+		tctx.rdylist_act = 0;
+		err = i40e_clear_lan_tx_queue_context(hw, i);
+		if (err) {
+			device_printf(dev, "Unable to clear TX context\n");
+			break;
+		}
+		err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
+		if (err) {
+			device_printf(dev, "Unable to set TX context\n");
+			break;
+		}
+		/* Associate the ring with this PF */
+		txctl = I40E_QTX_CTL_PF_QUEUE;
+		txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+		    I40E_QTX_CTL_PF_INDX_MASK);
+		wr32(hw, I40E_QTX_CTL(i), txctl);
+		ixl_flush(hw);
+
+		/* Do ring (re)init */
+		ixl_init_tx_ring(que);
+
+		/* Next setup the HMC RX Context  */
+		if (vsi->max_frame_size <= MCLBYTES)
+			rxr->mbuf_sz = MCLBYTES;
+		else
+			rxr->mbuf_sz = MJUMPAGESIZE;
+
+		u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
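+		/*
+		** Hypothetical example: with 2KB (MCLBYTES) clusters
+		** and a rx_buf_chain_len of 5, the hardware can chain
+		** buffers for a frame of up to 10KB, and rxmax below
+		** is set to the smaller of max_frame_size and this
+		** chain limit.
+		*/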
+
+		/* Set up an RX context for the HMC */
+		memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+		rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
+		/* ignore header split for now */
+		rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
+		rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
+		    vsi->max_frame_size : max_rxmax;
+		rctx.dtype = 0;
+		rctx.dsize = 1;	/* use 32-byte descriptors */
+		rctx.hsplit_0 = 0;  /* no HDR split initially */
+		rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
+		rctx.qlen = que->num_desc;
+		rctx.tphrdesc_ena = 1;
+		rctx.tphwdesc_ena = 1;
+		rctx.tphdata_ena = 0;
+		rctx.tphhead_ena = 0;
+		rctx.lrxqthresh = 2;
+		rctx.crcstrip = 1;
+		rctx.l2tsel = 1;
+		rctx.showiv = 1;
+		rctx.fc_ena = 0;
+		rctx.prefena = 1;
+
+		err = i40e_clear_lan_rx_queue_context(hw, i);
+		if (err) {
+			device_printf(dev,
+			    "Unable to clear RX context %d\n", i);
+			break;
+		}
+		err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
+		if (err) {
+			device_printf(dev, "Unable to set RX context %d\n", i);
+			break;
+		}
+		err = ixl_init_rx_ring(que);
+		if (err) {
+			device_printf(dev, "Fail in init_rx_ring %d\n", i);
+			break;
+		}
+		wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
+#ifdef DEV_NETMAP
+		/* preserve queue */
+		if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
+			struct netmap_adapter *na = NA(vsi->ifp);
+			struct netmap_kring *kring = &na->rx_rings[i];
+			int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
+			wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
+		} else
+#endif /* DEV_NETMAP */
+		wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+	}
+	return (err);
+}
+
+
+/*********************************************************************
+ *
+ *  Free all VSI structs.
+ *
+ **********************************************************************/
+void
+ixl_free_vsi(struct ixl_vsi *vsi)
+{
+	struct ixl_pf		*pf = (struct ixl_pf *)vsi->back;
+	struct ixl_queue	*que = vsi->queues;
+
+	/* Free station queues */
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		struct tx_ring *txr = &que->txr;
+		struct rx_ring *rxr = &que->rxr;
+	
+		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+			continue;
+		IXL_TX_LOCK(txr);
+		ixl_free_que_tx(que);
+		if (txr->base)
+			i40e_free_dma_mem(&pf->hw, &txr->dma);
+		IXL_TX_UNLOCK(txr);
+		IXL_TX_LOCK_DESTROY(txr);
+
+		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+			continue;
+		IXL_RX_LOCK(rxr);
+		ixl_free_que_rx(que);
+		if (rxr->base)
+			i40e_free_dma_mem(&pf->hw, &rxr->dma);
+		IXL_RX_UNLOCK(rxr);
+		IXL_RX_LOCK_DESTROY(rxr);
+		
+	}
+	free(vsi->queues, M_DEVBUF);
+
+	/* Free VSI filter list */
+	ixl_free_mac_filters(vsi);
+}
+
+static void
+ixl_free_mac_filters(struct ixl_vsi *vsi)
+{
+	struct ixl_mac_filter *f;
+
+	while (!SLIST_EMPTY(&vsi->ftl)) {
+		f = SLIST_FIRST(&vsi->ftl);
+		SLIST_REMOVE_HEAD(&vsi->ftl, next);
+		free(f, M_DEVBUF);
+	}
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the VSI (virtual station interface) and its
+ *  associated queues, rings, and the descriptors for each;
+ *  called only once at attach.
+ *
+ **********************************************************************/
+static int
+ixl_setup_stations(struct ixl_pf *pf)
+{
+	device_t		dev = pf->dev;
+	struct ixl_vsi		*vsi;
+	struct ixl_queue	*que;
+	struct tx_ring		*txr;
+	struct rx_ring		*rxr;
+	int 			rsize, tsize;
+	int			error = I40E_SUCCESS;
+
+	vsi = &pf->vsi;
+	vsi->back = (void *)pf;
+	vsi->hw = &pf->hw;
+	vsi->id = 0;
+	vsi->num_vlans = 0;
+	vsi->back = pf;
+
+	/* Get memory for the station queues */
+        if (!(vsi->queues =
+            (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+            vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+                device_printf(dev, "Unable to allocate queue memory\n");
+                error = ENOMEM;
+                goto early;
+        }
+
+	for (int i = 0; i < vsi->num_queues; i++) {
+		que = &vsi->queues[i];
+		que->num_desc = ixl_ringsz;
+		que->me = i;
+		que->vsi = vsi;
+		/* mark the queue as active */
+		vsi->active_queues |= (u64)1 << que->me;
+		txr = &que->txr;
+		txr->que = que;
+		txr->tail = I40E_QTX_TAIL(que->me);
+
+		/* Initialize the TX lock */
+		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+		    device_get_nameunit(dev), que->me);
+		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+		/* Create the TX descriptor ring */
+		tsize = roundup2((que->num_desc *
+		    sizeof(struct i40e_tx_desc)) +
+		    sizeof(u32), DBA_ALIGN);
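+		/*
+		** The extra sizeof(u32) reserves space just past the
+		** descriptors for the TX head writeback word that
+		** ixl_initialize_vsi() later points the HMC TX
+		** context at (tctx.head_wb_addr).
+		*/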
+		if (i40e_allocate_dma_mem(&pf->hw,
+		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
+			device_printf(dev,
+			    "Unable to allocate TX Descriptor memory\n");
+			error = ENOMEM;
+			goto fail;
+		}
+		txr->base = (struct i40e_tx_desc *)txr->dma.va;
+		bzero((void *)txr->base, tsize);
+       		/* Now allocate transmit soft structs for the ring */
+       		if (ixl_allocate_tx_data(que)) {
+			device_printf(dev,
+			    "Critical Failure setting up TX structures\n");
+			error = ENOMEM;
+			goto fail;
+       		}
+		/* Allocate a buf ring */
+		txr->br = buf_ring_alloc(4096, M_DEVBUF,
+		    M_WAITOK, &txr->mtx);
+		if (txr->br == NULL) {
+			device_printf(dev,
+			    "Critical Failure setting up TX buf ring\n");
+			error = ENOMEM;
+			goto fail;
+       		}
+
+		/*
+		 * Next the RX queues...
+		 */ 
+		rsize = roundup2(que->num_desc *
+		    sizeof(union i40e_rx_desc), DBA_ALIGN);
+		rxr = &que->rxr;
+		rxr->que = que;
+		rxr->tail = I40E_QRX_TAIL(que->me);
+
+		/* Initialize the RX side lock */
+		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+		    device_get_nameunit(dev), que->me);
+		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+		if (i40e_allocate_dma_mem(&pf->hw,
+		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
+			device_printf(dev,
+			    "Unable to allocate RX Descriptor memory\n");
+			error = ENOMEM;
+			goto fail;
+		}
+		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+		bzero((void *)rxr->base, rsize);
+
+		/* Allocate receive soft structs for the ring */
+		if (ixl_allocate_rx_data(que)) {
+			device_printf(dev,
+			    "Critical Failure setting up receive structs\n");
+			error = ENOMEM;
+			goto fail;
+		}
+	}
+
+	return (0);
+
+fail:
+	for (int i = 0; i < vsi->num_queues; i++) {
+		que = &vsi->queues[i];
+		rxr = &que->rxr;
+		txr = &que->txr;
+		if (rxr->base)
+			i40e_free_dma_mem(&pf->hw, &rxr->dma);
+		if (txr->base)
+			i40e_free_dma_mem(&pf->hw, &txr->dma);
+	}
+
+early:
+	return (error);
+}
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+static void
+ixl_set_queue_rx_itr(struct ixl_queue *que)
+{
+	struct ixl_vsi	*vsi = que->vsi;
+	struct i40e_hw	*hw = vsi->hw;
+	struct rx_ring	*rxr = &que->rxr;
+	u16		rx_itr;
+	u16		rx_latency = 0;
+	int		rx_bytes;
+
+
+	/* Idle, do nothing */
+	if (rxr->bytes == 0)
+		return;
+
+	if (ixl_dynamic_rx_itr) {
+		rx_bytes = rxr->bytes/rxr->itr;
+		rx_itr = rxr->itr;
+
+		/* Adjust latency range */
+		switch (rxr->latency) {
+		case IXL_LOW_LATENCY:
+			if (rx_bytes > 10) {
+				rx_latency = IXL_AVE_LATENCY;
+				rx_itr = IXL_ITR_20K;
+			}
+			break;
+		case IXL_AVE_LATENCY:
+			if (rx_bytes > 20) {
+				rx_latency = IXL_BULK_LATENCY;
+				rx_itr = IXL_ITR_8K;
+			} else if (rx_bytes <= 10) {
+				rx_latency = IXL_LOW_LATENCY;
+				rx_itr = IXL_ITR_100K;
+			}
+			break;
+		case IXL_BULK_LATENCY:
+			if (rx_bytes <= 20) {
+				rx_latency = IXL_AVE_LATENCY;
+				rx_itr = IXL_ITR_20K;
+			}
+			break;
+       		 }
+
+		rxr->latency = rx_latency;
+
+		if (rx_itr != rxr->itr) {
+			/* do an exponential smoothing */
+			rx_itr = (10 * rx_itr * rxr->itr) /
+			    ((9 * rx_itr) + rxr->itr);
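+			/*
+			** Worked example (made-up values): with a
+			** current itr of 100 and a target of 8, the
+			** smoothed result is (10*8*100)/((9*8)+100)
+			** = 8000/172 ~= 46, so the moderation value
+			** moves only part of the way toward the
+			** target each interval instead of jumping
+			** there at once.
+			*/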
+			rxr->itr = rx_itr & IXL_MAX_ITR;
+			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+			    que->me), rxr->itr);
+		}
+	} else { /* We may have toggled to non-dynamic */
+		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+			vsi->rx_itr_setting = ixl_rx_itr;
+		/* Update the hardware if needed */
+		if (rxr->itr != vsi->rx_itr_setting) {
+			rxr->itr = vsi->rx_itr_setting;
+			wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+			    que->me), rxr->itr);
+		}
+	}
+	rxr->bytes = 0;
+	rxr->packets = 0;
+	return;
+}
+
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+static void
+ixl_set_queue_tx_itr(struct ixl_queue *que)
+{
+	struct ixl_vsi	*vsi = que->vsi;
+	struct i40e_hw	*hw = vsi->hw;
+	struct tx_ring	*txr = &que->txr;
+	u16		tx_itr;
+	u16		tx_latency = 0;
+	int		tx_bytes;
+
+
+	/* Idle, do nothing */
+	if (txr->bytes == 0)
+		return;
+
+	if (ixl_dynamic_tx_itr) {
+		tx_bytes = txr->bytes/txr->itr;
+		tx_itr = txr->itr;
+
+		switch (txr->latency) {
+		case IXL_LOW_LATENCY:
+			if (tx_bytes > 10) {
+				tx_latency = IXL_AVE_LATENCY;
+				tx_itr = IXL_ITR_20K;
+			}
+			break;
+		case IXL_AVE_LATENCY:
+			if (tx_bytes > 20) {
+				tx_latency = IXL_BULK_LATENCY;
+				tx_itr = IXL_ITR_8K;
+			} else if (tx_bytes <= 10) {
+				tx_latency = IXL_LOW_LATENCY;
+				tx_itr = IXL_ITR_100K;
+			}
+			break;
+		case IXL_BULK_LATENCY:
+			if (tx_bytes <= 20) {
+				tx_latency = IXL_AVE_LATENCY;
+				tx_itr = IXL_ITR_20K;
+			}
+			break;
+		}
+
+		txr->latency = tx_latency;
+
+		if (tx_itr != txr->itr) {
+			/* do an exponential smoothing */
+			tx_itr = (10 * tx_itr * txr->itr) /
+			    ((9 * tx_itr) + txr->itr);
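+			/*
+			** Same exponential smoothing as the RX path
+			** above; see the worked example there.
+			*/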
+			txr->itr = tx_itr & IXL_MAX_ITR;
+			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+			    que->me), txr->itr);
+		}
+
+	} else { /* We may have toggled to non-dynamic */
+		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+			vsi->tx_itr_setting = ixl_tx_itr;
+		/* Update the hardware if needed */
+		if (txr->itr != vsi->tx_itr_setting) {
+			txr->itr = vsi->tx_itr_setting;
+			wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+			    que->me), txr->itr);
+		}
+	}
+	txr->bytes = 0;
+	txr->packets = 0;
+	return;
+}
+
+#define QUEUE_NAME_LEN 32
+
+static void
+ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
+    struct sysctl_ctx_list *ctx, const char *sysctl_name)
+{
+	struct sysctl_oid *tree;
+	struct sysctl_oid_list *child;
+	struct sysctl_oid_list *vsi_list;
+
+	tree = device_get_sysctl_tree(pf->dev);
+	child = SYSCTL_CHILDREN(tree);
+	vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
+				   CTLFLAG_RD, NULL, "VSI Number");
+	vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+	ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
+}
+
+static void
+ixl_add_hw_stats(struct ixl_pf *pf)
+{
+	device_t dev = pf->dev;
+	struct ixl_vsi *vsi = &pf->vsi;
+	struct ixl_queue *queues = vsi->queues;
+	struct i40e_hw_port_stats *pf_stats = &pf->stats;
+
+	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+	struct sysctl_oid_list *vsi_list;
+
+	struct sysctl_oid *queue_node;
+	struct sysctl_oid_list *queue_list;
+
+	struct tx_ring *txr;
+	struct rx_ring *rxr;
+	char queue_namebuf[QUEUE_NAME_LEN];
+
+	/* Driver statistics */
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+			CTLFLAG_RD, &pf->watchdog_events,
+			"Watchdog timeouts");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+			CTLFLAG_RD, &pf->admin_irq,
+			"Admin Queue IRQ Handled");
+
+	ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
+	vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
+
+	/* Queue statistics */
+	for (int q = 0; q < vsi->num_queues; q++) {
+		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
+		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+		    OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
+		queue_list = SYSCTL_CHILDREN(queue_node);
+
+		txr = &(queues[q].txr);
+		rxr = &(queues[q].rxr);
+
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+				"m_defrag() failed");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
+				CTLFLAG_RD, &(queues[q].dropped_pkts),
+				"Driver dropped packets");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+				CTLFLAG_RD, &(queues[q].irqs),
+				"irqs on this queue");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+				CTLFLAG_RD, &(queues[q].tso),
+				"TSO");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
+				CTLFLAG_RD, &(queues[q].tx_dma_setup),
+				"Driver tx dma failure in xmit");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+				CTLFLAG_RD, &(txr->no_desc),
+				"Queue No Descriptor Available");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+				CTLFLAG_RD, &(txr->total_packets),
+				"Queue Packets Transmitted");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+				CTLFLAG_RD, &(txr->tx_bytes),
+				"Queue Bytes Transmitted");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+				CTLFLAG_RD, &(rxr->rx_packets),
+				"Queue Packets Received");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+				CTLFLAG_RD, &(rxr->rx_bytes),
+				"Queue Bytes Received");
+	}
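+	/*
+	** The nodes added above are readable with sysctl(8); for
+	** example (assuming unit 0 of the driver):
+	**   sysctl dev.ixl.0.pf.que0.tx_packets
+	*/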
+
+	/* MAC stats */
+	ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
+}
+
+static void
+ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+	struct sysctl_oid_list *child,
+	struct i40e_eth_stats *eth_stats)
+{
+	struct ixl_sysctl_info ctls[] =
+	{
+		{&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+		{&eth_stats->rx_unicast, "ucast_pkts_rcvd",
+			"Unicast Packets Received"},
+		{&eth_stats->rx_multicast, "mcast_pkts_rcvd",
+			"Multicast Packets Received"},
+		{&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
+			"Broadcast Packets Received"},
+		{&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
+		{&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+		{&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+		{&eth_stats->tx_multicast, "mcast_pkts_txd",
+			"Multicast Packets Transmitted"},
+		{&eth_stats->tx_broadcast, "bcast_pkts_txd",
+			"Broadcast Packets Transmitted"},
+		// end
+		{0,0,0}
+	};
+
+	struct ixl_sysctl_info *entry = ctls;
+	while (entry->stat != 0)
+	{
+		SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+				CTLFLAG_RD, entry->stat,
+				entry->description);
+		entry++;
+	}
+}
+
+static void
+ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
+	struct sysctl_oid_list *child,
+	struct i40e_hw_port_stats *stats)
+{
+	struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
+				    CTLFLAG_RD, NULL, "MAC Statistics");
+	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
+
+	struct i40e_eth_stats *eth_stats = &stats->eth;
+	ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
+
+	struct ixl_sysctl_info ctls[] = 
+	{
+		{&stats->crc_errors, "crc_errors", "CRC Errors"},
+		{&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
+		{&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
+		{&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
+		{&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
+		/* Packet Reception Stats */
+		{&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
+		{&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
+		{&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
+		{&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
+		{&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
+		{&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
+		{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
+		{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
+		{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
+		{&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
+		{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
+		{&stats->checksum_error, "checksum_errors", "Checksum Errors"},
+		/* Packet Transmission Stats */
+		{&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
+		{&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
+		{&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
+		{&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
+		{&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
+		{&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
+		{&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
+		/* Flow control */
+		{&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
+		{&stats->link_xon_rx, "xon_recvd", "Link XON received"},
+		{&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
+		{&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
+		/* End */
+		{0,0,0}
+	};
+
+	struct ixl_sysctl_info *entry = ctls;
+	while (entry->stat != 0)
+	{
+		SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
+				CTLFLAG_RD, entry->stat,
+				entry->description);
+		entry++;
+	}
+}
+
+
+/*
+** ixl_config_rss - set up RSS
+**  - note this is done only for the single (PF) VSI
+*/
+static void
+ixl_config_rss(struct ixl_vsi *vsi)
+{
+	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
+	struct i40e_hw	*hw = vsi->hw;
+	u32		lut = 0;
+	u64		set_hena = 0, hena;
+	int		i, j, que_id;
+#ifdef RSS
+	u32		rss_hash_config;
+	u32		rss_seed[IXL_KEYSZ];
+#else
+	u32             rss_seed[IXL_KEYSZ] = {0x41b01687,
+			    0x183cfd8c, 0xce880440, 0x580cbc3c,
+			    0x35897377, 0x328b25e1, 0x4fa98922,
+			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
+#endif
+
+#ifdef RSS
+	/* Fetch the configured RSS key */
+	rss_getkey((uint8_t *) &rss_seed);
+#endif
+
+	/* Fill out hash function seed */
+	for (i = 0; i < IXL_KEYSZ; i++)
+		wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
+
+	/* Enable PCTYPES for RSS: */
+#ifdef RSS
+	rss_hash_config = rss_gethashconfig();
+	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
+	set_hena =
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
+		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+#endif
+	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
+	    ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+	hena |= set_hena;
+	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+
+	/* Populate the LUT with the max number of queues in round-robin fashion */
+	for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+		if (j == vsi->num_queues)
+			j = 0;
+#ifdef RSS
+		/*
+		 * Fetch the RSS bucket id for the given indirection entry.
+		 * Cap it at the number of configured buckets (which is
+		 * num_queues.)
+		 */
+		que_id = rss_get_indirection_to_bucket(i);
+		que_id = que_id % vsi->num_queues;
+#else
+		que_id = j;
+#endif
+		/* lut = 4-byte sliding window of 4 lut entries */
+		lut = (lut << 8) | (que_id &
+		    ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+		/* On i = 3, we have 4 entries in lut; write to the register */
+		if ((i & 3) == 3)
+			wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
+	}
+	ixl_flush(hw);
+}
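+
+/*
+** Worked example of the HLUT packing above (a sketch, not driver code):
+** with num_queues = 4, the first four loop iterations yield que_ids
+** 0,1,2,3 (the rss_table_entry_width mask is a no-op for such small
+** values), and the sliding window accumulates them a byte at a time:
+**
+**	i=0: lut = 0x00000000	i=2: lut = 0x00000102
+**	i=1: lut = 0x00000001	i=3: lut = 0x00010203
+**
+** On i=3 the 32-bit value 0x00010203 is written to I40E_PFQF_HLUT(0),
+** i.e. queue 0 lands in the most significant byte of the register.
+*/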
+
+
+/*
+** This routine is run via a vlan config EVENT; it
+** lets us use the HW filter table since we can get
+** the vlan id. This just creates the entry in the
+** soft version of the VFTA; init will repopulate
+** the real table.
+*/
+static void
+ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+	struct ixl_vsi	*vsi = ifp->if_softc;
+	struct i40e_hw	*hw = vsi->hw;
+	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
+
+	if (ifp->if_softc !=  arg)   /* Not our event */
+		return;
+
+	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
+		return;
+
+	IXL_PF_LOCK(pf);
+	++vsi->num_vlans;
+	ixl_add_filter(vsi, hw->mac.addr, vtag);
+	IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine is run via a vlan unconfig EVENT;
+** it removes our entry in the soft vfta.
+*/
+static void
+ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+	struct ixl_vsi	*vsi = ifp->if_softc;
+	struct i40e_hw	*hw = vsi->hw;
+	struct ixl_pf	*pf = (struct ixl_pf *)vsi->back;
+
+	if (ifp->if_softc !=  arg)
+		return;
+
+	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
+		return;
+
+	IXL_PF_LOCK(pf);
+	--vsi->num_vlans;
+	ixl_del_filter(vsi, hw->mac.addr, vtag);
+	IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine updates vlan filters. Called by init,
+** it scans the filter table and then updates the hw
+** after a soft reset.
+*/
+static void
+ixl_setup_vlan_filters(struct ixl_vsi *vsi)
+{
+	struct ixl_mac_filter	*f;
+	int			cnt = 0, flags;
+
+	if (vsi->num_vlans == 0)
+		return;
+	/*
+	** Scan the filter list for vlan entries,
+	** mark them for addition and then call
+	** for the AQ update.
+	*/
+	SLIST_FOREACH(f, &vsi->ftl, next) {
+		if (f->flags & IXL_FILTER_VLAN) {
+			f->flags |=
+			    (IXL_FILTER_ADD |
+			    IXL_FILTER_USED);
+			cnt++;
+		}
+	}
+	if (cnt == 0) {
+		printf("setup vlan: no filters found!\n");
+		return;
+	}
+	flags = IXL_FILTER_VLAN;
+	flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+	ixl_add_hw_filters(vsi, flags, cnt);
+	return;
+}
+
+/*
+** Initialize filter list and add filters that the hardware
+** needs to know about.
+*/
+static void
+ixl_init_filters(struct ixl_vsi *vsi)
+{
+	/* Add broadcast address */
+	ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+}
+
+/*
+** This routine adds multicast filters
+*/
+static void
+ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
+{
+	struct ixl_mac_filter *f;
+
+	/* Does one already exist? */
+	f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+	if (f != NULL)
+		return;
+
+	f = ixl_get_filter(vsi);
+	if (f == NULL) {
+		printf("WARNING: no filter available!!\n");
+		return;
+	}
+	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+	f->vlan = IXL_VLAN_ANY;
+	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
+	    | IXL_FILTER_MC);
+
+	return;
+}
+
+static void
+ixl_reconfigure_filters(struct ixl_vsi *vsi)
+{
+
+	ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
+}
+
+/*
+** This routine adds macvlan filters
+*/
+static void
+ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+	struct ixl_mac_filter	*f, *tmp;
+	struct ixl_pf		*pf;
+	device_t		dev;
+
+	DEBUGOUT("ixl_add_filter: begin");
+
+	pf = vsi->back;
+	dev = pf->dev;
+
+	/* Does one already exist? */
+	f = ixl_find_filter(vsi, macaddr, vlan);
+	if (f != NULL)
+		return;
+	/*
+	** If this is the first vlan being registered, we need
+	** to remove the ANY filter that indicates we are not
+	** in a vlan, and replace it with a vlan 0 filter.
+	*/
+	if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
+		tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+		if (tmp != NULL) {
+			ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
+			ixl_add_filter(vsi, macaddr, 0);
+		}
+	}
+
+	f = ixl_get_filter(vsi);
+	if (f == NULL) {
+		device_printf(dev, "WARNING: no filter available!!\n");
+		return;
+	}
+	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+	f->vlan = vlan;
+	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+	if (f->vlan != IXL_VLAN_ANY)
+		f->flags |= IXL_FILTER_VLAN;
+	else
+		vsi->num_macs++;
+
+	ixl_add_hw_filters(vsi, f->flags, 1);
+	return;
+}
+
+static void
+ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+	struct ixl_mac_filter *f;
+
+	f = ixl_find_filter(vsi, macaddr, vlan);
+	if (f == NULL)
+		return;
+
+	f->flags |= IXL_FILTER_DEL;
+	ixl_del_hw_filters(vsi, 1);
+	vsi->num_macs--;
+
+	/* Check if this is the last vlan removal */
+	if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
+		/* Switch back to a non-vlan filter */
+		ixl_del_filter(vsi, macaddr, 0);
+		ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
+	}
+	return;
+}
+
+/*
+** Find the filter with both matching mac addr and vlan id
+*/
+static struct ixl_mac_filter *
+ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+	struct ixl_mac_filter	*f;
+	bool			match = FALSE;
+
+	SLIST_FOREACH(f, &vsi->ftl, next) {
+		if (!cmp_etheraddr(f->macaddr, macaddr))
+			continue;
+		if (f->vlan == vlan) {
+			match = TRUE;
+			break;
+		}
+	}	
+
+	if (!match)
+		f = NULL;
+	return (f);
+}
+
+/*
+** This routine takes additions to the vsi filter
+** table and creates an Admin Queue call to create
+** the filters in the hardware.
+*/
+static void
+ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
+{
+	struct i40e_aqc_add_macvlan_element_data *a, *b;
+	struct ixl_mac_filter	*f;
+	struct ixl_pf		*pf;
+	struct i40e_hw		*hw;
+	device_t		dev;
+	int			err, j = 0;
+
+	pf = vsi->back;
+	dev = pf->dev;
+	hw = &pf->hw;
+	IXL_PF_LOCK_ASSERT(pf);
+
+	a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (a == NULL) {
+		device_printf(dev, "add_hw_filters failed to get memory\n");
+		return;
+	}
+
+	/*
+	** Scan the filter list; each time we find a matching
+	** entry we add it to the admin queue array and turn
+	** off the add bit.
+	*/
+	SLIST_FOREACH(f, &vsi->ftl, next) {
+		if (f->flags == flags) {
+			b = &a[j]; // a pox on fvl long names :)
+			bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
+			if (f->vlan == IXL_VLAN_ANY) {
+				b->vlan_tag = 0;
+				b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+			} else {
+				b->vlan_tag = f->vlan;
+				b->flags = 0;
+			}
+			b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+			f->flags &= ~IXL_FILTER_ADD;
+			j++;
+		}
+		if (j == cnt)
+			break;
+	}
+	if (j > 0) {
+		err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
+		if (err) 
+			device_printf(dev, "aq_add_macvlan err %d, "
+			    "aq_error %d\n", err, hw->aq.asq_last_status);
+		else
+			vsi->hw_filters_add += j;
+	}
+	free(a, M_DEVBUF);
+	return;
+}
+
+/*
+** This routine takes removals in the vsi filter
+** table and creates an Admin Queue call to delete
+** the filters in the hardware.
+*/
+static void
+ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
+{
+	struct i40e_aqc_remove_macvlan_element_data *d, *e;
+	struct ixl_pf		*pf;
+	struct i40e_hw		*hw;
+	device_t		dev;
+	struct ixl_mac_filter	*f, *f_temp;
+	int			err, j = 0;
+
+	DEBUGOUT("ixl_del_hw_filters: begin\n");
+
+	pf = vsi->back;
+	hw = &pf->hw;
+	dev = pf->dev;
+
+	d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (d == NULL) {
+		printf("del hw filter failed to get memory\n");
+		return;
+	}
+
+	SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
+		if (f->flags & IXL_FILTER_DEL) {
+			e = &d[j]; // a pox on fvl long names :)
+			bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
+			e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
+			e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+			/* delete entry from vsi list */
+			SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
+			free(f, M_DEVBUF);
+			j++;
+		}
+		if (j == cnt)
+			break;
+	}
+	if (j > 0) {
+		err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
+		/* NOTE: returns ENOENT every time but seems to work fine,
+		   so we'll ignore that specific error. */
+		// TODO: Does this still occur on current firmwares?
+		if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
+			int sc = 0;
+			for (int i = 0; i < j; i++)
+				sc += (!d[i].error_code);
+			vsi->hw_filters_del += sc;
+			device_printf(dev,
+			    "Failed to remove %d/%d filters, aq error %d\n",
+			    j - sc, j, hw->aq.asq_last_status);
+		} else
+			vsi->hw_filters_del += j;
+	}
+	free(d, M_DEVBUF);
+
+	DEBUGOUT("ixl_del_hw_filters: end\n");
+	return;
+}
+
+static int
+ixl_enable_rings(struct ixl_vsi *vsi)
+{
+	struct ixl_pf	*pf = vsi->back;
+	struct i40e_hw	*hw = &pf->hw;
+	int		index, error;
+	u32		reg;
+
+	error = 0;
+	for (int i = 0; i < vsi->num_queues; i++) {
+		index = vsi->first_queue + i;
+		i40e_pre_tx_queue_cfg(hw, index, TRUE);
+
+		reg = rd32(hw, I40E_QTX_ENA(index));
+		reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+		    I40E_QTX_ENA_QENA_STAT_MASK;
+		wr32(hw, I40E_QTX_ENA(index), reg);
+		/* Verify the enable took */
+		for (int j = 0; j < 10; j++) {
+			reg = rd32(hw, I40E_QTX_ENA(index));
+			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+				break;
+			i40e_msec_delay(10);
+		}
+		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
+			device_printf(pf->dev, "TX queue %d disabled!\n",
+			    index);
+			error = ETIMEDOUT;
+		}
+
+		reg = rd32(hw, I40E_QRX_ENA(index));
+		reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+		    I40E_QRX_ENA_QENA_STAT_MASK;
+		wr32(hw, I40E_QRX_ENA(index), reg);
+		/* Verify the enable took */
+		for (int j = 0; j < 10; j++) {
+			reg = rd32(hw, I40E_QRX_ENA(index));
+			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+				break;
+			i40e_msec_delay(10);
+		}
+		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
+			device_printf(pf->dev, "RX queue %d disabled!\n",
+			    index);
+			error = ETIMEDOUT;
+		}
+	}
+
+	return (error);
+}
+
+static int
+ixl_disable_rings(struct ixl_vsi *vsi)
+{
+	struct ixl_pf	*pf = vsi->back;
+	struct i40e_hw	*hw = &pf->hw;
+	int		index, error;
+	u32		reg;
+
+	error = 0;
+	for (int i = 0; i < vsi->num_queues; i++) {
+		index = vsi->first_queue + i;
+
+		i40e_pre_tx_queue_cfg(hw, index, FALSE);
+		i40e_usec_delay(500);
+
+		reg = rd32(hw, I40E_QTX_ENA(index));
+		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+		wr32(hw, I40E_QTX_ENA(index), reg);
+		/* Verify the disable took */
+		for (int j = 0; j < 10; j++) {
+			reg = rd32(hw, I40E_QTX_ENA(index));
+			if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+				break;
+			i40e_msec_delay(10);
+		}
+		if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
+			device_printf(pf->dev, "TX queue %d still enabled!\n",
+			    index);
+			error = ETIMEDOUT;
+		}
+
+		reg = rd32(hw, I40E_QRX_ENA(index));
+		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+		wr32(hw, I40E_QRX_ENA(index), reg);
+		/* Verify the disable took */
+		for (int j = 0; j < 10; j++) {
+			reg = rd32(hw, I40E_QRX_ENA(index));
+			if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+				break;
+			i40e_msec_delay(10);
+		}
+		if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
+			device_printf(pf->dev, "RX queue %d still enabled!\n",
+			    index);
+			error = ETIMEDOUT;
+		}
+	}
+
+	return (error);
+}
+
+/**
+ * ixl_handle_mdd_event
+ *
+ * Called from the interrupt handler to identify possibly malicious VFs
+ * (it also detects events from the PF itself).
+ **/
+static void
+ixl_handle_mdd_event(struct ixl_pf *pf)
+{
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	bool mdd_detected = false;
+	bool pf_mdd_detected = false;
+	u32 reg;
+
+	/* find what triggered the MDD event */
+	reg = rd32(hw, I40E_GL_MDET_TX);
+	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+				I40E_GL_MDET_TX_PF_NUM_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+				I40E_GL_MDET_TX_EVENT_SHIFT;
+		u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+				I40E_GL_MDET_TX_QUEUE_SHIFT;
+		device_printf(dev,
+			 "Malicious Driver Detection event 0x%02x"
+			 " on TX queue %d pf number 0x%02x\n",
+			 event, queue, pf_num);
+		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+		mdd_detected = true;
+	}
+	reg = rd32(hw, I40E_GL_MDET_RX);
+	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+				I40E_GL_MDET_RX_FUNCTION_SHIFT;
+		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+				I40E_GL_MDET_RX_EVENT_SHIFT;
+		u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+				I40E_GL_MDET_RX_QUEUE_SHIFT;
+		device_printf(dev,
+			 "Malicious Driver Detection event 0x%02x"
+			 " on RX queue %d of function 0x%02x\n",
+			 event, queue, func);
+		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+		mdd_detected = true;
+	}
+
+	if (mdd_detected) {
+		reg = rd32(hw, I40E_PF_MDET_TX);
+		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+			device_printf(dev,
+				 "MDD TX event is for this function 0x%08x\n",
+				 reg);
+			pf_mdd_detected = true;
+		}
+		reg = rd32(hw, I40E_PF_MDET_RX);
+		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+			device_printf(dev,
+				 "MDD RX event is for this function 0x%08x\n",
+				 reg);
+			pf_mdd_detected = true;
+		}
+	}
+
+	/* re-enable mdd interrupt cause */
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	ixl_flush(hw);
+}
+
+static void
+ixl_enable_intr(struct ixl_vsi *vsi)
+{
+	struct i40e_hw		*hw = vsi->hw;
+	struct ixl_queue	*que = vsi->queues;
+
+	if (ixl_enable_msix) {
+		ixl_enable_adminq(hw);
+		for (int i = 0; i < vsi->num_queues; i++, que++)
+			ixl_enable_queue(hw, que->me);
+	} else
+		ixl_enable_legacy(hw);
+}
+
+static void
+ixl_disable_rings_intr(struct ixl_vsi *vsi)
+{
+	struct i40e_hw		*hw = vsi->hw;
+	struct ixl_queue	*que = vsi->queues;
+
+	for (int i = 0; i < vsi->num_queues; i++, que++)
+		ixl_disable_queue(hw, que->me);
+}
+
+static void
+ixl_disable_intr(struct ixl_vsi *vsi)
+{
+	struct i40e_hw		*hw = vsi->hw;
+
+	if (ixl_enable_msix)
+		ixl_disable_adminq(hw);
+	else
+		ixl_disable_legacy(hw);
+}
+
+static void
+ixl_enable_adminq(struct i40e_hw *hw)
+{
+	u32		reg;
+
+	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+	ixl_flush(hw);
+	return;
+}
+
+static void
+ixl_disable_adminq(struct i40e_hw *hw)
+{
+	u32		reg;
+
+	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+	return;
+}
+
+static void
+ixl_enable_queue(struct i40e_hw *hw, int id)
+{
+	u32		reg;
+
+	reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+	    I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+}
+
+static void
+ixl_disable_queue(struct i40e_hw *hw, int id)
+{
+	u32		reg;
+
+	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+	wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+
+	return;
+}
+
+static void
+ixl_enable_legacy(struct i40e_hw *hw)
+{
+	u32		reg;
+	reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+	    (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+static void
+ixl_disable_legacy(struct i40e_hw *hw)
+{
+	u32		reg;
+
+	reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+	wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+	return;
+}
+
+static void
+ixl_update_stats_counters(struct ixl_pf *pf)
+{
+	struct i40e_hw	*hw = &pf->hw;
+	struct ixl_vsi	*vsi = &pf->vsi;
+	struct ixl_vf	*vf;
+
+	struct i40e_hw_port_stats *nsd = &pf->stats;
+	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+	/* Update hw stats */
+	ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->crc_errors, &nsd->crc_errors);
+	ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->illegal_bytes, &nsd->illegal_bytes);
+	ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+			   I40E_GLPRT_GORCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+	ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+			   I40E_GLPRT_GOTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+	ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_discards,
+			   &nsd->eth.rx_discards);
+	ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+			   I40E_GLPRT_UPRCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_unicast,
+			   &nsd->eth.rx_unicast);
+	ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+			   I40E_GLPRT_UPTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_unicast,
+			   &nsd->eth.tx_unicast);
+	ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+			   I40E_GLPRT_MPRCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_multicast,
+			   &nsd->eth.rx_multicast);
+	ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+			   I40E_GLPRT_MPTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_multicast,
+			   &nsd->eth.tx_multicast);
+	ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+			   I40E_GLPRT_BPRCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.rx_broadcast,
+			   &nsd->eth.rx_broadcast);
+	ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+			   I40E_GLPRT_BPTCL(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->eth.tx_broadcast,
+			   &nsd->eth.tx_broadcast);
+
+	ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_dropped_link_down,
+			   &nsd->tx_dropped_link_down);
+	ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->mac_local_faults,
+			   &nsd->mac_local_faults);
+	ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->mac_remote_faults,
+			   &nsd->mac_remote_faults);
+	ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_length_errors,
+			   &nsd->rx_length_errors);
+
+	/* Flow control (LFC) stats */
+	ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xon_rx, &nsd->link_xon_rx);
+	ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xon_tx, &nsd->link_xon_tx);
+	ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
+	ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+	/* Packet size stats rx */
+	ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+			   I40E_GLPRT_PRC64L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_64, &nsd->rx_size_64);
+	ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+			   I40E_GLPRT_PRC127L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_127, &nsd->rx_size_127);
+	ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+			   I40E_GLPRT_PRC255L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_255, &nsd->rx_size_255);
+	ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+			   I40E_GLPRT_PRC511L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_511, &nsd->rx_size_511);
+	ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+			   I40E_GLPRT_PRC1023L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_1023, &nsd->rx_size_1023);
+	ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+			   I40E_GLPRT_PRC1522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_1522, &nsd->rx_size_1522);
+	ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+			   I40E_GLPRT_PRC9522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_size_big, &nsd->rx_size_big);
+
+	/* Packet size stats tx */
+	ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+			   I40E_GLPRT_PTC64L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_64, &nsd->tx_size_64);
+	ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+			   I40E_GLPRT_PTC127L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_127, &nsd->tx_size_127);
+	ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+			   I40E_GLPRT_PTC255L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_255, &nsd->tx_size_255);
+	ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+			   I40E_GLPRT_PTC511L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_511, &nsd->tx_size_511);
+	ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+			   I40E_GLPRT_PTC1023L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_1023, &nsd->tx_size_1023);
+	ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+			   I40E_GLPRT_PTC1522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_1522, &nsd->tx_size_1522);
+	ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+			   I40E_GLPRT_PTC9522L(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->tx_size_big, &nsd->tx_size_big);
+
+	ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_undersize, &nsd->rx_undersize);
+	ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_fragments, &nsd->rx_fragments);
+	ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_oversize, &nsd->rx_oversize);
+	ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+			   pf->stat_offsets_loaded,
+			   &osd->rx_jabber, &nsd->rx_jabber);
+	pf->stat_offsets_loaded = true;
+	/* End hw stats */
+
+	/* Update vsi stats */
+	ixl_update_vsi_stats(vsi);
+
+	for (int i = 0; i < pf->num_vfs; i++) {
+		vf = &pf->vfs[i];
+		if (vf->vf_flags & VF_FLAG_ENABLED)
+			ixl_update_eth_stats(&pf->vfs[i].vsi);
+	}
+}
+
+/*
+** Tasklet handler for MSIX Adminq interrupts
+**  - done outside interrupt context since it might sleep
+*/
+static void
+ixl_do_adminq(void *context, int pending)
+{
+	struct ixl_pf			*pf = context;
+	struct i40e_hw			*hw = &pf->hw;
+	struct ixl_vsi			*vsi = &pf->vsi;
+	struct i40e_arq_event_info	event;
+	i40e_status			ret;
+	u32				reg, loop = 0;
+	u16				opcode, result;
+
+	event.buf_len = IXL_AQ_BUF_SZ;
+	event.msg_buf = malloc(event.buf_len,
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!event.msg_buf) {
+		printf("Unable to allocate adminq memory\n");
+		return;
+	}
+
+	IXL_PF_LOCK(pf);
+	/* clean and process any events */
+	do {
+		ret = i40e_clean_arq_element(hw, &event, &result);
+		if (ret)
+			break;
+		opcode = LE16_TO_CPU(event.desc.opcode);
+		switch (opcode) {
+		case i40e_aqc_opc_get_link_status:
+			ixl_link_event(pf, &event);
+			ixl_update_link_status(pf);
+			break;
+		case i40e_aqc_opc_send_msg_to_pf:
+#ifdef PCI_IOV
+			ixl_handle_vf_msg(pf, &event);
+#endif
+			break;
+		case i40e_aqc_opc_event_lan_overflow:
+			break;
+		default:
+#ifdef IXL_DEBUG
+			printf("AdminQ unknown event %x\n", opcode);
+#endif
+			break;
+		}
+
+	} while (result && (loop++ < IXL_ADM_LIMIT));
+
+	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+	reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+	free(event.msg_buf, M_DEVBUF);
+
+	/*
+	 * If there are still messages to process, reschedule ourselves.
+	 * Otherwise, re-enable our interrupt and go to sleep.
+	 */
+	if (result > 0)
+		taskqueue_enqueue(pf->tq, &pf->adminq);
+	else
+		ixl_enable_intr(vsi);
+
+	IXL_PF_UNLOCK(pf);
+}
+
+#ifdef IXL_DEBUG_SYSCTL
+static int
+ixl_debug_info(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf	*pf;
+	int		error, input = 0;
+
+	error = sysctl_handle_int(oidp, &input, 0, req);
+
+	if (error || !req->newptr)
+		return (error);
+
+	if (input == 1) {
+		pf = (struct ixl_pf *)arg1;
+		ixl_print_debug_info(pf);
+	}
+
+	return (error);
+}
+
+static void
+ixl_print_debug_info(struct ixl_pf *pf)
+{
+	struct i40e_hw		*hw = &pf->hw;
+	struct ixl_vsi		*vsi = &pf->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	struct rx_ring		*rxr = &que->rxr;
+	struct tx_ring		*txr = &que->txr;
+	u32			reg;
+
+	printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
+	printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
+	printf("RX next check = %x\n", rxr->next_check);
+	printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
+	printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
+	printf("TX desc avail = %x\n", txr->avail);
+
+	reg = rd32(hw, I40E_GLV_GORCL(0xc));
+	printf("RX Bytes = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
+	printf("Port RX Bytes = %x\n", reg);
+	reg = rd32(hw, I40E_GLV_RDPC(0xc));
+	printf("RX discard = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
+	printf("Port RX discard = %x\n", reg);
+
+	reg = rd32(hw, I40E_GLV_TEPC(0xc));
+	printf("TX errors = %x\n", reg);
+	reg = rd32(hw, I40E_GLV_GOTCL(0xc));
+	printf("TX Bytes = %x\n", reg);
+
+	reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
+	printf("RX undersize = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
+	printf("RX fragments = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
+	printf("RX oversize = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
+	printf("RX length error = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
+	printf("mac remote fault = %x\n", reg);
+	reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
+	printf("mac local fault = %x\n", reg);
+}
+#endif
+
+/**
+ * Update VSI-specific ethernet statistics counters.
+ **/
+void
+ixl_update_eth_stats(struct ixl_vsi *vsi)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_eth_stats *es;
+	struct i40e_eth_stats *oes;
+	struct i40e_hw_port_stats *nsd;
+	u16 stat_idx = vsi->info.stat_counter_idx;
+
+	es = &vsi->eth_stats;
+	oes = &vsi->eth_stats_offsets;
+	nsd = &pf->stats;
+
+	/* Gather up the stats that the hw collects */
+	ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_errors, &es->tx_errors);
+	ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_discards, &es->rx_discards);
+
+	ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+			   I40E_GLV_GORCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_bytes, &es->rx_bytes);
+	ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+			   I40E_GLV_UPRCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_unicast, &es->rx_unicast);
+	ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+			   I40E_GLV_MPRCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_multicast, &es->rx_multicast);
+	ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+			   I40E_GLV_BPRCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->rx_broadcast, &es->rx_broadcast);
+
+	ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+			   I40E_GLV_GOTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_bytes, &es->tx_bytes);
+	ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+			   I40E_GLV_UPTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_unicast, &es->tx_unicast);
+	ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+			   I40E_GLV_MPTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_multicast, &es->tx_multicast);
+	ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+			   I40E_GLV_BPTCL(stat_idx),
+			   vsi->stat_offsets_loaded,
+			   &oes->tx_broadcast, &es->tx_broadcast);
+	vsi->stat_offsets_loaded = true;
+}
+
+static void
+ixl_update_vsi_stats(struct ixl_vsi *vsi)
+{
+	struct ixl_pf		*pf;
+	struct ifnet		*ifp;
+	struct i40e_eth_stats	*es;
+	u64			tx_discards;
+
+	struct i40e_hw_port_stats *nsd;
+
+	pf = vsi->back;
+	ifp = vsi->ifp;
+	es = &vsi->eth_stats;
+	nsd = &pf->stats;
+
+	ixl_update_eth_stats(vsi);
+
+	tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
+	for (int i = 0; i < vsi->num_queues; i++)
+		tx_discards += vsi->queues[i].txr.br->br_drops;
+
+	/* Update ifnet stats */
+	IXL_SET_IPACKETS(vsi, es->rx_unicast +
+	                   es->rx_multicast +
+			   es->rx_broadcast);
+	IXL_SET_OPACKETS(vsi, es->tx_unicast +
+	                   es->tx_multicast +
+			   es->tx_broadcast);
+	IXL_SET_IBYTES(vsi, es->rx_bytes);
+	IXL_SET_OBYTES(vsi, es->tx_bytes);
+	IXL_SET_IMCASTS(vsi, es->rx_multicast);
+	IXL_SET_OMCASTS(vsi, es->tx_multicast);
+
+	IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
+	    nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
+	    nsd->rx_jabber);
+	IXL_SET_OERRORS(vsi, es->tx_errors);
+	IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
+	IXL_SET_OQDROPS(vsi, tx_discards);
+	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+	IXL_SET_COLLISIONS(vsi, 0);
+}
+
+/**
+ * Reset all of the stats for the given pf
+ **/
+void
+ixl_pf_reset_stats(struct ixl_pf *pf)
+{
+	bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
+	bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
+	pf->stat_offsets_loaded = false;
+}
+
+/**
+ * Resets all stats of the given vsi
+ **/
+void
+ixl_vsi_reset_stats(struct ixl_vsi *vsi)
+{
+	bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
+	bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
+	vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * Read and update a 48 bit stat from the hw
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts.  We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+static void
+ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+	bool offset_loaded, u64 *offset, u64 *stat)
+{
+	u64 new_data;
+
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
+	new_data = rd64(hw, loreg);
+#else
+	/*
+	 * Use two rd32's instead of one rd64; FreeBSD versions before
+	 * 10 and 32-bit platforms don't support 8 byte bus reads/writes.
+	 */
+	new_data = rd32(hw, loreg);
+	new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+#endif
+
+	if (!offset_loaded)
+		*offset = new_data;
+	if (new_data >= *offset)
+		*stat = new_data - *offset;
+	else
+		*stat = (new_data + ((u64)1 << 48)) - *offset;
+	*stat &= 0xFFFFFFFFFFFFULL;
+}
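+
+/*
+** Worked example of the 48-bit wrap handling above (sketch only):
+** suppose the saved offset is 0xFFFFFFFFF000 and the counter has since
+** wrapped, reading back as new_data = 0x000000000100. Then
+**
+**	new_data < *offset, so
+**	*stat = (0x100 + ((u64)1 << 48)) - 0xFFFFFFFFF000 = 0x1100
+**
+** i.e. the counter advanced by 0x1100 across the wrap, and the final
+** mask keeps the result within 48 bits.
+*/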
+
+/**
+ * Read and update a 32 bit stat from the hw
+ **/
+static void
+ixl_stat_update32(struct i40e_hw *hw, u32 reg,
+	bool offset_loaded, u64 *offset, u64 *stat)
+{
+	u32 new_data;
+
+	new_data = rd32(hw, reg);
+	if (!offset_loaded)
+		*offset = new_data;
+	if (new_data >= *offset)
+		*stat = (u32)(new_data - *offset);
+	else
+		*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+/*
+** Set flow control using sysctl:
+** 	0 - off
+**	1 - rx pause
+**	2 - tx pause
+**	3 - full
+*/
+static int
+ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+	/*
+	 * TODO: ensure flow control is disabled if
+	 * priority flow control is enabled
+	 *
+	 * TODO: ensure that tx CRC by hardware is enabled
+	 * if tx flow control is enabled.
+	 */
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	int error = 0;
+	enum i40e_status_code aq_error = 0;
+	u8 fc_aq_err = 0;
+
+	/* Get request */
+	error = sysctl_handle_int(oidp, &pf->fc, 0, req);
+	if ((error) || (req->newptr == NULL))
+		return (error);
+	if (pf->fc < 0 || pf->fc > 3) {
+		device_printf(dev,
+		    "Invalid fc mode; valid modes are 0 through 3\n");
+		return (EINVAL);
+	}
+
+	/*
+	** Changing flow control mode currently does not work on
+	** 40GBASE-CR4 PHYs
+	*/
+	if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
+	    || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
+		device_printf(dev, "Changing flow control mode unsupported"
+		    " on 40GBase-CR4 media.\n");
+		return (ENODEV);
+	}
+
+	/* Set fc ability for port */
+	hw->fc.requested_mode = pf->fc;
+	aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
+	if (aq_error) {
+		device_printf(dev,
+		    "%s: Error setting new fc mode %d; fc_err %#x\n",
+		    __func__, aq_error, fc_aq_err);
+		return (EAGAIN);
+	}
+
+	return (0);
+}
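+
+/*
+** Usage sketch (assuming the handler above is attached as "fc" under
+** the device's sysctl tree, a name not shown in this hunk): on a
+** hypothetical unit 0,
+**
+**	sysctl dev.ixl.0.fc=3		# request full (rx+tx) pause
+**	sysctl dev.ixl.0.fc=0		# disable flow control
+**
+** The value is validated against 0..3 and then pushed to the port via
+** i40e_set_fc() as seen above.
+*/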
+
+static int
+ixl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	int error = 0, index = 0;
+
+	char *speeds[] = {
+		"Unknown",
+		"100M",
+		"1G",
+		"10G",
+		"40G",
+		"20G"
+	};
+
+	ixl_update_link_status(pf);
+
+	switch (hw->phy.link_info.link_speed) {
+	case I40E_LINK_SPEED_100MB:
+		index = 1;
+		break;
+	case I40E_LINK_SPEED_1GB:
+		index = 2;
+		break;
+	case I40E_LINK_SPEED_10GB:
+		index = 3;
+		break;
+	case I40E_LINK_SPEED_40GB:
+		index = 4;
+		break;
+	case I40E_LINK_SPEED_20GB:
+		index = 5;
+		break;
+	case I40E_LINK_SPEED_UNKNOWN:
+	default:
+		index = 0;
+		break;
+	}
+
+	error = sysctl_handle_string(oidp, speeds[index],
+	    strlen(speeds[index]), req);
+	return (error);
+}
+
+static int
+ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
+{
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	struct i40e_aq_get_phy_abilities_resp abilities;
+	struct i40e_aq_set_phy_config config;
+	enum i40e_status_code aq_error = 0;
+
+	/* Get current capability information */
+	aq_error = i40e_aq_get_phy_capabilities(hw,
+	    FALSE, FALSE, &abilities, NULL);
+	if (aq_error) {
+		device_printf(dev,
+		    "%s: Error getting phy capabilities %d,"
+		    " aq error: %d\n", __func__, aq_error,
+		    hw->aq.asq_last_status);
+		return (EAGAIN);
+	}
+
+	/* Prepare new config */
+	bzero(&config, sizeof(config));
+	config.phy_type = abilities.phy_type;
+	config.abilities = abilities.abilities
+	    | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+	config.eee_capability = abilities.eee_capability;
+	config.eeer = abilities.eeer_val;
+	config.low_power_ctrl = abilities.d3_lpan;
+	/* Translate into aq cmd link_speed */
+	if (speeds & 0x8)
+		config.link_speed |= I40E_LINK_SPEED_20GB;
+	if (speeds & 0x4)
+		config.link_speed |= I40E_LINK_SPEED_10GB;
+	if (speeds & 0x2)
+		config.link_speed |= I40E_LINK_SPEED_1GB;
+	if (speeds & 0x1)
+		config.link_speed |= I40E_LINK_SPEED_100MB;
+
+	/* Do aq command & restart link */
+	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
+	if (aq_error) {
+		device_printf(dev,
+		    "%s: Error setting new phy config %d,"
+		    " aq error: %d\n", __func__, aq_error,
+		    hw->aq.asq_last_status);
+		return (EAGAIN);
+	}
+
+	/*
+	** This seems a bit heavy-handed, but we
+	** need to force a reinit on some devices
+	*/
+	IXL_PF_LOCK(pf);
+	ixl_stop(pf);
+	ixl_init_locked(pf);
+	IXL_PF_UNLOCK(pf);
+
+	return (0);
+}
+
+/*
+** Control link advertise speed:
+**	Flags:
+**	0x1 - advertise 100 Mb
+**	0x2 - advertise 1G
+**	0x4 - advertise 10G
+**	0x8 - advertise 20G
+**
+** Does not work on 40G devices.
+*/
+static int
+ixl_set_advertise(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	int requested_ls = 0;
+	int error = 0;
+
+	/*
+	** FW doesn't support changing advertised speed
+	** for 40G devices; speed is always 40G.
+	*/
+	if (i40e_is_40G_device(hw->device_id))
+		return (ENODEV);
+
+	/* Read in new mode */
+	requested_ls = pf->advertised_speed;
+	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
+	if ((error) || (req->newptr == NULL))
+		return (error);
+	/* Check for sane value */
+	if (requested_ls < 0x1 || requested_ls > 0xE) {
+		device_printf(dev, "Invalid advertised speed; "
+		    "valid modes are 0x1 through 0xE\n");
+		return (EINVAL);
+	}
+	/* Then check for validity based on adapter type */
+	switch (hw->device_id) {
+	case I40E_DEV_ID_10G_BASE_T:
+		if (requested_ls & 0x8) {
+			device_printf(dev,
+			    "20Gb/s speed not supported on this device.\n");
+			return (EINVAL);
+		}
+		break;
+	case I40E_DEV_ID_20G_KR2:
+		if (requested_ls & 0x1) {
+			device_printf(dev,
+			    "100Mb/s speed not supported on this device.\n");
+			return (EINVAL);
+		}
+		break;
+	default:
+		if (requested_ls & ~0x6) {
+			device_printf(dev,
+			    "Only 1Gb/s and 10Gb/s speeds are supported on this device.\n");
+			return (EINVAL);
+		}
+		break;
+	}
+
+	/* Exit if no change */
+	if (pf->advertised_speed == requested_ls)
+		return (0);
+
+	error = ixl_set_advertised_speeds(pf, requested_ls);
+	if (error)
+		return (error);
+
+	pf->advertised_speed = requested_ls;
+	ixl_update_link_status(pf);
+	return (0);
+}
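+
+/*
+** Usage sketch (assuming the handler above is attached as
+** "advertise_speed", a name not shown in this hunk): to advertise 1G
+** and 10G only on a hypothetical unit 0, combine the flags 0x2 | 0x4:
+**
+**	sysctl dev.ixl.0.advertise_speed=6
+**
+** The request is range-checked, validated against the device type, and
+** then applied through ixl_set_advertised_speeds() above.
+*/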
+
+/*
+** Get the width and transaction speed of
+** the bus this adapter is plugged into.
+*/
+static u16
+ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
+{
+        u16                     link;
+        u32                     offset;
+
+        /* Get the PCI Express Capabilities offset */
+        pci_find_cap(dev, PCIY_EXPRESS, &offset);
+
+        /* ...and read the Link Status Register */
+        link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
+
+        switch (link & I40E_PCI_LINK_WIDTH) {
+        case I40E_PCI_LINK_WIDTH_1:
+                hw->bus.width = i40e_bus_width_pcie_x1;
+                break;
+        case I40E_PCI_LINK_WIDTH_2:
+                hw->bus.width = i40e_bus_width_pcie_x2;
+                break;
+        case I40E_PCI_LINK_WIDTH_4:
+                hw->bus.width = i40e_bus_width_pcie_x4;
+                break;
+        case I40E_PCI_LINK_WIDTH_8:
+                hw->bus.width = i40e_bus_width_pcie_x8;
+                break;
+        default:
+                hw->bus.width = i40e_bus_width_unknown;
+                break;
+        }
+
+        switch (link & I40E_PCI_LINK_SPEED) {
+        case I40E_PCI_LINK_SPEED_2500:
+                hw->bus.speed = i40e_bus_speed_2500;
+                break;
+        case I40E_PCI_LINK_SPEED_5000:
+                hw->bus.speed = i40e_bus_speed_5000;
+                break;
+        case I40E_PCI_LINK_SPEED_8000:
+                hw->bus.speed = i40e_bus_speed_8000;
+                break;
+        default:
+                hw->bus.speed = i40e_bus_speed_unknown;
+                break;
+        }
+
+        device_printf(dev, "PCI Express Bus: Speed %s %s\n",
+            ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
+            (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
+            (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
+            (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
+            (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
+            (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
+            ("Unknown"));
+
+        if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
+            (hw->bus.speed < i40e_bus_speed_8000)) {
+                device_printf(dev, "PCI-Express bandwidth available"
+                    " for this device\n     may be insufficient for"
+                    " optimal performance.\n");
+                device_printf(dev, "For expected performance a x8 "
+                    "PCIE Gen3 slot is required.\n");
+        }
+
+        return (link);
+}
+
+static int
+ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf	*pf = (struct ixl_pf *)arg1;
+	struct i40e_hw	*hw = &pf->hw;
+	char		buf[32];
+
+	snprintf(buf, sizeof(buf),
+	    "f%d.%d a%d.%d n%02x.%02x e%08x",
+	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
+	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
+	    IXL_NVM_VERSION_HI_SHIFT,
+	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
+	    IXL_NVM_VERSION_LO_SHIFT,
+	    hw->nvm.eetrack);
+	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
+}
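+
+/*
+** The string built above reads, with hypothetical values,
+**
+**	f4.40 a1.4 n04.53 e0000232f
+**
+** i.e. firmware version 4.40, admin queue API version 1.4, NVM image
+** version 04.53, and EETRACK id 0x232f. The numbers here are made up
+** purely for illustration.
+*/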
+
+
+#ifdef IXL_DEBUG_SYSCTL
+static int
+ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	struct i40e_link_status link_status;
+	char buf[512];
+
+	enum i40e_status_code aq_error = 0;
+
+	aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
+	if (aq_error) {
+		printf("i40e_aq_get_link_info() error %d\n", aq_error);
+		return (EPERM);
+	}
+
+	sprintf(buf, "\n"
+	    "PHY Type : %#04x\n"
+	    "Speed    : %#04x\n" 
+	    "Link info: %#04x\n" 
+	    "AN info  : %#04x\n" 
+	    "Ext info : %#04x", 
+	    link_status.phy_type, link_status.link_speed, 
+	    link_status.link_info, link_status.an_info,
+	    link_status.ext_info);
+
+	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
+}
+
+static int
+ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf		*pf = (struct ixl_pf *)arg1;
+	struct i40e_hw		*hw = &pf->hw;
+	char			buf[512];
+	enum i40e_status_code	aq_error = 0;
+
+	struct i40e_aq_get_phy_abilities_resp abilities;
+
+	aq_error = i40e_aq_get_phy_capabilities(hw,
+	    TRUE, FALSE, &abilities, NULL);
+	if (aq_error) {
+		printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
+		return (EPERM);
+	}
+
+	sprintf(buf, "\n"
+	    "PHY Type : %#010x\n"
+	    "Speed    : %#04x\n" 
+	    "Abilities: %#04x\n" 
+	    "EEE cap  : %#06x\n" 
+	    "EEER reg : %#010x\n" 
+	    "D3 Lpan  : %#04x",
+	    abilities.phy_type, abilities.link_speed, 
+	    abilities.abilities, abilities.eee_capability,
+	    abilities.eeer_val, abilities.d3_lpan);
+
+	return (sysctl_handle_string(oidp, buf, strlen(buf), req));
+}
+
+static int
+ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct ixl_vsi *vsi = &pf->vsi;
+	struct ixl_mac_filter *f;
+	char *buf, *buf_i;
+
+	int error = 0;
+	int ftl_len = 0;
+	int ftl_counter = 0;
+	int buf_len = 0;
+	int entry_len = 42;
+
+	SLIST_FOREACH(f, &vsi->ftl, next) {
+		ftl_len++;
+	}
+
+	if (ftl_len < 1) {
+		sysctl_handle_string(oidp, "(none)", 6, req);
+		return (0);
+	}
+
+	buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
+	buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
+	if (buf == NULL) {
+		printf("Could not allocate filter list buffer\n");
+		return (ENOMEM);
+	}
+
+	sprintf(buf_i++, "\n");
+	SLIST_FOREACH(f, &vsi->ftl, next) {
+		sprintf(buf_i,
+		    MAC_FORMAT ", vlan %4d, flags %#06x",
+		    MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
+		buf_i += entry_len;
+		/* don't print '\n' for last entry */
+		if (++ftl_counter != ftl_len) {
+			sprintf(buf_i, "\n");
+			buf_i++;
+		}
+	}
+
+	error = sysctl_handle_string(oidp, buf, strlen(buf), req);
+	if (error)
+		printf("sysctl error: %d\n", error);
+	free(buf, M_DEVBUF);
+	return error;
+}
+
+#define IXL_SW_RES_SIZE 0x14
+static int
+ixl_res_alloc_cmp(const void *a, const void *b)
+{
+	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
+	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
+	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
+
+	return ((int)one->resource_type - (int)two->resource_type);
+}
+
+static int
+ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	struct sbuf *buf;
+	int error = 0;
+
+	u8 num_entries;
+	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
+
+	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+	if (!buf) {
+		device_printf(dev, "Could not allocate sbuf for output.\n");
+		return (ENOMEM);
+	}
+
+	bzero(resp, sizeof(resp));
+	error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
+				resp,
+				IXL_SW_RES_SIZE,
+				NULL);
+	if (error) {
+		device_printf(dev,
+		    "%s: get_switch_resource_alloc() error %d, aq error %d\n",
+		    __func__, error, hw->aq.asq_last_status);
+		sbuf_delete(buf);
+		return error;
+	}
+
+	/* Sort entries by type for display */
+	qsort(resp, num_entries,
+	    sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
+	    &ixl_res_alloc_cmp);
+
+	sbuf_cat(buf, "\n");
+	sbuf_printf(buf, "# of entries: %d\n", num_entries);
+	sbuf_printf(buf,
+	    "Type | Guaranteed | Total | Used   | Un-allocated\n"
+	    "     | (this)     | (all) | (this) | (all)       \n");
+	for (int i = 0; i < num_entries; i++) {
+		sbuf_printf(buf,
+		    "%#4x | %10d   %5d   %6d   %12d",
+		    resp[i].resource_type,
+		    resp[i].guaranteed,
+		    resp[i].total,
+		    resp[i].used,
+		    resp[i].total_unalloced);
+		if (i < num_entries - 1)
+			sbuf_cat(buf, "\n");
+	}
+
+	error = sbuf_finish(buf);
+	if (error) {
+		device_printf(dev, "Error finishing sbuf: %d\n", error);
+		sbuf_delete(buf);
+		return error;
+	}
+
+	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+	if (error)
+		device_printf(dev, "sysctl error: %d\n", error);
+	sbuf_delete(buf);
+
+	return (error);
+}
+
+/*
+** Caller must init and delete sbuf; this function will clear and
+** finish it for caller.
+*/
+static char *
+ixl_switch_element_string(struct sbuf *s, u16 seid, bool uplink)
+{
+	sbuf_clear(s);
+
+	if (seid == 0 && uplink)
+		sbuf_cat(s, "Network");
+	else if (seid == 0)
+		sbuf_cat(s, "Host");
+	else if (seid == 1)
+		sbuf_cat(s, "EMP");
+	else if (seid <= 5)
+		sbuf_printf(s, "MAC %d", seid - 2);
+	else if (seid <= 15)
+		sbuf_cat(s, "Reserved");
+	else if (seid <= 31)
+		sbuf_printf(s, "PF %d", seid - 16);
+	else if (seid <= 159)
+		sbuf_printf(s, "VF %d", seid - 32);
+	else if (seid <= 287)
+		sbuf_cat(s, "Reserved");
+	else if (seid <= 511)
+		sbuf_cat(s, "Other"); // for other structures
+	else if (seid <= 895)
+		sbuf_printf(s, "VSI %d", seid - 512);
+	else if (seid <= 1023)
+		sbuf_cat(s, "Reserved");
+	else
+		sbuf_cat(s, "Invalid");
+
+	sbuf_finish(s);
+	return sbuf_data(s);
+}
+
+static int
+ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_pf *pf = (struct ixl_pf *)arg1;
+	struct i40e_hw *hw = &pf->hw;
+	device_t dev = pf->dev;
+	struct sbuf *buf;
+	struct sbuf *nmbuf;
+	int error = 0;
+	u8 aq_buf[I40E_AQ_LARGE_BUF];
+
+	u16 next = 0;
+	struct i40e_aqc_get_switch_config_resp *sw_config;
+	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+
+	buf = sbuf_new_for_sysctl(NULL, NULL, 0, req);
+	if (!buf) {
+		device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+		return (ENOMEM);
+	}
+
+	error = i40e_aq_get_switch_config(hw, sw_config,
+	    sizeof(aq_buf), &next, NULL);
+	if (error) {
+		device_printf(dev,
+		    "%s: aq_get_switch_config() error %d, aq error %d\n",
+		    __func__, error, hw->aq.asq_last_status);
+		sbuf_delete(buf);
+		return error;
+	}
+
+	nmbuf = sbuf_new_auto();
+	if (!nmbuf) {
+		device_printf(dev, "Could not allocate sbuf for name output.\n");
+		sbuf_delete(buf);
+		return (ENOMEM);
+	}
+
+	sbuf_cat(buf, "\n");
+	// Assuming <= 255 elements in switch
+	sbuf_printf(buf, "# of elements: %d\n", sw_config->header.num_reported);
+	/* The Revision field is excluded; all elements are revision 1 for now */
+	sbuf_printf(buf,
+	    "SEID (  Name  ) |  Uplink  | Downlink | Conn Type\n"
+	    "                |          |          | (uplink)\n");
+	for (int i = 0; i < sw_config->header.num_reported; i++) {
+		// "%4d (%8s) | %8s   %8s   %#8x",
+		sbuf_printf(buf, "%4d", sw_config->element[i].seid);
+		sbuf_cat(buf, " ");
+		sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
+		    sw_config->element[i].seid, false));
+		sbuf_cat(buf, " | ");
+		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
+		    sw_config->element[i].uplink_seid, true));
+		sbuf_cat(buf, "   ");
+		sbuf_printf(buf, "%8s", ixl_switch_element_string(nmbuf,
+		    sw_config->element[i].downlink_seid, false));
+		sbuf_cat(buf, "   ");
+		sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
+		if (i < sw_config->header.num_reported - 1)
+			sbuf_cat(buf, "\n");
+	}
+	sbuf_delete(nmbuf);
+
+	error = sbuf_finish(buf);
+	if (error) {
+		device_printf(dev, "Error finishing sbuf: %d\n", error);
+		sbuf_delete(buf);
+		return error;
+	}
+
+	error = sysctl_handle_string(oidp, sbuf_data(buf), sbuf_len(buf), req);
+	if (error)
+		device_printf(dev, "sysctl error: %d\n", error);
+	sbuf_delete(buf);
+
+	return (error);
+}
+#endif /* IXL_DEBUG_SYSCTL */
+
+
+#ifdef PCI_IOV
+static int
+ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	struct ixl_vsi *vsi;
+	struct i40e_vsi_context vsi_ctx;
+	int i;
+	uint16_t first_queue;
+	enum i40e_status_code code;
+
+	hw = &pf->hw;
+	vsi = &pf->vsi;
+
+	vsi_ctx.pf_num = hw->pf_id;
+	vsi_ctx.uplink_seid = pf->veb_seid;
+	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
+	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
+
+	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+
+	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+	vsi_ctx.info.switch_id = htole16(0);
+
+	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+	vsi_ctx.info.sec_flags = 0;
+	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
+		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+	vsi_ctx.info.valid_sections |=
+	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+	first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
+	for (i = 0; i < IXLV_MAX_QUEUES; i++)
+		vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
+	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
+		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
+
+	vsi_ctx.info.tc_mapping[0] = htole16(
+	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+	    (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+
+	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
+	if (code != I40E_SUCCESS)
+		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+	vf->vsi.seid = vsi_ctx.seid;
+	vf->vsi.vsi_num = vsi_ctx.vsi_number;
+	vf->vsi.first_queue = first_queue;
+	vf->vsi.num_queues = IXLV_MAX_QUEUES;
+
+	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
+	if (code != I40E_SUCCESS)
+		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+
+	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
+	if (code != I40E_SUCCESS) {
+		device_printf(pf->dev, "Failed to disable BW limit: %d\n",
+		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+	}
+
+	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
+	return (0);
+}
+
+static int
+ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	int error;
+
+	hw = &pf->hw;
+
+	error = ixl_vf_alloc_vsi(pf, vf);
+	if (error != 0)
+		return (error);
+
+	vf->vsi.hw_filters_add = 0;
+	vf->vsi.hw_filters_del = 0;
+	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+	ixl_reconfigure_filters(&vf->vsi);
+
+	return (0);
+}
+
+static void
+ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
+    uint32_t val)
+{
+	uint32_t qtable;
+	int index, shift;
+
+	/*
+	 * Two queues are mapped in a single register, so we have to do some
+	 * gymnastics to convert the queue number into a register index and
+	 * shift.
+	 */
+	index = qnum / 2;
+	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
+
+	qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
+	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
+	qtable |= val << shift;
+	wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
+}
+
+static void
+ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	uint32_t qtable;
+	int i;
+
+	hw = &pf->hw;
+
+	/*
+	 * Contiguous mappings aren't actually supported by the hardware,
+	 * so we have to use non-contiguous mappings.
+	 */
+	wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
+	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
+	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+	for (i = 0; i < vf->vsi.num_queues; i++) {
+		qtable = (vf->vsi.first_queue + i) <<
+		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
+
+		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
+	}
+
+	/* Map queues allocated to VF to its VSI. */
+	for (i = 0; i < vf->vsi.num_queues; i++)
+		ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
+
+	/* Set rest of VSI queues as unused. */
+	for (; i < IXL_MAX_VSI_QUEUES; i++)
+		ixl_vf_map_vsi_queue(hw, vf, i,
+		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
+
+	ixl_flush(hw);
+}
+
+static void
+ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
+{
+	struct i40e_hw *hw;
+
+	hw = &pf->hw;
+
+	if (vsi->seid == 0)
+		return;
+
+	i40e_aq_delete_element(hw, vsi->seid, NULL);
+}
+
+static void
+ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
+{
+
+	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+	ixl_flush(hw);
+}
+
+static void
+ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
+{
+
+	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+	ixl_flush(hw);
+}
+
+static void
+ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	uint32_t vfint_reg, vpint_reg;
+	int i;
+
+	hw = &pf->hw;
+
+	ixl_vf_vsi_release(pf, &vf->vsi);
+
+	/* Index 0 has a special register. */
+	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
+
+	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
+		ixl_vf_disable_queue_intr(hw, vfint_reg);
+	}
+
+	/* Index 0 has a special register. */
+	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
+
+	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
+		ixl_vf_unregister_intr(hw, vpint_reg);
+	}
+
+	vf->vsi.num_queues = 0;
+}
+
+static int
+ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	int i;
+	uint16_t global_vf_num;
+	uint32_t ciad;
+
+	hw = &pf->hw;
+	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+
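+	/*
+	 * Poll the VF's PCIe Device Status register through the PF's
+	 * indirect configuration-access registers (CIAA/CIAD) until the
+	 * transactions-pending bit clears, giving up after
+	 * IXL_VF_RESET_TIMEOUT iterations.
+	 */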
+	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
+	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+		ciad = rd32(hw, I40E_PF_PCI_CIAD);
+		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
+			return (0);
+		DELAY(1);
+	}
+
+	return (ETIMEDOUT);
+}
+
+static void
+ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	uint32_t vfrtrig;
+
+	hw = &pf->hw;
+
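+	/* Trigger a software reset (VFSWR), then rebuild the VF's state. */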
+	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+	ixl_flush(hw);
+
+	ixl_reinit_vf(pf, vf);
+}
+
+static void
+ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_hw *hw;
+	uint32_t vfrstat, vfrtrig;
+	int i, error;
+
+	hw = &pf->hw;
+
+	error = ixl_flush_pcie(pf, vf);
+	if (error != 0)
+		device_printf(pf->dev,
+		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
+		    vf->vf_num);
+
+	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+		DELAY(10);
+
+		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
+		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+			break;
+	}
+
+	if (i == IXL_VF_RESET_TIMEOUT)
+		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
+
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
+
+	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+
+	if (vf->vsi.seid != 0)
+		ixl_disable_rings(&vf->vsi);
+
+	ixl_vf_release_resources(pf, vf);
+	ixl_vf_setup_vsi(pf, vf);
+	ixl_vf_map_queues(pf, vf);
+
+	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
+	ixl_flush(hw);
+}
+
+static const char *
+ixl_vc_opcode_str(uint16_t op)
+{
+
+	switch (op) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		return ("VERSION");
+	case I40E_VIRTCHNL_OP_RESET_VF:
+		return ("RESET_VF");
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		return ("GET_VF_RESOURCES");
+	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		return ("CONFIG_TX_QUEUE");
+	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		return ("CONFIG_RX_QUEUE");
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		return ("CONFIG_VSI_QUEUES");
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		return ("CONFIG_IRQ_MAP");
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+		return ("ENABLE_QUEUES");
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		return ("DISABLE_QUEUES");
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+		return ("ADD_ETHER_ADDRESS");
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		return ("DEL_ETHER_ADDRESS");
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+		return ("ADD_VLAN");
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		return ("DEL_VLAN");
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		return ("CONFIG_PROMISCUOUS_MODE");
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		return ("GET_STATS");
+	case I40E_VIRTCHNL_OP_FCOE:
+		return ("FCOE");
+	case I40E_VIRTCHNL_OP_EVENT:
+		return ("EVENT");
+	default:
+		return ("UNKNOWN");
+	}
+}
+
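+/*
+ * Debug verbosity required before a given opcode is logged. GET_STATS is
+ * demoted to a higher level since VFs typically poll their stats often
+ * enough to flood the log otherwise.
+ */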
+static int
+ixl_vc_opcode_level(uint16_t opcode)
+{
+
+	switch (opcode) {
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		return (10);
+	default:
+		return (5);
+	}
+}
+
+static void
+ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+    enum i40e_status_code status, void *msg, uint16_t len)
+{
+	struct i40e_hw *hw;
+	int global_vf_id;
+
+	hw = &pf->hw;
+	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
+
+	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
+	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
+	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
+
+	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
+}
+
+static void
+ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
+{
+
+	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
+}
+
+static void
+ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+    enum i40e_status_code status, const char *file, int line)
+{
+
+	I40E_VC_DEBUG(pf, 1,
+	    "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
+	    ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
+	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
+}
+
+static void
+ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_version_info reply;
+
+	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
+	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
+	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
+	    sizeof(reply));
+}
+
+static void
+ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+
+	if (msg_size != 0) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	ixl_reset_vf(pf, vf);
+
+	/* No response to a reset message. */
+}
+
+static void
+ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_vf_resource reply;
+
+	if (msg_size != 0) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	bzero(&reply, sizeof(reply));
+
+	reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
+
+	reply.num_vsis = 1;
+	reply.num_queue_pairs = vf->vsi.num_queues;
+	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
+	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
+	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
+
+	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+	    I40E_SUCCESS, &reply, sizeof(reply));
+}
+
+static int
+ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+    struct i40e_virtchnl_txq_info *info)
+{
+	struct i40e_hw *hw;
+	struct i40e_hmc_obj_txq txq;
+	uint16_t global_queue_num, global_vf_num;
+	enum i40e_status_code status;
+	uint32_t qtx_ctl;
+
+	hw = &pf->hw;
+	global_queue_num = vf->vsi.first_queue + info->queue_id;
+	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+	bzero(&txq, sizeof(txq));
+
+	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
+	if (status != I40E_SUCCESS)
+		return (EINVAL);
+
+	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
+
+	txq.head_wb_ena = info->headwb_enabled;
+	txq.head_wb_addr = info->dma_headwb_addr;
+	txq.qlen = info->ring_len;
+	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
+	txq.rdylist_act = 0;
+
+	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
+	if (status != I40E_SUCCESS)
+		return (EINVAL);
+
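+	/* Associate the global queue with this PF/VF pair in QTX_CTL. */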
+	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
+	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
+	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
+	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
+	ixl_flush(hw);
+
+	return (0);
+}
+
+static int
+ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+    struct i40e_virtchnl_rxq_info *info)
+{
+	struct i40e_hw *hw;
+	struct i40e_hmc_obj_rxq rxq;
+	uint16_t global_queue_num;
+	enum i40e_status_code status;
+
+	hw = &pf->hw;
+	global_queue_num = vf->vsi.first_queue + info->queue_id;
+	bzero(&rxq, sizeof(rxq));
+
+	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
+		return (EINVAL);
+
+	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
+	    info->max_pkt_size < ETHER_MIN_LEN)
+		return (EINVAL);
+
+	if (info->splithdr_enabled) {
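+		/*
+		 * Header split: limit the split position to the supported
+		 * L2/IP/TCP-UDP/SCTP boundaries; dtype 2 below selects the
+		 * header-split descriptor format.
+		 */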
+		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
+			return (EINVAL);
+
+		rxq.hsplit_0 = info->rx_split_pos &
+		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
+		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
+		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
+		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
+		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+		rxq.dtype = 2;
+	}
+
+	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
+	if (status != I40E_SUCCESS)
+		return (EINVAL);
+
+	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
+	rxq.qlen = info->ring_len;
+
+	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+	rxq.dsize = 1;
+	rxq.crcstrip = 1;
+	rxq.l2tsel = 1;
+
+	rxq.rxmax = info->max_pkt_size;
+	rxq.tphrdesc_ena = 1;
+	rxq.tphwdesc_ena = 1;
+	rxq.tphdata_ena = 1;
+	rxq.tphhead_ena = 1;
+	rxq.lrxqthresh = 2;
+	rxq.prefena = 1;
+
+	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
+	if (status != I40E_SUCCESS)
+		return (EINVAL);
+
+	return (0);
+}
+
+static void
+ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_vsi_queue_config_info *info;
+	struct i40e_virtchnl_queue_pair_info *pair;
+	int i;
+
+	if (msg_size < sizeof(*info)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	info = msg;
+	if (info->num_queue_pairs == 0) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	if (info->vsi_id != vf->vsi.vsi_num) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < info->num_queue_pairs; i++) {
+		pair = &info->qpair[i];
+
+		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
+		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
+		    pair->txq.queue_id != pair->rxq.queue_id ||
+		    pair->txq.queue_id >= vf->vsi.num_queues) {
+
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+			return;
+		}
+
+		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+			return;
+		}
+
+		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+			return;
+		}
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+}
+
+static void
+ixl_vf_set_qctl(struct ixl_pf *pf,
+    const struct i40e_virtchnl_vector_map *vector,
+    enum i40e_queue_type cur_type, uint16_t cur_queue,
+    enum i40e_queue_type *last_type, uint16_t *last_queue)
+{
+	uint32_t offset, qctl;
+	uint16_t itr_indx;
+
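+	/*
+	 * Each write records the previously-programmed queue as the "next"
+	 * entry of the current one, so the interrupt linked list is built
+	 * tail-first; the queue programmed last becomes the head that the
+	 * caller writes into I40E_VPINT_LNKLST*.
+	 */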
+	if (cur_type == I40E_QUEUE_TYPE_RX) {
+		offset = I40E_QINT_RQCTL(cur_queue);
+		itr_indx = vector->rxitr_idx;
+	} else {
+		offset = I40E_QINT_TQCTL(cur_queue);
+		itr_indx = vector->txitr_idx;
+	}
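+	/*
+	 * QINT_RQCTL and QINT_TQCTL share the same field layout, so the
+	 * RQCTL shift/mask macros are reused for the TX case as well.
+	 */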
+
+	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
+
+	wr32(&pf->hw, offset, qctl);
+
+	*last_type = cur_type;
+	*last_queue = cur_queue;
+}
+
+static void
+ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
+    const struct i40e_virtchnl_vector_map *vector)
+{
+	struct i40e_hw *hw;
+	u_int qindex;
+	enum i40e_queue_type type, last_type;
+	uint32_t lnklst_reg;
+	uint16_t rxq_map, txq_map, cur_queue, last_queue;
+
+	hw = &pf->hw;
+
+	rxq_map = vector->rxq_map;
+	txq_map = vector->txq_map;
+
+	last_queue = IXL_END_OF_INTR_LNKLST;
+	last_type = I40E_QUEUE_TYPE_RX;
+
+	/*
+	 * The datasheet says that, to optimize performance, RX and TX queues
+	 * should be interleaved in the interrupt linked list, so we process
+	 * both at once here.
+	 */
+	while ((rxq_map != 0) || (txq_map != 0)) {
+		if (txq_map != 0) {
+			qindex = ffs(txq_map) - 1;
+			type = I40E_QUEUE_TYPE_TX;
+			cur_queue = vf->vsi.first_queue + qindex;
+			ixl_vf_set_qctl(pf, vector, type, cur_queue,
+			    &last_type, &last_queue);
+			txq_map &= ~(1 << qindex);
+		}
+
+		if (rxq_map != 0) {
+			qindex = ffs(rxq_map) - 1;
+			type = I40E_QUEUE_TYPE_RX;
+			cur_queue = vf->vsi.first_queue + qindex;
+			ixl_vf_set_qctl(pf, vector, type, cur_queue,
+			    &last_type, &last_queue);
+			rxq_map &= ~(1 << qindex);
+		}
+	}
+
+	if (vector->vector_id == 0)
+		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
+	else
+		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
+		    vf->vf_num);
+	wr32(hw, lnklst_reg,
+	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+
+	ixl_flush(hw);
+}
+
+static void
+ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_irq_map_info *map;
+	struct i40e_virtchnl_vector_map *vector;
+	struct i40e_hw *hw;
+	int i, largest_txq, largest_rxq;
+
+	hw = &pf->hw;
+
+	if (msg_size < sizeof(*map)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	map = msg;
+	if (map->num_vectors == 0) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < map->num_vectors; i++) {
+		vector = &map->vecmap[i];
+
+		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
+		    vector->vsi_id != vf->vsi.vsi_num) {
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
+			return;
+		}
+
+		if (vector->rxq_map != 0) {
+			largest_rxq = fls(vector->rxq_map) - 1;
+			if (largest_rxq >= vf->vsi.num_queues) {
+				i40e_send_vf_nack(pf, vf,
+				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+				    I40E_ERR_PARAM);
+				return;
+			}
+		}
+
+		if (vector->txq_map != 0) {
+			largest_txq = fls(vector->txq_map) - 1;
+			if (largest_txq >= vf->vsi.num_queues) {
+				i40e_send_vf_nack(pf, vf,
+				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+				    I40E_ERR_PARAM);
+				return;
+			}
+		}
+
+		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
+		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+			    I40E_ERR_PARAM);
+			return;
+		}
+
+		ixl_vf_config_vector(pf, vf, vector);
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+}
+
+static void
+ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_queue_select *select;
+	int error;
+
+	if (msg_size != sizeof(*select)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	select = msg;
+	if (select->vsi_id != vf->vsi.vsi_num ||
+	    select->rx_queues == 0 || select->tx_queues == 0) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	error = ixl_enable_rings(&vf->vsi);
+	if (error) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+		    I40E_ERR_TIMEOUT);
+		return;
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+}
+
+static void
+ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+    void *msg, uint16_t msg_size)
+{
+	struct i40e_virtchnl_queue_select *select;
+	int error;
+
+	if (msg_size != sizeof(*select)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	select = msg;
+	if (select->vsi_id != vf->vsi.vsi_num ||
+	    select->rx_queues == 0 || select->tx_queues == 0) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	error = ixl_disable_rings(&vf->vsi);
+	if (error) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+		    I40E_ERR_TIMEOUT);
+		return;
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
+}
+
+static boolean_t
+ixl_zero_mac(const uint8_t *addr)
+{
+	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+	return (cmp_etheraddr(addr, zero));
+}
+
+static boolean_t
+ixl_bcast_mac(const uint8_t *addr)
+{
+
+	return (cmp_etheraddr(addr, ixl_bcast_addr));
+}
+
+static int
+ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
+{
+
+	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
+		return (EINVAL);
+
+	/*
+	 * If the VF is not allowed to change its MAC address, don't let it
+	 * set a MAC filter for an address that is not a multicast address and
+	 * is not its assigned MAC.
+	 */
+	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
+	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
+		return (EPERM);
+
+	return (0);
+}
+
+static void
+ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_ether_addr_list *addr_list;
+	struct i40e_virtchnl_ether_addr *addr;
+	struct ixl_vsi *vsi;
+	int i;
+	size_t expected_size;
+
+	vsi = &vf->vsi;
+
+	if (msg_size < sizeof(*addr_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	addr_list = msg;
+	expected_size = sizeof(*addr_list) +
+	    addr_list->num_elements * sizeof(*addr);
+
+	if (addr_list->num_elements == 0 ||
+	    addr_list->vsi_id != vsi->vsi_num ||
+	    msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < addr_list->num_elements; i++) {
+		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
+			return;
+		}
+	}
+
+	for (i = 0; i < addr_list->num_elements; i++) {
+		addr = &addr_list->list[i];
+		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
+}
+
+static void
+ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_ether_addr_list *addr_list;
+	struct i40e_virtchnl_ether_addr *addr;
+	size_t expected_size;
+	int i;
+
+	if (msg_size < sizeof(*addr_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	addr_list = msg;
+	expected_size = sizeof(*addr_list) +
+	    addr_list->num_elements * sizeof(*addr);
+
+	if (addr_list->num_elements == 0 ||
+	    addr_list->vsi_id != vf->vsi.vsi_num ||
+	    msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < addr_list->num_elements; i++) {
+		addr = &addr_list->list[i];
+		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
+			i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
+			return;
+		}
+	}
+
+	for (i = 0; i < addr_list->num_elements; i++) {
+		addr = &addr_list->list[i];
+		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
+}
+
+static enum i40e_status_code
+ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+	struct i40e_vsi_context vsi_ctx;
+
+	vsi_ctx.seid = vf->vsi.seid;
+
+	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
+}
+
+static void
+ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_vlan_filter_list *filter_list;
+	enum i40e_status_code code;
+	size_t expected_size;
+	int i;
+
+	if (msg_size < sizeof(*filter_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	filter_list = msg;
+	expected_size = sizeof(*filter_list) +
+	    filter_list->num_elements * sizeof(uint16_t);
+	if (filter_list->num_elements == 0 ||
+	    filter_list->vsi_id != vf->vsi.vsi_num ||
+	    msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < filter_list->num_elements; i++) {
+		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+			    I40E_ERR_PARAM);
+			return;
+		}
+	}
+
+	code = ixl_vf_enable_vlan_strip(pf, vf);
+	if (code != I40E_SUCCESS) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < filter_list->num_elements; i++)
+		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
+}
+
+static void
+ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_vlan_filter_list *filter_list;
+	int i;
+	size_t expected_size;
+
+	if (msg_size < sizeof(*filter_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	filter_list = msg;
+	expected_size = sizeof(*filter_list) +
+	    filter_list->num_elements * sizeof(uint16_t);
+	if (filter_list->num_elements == 0 ||
+	    filter_list->vsi_id != vf->vsi.vsi_num ||
+	    msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < filter_list->num_elements; i++) {
+		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+			    I40E_ERR_PARAM);
+			return;
+		}
+	}
+
+	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	for (i = 0; i < filter_list->num_elements; i++)
+		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
+}
+
+static void
+ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+    void *msg, uint16_t msg_size)
+{
+	struct i40e_virtchnl_promisc_info *info;
+	enum i40e_status_code code;
+
+	if (msg_size != sizeof(*info)) {
+		i40e_send_vf_nack(pf, vf,
+		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+		return;
+	}
+
+	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
+		i40e_send_vf_nack(pf, vf,
+		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+		return;
+	}
+
+	info = msg;
+	if (info->vsi_id != vf->vsi.vsi_num) {
+		i40e_send_vf_nack(pf, vf,
+		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+		return;
+	}
+
+	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
+	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
+	if (code != I40E_SUCCESS) {
+		i40e_send_vf_nack(pf, vf,
+		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+		return;
+	}
+
+	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
+	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
+	if (code != I40E_SUCCESS) {
+		i40e_send_vf_nack(pf, vf,
+		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+		return;
+	}
+
+	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
+}
+
+static void
+ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+    uint16_t msg_size)
+{
+	struct i40e_virtchnl_queue_select *queue;
+
+	if (msg_size != sizeof(*queue)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	queue = msg;
+	if (queue->vsi_id != vf->vsi.vsi_num) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+		    I40E_ERR_PARAM);
+		return;
+	}
+
+	ixl_update_eth_stats(&vf->vsi);
+
+	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
+}
+
+static void
+ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
+{
+	struct ixl_vf *vf;
+	void *msg;
+	uint16_t vf_num, msg_size;
+	uint32_t opcode;
+
+	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
+	opcode = le32toh(event->desc.cookie_high);
+
+	if (vf_num >= pf->num_vfs) {
+		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
+		return;
+	}
+
+	vf = &pf->vfs[vf_num];
+	msg = event->msg_buf;
+	msg_size = event->msg_len;
+
+	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
+	    "Got msg %s(%d) from VF-%d of size %d\n",
+	    ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
+
+	switch (opcode) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		ixl_vf_version_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_RESET_VF:
+		ixl_vf_reset_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
+		break;
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
+		break;
+
+	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
+	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+	default:
+		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
+		break;
+	}
+}
+
+/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
+static void
+ixl_handle_vflr(void *arg, int pending)
+{
+	struct ixl_pf *pf;
+	struct i40e_hw *hw;
+	uint16_t global_vf_num;
+	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
+	int i;
+
+	pf = arg;
+	hw = &pf->hw;
+
+	IXL_PF_LOCK(pf);
+	for (i = 0; i < pf->num_vfs; i++) {
+		global_vf_num = hw->func_caps.vf_base_id + i;
+
+		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
+		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
+		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
+		if (vflrstat & vflrstat_mask) {
+			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
+			    vflrstat_mask);
+
+			ixl_reinit_vf(pf, &pf->vfs[i]);
+		}
+	}
+
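+	/* Re-arm the VFLR interrupt cause now that pending resets are handled. */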
+	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
+	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
+	ixl_flush(hw);
+
+	IXL_PF_UNLOCK(pf);
+}
+
+static int
+ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
+{
+
+	switch (err) {
+	case I40E_AQ_RC_EPERM:
+		return (EPERM);
+	case I40E_AQ_RC_ENOENT:
+		return (ENOENT);
+	case I40E_AQ_RC_ESRCH:
+		return (ESRCH);
+	case I40E_AQ_RC_EINTR:
+		return (EINTR);
+	case I40E_AQ_RC_EIO:
+		return (EIO);
+	case I40E_AQ_RC_ENXIO:
+		return (ENXIO);
+	case I40E_AQ_RC_E2BIG:
+		return (E2BIG);
+	case I40E_AQ_RC_EAGAIN:
+		return (EAGAIN);
+	case I40E_AQ_RC_ENOMEM:
+		return (ENOMEM);
+	case I40E_AQ_RC_EACCES:
+		return (EACCES);
+	case I40E_AQ_RC_EFAULT:
+		return (EFAULT);
+	case I40E_AQ_RC_EBUSY:
+		return (EBUSY);
+	case I40E_AQ_RC_EEXIST:
+		return (EEXIST);
+	case I40E_AQ_RC_EINVAL:
+		return (EINVAL);
+	case I40E_AQ_RC_ENOTTY:
+		return (ENOTTY);
+	case I40E_AQ_RC_ENOSPC:
+		return (ENOSPC);
+	case I40E_AQ_RC_ENOSYS:
+		return (ENOSYS);
+	case I40E_AQ_RC_ERANGE:
+		return (ERANGE);
+	case I40E_AQ_RC_EFLUSHED:
+		return (EINVAL);	/* No exact equivalent in errno.h */
+	case I40E_AQ_RC_BAD_ADDR:
+		return (EFAULT);
+	case I40E_AQ_RC_EMODE:
+		return (EPERM);
+	case I40E_AQ_RC_EFBIG:
+		return (EFBIG);
+	default:
+		return (EINVAL);
+	}
+}
+
+static int
+ixl_init_iov(device_t dev, uint16_t num_vfs, const nvlist_t *params)
+{
+	struct ixl_pf *pf;
+	struct i40e_hw *hw;
+	struct ixl_vsi *pf_vsi;
+	enum i40e_status_code ret;
+	int i, error;
+
+	pf = device_get_softc(dev);
+	hw = &pf->hw;
+	pf_vsi = &pf->vsi;
+
+	IXL_PF_LOCK(pf);
+	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
+	    M_ZERO);
+
+	if (pf->vfs == NULL) {
+		error = ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0; i < num_vfs; i++)
+		sysctl_ctx_init(&pf->vfs[i].ctx);
+
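+	/*
+	 * Create a VEB (virtual Ethernet bridge) with the PF's VSI as its
+	 * uplink; each VF VSI is later attached to this VEB via pf->veb_seid
+	 * in ixl_vf_alloc_vsi().
+	 */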
+	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
+	    1, FALSE, FALSE, &pf->veb_seid, NULL);
+	if (ret != I40E_SUCCESS) {
+		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
+		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
+		    error);
+		goto fail;
+	}
+
+	ixl_configure_msix(pf);
+	ixl_enable_adminq(hw);
+
+	pf->num_vfs = num_vfs;
+	IXL_PF_UNLOCK(pf);
+	return (0);
+
+fail:
+	free(pf->vfs, M_IXL);
+	pf->vfs = NULL;
+	IXL_PF_UNLOCK(pf);
+	return (error);
+}
+
+static void
+ixl_uninit_iov(device_t dev)
+{
+	struct ixl_pf *pf;
+	struct i40e_hw *hw;
+	struct ixl_vsi *vsi;
+	struct ifnet *ifp;
+	struct ixl_vf *vfs;
+	int i, num_vfs;
+
+	pf = device_get_softc(dev);
+	hw = &pf->hw;
+	vsi = &pf->vsi;
+	ifp = vsi->ifp;
+
+	IXL_PF_LOCK(pf);
+	for (i = 0; i < pf->num_vfs; i++) {
+		if (pf->vfs[i].vsi.seid != 0)
+			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
+	}
+
+	if (pf->veb_seid != 0) {
+		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
+		pf->veb_seid = 0;
+	}
+
+	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
+		ixl_disable_intr(vsi);
+
+	vfs = pf->vfs;
+	num_vfs = pf->num_vfs;
+
+	pf->vfs = NULL;
+	pf->num_vfs = 0;
+	IXL_PF_UNLOCK(pf);
+
+	/* Do this after the unlock as sysctl_ctx_free might sleep. */
+	for (i = 0; i < num_vfs; i++)
+		sysctl_ctx_free(&vfs[i].ctx);
+	free(vfs, M_IXL);
+}
+
+static int
+ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
+{
+	char sysctl_name[QUEUE_NAME_LEN];
+	struct ixl_pf *pf;
+	struct ixl_vf *vf;
+	const void *mac;
+	size_t size;
+	int error;
+
+	pf = device_get_softc(dev);
+	vf = &pf->vfs[vfnum];
+
+	IXL_PF_LOCK(pf);
+	vf->vf_num = vfnum;
+
+	vf->vsi.back = pf;
+	vf->vf_flags = VF_FLAG_ENABLED;
+	SLIST_INIT(&vf->vsi.ftl);
+
+	error = ixl_vf_setup_vsi(pf, vf);
+	if (error != 0)
+		goto out;
+
+	if (nvlist_exists_binary(params, "mac-addr")) {
+		mac = nvlist_get_binary(params, "mac-addr", &size);
+		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
+
+		if (nvlist_get_bool(params, "allow-set-mac"))
+			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+	} else
+		/*
+		 * If the administrator has not specified a MAC address then
+		 * we must allow the VF to choose one.
+		 */
+		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+
+	if (nvlist_get_bool(params, "mac-anti-spoof"))
+		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
+
+	if (nvlist_get_bool(params, "allow-promisc"))
+		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
+
+	vf->vf_flags |= VF_FLAG_VLAN_CAP;
+
+	ixl_reset_vf(pf, vf);
+out:
+	IXL_PF_UNLOCK(pf);
+	if (error == 0) {
+		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
+		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
+	}
+
+	return (error);
+}
+#endif /* PCI_IOV */


Property changes on: trunk/sys/dev/ixl/if_ixl.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/if_ixlv.c
===================================================================
--- trunk/sys/dev/ixl/if_ixlv.c	                        (rev 0)
+++ trunk/sys/dev/ixl/if_ixlv.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,2969 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/if_ixlv.c 303107 2016-07-20 18:26:48Z sbruno $*/
+
+#ifndef IXL_STANDALONE_BUILD
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#endif
+
+#include "ixl.h"
+#include "ixlv.h"
+
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+/*********************************************************************
+ *  Driver version
+ *********************************************************************/
+char ixlv_driver_version[] = "1.2.6";
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select the devices the driver loads on
+ *  Last field stores an index into ixlv_strings
+ *  Last entry must be all 0s
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixl_vendor_info_t ixlv_vendor_info_array[] =
+{
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
+	{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
+	/* required last entry */
+	{0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings
+ *********************************************************************/
+
+static char    *ixlv_strings[] = {
+	"Intel(R) Ethernet Connection XL710 VF Driver"
+};
+
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int      ixlv_probe(device_t);
+static int      ixlv_attach(device_t);
+static int      ixlv_detach(device_t);
+static int      ixlv_shutdown(device_t);
+static void	ixlv_init_locked(struct ixlv_sc *);
+static int	ixlv_allocate_pci_resources(struct ixlv_sc *);
+static void	ixlv_free_pci_resources(struct ixlv_sc *);
+static int	ixlv_assign_msix(struct ixlv_sc *);
+static int	ixlv_init_msix(struct ixlv_sc *);
+static int	ixlv_init_taskqueue(struct ixlv_sc *);
+static int	ixlv_setup_queues(struct ixlv_sc *);
+static void	ixlv_config_rss(struct ixlv_sc *);
+static void	ixlv_stop(struct ixlv_sc *);
+static void	ixlv_add_multi(struct ixl_vsi *);
+static void	ixlv_del_multi(struct ixl_vsi *);
+static void	ixlv_free_queues(struct ixl_vsi *);
+static int	ixlv_setup_interface(device_t, struct ixlv_sc *);
+
+static int	ixlv_media_change(struct ifnet *);
+static void	ixlv_media_status(struct ifnet *, struct ifmediareq *);
+
+static void	ixlv_local_timer(void *);
+
+static int	ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16);
+static int	ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr);
+static void	ixlv_init_filters(struct ixlv_sc *);
+static void	ixlv_free_filters(struct ixlv_sc *);
+
+static void	ixlv_msix_que(void *);
+static void	ixlv_msix_adminq(void *);
+static void	ixlv_do_adminq(void *, int);
+static void	ixlv_do_adminq_locked(struct ixlv_sc *sc);
+static void	ixlv_handle_que(void *, int);
+static int	ixlv_reset(struct ixlv_sc *);
+static int	ixlv_reset_complete(struct i40e_hw *);
+static void	ixlv_set_queue_rx_itr(struct ixl_queue *);
+static void	ixlv_set_queue_tx_itr(struct ixl_queue *);
+static void	ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
+		    enum i40e_status_code);
+
+static void	ixlv_enable_adminq_irq(struct i40e_hw *);
+static void	ixlv_disable_adminq_irq(struct i40e_hw *);
+static void	ixlv_enable_queue_irq(struct i40e_hw *, int);
+static void	ixlv_disable_queue_irq(struct i40e_hw *, int);
+
+static void	ixlv_setup_vlan_filters(struct ixlv_sc *);
+static void	ixlv_register_vlan(void *, struct ifnet *, u16);
+static void	ixlv_unregister_vlan(void *, struct ifnet *, u16);
+
+static void	ixlv_init_hw(struct ixlv_sc *);
+static int	ixlv_setup_vc(struct ixlv_sc *);
+static int	ixlv_vf_config(struct ixlv_sc *);
+
+static void	ixlv_cap_txcsum_tso(struct ixl_vsi *,
+		    struct ifnet *, int);
+
+static void	ixlv_add_sysctls(struct ixlv_sc *);
+static int 	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
+static int 	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixlv_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, ixlv_probe),
+	DEVMETHOD(device_attach, ixlv_attach),
+	DEVMETHOD(device_detach, ixlv_detach),
+	DEVMETHOD(device_shutdown, ixlv_shutdown),
+	{0, 0}
+};
+
+static driver_t ixlv_driver = {
+	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
+};
+
+devclass_t ixlv_devclass;
+DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
+
+MODULE_DEPEND(ixlv, pci, 1, 1, 1);
+MODULE_DEPEND(ixlv, ether, 1, 1, 1);
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
+                   "IXLV driver parameters");
+
+/*
+** Number of descriptors per ring:
+**   - TX and RX are the same size
+*/
+static int ixlv_ringsz = DEFAULT_RING;
+TUNABLE_INT("hw.ixlv.ringsz", &ixlv_ringsz);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, ring_size, CTLFLAG_RDTUN,
+    &ixlv_ringsz, 0, "Descriptor Ring Size");
+
+/* Set to zero to auto calculate  */
+int ixlv_max_queues = 0;
+TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
+    &ixlv_max_queues, 0, "Number of Queues");
+
+/*
+** Number of entries in Tx queue buf_ring.
+** Increasing this will reduce the number of
+** errors when transmitting fragmented UDP
+** packets.
+*/
+static int ixlv_txbrsz = DEFAULT_TXBRSZ;
+TUNABLE_INT("hw.ixlv.txbrsz", &ixlv_txbrsz);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, txbr_size, CTLFLAG_RDTUN,
+    &ixlv_txbrsz, 0, "TX Buf Ring Size");
+
+/*
+** Controls for Interrupt Throttling
+**      - true/false for dynamic adjustment
+**      - default values for static ITR
+*/
+int ixlv_dynamic_rx_itr = 0;
+TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
+    &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
+
+int ixlv_dynamic_tx_itr = 0;
+TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
+    &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
+
+int ixlv_rx_itr = IXL_ITR_8K;
+TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
+    &ixlv_rx_itr, 0, "RX Interrupt Rate");
+
+int ixlv_tx_itr = IXL_ITR_4K;
+TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr);
+SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
+    &ixlv_tx_itr, 0, "TX Interrupt Rate");
+
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  ixlv_probe determines if the driver should be loaded on
+ *  the hardware based on PCI vendor/device id of the device.
+ *
+ *  return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixlv_probe(device_t dev)
+{
+	ixl_vendor_info_t *ent;
+
+	u16	pci_vendor_id, pci_device_id;
+	u16	pci_subvendor_id, pci_subdevice_id;
+	char	device_name[256];
+
+	INIT_DEBUGOUT("ixlv_probe: begin");
+
+	pci_vendor_id = pci_get_vendor(dev);
+	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
+		return (ENXIO);
+
+	pci_device_id = pci_get_device(dev);
+	pci_subvendor_id = pci_get_subvendor(dev);
+	pci_subdevice_id = pci_get_subdevice(dev);
+
+	ent = ixlv_vendor_info_array;
+	while (ent->vendor_id != 0) {
+		if ((pci_vendor_id == ent->vendor_id) &&
+		    (pci_device_id == ent->device_id) &&
+
+		    ((pci_subvendor_id == ent->subvendor_id) ||
+		     (ent->subvendor_id == 0)) &&
+
+		    ((pci_subdevice_id == ent->subdevice_id) ||
+		     (ent->subdevice_id == 0))) {
+			sprintf(device_name, "%s, Version - %s",
+				ixlv_strings[ent->index],
+				ixlv_driver_version);
+			device_set_desc_copy(dev, device_name);
+			return (BUS_PROBE_DEFAULT);
+		}
+		ent++;
+	}
+	return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixlv_attach(device_t dev)
+{
+	struct ixlv_sc	*sc;
+	struct i40e_hw	*hw;
+	struct ixl_vsi 	*vsi;
+	int            	error = 0;
+
+	INIT_DBG_DEV(dev, "begin");
+
+	/* Allocate, clear, and link in our primary soft structure */
+	sc = device_get_softc(dev);
+	sc->dev = sc->osdep.dev = dev;
+	hw = &sc->hw;
+	vsi = &sc->vsi;
+	vsi->dev = dev;
+
+	/* Initialize hw struct */
+	ixlv_init_hw(sc);
+
+	/* Allocate filter lists */
+	ixlv_init_filters(sc);
+
+	/* Core Lock Init*/
+	mtx_init(&sc->mtx, device_get_nameunit(dev),
+	    "IXL SC Lock", MTX_DEF);
+
+	/* Set up the timer callout */
+	callout_init_mtx(&sc->timer, &sc->mtx, 0);
+
+	/* Do PCI setup - map BAR0, etc */
+	if (ixlv_allocate_pci_resources(sc)) {
+		device_printf(dev, "%s: Allocation of PCI resources failed\n",
+		    __func__);
+		error = ENXIO;
+		goto err_early;
+	}
+
+	INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors");
+
+	error = i40e_set_mac_type(hw);
+	if (error) {
+		device_printf(dev, "%s: set_mac_type failed: %d\n",
+		    __func__, error);
+		goto err_pci_res;
+	}
+
+	error = ixlv_reset_complete(hw);
+	if (error) {
+		device_printf(dev, "%s: Device is still being reset\n",
+		    __func__);
+		goto err_pci_res;
+	}
+
+	INIT_DBG_DEV(dev, "VF Device is ready for configuration");
+
+	error = ixlv_setup_vc(sc);
+	if (error) {
+		device_printf(dev, "%s: Error setting up PF comms, %d\n",
+		    __func__, error);
+		goto err_pci_res;
+	}
+
+	INIT_DBG_DEV(dev, "PF API version verified");
+
+	/* TODO: Figure out why MDD events occur when this reset is removed. */
+	/* Need API version before sending reset message */
+	error = ixlv_reset(sc);
+	if (error) {
+		device_printf(dev, "VF reset failed; reload the driver\n");
+		goto err_aq;
+	}
+
+	INIT_DBG_DEV(dev, "VF reset complete");
+
+	/* Ask for VF config from PF */
+	error = ixlv_vf_config(sc);
+	if (error) {
+		device_printf(dev, "Error getting configuration from PF: %d\n",
+		    error);
+		goto err_aq;
+	}
+
+	INIT_DBG_DEV(dev, "VF config from PF:");
+	INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
+	    sc->vf_res->num_vsis,
+	    sc->vf_res->num_queue_pairs,
+	    sc->vf_res->max_vectors,
+	    sc->vf_res->max_mtu);
+	INIT_DBG_DEV(dev, "Offload flags: %#010x",
+	    sc->vf_res->vf_offload_flags);
+
+	// TODO: Move this into ixlv_vf_config?
+	/* got VF config message back from PF, now we can parse it */
+	for (int i = 0; i < sc->vf_res->num_vsis; i++) {
+		if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+			sc->vsi_res = &sc->vf_res->vsi_res[i];
+	}
+	if (!sc->vsi_res) {
+		device_printf(dev, "%s: no LAN VSI found\n", __func__);
+		error = EIO;
+		goto err_res_buf;
+	}
+
+	INIT_DBG_DEV(dev, "Resource Acquisition complete");
+
+	/* If no mac address was assigned just make a random one */
+	if (!ixlv_check_ether_addr(hw->mac.addr)) {
+		u8 addr[ETHER_ADDR_LEN];
+		arc4rand(&addr, sizeof(addr), 0);
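+		/* Clear the multicast bit and set the locally-administered bit. */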
+		addr[0] &= 0xFE;
+		addr[0] |= 0x02;
+		bcopy(addr, hw->mac.addr, sizeof(addr));
+	}
+
+	vsi->id = sc->vsi_res->vsi_id;
+	vsi->back = (void *)sc;
+	sc->link_up = TRUE;
+
+	/* This allocates the memory and early settings */
+	if (ixlv_setup_queues(sc) != 0) {
+		device_printf(dev, "%s: setup queues failed!\n",
+		    __func__);
+		error = EIO;
+		goto out;
+	}
+
+	/* Setup the stack interface */
+	if (ixlv_setup_interface(dev, sc) != 0) {
+		device_printf(dev, "%s: setup interface failed!\n",
+		    __func__);
+		error = EIO;
+		goto out;
+	}
+
+	INIT_DBG_DEV(dev, "Queue memory and interface setup");
+
+	/* Do queue interrupt setup */
+	ixlv_assign_msix(sc);
+
+	/* Start AdminQ taskqueue */
+	ixlv_init_taskqueue(sc);
+
+	/* Initialize stats */
+	bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats));
+	ixlv_add_sysctls(sc);
+
+	/* Register for VLAN events */
+	vsi->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+	    ixlv_register_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+	vsi->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+	    ixlv_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
+
+	/* We want AQ enabled early */
+	ixlv_enable_adminq_irq(hw);
+
+	/* Set things up to run init */
+	sc->init_state = IXLV_INIT_READY;
+
+	ixl_vc_init_mgr(sc, &sc->vc_mgr);
+
+	INIT_DBG_DEV(dev, "end");
+	return (error);
+
+out:
+	ixlv_free_queues(vsi);
+err_res_buf:
+	free(sc->vf_res, M_DEVBUF);
+err_aq:
+	i40e_shutdown_adminq(hw);
+err_pci_res:
+	ixlv_free_pci_resources(sc);
+err_early:
+	mtx_destroy(&sc->mtx);
+	ixlv_free_filters(sc);
+	INIT_DBG_DEV(dev, "end: error %d", error);
+	return (error);
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixlv_detach(device_t dev)
+{
+	struct ixlv_sc	*sc = device_get_softc(dev);
+	struct ixl_vsi 	*vsi = &sc->vsi;
+
+	INIT_DBG_DEV(dev, "begin");
+
+	/* Make sure VLANs are not using the driver */
+	if (vsi->ifp->if_vlantrunk != NULL) {
+		if_printf(vsi->ifp, "VLAN in use, detach first\n");
+		INIT_DBG_DEV(dev, "end");
+		return (EBUSY);
+	}
+
+	/* Stop driver */
+	ether_ifdetach(vsi->ifp);
+	if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		mtx_lock(&sc->mtx);	
+		ixlv_stop(sc);
+		mtx_unlock(&sc->mtx);	
+	}
+
+	/* Unregister VLAN events */
+	if (vsi->vlan_attach != NULL)
+		EVENTHANDLER_DEREGISTER(vlan_config, vsi->vlan_attach);
+	if (vsi->vlan_detach != NULL)
+		EVENTHANDLER_DEREGISTER(vlan_unconfig, vsi->vlan_detach);
+
+	/* Drain VC mgr */
+	callout_drain(&sc->vc_mgr.callout);
+
+	i40e_shutdown_adminq(&sc->hw);
+	taskqueue_free(sc->tq);
+	if_free(vsi->ifp);
+	free(sc->vf_res, M_DEVBUF);
+	ixlv_free_pci_resources(sc);
+	ixlv_free_queues(vsi);
+	mtx_destroy(&sc->mtx);
+	ixlv_free_filters(sc);
+
+	bus_generic_detach(dev);
+	INIT_DBG_DEV(dev, "end");
+	return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+ixlv_shutdown(device_t dev)
+{
+	struct ixlv_sc	*sc = device_get_softc(dev);
+
+	INIT_DBG_DEV(dev, "begin");
+
+	mtx_lock(&sc->mtx);	
+	ixlv_stop(sc);
+	mtx_unlock(&sc->mtx);	
+
+	INIT_DBG_DEV(dev, "end");
+	return (0);
+}
+
+/*
+ * Configure TXCSUM(IPv6) and TSO(4/6)
+ *	- the hardware handles these together, so we
+ *	  need to tweak them in tandem
+ */
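+/*
+ * IXL_FLAGS_KEEP_TSO4/6 record that TSO was force-disabled along with
+ * txcsum, so TSO can be restored automatically when txcsum is re-enabled.
+ */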
+static void
+ixlv_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
+{
+	/* Enable/disable TXCSUM/TSO4 */
+	if (!(ifp->if_capenable & IFCAP_TXCSUM)
+	    && !(ifp->if_capenable & IFCAP_TSO4)) {
+		if (mask & IFCAP_TXCSUM) {
+			ifp->if_capenable |= IFCAP_TXCSUM;
+			/* enable TXCSUM, restore TSO if previously enabled */
+			if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
+				vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+				ifp->if_capenable |= IFCAP_TSO4;
+			}
+		}
+		else if (mask & IFCAP_TSO4) {
+			ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
+			vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+			if_printf(ifp,
+			    "TSO4 requires txcsum, enabling both...\n");
+		}
+	} else if((ifp->if_capenable & IFCAP_TXCSUM)
+	    && !(ifp->if_capenable & IFCAP_TSO4)) {
+		if (mask & IFCAP_TXCSUM)
+			ifp->if_capenable &= ~IFCAP_TXCSUM;
+		else if (mask & IFCAP_TSO4)
+			ifp->if_capenable |= IFCAP_TSO4;
+	} else if((ifp->if_capenable & IFCAP_TXCSUM)
+	    && (ifp->if_capenable & IFCAP_TSO4)) {
+		if (mask & IFCAP_TXCSUM) {
+			vsi->flags |= IXL_FLAGS_KEEP_TSO4;
+			ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
+			if_printf(ifp, 
+			    "TSO4 requires txcsum, disabling both...\n");
+		} else if (mask & IFCAP_TSO4)
+			ifp->if_capenable &= ~IFCAP_TSO4;
+	}
+
+	/* Enable/disable TXCSUM_IPV6/TSO6 */
+	if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+	    && !(ifp->if_capenable & IFCAP_TSO6)) {
+		if (mask & IFCAP_TXCSUM_IPV6) {
+			ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
+			if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
+				vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+				ifp->if_capenable |= IFCAP_TSO6;
+			}
+		} else if (mask & IFCAP_TSO6) {
+			ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+			vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+			if_printf(ifp,
+			    "TSO6 requires txcsum6, enabling both...\n");
+		}
+	} else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+	    && !(ifp->if_capenable & IFCAP_TSO6)) {
+		if (mask & IFCAP_TXCSUM_IPV6)
+			ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
+		else if (mask & IFCAP_TSO6)
+			ifp->if_capenable |= IFCAP_TSO6;
+	} else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+	    && (ifp->if_capenable & IFCAP_TSO6)) {
+		if (mask & IFCAP_TXCSUM_IPV6) {
+			vsi->flags |= IXL_FLAGS_KEEP_TSO6;
+			ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+			if_printf(ifp,
+			    "TSO6 requires txcsum6, disabling both...\n");
+		} else if (mask & IFCAP_TSO6)
+			ifp->if_capenable &= ~IFCAP_TSO6;
+	}
+}
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  ixlv_ioctl is called when the user wants to configure the
+ *  interface.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+	struct ixl_vsi		*vsi = ifp->if_softc;
+	struct ixlv_sc	*sc = vsi->back;
+	struct ifreq		*ifr = (struct ifreq *)data;
+#if defined(INET) || defined(INET6)
+	struct ifaddr 		*ifa = (struct ifaddr *)data;
+	bool			avoid_reset = FALSE;
+#endif
+	int             	error = 0;
+
+
+	switch (command) {
+
+	case SIOCSIFADDR:
+#ifdef INET
+		if (ifa->ifa_addr->sa_family == AF_INET)
+			avoid_reset = TRUE;
+#endif
+#ifdef INET6
+		if (ifa->ifa_addr->sa_family == AF_INET6)
+			avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+		/*
+		** Calling init results in link renegotiation,
+		** so we avoid doing it when possible.
+		*/
+		if (avoid_reset) {
+			ifp->if_flags |= IFF_UP;
+			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+				ixlv_init(vsi);
+#ifdef INET
+			if (!(ifp->if_flags & IFF_NOARP))
+				arp_ifinit(ifp, ifa);
+#endif
+		} else
+			error = ether_ioctl(ifp, command, data);
+		break;
+#endif
+	case SIOCSIFMTU:
+		IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)");
+		mtx_lock(&sc->mtx);
+		if (ifr->ifr_mtu > IXL_MAX_FRAME -
+		    ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
+			error = EINVAL;
+			IOCTL_DBG_IF(ifp, "mtu too large");
+		} else {
+			IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", ifp->if_mtu, ifr->ifr_mtu);
+			// ERJ: Interestingly enough, these types don't match
+			ifp->if_mtu = (u_long)ifr->ifr_mtu;
+			vsi->max_frame_size =
+			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+			    + ETHER_VLAN_ENCAP_LEN;
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				ixlv_init_locked(sc);
+		}
+		mtx_unlock(&sc->mtx);
+		break;
+	case SIOCSIFFLAGS:
+		IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)");
+		mtx_lock(&sc->mtx);
+		if (ifp->if_flags & IFF_UP) {
+			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+				ixlv_init_locked(sc);
+		} else
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+				ixlv_stop(sc);
+		sc->if_flags = ifp->if_flags;
+		mtx_unlock(&sc->mtx);
+		break;
+	case SIOCADDMULTI:
+		IOCTL_DBG_IF2(ifp, "SIOCADDMULTI");
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			mtx_lock(&sc->mtx);
+			ixlv_disable_intr(vsi);
+			ixlv_add_multi(vsi);
+			ixlv_enable_intr(vsi);
+			mtx_unlock(&sc->mtx);
+		}
+		break;
+	case SIOCDELMULTI:
+		IOCTL_DBG_IF2(ifp, "SIOCDELMULTI");
+		if (sc->init_state == IXLV_RUNNING) {
+			mtx_lock(&sc->mtx);
+			ixlv_disable_intr(vsi);
+			ixlv_del_multi(vsi);
+			ixlv_enable_intr(vsi);
+			mtx_unlock(&sc->mtx);
+		}
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)");
+		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+		break;
+	case SIOCSIFCAP:
+	{
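+		/* Bits set in mask are the capabilities being toggled */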
+		int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+		IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)");
+
+		ixlv_cap_txcsum_tso(vsi, ifp, mask);
+
+		if (mask & IFCAP_RXCSUM)
+			ifp->if_capenable ^= IFCAP_RXCSUM;
+		if (mask & IFCAP_RXCSUM_IPV6)
+			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+		if (mask & IFCAP_LRO)
+			ifp->if_capenable ^= IFCAP_LRO;
+		if (mask & IFCAP_VLAN_HWTAGGING)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+		if (mask & IFCAP_VLAN_HWFILTER)
+			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+		if (mask & IFCAP_VLAN_HWTSO)
+			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			ixlv_init(vsi);
+		}
+		VLAN_CAPABILITIES(ifp);
+
+		break;
+	}
+
+	default:
+		IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command);
+		error = ether_ioctl(ifp, command, data);
+		break;
+	}
+
+	return (error);
+}
+
+/*
+** To do a reinit on the VF is unfortunately more complicated
+** than a physical device, we must have the PF more or less
+** completely recreate our memory, so many things that were
+** done only once at attach in traditional drivers now must be
+** redone at each reinitialization. This function does that
+** 'prelude' so we can then call the normal locked init code.
+*/
+int
+ixlv_reinit_locked(struct ixlv_sc *sc)
+{
+	struct i40e_hw		*hw = &sc->hw;
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ifnet		*ifp = vsi->ifp;
+	struct ixlv_mac_filter  *mf, *mf_temp;
+	struct ixlv_vlan_filter	*vf;
+	int			error = 0;
+
+	INIT_DBG_IF(ifp, "begin");
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		ixlv_stop(sc);
+
+	error = ixlv_reset(sc);
+
+	INIT_DBG_IF(ifp, "VF was reset");
+
+	/* set the state in case we went thru RESET */
+	sc->init_state = IXLV_RUNNING;
+
+	/*
+	** Resetting the VF drops all filters from hardware;
+	** we need to mark them to be re-added in init.
+	*/
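+	/*
+	** A filter already marked IXL_FILTER_DEL is simply freed
+	** here, since the reset removed its hardware copy anyway;
+	** everything else is flagged IXL_FILTER_ADD so the init
+	** path re-sends it to the PF.
+	*/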
+	SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) {
+		if (mf->flags & IXL_FILTER_DEL) {
+			SLIST_REMOVE(sc->mac_filters, mf,
+			    ixlv_mac_filter, next);
+			free(mf, M_DEVBUF);
+		} else
+			mf->flags |= IXL_FILTER_ADD;
+	}
+	if (vsi->num_vlans != 0)
+		SLIST_FOREACH(vf, sc->vlan_filters, next)
+			vf->flags = IXL_FILTER_ADD;
+	else { /* clean any stale filters */
+		while (!SLIST_EMPTY(sc->vlan_filters)) {
+			vf = SLIST_FIRST(sc->vlan_filters);
+			SLIST_REMOVE_HEAD(sc->vlan_filters, next);
+			free(vf, M_DEVBUF);
+		}
+	}
+
+	ixlv_enable_adminq_irq(hw);
+	ixl_vc_flush(&sc->vc_mgr);
+
+	INIT_DBG_IF(ifp, "end");
+	return (error);
+}
+
+static void
+ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
+	enum i40e_status_code code)
+{
+	struct ixlv_sc *sc;
+
+	sc = arg;
+
+	/*
+	 * Ignore "Adapter Stopped" errors: they occur when an ifconfig
+	 * down arrives while a command is still in progress, and are
+	 * not worth reporting.
+	 */
+	if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
+		if_printf(sc->vsi.ifp,
+		    "Error %d waiting for PF to complete operation %d\n",
+		    code, cmd->request);
+	}
+}
+
+static void
+ixlv_init_locked(struct ixlv_sc *sc)
+{
+	struct i40e_hw		*hw = &sc->hw;
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	struct ifnet		*ifp = vsi->ifp;
+	int			 error = 0;
+
+	INIT_DBG_IF(ifp, "begin");
+
+	IXLV_CORE_LOCK_ASSERT(sc);
+
+	/* Do a reinit first if an init has already been done */
+	if ((sc->init_state == IXLV_RUNNING) ||
+	    (sc->init_state == IXLV_RESET_REQUIRED) ||
+	    (sc->init_state == IXLV_RESET_PENDING))
+		error = ixlv_reinit_locked(sc);
+	/* Don't bother with init if we failed reinit */
+	if (error)
+		goto init_done;
+
+	/* Remove existing MAC filter if new MAC addr is set */
+	if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) {
+		error = ixlv_del_mac_filter(sc, hw->mac.addr);
+		if (error == 0)
+			ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, 
+			    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
+			    sc);
+	}
+
+	/* Adopt the interface's (possibly locally administered) MAC address */
+	bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN);
+
+	ifp->if_hwassist = 0;
+	if (ifp->if_capenable & IFCAP_TSO)
+		ifp->if_hwassist |= CSUM_TSO;
+	if (ifp->if_capenable & IFCAP_TXCSUM)
+		ifp->if_hwassist |= (CSUM_OFFLOAD_IPV4 & ~CSUM_IP);
+	if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+		ifp->if_hwassist |= CSUM_OFFLOAD_IPV6;
+
+	/* Add mac filter for this VF to PF */
+	if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) {
+		error = ixlv_add_mac_filter(sc, hw->mac.addr, 0);
+		if (!error || error == EEXIST)
+			ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd,
+			    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
+			    sc);
+	}
+
+	/* Setup vlan's if needed */
+	ixlv_setup_vlan_filters(sc);
+
+	/* Prepare the queues for operation */
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		struct  rx_ring	*rxr = &que->rxr;
+
+		ixl_init_tx_ring(que);
+
+		if (vsi->max_frame_size <= MCLBYTES)
+			rxr->mbuf_sz = MCLBYTES;
+		else
+			rxr->mbuf_sz = MJUMPAGESIZE;
+		ixl_init_rx_ring(que);
+	}
+
+	/* Configure queues */
+	ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
+	    IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
+
+	/* Set up RSS */
+	ixlv_config_rss(sc);
+
+	/* Map vectors */
+	ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, 
+	    IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc);
+
+	/* Enable queues */
+	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
+	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc);
+
+	/* Start the local timer */
+	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
+
+	sc->init_state = IXLV_RUNNING;
+
+init_done:
+	INIT_DBG_IF(ifp, "end");
+	return;
+}
+
+/*
+**  Init entry point for the stack
+*/
+void
+ixlv_init(void *arg)
+{
+	struct ixl_vsi *vsi = (struct ixl_vsi *)arg;
+	struct ixlv_sc *sc = vsi->back;
+	int retries = 0;
+
+	mtx_lock(&sc->mtx);
+	ixlv_init_locked(sc);
+	mtx_unlock(&sc->mtx);
+
+	/* Wait for init_locked to finish */
+	while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
+	    && ++retries < 100) {
+		i40e_msec_delay(10);
+	}
+	if (retries >= 100)
+		if_printf(vsi->ifp,
+		    "Init failed to complete in allotted time!\n");
+}
+
+/*
+ * ixlv_attach() helper function; gathers information about
+ * the (virtual) hardware for use elsewhere in the driver.
+ */
+static void
+ixlv_init_hw(struct ixlv_sc *sc)
+{
+	struct i40e_hw *hw = &sc->hw;
+	device_t dev = sc->dev;
+	
+	/* Save off the information about this board */
+	hw->vendor_id = pci_get_vendor(dev);
+	hw->device_id = pci_get_device(dev);
+	hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+	hw->subsystem_vendor_id =
+	    pci_read_config(dev, PCIR_SUBVEND_0, 2);
+	hw->subsystem_device_id =
+	    pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+	hw->bus.device = pci_get_slot(dev);
+	hw->bus.func = pci_get_function(dev);
+}
+
+/*
+ * ixlv_attach() helper function; initializes the admin queue
+ * and attempts to establish contact with the PF by
+ * retrying the initial "API version" message several times
+ * or until the PF responds.
+ */
+static int
+ixlv_setup_vc(struct ixlv_sc *sc)
+{
+	struct i40e_hw *hw = &sc->hw;
+	device_t dev = sc->dev;
+	int error = 0, ret_error = 0, asq_retries = 0;
+	bool			send_api_ver_retried = false;
+
+	/* Need to set these AQ parameters before initializing AQ */
+	hw->aq.num_arq_entries = IXL_AQ_LEN;
+	hw->aq.num_asq_entries = IXL_AQ_LEN;
+	hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
+	hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+
+	for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
+		/* Initialize admin queue */
+		error = i40e_init_adminq(hw);
+		if (error) {
+			device_printf(dev, "%s: init_adminq failed: %d\n",
+			    __func__, error);
+			ret_error = 1;
+			continue;
+		}
+
+		INIT_DBG_DEV(dev, "Initialized Admin Queue, attempt %d", i+1);
+
+retry_send:
+		/* Send VF's API version */
+		error = ixlv_send_api_ver(sc);
+		if (error) {
+			i40e_shutdown_adminq(hw);
+			ret_error = 2;
+			device_printf(dev, "%s: unable to send api"
+			    " version to PF on attempt %d, error %d\n",
+			    __func__, i+1, error);
+		}
+
+		asq_retries = 0;
+		while (!i40e_asq_done(hw)) {
+			if (++asq_retries > IXLV_AQ_MAX_ERR) {
+				i40e_shutdown_adminq(hw);
+				DDPRINTF(dev, "Admin Queue timeout "
+				    "(waiting for send_api_ver), %d more retries...",
+				    IXLV_AQ_MAX_ERR - (i + 1));
+				ret_error = 3;
+				break;
+			} 
+			i40e_msec_delay(10);
+		}
+		if (asq_retries > IXLV_AQ_MAX_ERR)
+			continue;
+
+		INIT_DBG_DEV(dev, "Sent API version message to PF");
+
+		/* Verify that the PF replied with a compatible API version */
+		error = ixlv_verify_api_ver(sc);
+		if (error == ETIMEDOUT) {
+			if (!send_api_ver_retried) {
+				/* Resend message, one more time */
+				send_api_ver_retried = true;
+				device_printf(dev,
+				    "%s: Timeout while verifying API version on first"
+				    " try!\n", __func__);
+				goto retry_send;
+			} else {
+				device_printf(dev,
+				    "%s: Timeout while verifying API version on second"
+				    " try!\n", __func__);
+				ret_error = 4;
+				break;
+			}
+		}
+		if (error) {
+			device_printf(dev,
+			    "%s: Unable to verify API version,"
+			    " error %d\n", __func__, error);
+			ret_error = 5;
+		}
+		break;
+	}
+
+	if (ret_error >= 4)
+		i40e_shutdown_adminq(hw);
+	return (ret_error);
+}
+
+/*
+ * ixlv_attach() helper function; asks the PF for this VF's
+ * configuration, and saves the information if it receives it.
+ */
+static int
+ixlv_vf_config(struct ixlv_sc *sc)
+{
+	struct i40e_hw *hw = &sc->hw;
+	device_t dev = sc->dev;
+	int bufsz, error = 0, ret_error = 0;
+	int asq_retries, retried = 0;
+
+retry_config:
+	error = ixlv_send_vf_config_msg(sc);
+	if (error) {
+		device_printf(dev,
+		    "%s: Unable to send VF config request, attempt %d,"
+		    " error %d\n", __func__, retried + 1, error);
+		ret_error = 2;
+	}
+
+	asq_retries = 0;
+	while (!i40e_asq_done(hw)) {
+		if (++asq_retries > IXLV_AQ_MAX_ERR) {
+			device_printf(dev, "%s: Admin Queue timeout "
+			    "(waiting for send_vf_config_msg), attempt %d\n",
+			    __func__, retried + 1);
+			ret_error = 3;
+			goto fail;
+		}
+		i40e_msec_delay(10);
+	}
+
+	INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
+	    retried + 1);
+
+	if (!sc->vf_res) {
+		bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
+		    (I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
+		sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT);
+		if (!sc->vf_res) {
+			device_printf(dev,
+			    "%s: Unable to allocate memory for VF configuration"
+			    " message from PF on attempt %d\n", __func__, retried + 1);
+			ret_error = 1;
+			goto fail;
+		}
+	}
+
+	/* Check for VF config response */
+	error = ixlv_get_vf_config(sc);
+	if (error == ETIMEDOUT) {
+		/* The 1st time we timeout, send the configuration message again */
+		if (!retried) {
+			retried++;
+			goto retry_config;
+		}
+	}
+	if (error) {
+		device_printf(dev,
+		    "%s: Unable to get VF configuration from PF after %d tries!\n",
+		    __func__, retried + 1);
+		ret_error = 4;
+	}
+	goto done;
+
+fail:
+	free(sc->vf_res, M_DEVBUF);
+	sc->vf_res = NULL;
+done:
+	return (ret_error);
+}
+
+/*
+ * Allocate MSI/X vectors, setup the AQ vector early
+ */
+static int
+ixlv_init_msix(struct ixlv_sc *sc)
+{
+	device_t dev = sc->dev;
+	int rid, want, vectors, queues, available;
+
+	rid = PCIR_BAR(IXL_BAR);
+	sc->msix_mem = bus_alloc_resource_any(dev,
+	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
+	if (!sc->msix_mem) {
+		/* May not be enabled */
+		device_printf(sc->dev,
+		    "Unable to map MSIX table\n");
+		goto fail;
+	}
+
+	available = pci_msix_count(dev); 
+	if (available == 0) { /* system has msix disabled */
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    rid, sc->msix_mem);
+		sc->msix_mem = NULL;
+		goto fail;
+	}
+
+	/* Figure out a reasonable auto config value */
+	queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
+
+	/* Override with hardcoded value if sane */
+	if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues)) 
+		queues = ixlv_max_queues;
+#ifdef  RSS
+	/* If we're doing RSS, clamp at the number of RSS buckets */
+	if (queues > rss_getnumbuckets())
+		queues = rss_getnumbuckets();
+#endif
+	/* Enforce the VF max value */
+	if (queues > IXLV_MAX_QUEUES)
+		queues = IXLV_MAX_QUEUES;
+
+	/*
+	** Want one vector (RX/TX pair) per queue
+	** plus an additional for the admin queue.
+	*/
+	want = queues + 1;
+	if (want <= available)	/* Have enough */
+		vectors = want;
+	else {
+		device_printf(sc->dev,
+		    "MSIX Configuration Problem, "
+		    "%d vectors available but %d wanted!\n",
+		    available, want);
+		goto fail;
+	}
+
+#ifdef RSS
+	/*
+	* If we're doing RSS, the number of queues needs to
+	* match the number of RSS buckets that are configured.
+	*
+	* + If there's more queues than RSS buckets, we'll end
+	*   up with queues that get no traffic.
+	*
+	* + If there's more RSS buckets than queues, we'll end
+	*   up having multiple RSS buckets map to the same queue,
+	*   so there'll be some contention.
+	*/
+	if (queues != rss_getnumbuckets()) {
+		device_printf(dev,
+		    "%s: queues (%d) != RSS buckets (%d)"
+		    "; performance will be impacted.\n",
+		     __func__, queues, rss_getnumbuckets());
+	}
+#endif
+
+	if (pci_alloc_msix(dev, &vectors) == 0) {
+		device_printf(sc->dev,
+		    "Using MSIX interrupts with %d vectors\n", vectors);
+		sc->msix = vectors;
+		sc->vsi.num_queues = queues;
+	} else {
+		device_printf(sc->dev, "MSIX allocation failed!\n");
+		goto fail;
+	}
+
+	/*
+	** Explicitly set the guest PCI BUSMASTER capability,
+	** and rewrite the ENABLE bit in the MSIX control
+	** register at this point, so that the host can
+	** initialize us successfully.
+	*/
+	{
+		u16 pci_cmd_word;
+		int msix_ctrl;
+		pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+		pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+		pci_find_cap(dev, PCIY_MSIX, &rid);
+		rid += PCIR_MSIX_CTRL;
+		msix_ctrl = pci_read_config(dev, rid, 2);
+		msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+		pci_write_config(dev, rid, msix_ctrl, 2);
+	}
+
+	/* Next we need to setup the vector for the Admin Queue */
+	rid = 1;	// zero vector + 1
+	sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+	    &rid, RF_SHAREABLE | RF_ACTIVE);
+	if (sc->res == NULL) {
+		device_printf(dev,"Unable to allocate"
+		    " bus resource: AQ interrupt \n");
+		goto fail;
+	}
+	if (bus_setup_intr(dev, sc->res,
+	    INTR_TYPE_NET | INTR_MPSAFE, NULL,
+	    ixlv_msix_adminq, sc, &sc->tag)) {
+		sc->res = NULL;
+		device_printf(dev, "Failed to register AQ handler");
+		goto fail;
+	}
+	bus_describe_intr(dev, sc->res, sc->tag, "adminq");
+
+	return (vectors);
+
+fail:
+	/* The VF driver MUST use MSIX */
+	return (0);
+}
+
+static int
+ixlv_allocate_pci_resources(struct ixlv_sc *sc)
+{
+	int             rid;
+	device_t        dev = sc->dev;
+
+	rid = PCIR_BAR(0);
+	sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+	    &rid, RF_ACTIVE);
+
+	if (!(sc->pci_mem)) {
+		device_printf(dev,"Unable to allocate bus resource: memory\n");
+		return (ENXIO);
+	}
+
+	sc->osdep.mem_bus_space_tag =
+		rman_get_bustag(sc->pci_mem);
+	sc->osdep.mem_bus_space_handle =
+		rman_get_bushandle(sc->pci_mem);
+	sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem);
+	sc->osdep.flush_reg = I40E_VFGEN_RSTAT;
+	sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle;
+
+	sc->hw.back = &sc->osdep;
+
+	/* Disable adminq interrupts */
+	ixlv_disable_adminq_irq(&sc->hw);
+
+	/*
+	** Now set up MSI/X; it returns the
+	** number of vectors allocated.
+	*/
+	sc->msix = ixlv_init_msix(sc);
+
+	/* We fail without MSIX support */
+	if (sc->msix == 0)
+		return (ENXIO);
+
+	return (0);
+}
+
+static void
+ixlv_free_pci_resources(struct ixlv_sc *sc)
+{
+	struct ixl_vsi         *vsi = &sc->vsi;
+	struct ixl_queue       *que = vsi->queues;
+	device_t                dev = sc->dev;
+
+	/* We may get here before stations are setup */
+	if (que == NULL)
+		goto early;
+
+	/*
+	**  Release all msix queue resources:
+	*/
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		int rid = que->msix + 1;
+		if (que->tag != NULL) {
+			bus_teardown_intr(dev, que->res, que->tag);
+			que->tag = NULL;
+		}
+		if (que->res != NULL)
+			bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+	}
+        
+early:
+	/* Clean the AdminQ interrupt */
+	if (sc->tag != NULL) {
+		bus_teardown_intr(dev, sc->res, sc->tag);
+		sc->tag = NULL;
+	}
+	if (sc->res != NULL)
+		bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
+
+	pci_release_msi(dev);
+
+	if (sc->msix_mem != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    PCIR_BAR(IXL_BAR), sc->msix_mem);
+
+	if (sc->pci_mem != NULL)
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    PCIR_BAR(0), sc->pci_mem);
+
+	return;
+}
+
+/*
+ * Create taskqueue and tasklet for Admin Queue interrupts.
+ */
+static int
+ixlv_init_taskqueue(struct ixlv_sc *sc)
+{
+	int error = 0;
+
+	TASK_INIT(&sc->aq_irq, 0, ixlv_do_adminq, sc);
+
+	sc->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+	    taskqueue_thread_enqueue, &sc->tq);
+	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s sc->tq",
+	    device_get_nameunit(sc->dev));
+
+	return (error);
+}
+
+/*********************************************************************
+ *
+ *  Setup MSIX Interrupt resources and handlers for the VSI queues
+ *
+ **********************************************************************/
+static int
+ixlv_assign_msix(struct ixlv_sc *sc)
+{
+	device_t	dev = sc->dev;
+	struct 		ixl_vsi *vsi = &sc->vsi;
+	struct 		ixl_queue *que = vsi->queues;
+	struct		tx_ring	 *txr;
+	int 		error, rid, vector = 1;
+
+	for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+		int cpu_id = i;
+		rid = vector + 1;
+		txr = &que->txr;
+		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+		    RF_SHAREABLE | RF_ACTIVE);
+		if (que->res == NULL) {
+			device_printf(dev,"Unable to allocate"
+		    	    " bus resource: que interrupt [%d]\n", vector);
+			return (ENXIO);
+		}
+		/* Set the handler function */
+		error = bus_setup_intr(dev, que->res,
+		    INTR_TYPE_NET | INTR_MPSAFE, NULL,
+		    ixlv_msix_que, que, &que->tag);
+		if (error) {
+			que->res = NULL;
+			device_printf(dev, "Failed to register que handler");
+			return (error);
+		}
+		bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+		/* Bind the vector to a CPU */
+#ifdef RSS
+		cpu_id = rss_getcpu(i % rss_getnumbuckets());
+#endif
+		bus_bind_intr(dev, que->res, cpu_id);
+		que->msix = vector;
+		vsi->que_mask |= ((u64)1 << que->msix);
+		TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+		TASK_INIT(&que->task, 0, ixlv_handle_que, que);
+		que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
+		    taskqueue_thread_enqueue, &que->tq);
+#ifdef RSS
+		taskqueue_start_threads_pinned(&que->tq, 1, PI_NET,
+		    cpu_id, "%s (bucket %d)",
+		    device_get_nameunit(dev), cpu_id);
+#else
+		taskqueue_start_threads(&que->tq, 1, PI_NET,
+		    "%s que", device_get_nameunit(dev));
+#endif
+	}
+
+	return (0);
+}
+
+/*
+** Requests a VF reset from the PF.
+**
+** Requires the VF's Admin Queue to be initialized.
+*/
+static int
+ixlv_reset(struct ixlv_sc *sc)
+{
+	struct i40e_hw	*hw = &sc->hw;
+	device_t	dev = sc->dev;
+	int		error = 0;
+
+	/* Ask the PF to reset us if we are initiating */
+	if (sc->init_state != IXLV_RESET_PENDING)
+		ixlv_request_reset(sc);
+
+	i40e_msec_delay(100);
+	error = ixlv_reset_complete(hw);
+	if (error) {
+		device_printf(dev, "%s: VF reset failed\n",
+		    __func__);
+		return (error);
+	}
+
+	error = i40e_shutdown_adminq(hw);
+	if (error) {
+		device_printf(dev, "%s: shutdown_adminq failed: %d\n",
+		    __func__, error);
+		return (error);
+	}
+
+	error = i40e_init_adminq(hw);
+	if (error) {
+		device_printf(dev, "%s: init_adminq failed: %d\n",
+		    __func__, error);
+		return (error);
+	}
+
+	return (0);
+}
+
+static int
+ixlv_reset_complete(struct i40e_hw *hw)
+{
+	u32 reg;
+
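+	/*
+	** Poll the reset status register up to 100 times,
+	** 100ms apart (roughly 10 seconds total), waiting
+	** for the PF to signal completion.
+	*/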
+	for (int i = 0; i < 100; i++) {
+		reg = rd32(hw, I40E_VFGEN_RSTAT) &
+		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+
+		if ((reg == I40E_VFR_VFACTIVE) ||
+		    (reg == I40E_VFR_COMPLETED))
+			return (0);
+		i40e_msec_delay(100);
+	}
+
+	return (EBUSY);
+}
+
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
+{
+	struct ifnet		*ifp;
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ixl_queue	*que = vsi->queues;
+
+	INIT_DBG_DEV(dev, "begin");
+
+	ifp = vsi->ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "%s: could not allocate ifnet"
+		    " structure!\n", __func__);
+		return (-1);
+	}
+
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+
+	ifp->if_mtu = ETHERMTU;
+	ifp->if_baudrate = 4000000000;  // ??
+	ifp->if_init = ixlv_init;
+	ifp->if_softc = vsi;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = ixlv_ioctl;
+
+#if __FreeBSD_version >= 1100000
+	if_setgetcounterfn(ifp, ixl_get_counter);
+#endif
+
+	ifp->if_transmit = ixl_mq_start;
+
+	ifp->if_qflush = ixl_qflush;
+	ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+
+	ether_ifattach(ifp, sc->hw.mac.addr);
+
+	vsi->max_frame_size =
+	    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+	    + ETHER_VLAN_ENCAP_LEN;
+
+	/*
+	 * Tell the upper layer(s) we support long frames.
+	 */
+	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+	ifp->if_capabilities |= IFCAP_HWCSUM;
+	ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+	ifp->if_capabilities |= IFCAP_TSO;
+	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+
+	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+			     |  IFCAP_VLAN_HWTSO
+			     |  IFCAP_VLAN_MTU
+			     |  IFCAP_VLAN_HWCSUM
+			     |  IFCAP_LRO;
+	ifp->if_capenable = ifp->if_capabilities;
+
+	/*
+	** Don't turn this on by default; if vlans are
+	** created on another pseudo device (e.g. lagg)
+	** then vlan events are not passed through, breaking
+	** operation. With HW FILTER off it works. If you are
+	** using vlans directly on the ixl driver you can
+	** enable this and get full hardware tag filtering.
+	*/
+	ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+	/*
+	 * Specify the media types supported by this adapter and register
+	 * callbacks to update media and link information
+	 */
+	ifmedia_init(&sc->media, IFM_IMASK, ixlv_media_change,
+		     ixlv_media_status);
+
+	// JFV Add media types later?
+
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+	INIT_DBG_DEV(dev, "end");
+	return (0);
+}
+
+/*
+** Allocate and setup the interface queues
+*/
+static int
+ixlv_setup_queues(struct ixlv_sc *sc)
+{
+	device_t		dev = sc->dev;
+	struct ixl_vsi		*vsi;
+	struct ixl_queue	*que;
+	struct tx_ring		*txr;
+	struct rx_ring		*rxr;
+	int 			rsize, tsize;
+	int			error = I40E_SUCCESS;
+
+	vsi = &sc->vsi;
+	vsi->back = (void *)sc;
+	vsi->hw = &sc->hw;
+	vsi->num_vlans = 0;
+
+	/* Get memory for the station queues */
+	if (!(vsi->queues =
+	    (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+	    vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate queue memory\n");
+		error = ENOMEM;
+		goto early;
+	}
+
+	for (int i = 0; i < vsi->num_queues; i++) {
+		que = &vsi->queues[i];
+		que->num_desc = ixlv_ringsz;
+		que->me = i;
+		que->vsi = vsi;
+		/* mark the queue as active */
+		vsi->active_queues |= (u64)1 << que->me;
+
+		txr = &que->txr;
+		txr->que = que;
+		txr->tail = I40E_QTX_TAIL1(que->me);
+		/* Initialize the TX lock */
+		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+		    device_get_nameunit(dev), que->me);
+		mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+		/*
+		** Create the TX descriptor ring, the extra int is
+		** added as the location for HEAD WB.
+		*/
+		tsize = roundup2((que->num_desc *
+		    sizeof(struct i40e_tx_desc)) +
+		    sizeof(u32), DBA_ALIGN);
+		if (i40e_allocate_dma_mem(&sc->hw,
+		    &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
+			device_printf(dev,
+			    "Unable to allocate TX Descriptor memory\n");
+			error = ENOMEM;
+			goto fail;
+		}
+		txr->base = (struct i40e_tx_desc *)txr->dma.va;
+		bzero((void *)txr->base, tsize);
+		/* Now allocate transmit soft structs for the ring */
+		if (ixl_allocate_tx_data(que)) {
+			device_printf(dev,
+			    "Critical Failure setting up TX structures\n");
+			error = ENOMEM;
+			goto fail;
+		}
+		/* Allocate a buf ring */
+		txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF,
+		    M_WAITOK, &txr->mtx);
+		if (txr->br == NULL) {
+			device_printf(dev,
+			    "Critical Failure setting up TX buf ring\n");
+			error = ENOMEM;
+			goto fail;
+		}
+
+		/*
+		 * Next the RX queues...
+		 */ 
+		rsize = roundup2(que->num_desc *
+		    sizeof(union i40e_rx_desc), DBA_ALIGN);
+		rxr = &que->rxr;
+		rxr->que = que;
+		rxr->tail = I40E_QRX_TAIL1(que->me);
+
+		/* Initialize the RX side lock */
+		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+		    device_get_nameunit(dev), que->me);
+		mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+		if (i40e_allocate_dma_mem(&sc->hw,
+		    &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA?
+			device_printf(dev,
+			    "Unable to allocate RX Descriptor memory\n");
+			error = ENOMEM;
+			goto fail;
+		}
+		rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+		bzero((void *)rxr->base, rsize);
+
+		/* Allocate receive soft structs for the ring*/
+		if (ixl_allocate_rx_data(que)) {
+			device_printf(dev,
+			    "Critical Failure setting up receive structs\n");
+			error = ENOMEM;
+			goto fail;
+		}
+	}
+
+	return (0);
+
+fail:
+	for (int i = 0; i < vsi->num_queues; i++) {
+		que = &vsi->queues[i];
+		rxr = &que->rxr;
+		txr = &que->txr;
+		if (rxr->base)
+			i40e_free_dma_mem(&sc->hw, &rxr->dma);
+		if (txr->base)
+			i40e_free_dma_mem(&sc->hw, &txr->dma);
+	}
+	free(vsi->queues, M_DEVBUF);
+
+early:
+	return (error);
+}
+
+/*
+** This routine is run via an vlan config EVENT,
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA, init will
+** repopulate the real table.
+*/
+static void
+ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+	struct ixl_vsi		*vsi = arg;
+	struct ixlv_sc		*sc = vsi->back;
+	struct ixlv_vlan_filter	*v;
+
+
+	if (ifp->if_softc != arg)   /* Not our event */
+		return;
+
+	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
+		return;
+
+	/* Sanity check - make sure it doesn't already exist */
+	SLIST_FOREACH(v, sc->vlan_filters, next) {
+		if (v->vlan == vtag)
+			return;
+	}
+
+	mtx_lock(&sc->mtx);
+	v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (v == NULL) {
+		mtx_unlock(&sc->mtx);
+		return;
+	}
+	++vsi->num_vlans;
+	v->vlan = vtag;
+	v->flags = IXL_FILTER_ADD;
+	SLIST_INSERT_HEAD(sc->vlan_filters, v, next);
+	ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
+	    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
+	mtx_unlock(&sc->mtx);
+	return;
+}
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT; it removes our entry
+** from the soft vfta.
+*/
+static void
+ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+	struct ixl_vsi		*vsi = arg;
+	struct ixlv_sc		*sc = vsi->back;
+	struct ixlv_vlan_filter	*v;
+	int			i = 0;
+	
+	if (ifp->if_softc != arg)
+		return;
+
+	if ((vtag == 0) || (vtag > 4095))	/* Invalid */
+		return;
+
+	mtx_lock(&sc->mtx);
+	SLIST_FOREACH(v, sc->vlan_filters, next) {
+		if (v->vlan == vtag) {
+			v->flags = IXL_FILTER_DEL;
+			++i;
+			--vsi->num_vlans;
+		}
+	}
+	if (i)
+		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd,
+		    IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc);
+	mtx_unlock(&sc->mtx);
+	return;
+}
+
+/*
+** Get a new filter and add it to the mac filter list.
+*/
+static struct ixlv_mac_filter *
+ixlv_get_mac_filter(struct ixlv_sc *sc)
+{
+	struct ixlv_mac_filter	*f;
+
+	f = malloc(sizeof(struct ixlv_mac_filter),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (f)
+		SLIST_INSERT_HEAD(sc->mac_filters, f, next);
+
+	return (f);
+}
+
+/*
+** Find the filter with matching MAC address
+*/
+static struct ixlv_mac_filter *
+ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
+{
+	struct ixlv_mac_filter	*f;
+	bool				match = FALSE;
+
+	SLIST_FOREACH(f, sc->mac_filters, next) {
+		if (cmp_etheraddr(f->macaddr, macaddr)) {
+			match = TRUE;
+			break;
+		}
+	}	
+
+	if (!match)
+		f = NULL;
+	return (f);
+}
+
+/*
+** Admin Queue interrupt handler
+*/
+static void
+ixlv_msix_adminq(void *arg)
+{
+	struct ixlv_sc	*sc = arg;
+	struct i40e_hw	*hw = &sc->hw;
+	u32		reg, mask;
+
+	reg = rd32(hw, I40E_VFINT_ICR01);
+	mask = rd32(hw, I40E_VFINT_ICR0_ENA1);
+
+	reg = rd32(hw, I40E_VFINT_DYN_CTL01);
+	reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
+	wr32(hw, I40E_VFINT_DYN_CTL01, reg);
+
+	/* schedule task */
+	taskqueue_enqueue(sc->tq, &sc->aq_irq);
+	return;
+}
+
+void
+ixlv_enable_intr(struct ixl_vsi *vsi)
+{
+	struct i40e_hw		*hw = vsi->hw;
+	struct ixl_queue	*que = vsi->queues;
+
+	ixlv_enable_adminq_irq(hw);
+	for (int i = 0; i < vsi->num_queues; i++, que++)
+		ixlv_enable_queue_irq(hw, que->me);
+}
+
+void
+ixlv_disable_intr(struct ixl_vsi *vsi)
+{
+	struct i40e_hw		*hw = vsi->hw;
+	struct ixl_queue	*que = vsi->queues;
+
+	ixlv_disable_adminq_irq(hw);
+	for (int i = 0; i < vsi->num_queues; i++, que++)
+		ixlv_disable_queue_irq(hw, que->me);
+}
+
+
+static void
+ixlv_disable_adminq_irq(struct i40e_hw *hw)
+{
+	wr32(hw, I40E_VFINT_DYN_CTL01, 0);
+	wr32(hw, I40E_VFINT_ICR0_ENA1, 0);
+	/* flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+	return;
+}
+
+static void
+ixlv_enable_adminq_irq(struct i40e_hw *hw)
+{
+	wr32(hw, I40E_VFINT_DYN_CTL01,
+	    I40E_VFINT_DYN_CTL01_INTENA_MASK |
+	    I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
+	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);
+	/* flush */
+	rd32(hw, I40E_VFGEN_RSTAT);
+	return;
+}
+
+static void
+ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
+{
+	u32		reg;
+
+	reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+	    I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK; 
+	wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
+}
+
+static void
+ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
+{
+	wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
+	rd32(hw, I40E_VFGEN_RSTAT);
+	return;
+}
+
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+static void
+ixlv_set_queue_rx_itr(struct ixl_queue *que)
+{
+	struct ixl_vsi	*vsi = que->vsi;
+	struct i40e_hw	*hw = vsi->hw;
+	struct rx_ring	*rxr = &que->rxr;
+	u16		rx_itr;
+	u16		rx_latency = 0;
+	int		rx_bytes;
+
+
+	/* Idle, do nothing */
+	if (rxr->bytes == 0)
+		return;
+
+	if (ixlv_dynamic_rx_itr) {
+		rx_bytes = rxr->bytes/rxr->itr;
+		rx_itr = rxr->itr;
+
+		/* Adjust latency range */
+		switch (rxr->latency) {
+		case IXL_LOW_LATENCY:
+			if (rx_bytes > 10) {
+				rx_latency = IXL_AVE_LATENCY;
+				rx_itr = IXL_ITR_20K;
+			}
+			break;
+		case IXL_AVE_LATENCY:
+			if (rx_bytes > 20) {
+				rx_latency = IXL_BULK_LATENCY;
+				rx_itr = IXL_ITR_8K;
+			} else if (rx_bytes <= 10) {
+				rx_latency = IXL_LOW_LATENCY;
+				rx_itr = IXL_ITR_100K;
+			}
+			break;
+		case IXL_BULK_LATENCY:
+			if (rx_bytes <= 20) {
+				rx_latency = IXL_AVE_LATENCY;
+				rx_itr = IXL_ITR_20K;
+			}
+			break;
+		}
+
+		rxr->latency = rx_latency;
+
+		if (rx_itr != rxr->itr) {
+			/* do an exponential smoothing */
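+			/*
+			** e.g. with a current ITR of 100 and a measured
+			** target of 20, (10*20*100)/((9*20)+100) =
+			** 20000/280 ~= 71, stepping toward the target
+			** rather than jumping to it.
+			*/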
+			rx_itr = (10 * rx_itr * rxr->itr) /
+			    ((9 * rx_itr) + rxr->itr);
+			rxr->itr = rx_itr & IXL_MAX_ITR;
+			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
+			    que->me), rxr->itr);
+		}
+	} else { /* We may have toggled to non-dynamic */
+		if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+			vsi->rx_itr_setting = ixlv_rx_itr;
+		/* Update the hardware if needed */
+		if (rxr->itr != vsi->rx_itr_setting) {
+			rxr->itr = vsi->rx_itr_setting;
+			wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR,
+			    que->me), rxr->itr);
+		}
+	}
+	rxr->bytes = 0;
+	rxr->packets = 0;
+	return;
+}
+
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+static void
+ixlv_set_queue_tx_itr(struct ixl_queue *que)
+{
+	struct ixl_vsi	*vsi = que->vsi;
+	struct i40e_hw	*hw = vsi->hw;
+	struct tx_ring	*txr = &que->txr;
+	u16		tx_itr;
+	u16		tx_latency = 0;
+	int		tx_bytes;
+
+
+	/* Idle, do nothing */
+	if (txr->bytes == 0)
+		return;
+
+	if (ixlv_dynamic_tx_itr) {
+		tx_bytes = txr->bytes/txr->itr;
+		tx_itr = txr->itr;
+
+		switch (txr->latency) {
+		case IXL_LOW_LATENCY:
+			if (tx_bytes > 10) {
+				tx_latency = IXL_AVE_LATENCY;
+				tx_itr = IXL_ITR_20K;
+			}
+			break;
+		case IXL_AVE_LATENCY:
+			if (tx_bytes > 20) {
+				tx_latency = IXL_BULK_LATENCY;
+				tx_itr = IXL_ITR_8K;
+			} else if (tx_bytes <= 10) {
+				tx_latency = IXL_LOW_LATENCY;
+				tx_itr = IXL_ITR_100K;
+			}
+			break;
+		case IXL_BULK_LATENCY:
+			if (tx_bytes <= 20) {
+				tx_latency = IXL_AVE_LATENCY;
+				tx_itr = IXL_ITR_20K;
+			}
+			break;
+		}
+
+		txr->latency = tx_latency;
+
+		if (tx_itr != txr->itr) {
+			/* do an exponential smoothing */
+			tx_itr = (10 * tx_itr * txr->itr) /
+			    ((9 * tx_itr) + txr->itr);
+			txr->itr = tx_itr & IXL_MAX_ITR;
+			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
+			    que->me), txr->itr);
+		}
+
+	} else { /* We may have toggled to non-dynamic */
+		if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+			vsi->tx_itr_setting = ixlv_tx_itr;
+		/* Update the hardware if needed */
+		if (txr->itr != vsi->tx_itr_setting) {
+			txr->itr = vsi->tx_itr_setting;
+			wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR,
+			    que->me), txr->itr);
+		}
+	}
+	txr->bytes = 0;
+	txr->packets = 0;
+	return;
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+static void
+ixlv_handle_que(void *context, int pending)
+{
+	struct ixl_queue *que = context;
+	struct ixl_vsi *vsi = que->vsi;
+	struct i40e_hw  *hw = vsi->hw;
+	struct tx_ring  *txr = &que->txr;
+	struct ifnet    *ifp = vsi->ifp;
+	bool		more;
+
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+		more = ixl_rxeof(que, IXL_RX_LIMIT);
+		mtx_lock(&txr->mtx);
+		ixl_txeof(que);
+		if (!drbr_empty(ifp, txr->br))
+			ixl_mq_start_locked(ifp, txr);
+		mtx_unlock(&txr->mtx);
+		if (more) {
+			taskqueue_enqueue(que->tq, &que->task);
+			return;
+		}
+	}
+
+	/* Reenable this interrupt - hmmm */
+	ixlv_enable_queue_irq(hw, que->me);
+	return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX Queue Interrupt Service routine
+ *
+ **********************************************************************/
+static void
+ixlv_msix_que(void *arg)
+{
+	struct ixl_queue	*que = arg;
+	struct ixl_vsi	*vsi = que->vsi;
+	struct i40e_hw	*hw = vsi->hw;
+	struct tx_ring	*txr = &que->txr;
+	bool		more_tx, more_rx;
+
+	/* Spurious interrupts are ignored */
+	if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
+		return;
+
+	++que->irqs;
+
+	more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+	mtx_lock(&txr->mtx);
+	more_tx = ixl_txeof(que);
+	/*
+	** Make certain that if the stack 
+	** has anything queued the task gets
+	** scheduled to handle it.
+	*/
+	if (!drbr_empty(vsi->ifp, txr->br))
+		more_tx = 1;
+	mtx_unlock(&txr->mtx);
+
+	ixlv_set_queue_rx_itr(que);
+	ixlv_set_queue_tx_itr(que);
+
+	if (more_tx || more_rx)
+		taskqueue_enqueue(que->tq, &que->task);
+	else
+		ixlv_enable_queue_irq(hw, que->me);
+
+	return;
+}
+
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+	struct ixl_vsi		*vsi = ifp->if_softc;
+	struct ixlv_sc	*sc = vsi->back;
+
+	INIT_DBG_IF(ifp, "begin");
+
+	mtx_lock(&sc->mtx);
+
+	ixlv_update_link_status(sc);
+
+	ifmr->ifm_status = IFM_AVALID;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (!sc->link_up) {
+		mtx_unlock(&sc->mtx);
+		INIT_DBG_IF(ifp, "end: link not up");
+		return;
+	}
+
+	ifmr->ifm_status |= IFM_ACTIVE;
+	/* Hardware is always full-duplex */
+	ifmr->ifm_active |= IFM_FDX;
+	mtx_unlock(&sc->mtx);
+	INIT_DBG_IF(ifp, "end");
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediaopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixlv_media_change(struct ifnet * ifp)
+{
+	struct ixl_vsi *vsi = ifp->if_softc;
+	struct ifmedia *ifm = &vsi->media;
+
+	INIT_DBG_IF(ifp, "begin");
+
+	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+		return (EINVAL);
+
+	INIT_DBG_IF(ifp, "end");
+	return (0);
+}
+
+
+/*********************************************************************
+ *  Multicast Initialization
+ *
 *  This routine is called by init to establish a fresh multicast state.
+ *
+ **********************************************************************/
+
+static void
+ixlv_init_multi(struct ixl_vsi *vsi)
+{
+	struct ixlv_mac_filter *f;
+	struct ixlv_sc	*sc = vsi->back;
+	int			mcnt = 0;
+
+	IOCTL_DBG_IF(vsi->ifp, "begin");
+
+	/* First clear any multicast filters */
+	SLIST_FOREACH(f, sc->mac_filters, next) {
+		if ((f->flags & IXL_FILTER_USED)
+		    && (f->flags & IXL_FILTER_MC)) {
+			f->flags |= IXL_FILTER_DEL;
+			mcnt++;
+		}
+	}
+	if (mcnt > 0)
+		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
+		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
+		    sc);
+
+	IOCTL_DBG_IF(vsi->ifp, "end");
+}
+
+static void
+ixlv_add_multi(struct ixl_vsi *vsi)
+{
+	struct ifmultiaddr	*ifma;
+	struct ifnet		*ifp = vsi->ifp;
+	struct ixlv_sc	*sc = vsi->back;
+	int			mcnt = 0;
+
+	IOCTL_DBG_IF(ifp, "begin");
+
+	if_maddr_rlock(ifp);
+	/*
+	** Get a count, to decide if we
+	** simply use multicast promiscuous.
+	*/
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		mcnt++;
+	}
+	if_maddr_runlock(ifp);
+
+	// TODO: Remove -- cannot set promiscuous mode in a VF
+	if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
+		/* delete all multicast filters */
+		ixlv_init_multi(vsi);
+		sc->promiscuous_flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
+		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
+		    IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete,
+		    sc);
+		IOCTL_DEBUGOUT("%s: end: too many filters", __func__);
+		return;
+	}
+
+	mcnt = 0;
+	if_maddr_rlock(ifp);
+	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+		if (ifma->ifma_addr->sa_family != AF_LINK)
+			continue;
+		if (!ixlv_add_mac_filter(sc,
+		    (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+		    IXL_FILTER_MC))
+			mcnt++;
+	}
+	if_maddr_runlock(ifp);
+	/*
+	** Notify AQ task that sw filters need to be
+	** added to hw list
+	*/
+	if (mcnt > 0)
+		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd,
+		    IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete,
+		    sc);
+
+	IOCTL_DBG_IF(ifp, "end");
+}
+
+static void
+ixlv_del_multi(struct ixl_vsi *vsi)
+{
+	struct ixlv_mac_filter *f;
+	struct ifmultiaddr	*ifma;
+	struct ifnet		*ifp = vsi->ifp;
+	struct ixlv_sc	*sc = vsi->back;
+	int			mcnt = 0;
+	bool		match = FALSE;
+
+	IOCTL_DBG_IF(ifp, "begin");
+
+	/* Search for removed multicast addresses */
+	if_maddr_rlock(ifp);
+	SLIST_FOREACH(f, sc->mac_filters, next) {
+		if ((f->flags & IXL_FILTER_USED)
+		    && (f->flags & IXL_FILTER_MC)) {
+			/* check if mac address in filter is in sc's list */
+			match = FALSE;
+			TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+				if (ifma->ifma_addr->sa_family != AF_LINK)
+					continue;
+				u8 *mc_addr =
+				    (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+				if (cmp_etheraddr(f->macaddr, mc_addr)) {
+					match = TRUE;
+					break;
+				}
+			}
+			/* if this filter is not in the sc's list, remove it */
+			if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) {
+				f->flags |= IXL_FILTER_DEL;
+				mcnt++;
+				IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT,
+				    MAC_FORMAT_ARGS(f->macaddr));
+			}
+			else if (match == FALSE)
+				IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT,
+				    MAC_FORMAT_ARGS(f->macaddr));
+		}
+	}
+	if_maddr_runlock(ifp);
+
+	if (mcnt > 0)
+		ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd,
+		    IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete,
+		    sc);
+
+	IOCTL_DBG_IF(ifp, "end");
+}
+
+/*********************************************************************
+ *  Timer routine
+ *
+ *  This routine checks for link status, updates statistics,
+ *  and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixlv_local_timer(void *arg)
+{
+	struct ixlv_sc	*sc = arg;
+	struct i40e_hw		*hw = &sc->hw;
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	device_t		dev = sc->dev;
+	int			hung = 0;
+	u32			mask, val;
+
+	IXLV_CORE_LOCK_ASSERT(sc);
+
+	/* If Reset is in progress just bail */
+	if (sc->init_state == IXLV_RESET_PENDING)
+		return;
+
+	/* Check for when PF triggers a VF reset */
+	val = rd32(hw, I40E_VFGEN_RSTAT) &
+	    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+
+	if (val != I40E_VFR_VFACTIVE
+	    && val != I40E_VFR_COMPLETED) {
+		DDPRINTF(dev, "reset in progress! (%d)", val);
+		return;
+	}
+
+	ixlv_request_stats(sc);
+
+	/* clean and process any events */
+	taskqueue_enqueue(sc->tq, &sc->aq_irq);
+
+	/*
+	** Check status on the queues for a hang
+	*/
+	mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
+	    I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
+
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		/* Any queues with outstanding work get a sw irq */
+		if (que->busy)
+			wr32(hw, I40E_VFINT_DYN_CTLN1(que->me), mask);
+		/*
+		** Each time txeof runs without cleaning while there
+		** are still uncleaned descriptors, it increments
+		** 'busy'; once that count reaches IXL_MAX_TX_BUSY
+		** we declare the queue hung.
+		*/
+		if (que->busy == IXL_QUEUE_HUNG) {
+			++hung;
+			/* Mark the queue as inactive */
+			vsi->active_queues &= ~((u64)1 << que->me);
+			continue;
+		} else {
+			/* Check if we've come back from hung */
+			if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
+				vsi->active_queues |= ((u64)1 << que->me);
+		}
+		if (que->busy >= IXL_MAX_TX_BUSY) {
+			device_printf(dev,"Warning queue %d "
+			    "appears to be hung!\n", i);
+			que->busy = IXL_QUEUE_HUNG;
+			++hung;
+		}
+	}
+	/* Only reset when all queues show hung */
+	if (hung == vsi->num_queues)
+		goto hung;
+	callout_reset(&sc->timer, hz, ixlv_local_timer, sc);
+	return;
+
+hung:
+	device_printf(dev, "Local Timer: TX HANG DETECTED - Resetting!!\n");
+	sc->init_state = IXLV_RESET_REQUIRED;
+	ixlv_init_locked(sc);
+}
+
+/*
+** Note: this routine updates the OS on the link state
+**	the real check of the hardware only happens with
+**	a link interrupt.
+*/
+void
+ixlv_update_link_status(struct ixlv_sc *sc)
+{
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ifnet		*ifp = vsi->ifp;
+
+	if (sc->link_up) {
+		if (vsi->link_active == FALSE) {
+			if (bootverbose)
+				if_printf(ifp, "Link is Up, %d Gbps\n",
+				    (sc->link_speed == I40E_LINK_SPEED_40GB) ? 40 : 10);
+			vsi->link_active = TRUE;
+			if_link_state_change(ifp, LINK_STATE_UP);
+		}
+	} else { /* Link down */
+		if (vsi->link_active == TRUE) {
+			if (bootverbose)
+				if_printf(ifp,"Link is Down\n");
+			if_link_state_change(ifp, LINK_STATE_DOWN);
+			vsi->link_active = FALSE;
+		}
+	}
+
+	return;
+}
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by
+ *  disabling the VF's queues and draining pending admin
+ *  queue work before stopping the local timer.
+ *
+ **********************************************************************/
+
+static void
+ixlv_stop(struct ixlv_sc *sc)
+{
+	struct ifnet *ifp;
+	int start;
+
+	ifp = sc->vsi.ifp;
+	INIT_DBG_IF(ifp, "begin");
+
+	IXLV_CORE_LOCK_ASSERT(sc);
+
+	ixl_vc_flush(&sc->vc_mgr);
+	ixlv_disable_queues(sc);
+
+	start = ticks;
+	while ((ifp->if_drv_flags & IFF_DRV_RUNNING) &&
+	    ((ticks - start) < hz/10))
+		ixlv_do_adminq_locked(sc);
+
+	/* Stop the local timer */
+	callout_stop(&sc->timer);
+
+	INIT_DBG_IF(ifp, "end");
+}
+
+
+/*********************************************************************
+ *
+ *  Free all station queue structs.
+ *
+ **********************************************************************/
+static void
+ixlv_free_queues(struct ixl_vsi *vsi)
+{
+	struct ixlv_sc	*sc = (struct ixlv_sc *)vsi->back;
+	struct ixl_queue	*que = vsi->queues;
+
+	for (int i = 0; i < vsi->num_queues; i++, que++) {
+		struct tx_ring *txr = &que->txr;
+		struct rx_ring *rxr = &que->rxr;
+	
+		if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+			continue;
+		IXL_TX_LOCK(txr);
+		ixl_free_que_tx(que);
+		if (txr->base)
+			i40e_free_dma_mem(&sc->hw, &txr->dma);
+		IXL_TX_UNLOCK(txr);
+		IXL_TX_LOCK_DESTROY(txr);
+
+		if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+			continue;
+		IXL_RX_LOCK(rxr);
+		ixl_free_que_rx(que);
+		if (rxr->base)
+			i40e_free_dma_mem(&sc->hw, &rxr->dma);
+		IXL_RX_UNLOCK(rxr);
+		IXL_RX_LOCK_DESTROY(rxr);
+		
+	}
+	free(vsi->queues, M_DEVBUF);
+}
+
+
+/*
+** ixlv_config_rss - setup RSS 
+**
+** RSS keys and table are cleared on VF reset.
+*/
+static void
+ixlv_config_rss(struct ixlv_sc *sc)
+{
+	struct i40e_hw	*hw = &sc->hw;
+	struct ixl_vsi	*vsi = &sc->vsi;
+	u32		lut = 0;
+	u64		set_hena = 0, hena;
+	int		i, j, que_id;
+#ifdef RSS
+	u32		rss_hash_config;
+	u32		rss_seed[IXL_KEYSZ];
+#else
+	u32		rss_seed[IXL_KEYSZ] = {0x41b01687,
+			    0x183cfd8c, 0xce880440, 0x580cbc3c,
+			    0x35897377, 0x328b25e1, 0x4fa98922,
+			    0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
+#endif
+        
+	/* Don't set up RSS if using a single queue */
+	if (vsi->num_queues == 1) {
+		wr32(hw, I40E_VFQF_HENA(0), 0);
+		wr32(hw, I40E_VFQF_HENA(1), 0);
+		ixl_flush(hw);
+		return;
+	}
+
+#ifdef RSS
+	/* Fetch the configured RSS key */
+	rss_getkey((uint8_t *) &rss_seed);
+#endif
+	/* Fill out hash function seed */
+	for (i = 0; i < IXL_KEYSZ; i++)
+		wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
+
+	/* Enable PCTYPES for RSS: */
+#ifdef RSS
+	rss_hash_config = rss_gethashconfig();
+	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+	if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+		set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
+	set_hena =
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
+		((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+		((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
+		((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+#endif
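+	/*
+	** The 64-bit hash-enable mask is split across the two
+	** 32-bit VFQF_HENA registers; read back what the PF
+	** left there and OR in the new bits.
+	*/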
+	hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+	    ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+	hena |= set_hena;
+	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+
+	/* Populate the LUT with max no. of queues in round robin fashion */
+	for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+		if (j == vsi->num_queues)
+			j = 0;
+#ifdef RSS
+		/*
+		 * Fetch the RSS bucket id for the given indirection entry.
+		 * Cap it at the number of configured buckets (which is
+		 * num_queues.)
+		 */
+		que_id = rss_get_indirection_to_bucket(i);
+		que_id = que_id % vsi->num_queues;
+#else
+		que_id = j;
+#endif
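+		/*
+		** Worked example (hypothetical 4-queue config): the
+		** first four iterations pack entries 0..3 into lut as
+		** 0x00010203, which is then written out in one shot.
+		*/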
+		/* lut = 4-byte sliding window of 4 lut entries */
+		lut = (lut << 8) | (que_id & 0xF);
+		/* On i = 3, we have 4 entries in lut; write to the register */
+		if ((i & 3) == 3) {
+			wr32(hw, I40E_VFQF_HLUT(i), lut);
+			DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
+		}
+	}
+	ixl_flush(hw);
+}
+
+
+/*
+** This routine refreshes vlan filters, called by init;
+** it scans the filter table and then updates the AQ.
+*/
+static void
+ixlv_setup_vlan_filters(struct ixlv_sc *sc)
+{
+	struct ixl_vsi			*vsi = &sc->vsi;
+	struct ixlv_vlan_filter	*f;
+	int				cnt = 0;
+
+	if (vsi->num_vlans == 0)
+		return;
+	/*
+	** Scan the filter table for vlan entries,
+	** and if found call for the AQ update.
+	*/
+	SLIST_FOREACH(f, sc->vlan_filters, next)
+                if (f->flags & IXL_FILTER_ADD)
+			cnt++;
+	if (cnt > 0)
+		ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd,
+		    IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc);
+}
+
+
+/*
+** This routine adds new MAC filters to the sc's list;
+** these are later added in hardware by sending a virtual
+** channel message.
+*/
+static int
+ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags)
+{
+	struct ixlv_mac_filter	*f;
+
+	/* Does one already exist? */
+	f = ixlv_find_mac_filter(sc, macaddr);
+	if (f != NULL) {
+		IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT,
+		    MAC_FORMAT_ARGS(macaddr));
+		return (EEXIST);
+	}
+
+	/* If not, get a new empty filter */
+	f = ixlv_get_mac_filter(sc);
+	if (f == NULL) {
+		if_printf(sc->vsi.ifp, "%s: no filters available!!\n",
+		    __func__);
+		return (ENOMEM);
+	}
+
+	IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT,
+	    MAC_FORMAT_ARGS(macaddr));
+
+	bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+	f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+	f->flags |= flags;
+	return (0);
+}
+
+/*
+** Marks a MAC filter for deletion.
+*/
+static int
+ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr)
+{
+	struct ixlv_mac_filter	*f;
+
+	f = ixlv_find_mac_filter(sc, macaddr);
+	if (f == NULL)
+		return (ENOENT);
+
+	f->flags |= IXL_FILTER_DEL;
+	return (0);
+}
+
+/*
+** Tasklet handler for MSIX Adminq interrupts
+**  - done outside interrupt context since it might sleep
+*/
+static void
+ixlv_do_adminq(void *context, int pending)
+{
+	struct ixlv_sc		*sc = context;
+
+	mtx_lock(&sc->mtx);
+	ixlv_do_adminq_locked(sc);
+	mtx_unlock(&sc->mtx);
+	return;
+}
+
+static void
+ixlv_do_adminq_locked(struct ixlv_sc *sc)
+{
+	struct i40e_hw			*hw = &sc->hw;
+	struct i40e_arq_event_info	event;
+	struct i40e_virtchnl_msg	*v_msg;
+	device_t			dev = sc->dev;
+	u16				result = 0;
+	u32				reg, oldreg;
+	i40e_status			ret;
+
+	IXLV_CORE_LOCK_ASSERT(sc);
+
+	event.buf_len = IXL_AQ_BUF_SZ;
+	event.msg_buf = sc->aq_buffer;
+	v_msg = (struct i40e_virtchnl_msg *)&event.desc;
+
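+	/*
+	** Drain the admin receive queue: each cleaned element
+	** carries a virtchnl message from the PF, and 'result'
+	** reports how many events are still pending, so loop
+	** until it drops to zero.
+	*/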
+	do {
+		ret = i40e_clean_arq_element(hw, &event, &result);
+		if (ret)
+			break;
+		ixlv_vc_completion(sc, v_msg->v_opcode,
+		    v_msg->v_retval, event.msg_buf, event.msg_len);
+		if (result != 0)
+			bzero(event.msg_buf, IXL_AQ_BUF_SZ);
+	} while (result);
+
+	/* check for Admin queue errors */
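+	/*
+	** Error bits are latched in the ARQ/ASQ length
+	** registers; clear any that are set by writing the
+	** register back with those bits masked off.
+	*/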
+	oldreg = reg = rd32(hw, hw->aq.arq.len);
+	if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) {
+		device_printf(dev, "ARQ VF Error detected\n");
+		reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
+	}
+	if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
+		device_printf(dev, "ARQ Overflow Error detected\n");
+		reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
+	}
+	if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
+		device_printf(dev, "ARQ Critical Error detected\n");
+		reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
+	}
+	if (oldreg != reg)
+		wr32(hw, hw->aq.arq.len, reg);
+
+	oldreg = reg = rd32(hw, hw->aq.asq.len);
+	if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) {
+		device_printf(dev, "ASQ VF Error detected\n");
+		reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
+	}
+	if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
+		device_printf(dev, "ASQ Overflow Error detected\n");
+		reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
+	}
+	if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
+		device_printf(dev, "ASQ Critical Error detected\n");
+		reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
+	}
+	if (oldreg != reg)
+		wr32(hw, hw->aq.asq.len, reg);
+
+	ixlv_enable_adminq_irq(hw);
+}
+
+static void
+ixlv_add_sysctls(struct ixlv_sc *sc)
+{
+	device_t dev = sc->dev;
+	struct ixl_vsi *vsi = &sc->vsi;
+	struct i40e_eth_stats *es = &vsi->eth_stats;
+
+	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+
+	struct sysctl_oid *vsi_node, *queue_node;
+	struct sysctl_oid_list *vsi_list, *queue_list;
+
+#define QUEUE_NAME_LEN 32
+	char queue_namebuf[QUEUE_NAME_LEN];
+
+	struct ixl_queue *queues = vsi->queues;
+	struct tx_ring *txr;
+	struct rx_ring *rxr;
+
+	/* Driver statistics sysctls */
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+			CTLFLAG_RD, &sc->watchdog_events,
+			"Watchdog timeouts");
+	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+			CTLFLAG_RD, &sc->admin_irq,
+			"Admin Queue IRQ Handled");
+
+	/* VSI statistics sysctls */
+	vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi",
+				   CTLFLAG_RD, NULL, "VSI-specific statistics");
+	vsi_list = SYSCTL_CHILDREN(vsi_node);
+
+	struct ixl_sysctl_info ctls[] =
+	{
+		{&es->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+		{&es->rx_unicast, "ucast_pkts_rcvd",
+			"Unicast Packets Received"},
+		{&es->rx_multicast, "mcast_pkts_rcvd",
+			"Multicast Packets Received"},
+		{&es->rx_broadcast, "bcast_pkts_rcvd",
+			"Broadcast Packets Received"},
+		{&es->rx_discards, "rx_discards", "Discarded RX packets"},
+		{&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"},
+		{&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+		{&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+		{&es->tx_multicast, "mcast_pkts_txd",
+			"Multicast Packets Transmitted"},
+		{&es->tx_broadcast, "bcast_pkts_txd",
+			"Broadcast Packets Transmitted"},
+		{&es->tx_errors, "tx_errors", "TX packet errors"},
+		// end
+		{0,0,0}
+	};
+	struct ixl_sysctl_info *entry = ctls;
+	while (entry->stat != 0)
+	{
+		SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name,
+				CTLFLAG_RD, entry->stat,
+				entry->description);
+		entry++;
+	}
+
+	/* Queue sysctls */
+	for (int q = 0; q < vsi->num_queues; q++) {
+		snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
+		queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf,
+					     CTLFLAG_RD, NULL, "Queue Name");
+		queue_list = SYSCTL_CHILDREN(queue_node);
+
+		txr = &(queues[q].txr);
+		rxr = &(queues[q].rxr);
+
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+				CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+				"m_defrag() failed");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped",
+				CTLFLAG_RD, &(queues[q].dropped_pkts),
+				"Driver dropped packets");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs",
+				CTLFLAG_RD, &(queues[q].irqs),
+				"irqs on this queue");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+				CTLFLAG_RD, &(queues[q].tso),
+				"TSO");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
+				CTLFLAG_RD, &(queues[q].tx_dma_setup),
+				"Driver tx dma failure in xmit");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+				CTLFLAG_RD, &(txr->no_desc),
+				"Queue No Descriptor Available");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+				CTLFLAG_RD, &(txr->total_packets),
+				"Queue Packets Transmitted");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+				CTLFLAG_RD, &(txr->tx_bytes),
+				"Queue Bytes Transmitted");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+				CTLFLAG_RD, &(rxr->rx_packets),
+				"Queue Packets Received");
+		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+				CTLFLAG_RD, &(rxr->rx_bytes),
+				"Queue Bytes Received");
+
+		/* Examine queue state */
+		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
+				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+				sizeof(struct ixl_queue),
+				ixlv_sysctl_qtx_tail_handler, "IU",
+				"Queue Transmit Descriptor Tail");
+		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+				CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+				sizeof(struct ixl_queue),
+				ixlv_sysctl_qrx_tail_handler, "IU",
+				"Queue Receive Descriptor Tail");
+	}
+}
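+
+/*
+ * Illustrative note (not part of the driver logic): with the tree built
+ * above, these counters are readable from userland via sysctl(8). Assuming
+ * device unit 0, the paths would look roughly like:
+ *
+ *	sysctl dev.ixlv.0.admin_irq
+ *	sysctl dev.ixlv.0.vsi.ucast_pkts_rcvd
+ *	sysctl dev.ixlv.0.vsi.que0.tx_packets
+ */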
+
+static void
+ixlv_init_filters(struct ixlv_sc *sc)
+{
+	sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	SLIST_INIT(sc->mac_filters);
+	sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	SLIST_INIT(sc->vlan_filters);
+	return;
+}
+
+static void
+ixlv_free_filters(struct ixlv_sc *sc)
+{
+	struct ixlv_mac_filter *f;
+	struct ixlv_vlan_filter *v;
+
+	while (!SLIST_EMPTY(sc->mac_filters)) {
+		f = SLIST_FIRST(sc->mac_filters);
+		SLIST_REMOVE_HEAD(sc->mac_filters, next);
+		free(f, M_DEVBUF);
+	}
+	while (!SLIST_EMPTY(sc->vlan_filters)) {
+		v = SLIST_FIRST(sc->vlan_filters);
+		SLIST_REMOVE_HEAD(sc->vlan_filters, next);
+		free(v, M_DEVBUF);
+	}
+	return;
+}
+
+/**
+ * ixlv_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL1 value from hardware
+ * for a sysctl.
+ */
+static int 
+ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_queue *que;
+	int error;
+	u32 val;
+
+	que = ((struct ixl_queue *)oidp->oid_arg1);
+	if (!que)
+		return (0);
+
+	val = rd32(que->vsi->hw, que->txr.tail);
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return error;
+	return (0);
+}
+
+/**
+ * ixlv_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL1 value from hardware
+ * for a sysctl.
+ */
+static int 
+ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct ixl_queue *que;
+	int error;
+	u32 val;
+
+	que = ((struct ixl_queue *)oidp->oid_arg1);
+	if (!que)
+		return (0);
+
+	val = rd32(que->vsi->hw, que->rxr.tail);
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return error;
+	return (0);
+}
+


Property changes on: trunk/sys/dev/ixl/if_ixlv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/ixl.h
===================================================================
--- trunk/sys/dev/ixl/ixl.h	                        (rev 0)
+++ trunk/sys/dev/ixl/ixl.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,655 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/ixl.h 292097 2015-12-11 12:47:49Z smh $*/
+
+
+#ifndef _IXL_H_
+#define _IXL_H_
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf_ring.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+#include <sys/eventhandler.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_arp.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+#include <netinet/sctp.h>
+
+#include <machine/in_cksum.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+
+#ifdef PCI_IOV
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#endif
+
+#include "i40e_type.h"
+#include "i40e_prototype.h"
+
+#if defined(IXL_DEBUG) || defined(IXL_DEBUG_SYSCTL)
+#include <sys/sbuf.h>
+
+#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_FORMAT_ARGS(mac_addr) \
+	(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
+	(mac_addr)[4], (mac_addr)[5]
+#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
+#endif /* IXL_DEBUG || IXL_DEBUG_SYSCTL */
+
+#ifdef IXL_DEBUG
+/* Enable debug sysctls */
+#ifndef IXL_DEBUG_SYSCTL
+#define IXL_DEBUG_SYSCTL 1
+#endif
+
+#define _DBG_PRINTF(S, ...)		printf("%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _DEV_DBG_PRINTF(dev, S, ...)	device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
+#define _IF_DBG_PRINTF(ifp, S, ...)	if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__)
+
+/* Defines for printing generic debug information */
+#define DPRINTF(...)			_DBG_PRINTF(__VA_ARGS__)
+#define DDPRINTF(...)			_DEV_DBG_PRINTF(__VA_ARGS__)
+#define IDPRINTF(...)			_IF_DBG_PRINTF(__VA_ARGS__)
+
+/* Defines for printing specific debug information */
+#define DEBUG_INIT  1
+#define DEBUG_IOCTL 1
+#define DEBUG_HW    1
+
+#define INIT_DEBUGOUT(...)		if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_DEV(...)		if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__)
+#define INIT_DBG_IF(...)		if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define IOCTL_DEBUGOUT(...)		if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__)
+#define IOCTL_DBG_IF2(ifp, S, ...)	if (DEBUG_IOCTL) \
+					    if_printf(ifp, S "\n", ##__VA_ARGS__)
+#define IOCTL_DBG_IF(...)		if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__)
+
+#define HW_DEBUGOUT(...)		if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__)
+
+#else /* no IXL_DEBUG */
+#define DEBUG_INIT  0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW    0
+
+#define DPRINTF(...)
+#define DDPRINTF(...)
+#define IDPRINTF(...)
+
+#define INIT_DEBUGOUT(...)
+#define INIT_DBG_DEV(...)
+#define INIT_DBG_IF(...)
+#define IOCTL_DEBUGOUT(...)
+#define IOCTL_DBG_IF2(...)
+#define IOCTL_DBG_IF(...)
+#define HW_DEBUGOUT(...)
+#endif /* IXL_DEBUG */
+
+/* Tunables */
+
+/*
+ * Ring Descriptors: valid range 32-4096, default 1024.
+ * This value is the number of tx/rx descriptors allocated by the driver.
+ * Increasing it allows the driver to queue more operations. Each
+ * descriptor is 16 or 32 bytes (configurable in FVL).
+ */
+#define DEFAULT_RING	1024
+#define PERFORM_RING	2048
+#define MAX_RING	4096
+#define MIN_RING	32
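+
+/*
+ * Illustrative arithmetic: at the default of 1024 descriptors, a ring of
+ * 16-byte descriptors occupies 16 KB of DMA memory per queue and a ring of
+ * 32-byte descriptors occupies 32 KB; at MAX_RING (4096) those figures
+ * grow to 64 KB and 128 KB respectively.
+ */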
+
+/*
+** Default number of entries in Tx queue buf_ring.
+*/
+#define SMALL_TXBRSZ 4096
+/* This may require mbuf cluster tuning */
+#define DEFAULT_TXBRSZ (SMALL_TXBRSZ * SMALL_TXBRSZ)
+
+/* Alignment for rings */
+#define DBA_ALIGN	128
+
+/*
+ * This parameter controls the maximum number of times the driver will
+ * loop in the ISR. Minimum value: 1.
+ */
+#define MAX_LOOP	10
+
+/*
+ * This is the max watchdog interval, i.e. the time that may pass
+ * between any two TX clean operations; such cleaning only happens
+ * when the TX hardware is functioning.
+ */
+#define IXL_WATCHDOG                   (10 * hz)
+
+/*
+ * These parameters control when the driver calls the routine to reclaim
+ * transmit descriptors.
+ */
+#define IXL_TX_CLEANUP_THRESHOLD	(que->num_desc / 8)
+#define IXL_TX_OP_THRESHOLD		(que->num_desc / 32)
+
+/* Flow control constants */
+#define IXL_FC_PAUSE		0xFFFF
+#define IXL_FC_HI		0x20000
+#define IXL_FC_LO		0x10000
+
+#define MAX_MULTICAST_ADDR	128
+
+#define IXL_BAR			3
+#define IXL_ADM_LIMIT		2
+#define IXL_TSO_SIZE		65535
+#define IXL_TX_BUF_SZ		((u32) 1514)
+#define IXL_AQ_BUF_SZ		((u32) 4096)
+#define IXL_RX_HDR		128
+/* Controls the length of the Admin Queue */
+#define IXL_AQ_LEN		256
+#define IXL_AQ_LEN_MAX		1024
+#define IXL_AQ_BUFSZ		4096
+#define IXL_RX_LIMIT		512
+#define IXL_RX_ITR		0
+#define IXL_TX_ITR		1
+#define IXL_ITR_NONE		3
+#define IXL_QUEUE_EOL		0x7FF
+#define IXL_MAX_FRAME		0x2600
+#define IXL_MAX_TX_SEGS		8
+#define IXL_MAX_TSO_SEGS	66 
+#define IXL_SPARSE_CHAIN	6
+#define IXL_QUEUE_HUNG		0x80000000
+#define IXL_KEYSZ		10
+
+#define IXL_VF_MAX_BUFFER	0x3F80
+#define IXL_VF_MAX_HDR_BUFFER	0x840
+#define IXL_VF_MAX_FRAME	0x3FFF
+
+/* ERJ: hardware can support ~1.5k filters between all functions */
+#define IXL_MAX_FILTERS	256
+#define IXL_MAX_TX_BUSY	10
+
+#define IXL_NVM_VERSION_LO_SHIFT	0
+#define IXL_NVM_VERSION_LO_MASK		(0xff << IXL_NVM_VERSION_LO_SHIFT)
+#define IXL_NVM_VERSION_HI_SHIFT	12
+#define IXL_NVM_VERSION_HI_MASK		(0xf << IXL_NVM_VERSION_HI_SHIFT)
+
+
+/*
+ * Interrupt Moderation parameters 
+ */
+#define IXL_MAX_ITR		0x07FF
+#define IXL_ITR_100K		0x0005
+#define IXL_ITR_20K		0x0019
+#define IXL_ITR_8K		0x003E
+#define IXL_ITR_4K		0x007A
+#define IXL_ITR_DYNAMIC		0x8000
+#define IXL_LOW_LATENCY		0
+#define IXL_AVE_LATENCY		1
+#define IXL_BULK_LATENCY	2
+
+/* MacVlan Flags */
+#define IXL_FILTER_USED		(u16)(1 << 0)
+#define IXL_FILTER_VLAN		(u16)(1 << 1)
+#define IXL_FILTER_ADD		(u16)(1 << 2)
+#define IXL_FILTER_DEL		(u16)(1 << 3)
+#define IXL_FILTER_MC		(u16)(1 << 4)
+
+/* used in the vlan field of the filter when not a vlan */
+#define IXL_VLAN_ANY		-1
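+
+/*
+ * Illustrative sketch (hypothetical values): a unicast MAC filter queued
+ * for addition, with no VLAN qualification, would be flagged roughly as:
+ *
+ *	f->flags = IXL_FILTER_USED | IXL_FILTER_ADD;
+ *	f->vlan = IXL_VLAN_ANY;
+ */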
+
+#define CSUM_OFFLOAD_IPV4	(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#define CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
+#define CSUM_OFFLOAD		(CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
+
+/* Misc flags for ixl_vsi.flags */
+#define IXL_FLAGS_KEEP_TSO4	(1 << 0)
+#define IXL_FLAGS_KEEP_TSO6	(1 << 1)
+
+#define IXL_VF_RESET_TIMEOUT	100
+
+#define IXL_VSI_DATA_PORT	0x01
+
+#define IXLV_MAX_QUEUES		16
+#define IXL_MAX_VSI_QUEUES	(2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1))
+
+#define IXL_RX_CTX_BASE_UNITS	128
+#define IXL_TX_CTX_BASE_UNITS	128
+
+#define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \
+	I40E_VPINT_LNKLSTN(((vector) - 1) + \
+	    (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
+
+#define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \
+	I40E_VFINT_DYN_CTLN(((vector) - 1) + \
+	    (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
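+
+/*
+ * Illustrative arithmetic for the two macros above: assuming
+ * num_msix_vectors_vf = 5, queue vector 1 of VF 2 resolves to register
+ * index ((1 - 1) + (5 - 1) * 2) = 8.
+ */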
+
+#define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS	0xAA
+
+#define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK	0x20
+
+#define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf)	((glb_vf) / 32)
+#define IXL_GLGEN_VFLRSTAT_MASK(glb_vf)	(1 << ((glb_vf) % 32))
+
+#define IXL_MAX_ITR_IDX		3
+
+#define IXL_END_OF_INTR_LNKLST	0x7FF
+
+#define IXL_TX_LOCK(_sc)                mtx_lock(&(_sc)->mtx)
+#define IXL_TX_UNLOCK(_sc)              mtx_unlock(&(_sc)->mtx)
+#define IXL_TX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->mtx)
+#define IXL_TX_TRYLOCK(_sc)             mtx_trylock(&(_sc)->mtx)
+#define IXL_TX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->mtx, MA_OWNED)
+
+#define IXL_RX_LOCK(_sc)                mtx_lock(&(_sc)->mtx)
+#define IXL_RX_UNLOCK(_sc)              mtx_unlock(&(_sc)->mtx)
+#define IXL_RX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->mtx)
+
+#if __FreeBSD_version >= 1100036
+#define IXL_SET_IPACKETS(vsi, count)	(vsi)->ipackets = (count)
+#define IXL_SET_IERRORS(vsi, count)	(vsi)->ierrors = (count)
+#define IXL_SET_OPACKETS(vsi, count)	(vsi)->opackets = (count)
+#define IXL_SET_OERRORS(vsi, count)	(vsi)->oerrors = (count)
+#define IXL_SET_COLLISIONS(vsi, count)	/* Do nothing; collisions is always 0. */
+#define IXL_SET_IBYTES(vsi, count)	(vsi)->ibytes = (count)
+#define IXL_SET_OBYTES(vsi, count)	(vsi)->obytes = (count)
+#define IXL_SET_IMCASTS(vsi, count)	(vsi)->imcasts = (count)
+#define IXL_SET_OMCASTS(vsi, count)	(vsi)->omcasts = (count)
+#define IXL_SET_IQDROPS(vsi, count)	(vsi)->iqdrops = (count)
+#define IXL_SET_OQDROPS(vsi, count)	(vsi)->oqdrops = (count)
+#define IXL_SET_NOPROTO(vsi, count)	(vsi)->noproto = (count)
+#else
+#define IXL_SET_IPACKETS(vsi, count)	(vsi)->ifp->if_ipackets = (count)
+#define IXL_SET_IERRORS(vsi, count)	(vsi)->ifp->if_ierrors = (count)
+#define IXL_SET_OPACKETS(vsi, count)	(vsi)->ifp->if_opackets = (count)
+#define IXL_SET_OERRORS(vsi, count)	(vsi)->ifp->if_oerrors = (count)
+#define IXL_SET_COLLISIONS(vsi, count)	(vsi)->ifp->if_collisions = (count)
+#define IXL_SET_IBYTES(vsi, count)	(vsi)->ifp->if_ibytes = (count)
+#define IXL_SET_OBYTES(vsi, count)	(vsi)->ifp->if_obytes = (count)
+#define IXL_SET_IMCASTS(vsi, count)	(vsi)->ifp->if_imcasts = (count)
+#define IXL_SET_OMCASTS(vsi, count)	(vsi)->ifp->if_omcasts = (count)
+#define IXL_SET_IQDROPS(vsi, count)	(vsi)->ifp->if_iqdrops = (count)
+#define IXL_SET_OQDROPS(vsi, odrops)	(vsi)->ifp->if_snd.ifq_drops = (odrops)
+#define IXL_SET_NOPROTO(vsi, count)	(vsi)->noproto = (count)
+#endif
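+
+/*
+ * Illustrative usage (a sketch, not driver code): a stats-update routine
+ * would feed these macros from the VSI's hardware counters, e.g.
+ *
+ *	IXL_SET_IPACKETS(vsi, es->rx_unicast + es->rx_multicast +
+ *	    es->rx_broadcast);
+ *	IXL_SET_IBYTES(vsi, es->rx_bytes);
+ */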
+
+/*
+ *****************************************************************************
+ * vendor_info_array
+ * 
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ * 
+ *****************************************************************************
+ */
+typedef struct _ixl_vendor_info_t {
+	unsigned int    vendor_id;
+	unsigned int    device_id;
+	unsigned int    subvendor_id;
+	unsigned int    subdevice_id;
+	unsigned int    index;
+} ixl_vendor_info_t;
+
+
+struct ixl_tx_buf {
+	u32		eop_index;
+	struct mbuf	*m_head;
+	bus_dmamap_t	map;
+	bus_dma_tag_t	tag;
+};
+
+struct ixl_rx_buf {
+	struct mbuf	*m_head;
+	struct mbuf	*m_pack;
+	struct mbuf	*fmp;
+	bus_dmamap_t	hmap;
+	bus_dmamap_t	pmap;
+};
+
+/*
+** This struct has multiple uses: multicast
+** addresses, vlans, and mac filters all use it.
+*/
+struct ixl_mac_filter {
+	SLIST_ENTRY(ixl_mac_filter) next;
+	u8	macaddr[ETHER_ADDR_LEN];
+	s16	vlan;
+	u16	flags;
+};
+
+
+/*
+ * The Transmit ring control struct
+ */
+struct tx_ring {
+        struct ixl_queue	*que;
+	struct mtx		mtx;
+	u32			tail;
+	struct i40e_tx_desc	*base;
+	struct i40e_dma_mem	dma;
+	u16			next_avail;
+	u16			next_to_clean;
+	u16			atr_rate;
+	u16			atr_count;
+	u16			itr;
+	u16			latency;
+	struct ixl_tx_buf	*buffers;
+	volatile u16		avail;
+	u32			cmd;
+	bus_dma_tag_t		tx_tag;
+	bus_dma_tag_t		tso_tag;
+	char			mtx_name[16];
+	struct buf_ring		*br;
+
+	/* Used for Dynamic ITR calculation */
+	u32			packets;
+	u32 			bytes;
+
+	/* Soft Stats */
+	u64			tx_bytes;
+	u64			no_desc;
+	u64			total_packets;
+};
+
+
+/*
+ * The Receive ring control struct
+ */
+struct rx_ring {
+        struct ixl_queue	*que;
+	struct mtx		mtx;
+	union i40e_rx_desc	*base;
+	struct i40e_dma_mem	dma;
+	struct lro_ctrl		lro;
+	bool			lro_enabled;
+	bool			hdr_split;
+	bool			discard;
+        u16			next_refresh;
+        u16 			next_check;
+	u16			itr;
+	u16			latency;
+	char			mtx_name[16];
+	struct ixl_rx_buf	*buffers;
+	u32			mbuf_sz;
+	u32			tail;
+	bus_dma_tag_t		htag;
+	bus_dma_tag_t		ptag;
+
+	/* Used for Dynamic ITR calculation */
+	u32			packets;
+	u32 			bytes;
+
+	/* Soft stats */
+	u64			split;
+	u64			rx_packets;
+	u64 			rx_bytes;
+	u64 			discarded;
+	u64 			not_done;
+};
+
+/*
+** Driver queue struct: this is the interrupt container
+**  for the associated tx and rx ring pair.
+*/
+struct ixl_queue {
+	struct ixl_vsi		*vsi;
+	u32			me;
+	u32			msix;           /* This queue's MSIX vector */
+	u32			eims;           /* This queue's EIMS bit */
+	struct resource		*res;
+	void			*tag;
+	int			num_desc;	/* both tx and rx */
+	int			busy;
+	struct tx_ring		txr;
+	struct rx_ring		rxr;
+	struct task		task;
+	struct task		tx_task;
+	struct taskqueue	*tq;
+
+	/* Queue stats */
+	u64			irqs;
+	u64			tso;
+	u64			mbuf_defrag_failed;
+	u64			mbuf_hdr_failed;
+	u64			mbuf_pkt_failed;
+	u64			tx_map_avail;
+	u64			tx_dma_setup;
+	u64			dropped_pkts;
+};
+
+/*
+** Virtual Station interface: 
+**	there would be one of these per traffic class/type;
+**	for now there is just one, and it is embedded in the PF
+*/
+SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
+struct ixl_vsi {
+	void 			*back;
+	struct ifnet		*ifp;
+	struct device		*dev;
+	struct i40e_hw		*hw;
+	struct ifmedia		media;
+	u64			que_mask;
+	int			id;
+	u16			vsi_num;
+	u16			msix_base;	/* station base MSIX vector */
+	u16			first_queue;
+	u16			num_queues;
+	u16			rx_itr_setting;
+	u16			tx_itr_setting;
+	struct ixl_queue	*queues;	/* head of queues */
+	bool			link_active;
+	u16			seid;
+	u16			uplink_seid;
+	u16			downlink_seid;
+	u16			max_frame_size;
+
+	/* MAC/VLAN Filter list */
+	struct ixl_ftl_head ftl;
+	u16			num_macs;
+
+	struct i40e_aqc_vsi_properties_data info;
+
+	eventhandler_tag 	vlan_attach;
+	eventhandler_tag 	vlan_detach;
+	u16			num_vlans;
+
+	/* Per-VSI stats from hardware */
+	struct i40e_eth_stats	eth_stats;
+	struct i40e_eth_stats	eth_stats_offsets;
+	bool 			stat_offsets_loaded;
+	/* VSI stat counters */
+	u64			ipackets;
+	u64			ierrors;
+	u64			opackets;
+	u64			oerrors;
+	u64			ibytes;
+	u64			obytes;
+	u64			imcasts;
+	u64			omcasts;
+	u64			iqdrops;
+	u64			oqdrops;
+	u64			noproto;
+
+	/* Driver statistics */
+	u64			hw_filters_del;
+	u64			hw_filters_add;
+
+	/* Misc. */
+	u64 			active_queues;
+	u64 			flags;
+	struct sysctl_oid	*vsi_node;
+};
+
+/*
+** Find the number of unrefreshed RX descriptors
+*/
+static inline u16
+ixl_rx_unrefreshed(struct ixl_queue *que)
+{       
+        struct rx_ring	*rxr = &que->rxr;
+        
+	if (rxr->next_check > rxr->next_refresh)
+		return (rxr->next_check - rxr->next_refresh - 1);
+	else
+		return ((que->num_desc + rxr->next_check) -
+		    rxr->next_refresh - 1);
+}       
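+
+/*
+ * Illustrative arithmetic: with num_desc = 1024, next_check = 10 and
+ * next_refresh = 1000, the wrap case applies and the function returns
+ * (1024 + 10) - 1000 - 1 = 33 unrefreshed descriptors.
+ */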
+
+/*
+** Allocate a new filter and insert it at the head of the filter list
+*/
+static inline struct ixl_mac_filter *
+ixl_get_filter(struct ixl_vsi *vsi)
+{
+	struct ixl_mac_filter  *f;
+
+	/* create a new empty filter */
+	f = malloc(sizeof(struct ixl_mac_filter),
+	    M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (f)
+		SLIST_INSERT_HEAD(&vsi->ftl, f, next);
+
+	return (f);
+}
+
+/*
+** Compare two ethernet addresses
+*/
+static inline bool
+cmp_etheraddr(const u8 *ea1, const u8 *ea2)
+{       
+	bool cmp = FALSE;
+
+	if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) &&
+	    (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) &&
+	    (ea1[4] == ea2[4]) && (ea1[5] == ea2[5])) 
+		cmp = TRUE;
+
+	return (cmp);
+}       
+
+/*
+ * Info for stats sysctls
+ */
+struct ixl_sysctl_info {
+	u64	*stat;
+	char	*name;
+	char	*description;
+};
+
+extern int ixl_atr_rate;
+
+/*
+** ixl_fw_version_str - format the FW and NVM version strings
+*/
+static inline char *
+ixl_fw_version_str(struct i40e_hw *hw)
+{
+	static char buf[32];
+
+	snprintf(buf, sizeof(buf),
+	    "f%d.%d a%d.%d n%02x.%02x e%08x",
+	    hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+	    hw->aq.api_maj_ver, hw->aq.api_min_ver,
+	    (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
+	    IXL_NVM_VERSION_HI_SHIFT,
+	    (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
+	    IXL_NVM_VERSION_LO_SHIFT,
+	    hw->nvm.eetrack);
+	return buf;
+}
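+
+/*
+ * Illustrative usage: a caller might log the result as
+ *
+ *	device_printf(dev, "fw version: %s\n", ixl_fw_version_str(hw));
+ *
+ * Note the static buffer makes the returned string non-reentrant: a
+ * second call overwrites the first result.
+ */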
+
+/*********************************************************************
+ *  TXRX Function prototypes
+ *********************************************************************/
+int	ixl_allocate_tx_data(struct ixl_queue *);
+int	ixl_allocate_rx_data(struct ixl_queue *);
+void	ixl_init_tx_ring(struct ixl_queue *);
+int	ixl_init_rx_ring(struct ixl_queue *);
+bool	ixl_rxeof(struct ixl_queue *, int);
+bool	ixl_txeof(struct ixl_queue *);
+int	ixl_mq_start(struct ifnet *, struct mbuf *);
+int	ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
+void	ixl_deferred_mq_start(void *, int);
+void	ixl_qflush(struct ifnet *);
+void	ixl_free_vsi(struct ixl_vsi *);
+void	ixl_free_que_tx(struct ixl_queue *);
+void	ixl_free_que_rx(struct ixl_queue *);
+#ifdef IXL_FDIR
+void	ixl_atr(struct ixl_queue *, struct tcphdr *, int);
+#endif
+#if __FreeBSD_version >= 1100000
+uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
+#endif
+
+#endif /* _IXL_H_ */


Property changes on: trunk/sys/dev/ixl/ixl.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/ixl_pf.h
===================================================================
--- trunk/sys/dev/ixl/ixl_pf.h	                        (rev 0)
+++ trunk/sys/dev/ixl/ixl_pf.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,140 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/ixl_pf.h 292097 2015-12-11 12:47:49Z smh $*/
+
+
+#ifndef _IXL_PF_H_
+#define _IXL_PF_H_
+
+#define	VF_FLAG_ENABLED			0x01
+#define	VF_FLAG_SET_MAC_CAP		0x02
+#define	VF_FLAG_VLAN_CAP		0x04
+#define	VF_FLAG_PROMISC_CAP		0x08
+#define	VF_FLAG_MAC_ANTI_SPOOF		0x10
+
+struct ixl_vf {
+	struct ixl_vsi		vsi;
+	uint32_t		vf_flags;
+
+	uint8_t			mac[ETHER_ADDR_LEN];
+	uint16_t		vf_num;
+
+	struct sysctl_ctx_list	ctx;
+};
+
+/* Physical controller structure */
+struct ixl_pf {
+	struct i40e_hw		hw;
+	struct i40e_osdep	osdep;
+	struct device		*dev;
+
+	struct resource		*pci_mem;
+	struct resource		*msix_mem;
+
+	/*
+	 * Interrupt resources: this set is
+	 * either used for legacy, or for Link
+	 * when doing MSIX
+	 */
+	void			*tag;
+	struct resource 	*res;
+
+	struct callout		timer;
+	int			msix;
+	int			if_flags;
+
+	struct mtx		pf_mtx;
+
+	u32			qbase;
+	u32 			admvec;
+	struct task     	adminq;
+	struct taskqueue	*tq;
+
+	bool			link_up;
+	u32			link_speed;
+	int			advertised_speed;
+	int			fc; /* local flow ctrl setting */
+
+	/*
+	** Network interfaces
+	**   These are the traffic class holders, and
+	**   will have a stack interface and queues 
+	**   associated with them.
+	** NOTE: The PF has only a single interface,
+	**   so it is embedded in the PF struct.
+	*/
+	struct ixl_vsi		vsi;
+
+	/* Misc stats maintained by the driver */
+	u64			watchdog_events;
+	u64			admin_irq;
+
+	/* Statistics from hw */
+	struct i40e_hw_port_stats 	stats;
+	struct i40e_hw_port_stats	stats_offsets;
+	bool 				stat_offsets_loaded;
+
+	struct ixl_vf		*vfs;
+	int			num_vfs;
+	uint16_t		veb_seid;
+	struct task		vflr_task;
+	int			vc_debug_lvl;
+};
+
+#define IXL_SET_ADVERTISE_HELP		\
+"Control link advertise speed:\n"	\
+"\tFlags:\n"				\
+"\t\t0x1 - advertise 100 Mb\n"		\
+"\t\t0x2 - advertise 1G\n"		\
+"\t\t0x4 - advertise 10G\n"		\
+"\t\t0x8 - advertise 20G\n\n"		\
+"\tDoes not work on 40G devices."
+
+#define	I40E_VC_DEBUG(pf, level, ...) \
+	do { \
+		if ((pf)->vc_debug_lvl >= (level)) \
+			device_printf((pf)->dev, __VA_ARGS__); \
+	} while (0)
+
+#define	i40e_send_vf_nack(pf, vf, op, st) \
+	ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
+
+#define IXL_PF_LOCK_INIT(_sc, _name) \
+        mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF)
+#define IXL_PF_LOCK(_sc)              mtx_lock(&(_sc)->pf_mtx)
+#define IXL_PF_UNLOCK(_sc)            mtx_unlock(&(_sc)->pf_mtx)
+#define IXL_PF_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->pf_mtx)
+#define IXL_PF_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
+
+#endif /* _IXL_PF_H_ */


Property changes on: trunk/sys/dev/ixl/ixl_pf.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/ixl_txrx.c
===================================================================
--- trunk/sys/dev/ixl/ixl_txrx.c	                        (rev 0)
+++ trunk/sys/dev/ixl/ixl_txrx.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,1832 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/ixl_txrx.c 292099 2015-12-11 13:05:18Z smh $*/
+
+/*
+**	IXL driver TX/RX Routines:
+**	    This was separated to allow usage by
+** 	    both the BASE and the VF drivers.
+*/
+
+#ifndef IXL_STANDALONE_BUILD
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#endif
+
+#include "ixl.h"
+
+#ifdef RSS
+#include <net/rss_config.h>
+#endif
+
+/* Local Prototypes */
+static void	ixl_rx_checksum(struct mbuf *, u32, u32, u8);
+static void	ixl_refresh_mbufs(struct ixl_queue *, int);
+static int      ixl_xmit(struct ixl_queue *, struct mbuf **);
+static int	ixl_tx_setup_offload(struct ixl_queue *,
+		    struct mbuf *, u32 *, u32 *);
+static bool	ixl_tso_setup(struct ixl_queue *, struct mbuf *);
+
+static __inline void ixl_rx_discard(struct rx_ring *, int);
+static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
+		    struct mbuf *, u8);
+
+#ifdef DEV_NETMAP
+#include <dev/netmap/if_ixl_netmap.h>
+#endif /* DEV_NETMAP */
+
+/*
+** Multiqueue Transmit driver
+*/
+int
+ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+	struct ixl_vsi		*vsi = ifp->if_softc;
+	struct ixl_queue	*que;
+	struct tx_ring		*txr;
+	int 			err, i;
+#ifdef RSS
+	u32			bucket_id;
+#endif
+
+	/*
+	** Which queue to use:
+	**
+	** When doing RSS, map it to the same outbound
+	** queue as the incoming flow would be mapped to.
+	** If everything is set up correctly, it should be
+	** the same bucket the current CPU maps to.
+	*/
+	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+#ifdef  RSS
+		if (rss_hash2bucket(m->m_pkthdr.flowid,
+		    M_HASHTYPE_GET(m), &bucket_id) == 0) {
+			i = bucket_id % vsi->num_queues;
+                } else
+#endif
+                        i = m->m_pkthdr.flowid % vsi->num_queues;
+        } else
+		i = curcpu % vsi->num_queues;
+	/*
+	** This may not be perfect, but until something
+	** better comes along it will keep from scheduling
+	** on stalled queues.
+	*/
+	if (((1 << i) & vsi->active_queues) == 0)
+		i = ffsl(vsi->active_queues) - 1; /* ffsl() is 1-based */
+
+	que = &vsi->queues[i];
+	txr = &que->txr;
+
+	err = drbr_enqueue(ifp, txr->br, m);
+	if (err)
+		return (err);
+	if (IXL_TX_TRYLOCK(txr)) {
+		ixl_mq_start_locked(ifp, txr);
+		IXL_TX_UNLOCK(txr);
+	} else
+		taskqueue_enqueue(que->tq, &que->tx_task);
+
+	return (0);
+}
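+
+/*
+ * Illustrative arithmetic for the queue selection above: on a 4-queue
+ * VSI, a flow with flowid 10 maps to queue 10 % 4 = 2; if bit 2 of
+ * active_queues were clear, the first active queue would be used
+ * instead.
+ */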
+
+int
+ixl_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
+{
+	struct ixl_queue	*que = txr->que;
+	struct ixl_vsi		*vsi = que->vsi;
+        struct mbuf		*next;
+        int			err = 0;
+
+
+	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
+	    vsi->link_active == 0)
+		return (ENETDOWN);
+
+	/* Process the transmit queue */
+	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
+		if ((err = ixl_xmit(que, &next)) != 0) {
+			if (next == NULL)
+				drbr_advance(ifp, txr->br);
+			else
+				drbr_putback(ifp, txr->br, next);
+			break;
+		}
+		drbr_advance(ifp, txr->br);
+		/* Send a copy of the frame to the BPF listener */
+		ETHER_BPF_MTAP(ifp, next);
+		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+			break;
+	}
+
+	if (txr->avail < IXL_TX_CLEANUP_THRESHOLD)
+		ixl_txeof(que);
+
+	return (err);
+}
+
+/*
+ * Called from a taskqueue to drain queued transmit packets.
+ */
+void
+ixl_deferred_mq_start(void *arg, int pending)
+{
+	struct ixl_queue	*que = arg;
+        struct tx_ring		*txr = &que->txr;
+	struct ixl_vsi		*vsi = que->vsi;
+        struct ifnet		*ifp = vsi->ifp;
+        
+	IXL_TX_LOCK(txr);
+	if (!drbr_empty(ifp, txr->br))
+		ixl_mq_start_locked(ifp, txr);
+	IXL_TX_UNLOCK(txr);
+}
+
+/*
+** Flush all queue ring buffers
+*/
+void
+ixl_qflush(struct ifnet *ifp)
+{
+	struct ixl_vsi	*vsi = ifp->if_softc;
+
+        for (int i = 0; i < vsi->num_queues; i++) {
+		struct ixl_queue *que = &vsi->queues[i];
+		struct tx_ring	*txr = &que->txr;
+		struct mbuf	*m;
+		IXL_TX_LOCK(txr);
+		while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+			m_freem(m);
+		IXL_TX_UNLOCK(txr);
+	}
+	if_qflush(ifp);
+}
+
+/*
+** Find mbuf chains passed to the driver
+** that are 'sparse', using more than
+** IXL_SPARSE_CHAIN mbufs to deliver an
+** mss-size chunk of data
+*/
+static inline bool
+ixl_tso_detect_sparse(struct mbuf *mp)
+{
+	struct mbuf	*m;
+	int		num = 0, mss;
+	bool		ret = FALSE;
+
+	mss = mp->m_pkthdr.tso_segsz;
+	for (m = mp->m_next; m != NULL; m = m->m_next) {
+		num++;
+		mss -= m->m_len;
+		if (mss < 1)
+			break;
+		if (m->m_next == NULL)
+			break;
+	}
+	if (num > IXL_SPARSE_CHAIN)
+		ret = TRUE;
+
+	return (ret);
+}
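+
+/*
+ * Illustrative arithmetic: an mss of 1448 bytes spread across eight
+ * ~200-byte mbufs exhausts IXL_SPARSE_CHAIN (6) before one segment's
+ * worth of data is covered, so such a chain is defragmented before
+ * DMA mapping.
+ */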
+
+
+/*********************************************************************
+ *
+ *  This routine maps the mbufs to tx descriptors, allowing the
+ *  TX engine to transmit the packets. 
+ *  	- return 0 on success, positive on failure
+ *
+ **********************************************************************/
+#define IXL_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+
+static int
+ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
+{
+	struct ixl_vsi		*vsi = que->vsi;
+	struct i40e_hw		*hw = vsi->hw;
+	struct tx_ring		*txr = &que->txr;
+	struct ixl_tx_buf	*buf;
+	struct i40e_tx_desc	*txd = NULL;
+	struct mbuf		*m_head, *m;
+	int             	i, j, error, nsegs, maxsegs;
+	int			first, last = 0;
+	u16			vtag = 0;
+	u32			cmd, off;
+	bus_dmamap_t		map;
+	bus_dma_tag_t		tag;
+	bus_dma_segment_t	segs[IXL_MAX_TSO_SEGS];
+
+
+	cmd = off = 0;
+	m_head = *m_headp;
+
+        /*
+         * Important to capture the first descriptor
+         * used because it will contain the index of
+         * the one we tell the hardware to report back
+         */
+        first = txr->next_avail;
+	buf = &txr->buffers[first];
+	map = buf->map;
+	tag = txr->tx_tag;
+	maxsegs = IXL_MAX_TX_SEGS;
+
+	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+		/* Use larger mapping for TSO */
+		tag = txr->tso_tag;
+		maxsegs = IXL_MAX_TSO_SEGS;
+		if (ixl_tso_detect_sparse(m_head)) {
+			m = m_defrag(m_head, M_NOWAIT);
+			if (m == NULL) {
+				m_freem(*m_headp);
+				*m_headp = NULL;
+				return (ENOBUFS);
+			}
+			*m_headp = m;
+		}
+	}
+
+	/*
+	 * Map the packet for DMA.
+	 */
+	error = bus_dmamap_load_mbuf_sg(tag, map,
+	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+	if (error == EFBIG) {
+		struct mbuf *m;
+
+		m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
+		if (m == NULL) {
+			que->mbuf_defrag_failed++;
+			m_freem(*m_headp);
+			*m_headp = NULL;
+			return (ENOBUFS);
+		}
+		*m_headp = m;
+
+		/* Try it again */
+		error = bus_dmamap_load_mbuf_sg(tag, map,
+		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+		if (error == ENOMEM) {
+			que->tx_dma_setup++;
+			return (error);
+		} else if (error != 0) {
+			que->tx_dma_setup++;
+			m_freem(*m_headp);
+			*m_headp = NULL;
+			return (error);
+		}
+	} else if (error == ENOMEM) {
+		que->tx_dma_setup++;
+		return (error);
+	} else if (error != 0) {
+		que->tx_dma_setup++;
+		m_freem(*m_headp);
+		*m_headp = NULL;
+		return (error);
+	}
+
+	/* Make certain there are enough descriptors */
+	if (nsegs > txr->avail - 2) {
+		txr->no_desc++;
+		error = ENOBUFS;
+		goto xmit_fail;
+	}
+	m_head = *m_headp;
+
+	/* Set up the TSO/CSUM offload */
+	if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) {
+		error = ixl_tx_setup_offload(que, m_head, &cmd, &off);
+		if (error)
+			goto xmit_fail;
+	}
+
+	cmd |= I40E_TX_DESC_CMD_ICRC;
+	/* Grab the VLAN tag */
+	if (m_head->m_flags & M_VLANTAG) {
+		cmd |= I40E_TX_DESC_CMD_IL2TAG1;
+		vtag = htole16(m_head->m_pkthdr.ether_vtag);
+	}
+
+	i = txr->next_avail;
+	for (j = 0; j < nsegs; j++) {
+		bus_size_t seglen;
+
+		buf = &txr->buffers[i];
+		buf->tag = tag; /* Keep track of the type tag */
+		txd = &txr->base[i];
+		seglen = segs[j].ds_len;
+
+		txd->buffer_addr = htole64(segs[j].ds_addr);
+		txd->cmd_type_offset_bsz =
+		    htole64(I40E_TX_DESC_DTYPE_DATA
+		    | ((u64)cmd  << I40E_TXD_QW1_CMD_SHIFT)
+		    | ((u64)off << I40E_TXD_QW1_OFFSET_SHIFT)
+		    | ((u64)seglen  << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
+		    | ((u64)vtag  << I40E_TXD_QW1_L2TAG1_SHIFT));
+
+		last = i; /* descriptor that will get completion IRQ */
+
+		if (++i == que->num_desc)
+			i = 0;
+
+		buf->m_head = NULL;
+		buf->eop_index = -1;
+	}
+	/* Set the last descriptor for report */
+	txd->cmd_type_offset_bsz |=
+	    htole64(((u64)IXL_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT));
+	txr->avail -= nsegs;
+	txr->next_avail = i;
+
+	buf->m_head = m_head;
+	/* Swap the dma map between the first and last descriptor */
+	txr->buffers[first].map = buf->map;
+	buf->map = map;
+	bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
+
+        /* Set the index of the descriptor that will be marked done */
+        buf = &txr->buffers[first];
+	buf->eop_index = last;
+
+        bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	/*
+	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
+	 * hardware that this frame is available to transmit.
+	 */
+	++txr->total_packets;
+	wr32(hw, txr->tail, i);
+
+	ixl_flush(hw);
+	/* Mark outstanding work */
+	if (que->busy == 0)
+		que->busy = 1;
+	return (0);
+
+xmit_fail:
+	bus_dmamap_unload(tag, buf->map);
+	return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for tx_buffer structures. The tx_buffer stores all
+ *  the information needed to transmit a packet on the wire. This is
+ *  called only once at attach; setup is done on every reset.
+ *
+ **********************************************************************/
+int
+ixl_allocate_tx_data(struct ixl_queue *que)
+{
+	struct tx_ring		*txr = &que->txr;
+	struct ixl_vsi		*vsi = que->vsi;
+	device_t		dev = vsi->dev;
+	struct ixl_tx_buf	*buf;
+	int			error = 0;
+
+	/*
+	 * Setup DMA descriptor areas.
+	 */
+	if ((error = bus_dma_tag_create(NULL,		/* parent */
+			       1, 0,			/* alignment, bounds */
+			       BUS_SPACE_MAXADDR,	/* lowaddr */
+			       BUS_SPACE_MAXADDR,	/* highaddr */
+			       NULL, NULL,		/* filter, filterarg */
+			       IXL_TSO_SIZE,		/* maxsize */
+			       IXL_MAX_TX_SEGS,		/* nsegments */
+			       PAGE_SIZE,		/* maxsegsize */
+			       0,			/* flags */
+			       NULL,			/* lockfunc */
+			       NULL,			/* lockfuncarg */
+			       &txr->tx_tag))) {
+		device_printf(dev,"Unable to allocate TX DMA tag\n");
+		goto fail;
+	}
+
+	/* Make a special tag for TSO */
+	if ((error = bus_dma_tag_create(NULL,		/* parent */
+			       1, 0,			/* alignment, bounds */
+			       BUS_SPACE_MAXADDR,	/* lowaddr */
+			       BUS_SPACE_MAXADDR,	/* highaddr */
+			       NULL, NULL,		/* filter, filterarg */
+			       IXL_TSO_SIZE,		/* maxsize */
+			       IXL_MAX_TSO_SEGS,	/* nsegments */
+			       PAGE_SIZE,		/* maxsegsize */
+			       0,			/* flags */
+			       NULL,			/* lockfunc */
+			       NULL,			/* lockfuncarg */
+			       &txr->tso_tag))) {
+		device_printf(dev,"Unable to allocate TX TSO DMA tag\n");
+		goto fail;
+	}
+
+	if (!(txr->buffers =
+	    (struct ixl_tx_buf *) malloc(sizeof(struct ixl_tx_buf) *
+	    que->num_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate tx_buffer memory\n");
+		error = ENOMEM;
+		goto fail;
+	}
+
+        /* Create the descriptor buffer default dma maps */
+	buf = txr->buffers;
+	for (int i = 0; i < que->num_desc; i++, buf++) {
+		buf->tag = txr->tx_tag;
+		error = bus_dmamap_create(buf->tag, 0, &buf->map);
+		if (error != 0) {
+			device_printf(dev, "Unable to create TX DMA map\n");
+			goto fail;
+		}
+	}
+fail:
+	return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  (Re)Initialize a queue transmit ring.
+ *	- called by init, it clears the descriptor ring,
+ *	  and frees any stale mbufs 
+ *
+ **********************************************************************/
+void
+ixl_init_tx_ring(struct ixl_queue *que)
+{
+#ifdef DEV_NETMAP
+	struct netmap_adapter *na = NA(que->vsi->ifp);
+	struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+	struct tx_ring		*txr = &que->txr;
+	struct ixl_tx_buf	*buf;
+
+	/* Clear the old ring contents */
+	IXL_TX_LOCK(txr);
+
+#ifdef DEV_NETMAP
+	/*
+	 * (under lock): if in netmap mode, do some consistency
+	 * checks and set slot to entry 0 of the netmap ring.
+	 */
+	slot = netmap_reset(na, NR_TX, que->me, 0);
+#endif /* DEV_NETMAP */
+
+	bzero((void *)txr->base,
+	      (sizeof(struct i40e_tx_desc)) * que->num_desc);
+
+	/* Reset indices */
+	txr->next_avail = 0;
+	txr->next_to_clean = 0;
+
+#ifdef IXL_FDIR
+	/* Initialize flow director */
+	txr->atr_rate = ixl_atr_rate;
+	txr->atr_count = 0;
+#endif
+
+	/* Free any existing tx mbufs. */
+        buf = txr->buffers;
+	for (int i = 0; i < que->num_desc; i++, buf++) {
+		if (buf->m_head != NULL) {
+			bus_dmamap_sync(buf->tag, buf->map,
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(buf->tag, buf->map);
+			m_freem(buf->m_head);
+			buf->m_head = NULL;
+		}
+#ifdef DEV_NETMAP
+		/*
+		 * In netmap mode, set the map for the packet buffer.
+		 * NOTE: Some drivers (not this one) also need to set
+		 * the physical buffer address in the NIC ring.
+		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
+		 * netmap slot index, si
+		 */
+		if (slot) {
+			int si = netmap_idx_n2k(&na->tx_rings[que->me], i);
+			netmap_load_map(na, buf->tag, buf->map, NMB(na, slot + si));
+		}
+#endif /* DEV_NETMAP */
+		/* Clear the EOP index */
+		buf->eop_index = -1;
+        }
+
+	/* Set number of descriptors available */
+	txr->avail = que->num_desc;
+
+	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	IXL_TX_UNLOCK(txr);
+}
+
+
+/*********************************************************************
+ *
+ *  Free transmit ring related data structures.
+ *
+ **********************************************************************/
+void
+ixl_free_que_tx(struct ixl_queue *que)
+{
+	struct tx_ring *txr = &que->txr;
+	struct ixl_tx_buf *buf;
+
+	INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
+
+	for (int i = 0; i < que->num_desc; i++) {
+		buf = &txr->buffers[i];
+		if (buf->m_head != NULL) {
+			bus_dmamap_sync(buf->tag, buf->map,
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(buf->tag,
+			    buf->map);
+			m_freem(buf->m_head);
+			buf->m_head = NULL;
+			if (buf->map != NULL) {
+				bus_dmamap_destroy(buf->tag,
+				    buf->map);
+				buf->map = NULL;
+			}
+		} else if (buf->map != NULL) {
+			bus_dmamap_unload(buf->tag,
+			    buf->map);
+			bus_dmamap_destroy(buf->tag,
+			    buf->map);
+			buf->map = NULL;
+		}
+	}
+	if (txr->br != NULL)
+		buf_ring_free(txr->br, M_DEVBUF);
+	if (txr->buffers != NULL) {
+		free(txr->buffers, M_DEVBUF);
+		txr->buffers = NULL;
+	}
+	if (txr->tx_tag != NULL) {
+		bus_dma_tag_destroy(txr->tx_tag);
+		txr->tx_tag = NULL;
+	}
+	if (txr->tso_tag != NULL) {
+		bus_dma_tag_destroy(txr->tso_tag);
+		txr->tso_tag = NULL;
+	}
+
+	INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
+	return;
+}
+
+/*********************************************************************
+ *
+ *  Setup descriptor for hw offloads 
+ *
+ **********************************************************************/
+
+static int
+ixl_tx_setup_offload(struct ixl_queue *que,
+    struct mbuf *mp, u32 *cmd, u32 *off)
+{
+	struct ether_vlan_header	*eh;
+#ifdef INET
+	struct ip			*ip = NULL;
+#endif
+	struct tcphdr			*th = NULL;
+#ifdef INET6
+	struct ip6_hdr			*ip6;
+#endif
+	int				elen, ip_hlen = 0, tcp_hlen;
+	u16				etype;
+	u8				ipproto = 0;
+	bool				tso = FALSE;
+
+
+	/* Set up the TSO context descriptor if required */
+	if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
+		tso = ixl_tso_setup(que, mp);
+		if (tso)
+			++que->tso;
+		else
+			return (ENXIO);
+	}
+
+	/*
+	 * Determine where frame payload starts.
+	 * Jump over vlan headers if already present,
+	 * helpful for QinQ too.
+	 */
+	eh = mtod(mp, struct ether_vlan_header *);
+	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		etype = ntohs(eh->evl_proto);
+		elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+	} else {
+		etype = ntohs(eh->evl_encap_proto);
+		elen = ETHER_HDR_LEN;
+	}
+
+	switch (etype) {
+#ifdef INET
+		case ETHERTYPE_IP:
+			ip = (struct ip *)(mp->m_data + elen);
+			ip_hlen = ip->ip_hl << 2;
+			ipproto = ip->ip_p;
+			th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+			/* The IP checksum must be recalculated with TSO */
+			if (tso)
+				*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
+			else
+				*cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
+			break;
+#endif
+#ifdef INET6
+		case ETHERTYPE_IPV6:
+			ip6 = (struct ip6_hdr *)(mp->m_data + elen);
+			ip_hlen = sizeof(struct ip6_hdr);
+			ipproto = ip6->ip6_nxt;
+			th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+			*cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
+			break;
+#endif
+		default:
+			break;
+	}
+
+	*off |= (elen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+	*off |= (ip_hlen >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+
+	switch (ipproto) {
+		case IPPROTO_TCP:
+			tcp_hlen = th->th_off << 2;
+			if (mp->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) {
+				*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+				*off |= (tcp_hlen >> 2) <<
+				    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+			}
+#ifdef IXL_FDIR
+			ixl_atr(que, th, etype);
+#endif
+			break;
+		case IPPROTO_UDP:
+			if (mp->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_UDP_IPV6)) {
+				*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
+				*off |= (sizeof(struct udphdr) >> 2) <<
+				    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+			}
+			break;
+
+		case IPPROTO_SCTP:
+			if (mp->m_pkthdr.csum_flags & (CSUM_SCTP|CSUM_SCTP_IPV6)) {
+				*cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
+				*off |= (sizeof(struct sctphdr) >> 2) <<
+				    I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+			}
+			/* Fall Thru */
+		default:
+			break;
+	}
+
+        return (0);
+}
+
+
+/**********************************************************************
+ *
+ *  Setup context for hardware segmentation offload (TSO)
+ *
+ **********************************************************************/
+static bool
+ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
+{
+	struct tx_ring			*txr = &que->txr;
+	struct i40e_tx_context_desc	*TXD;
+	struct ixl_tx_buf		*buf;
+	u32				cmd, mss, type, tsolen;
+	u16				etype;
+	int				idx, elen, ip_hlen, tcp_hlen;
+	struct ether_vlan_header	*eh;
+#ifdef INET
+	struct ip			*ip;
+#endif
+#ifdef INET6
+	struct ip6_hdr			*ip6;
+#endif
+#if defined(INET6) || defined(INET)
+	struct tcphdr			*th;
+#endif
+	u64				type_cmd_tso_mss;
+
+	/*
+	 * Determine where frame payload starts.
+	 * Jump over vlan headers if already present
+	 */
+	eh = mtod(mp, struct ether_vlan_header *);
+	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+		elen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+		etype = eh->evl_proto;
+	} else {
+		elen = ETHER_HDR_LEN;
+		etype = eh->evl_encap_proto;
+	}
+
+        switch (ntohs(etype)) {
+#ifdef INET6
+	case ETHERTYPE_IPV6:
+		ip6 = (struct ip6_hdr *)(mp->m_data + elen);
+		if (ip6->ip6_nxt != IPPROTO_TCP)
+			return (FALSE);
+		ip_hlen = sizeof(struct ip6_hdr);
+		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
+		tcp_hlen = th->th_off << 2;
+		break;
+#endif
+#ifdef INET
+	case ETHERTYPE_IP:
+		ip = (struct ip *)(mp->m_data + elen);
+		if (ip->ip_p != IPPROTO_TCP)
+			return (FALSE);
+		ip->ip_sum = 0;
+		ip_hlen = ip->ip_hl << 2;
+		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+		th->th_sum = in_pseudo(ip->ip_src.s_addr,
+		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+		tcp_hlen = th->th_off << 2;
+		break;
+#endif
+	default:
+		printf("%s: CSUM_TSO but no supported IP version (0x%04x)\n",
+		    __func__, ntohs(etype));
+		return FALSE;
+        }
+
+        /* Ensure we have at least the IP+TCP header in the first mbuf. */
+        if (mp->m_len < elen + ip_hlen + sizeof(struct tcphdr))
+		return FALSE;
+
+	idx = txr->next_avail;
+	buf = &txr->buffers[idx];
+	TXD = (struct i40e_tx_context_desc *) &txr->base[idx];
+	tsolen = mp->m_pkthdr.len - (elen + ip_hlen + tcp_hlen);
+
+	type = I40E_TX_DESC_DTYPE_CONTEXT;
+	cmd = I40E_TX_CTX_DESC_TSO;
+	mss = mp->m_pkthdr.tso_segsz;
+
+	type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
+	    ((u64)cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+	    ((u64)tsolen << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+	    ((u64)mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
+	TXD->type_cmd_tso_mss = htole64(type_cmd_tso_mss);
+
+	TXD->tunneling_params = htole32(0);
+	buf->m_head = NULL;
+	buf->eop_index = -1;
+
+	if (++idx == que->num_desc)
+		idx = 0;
+
+	txr->avail--;
+	txr->next_avail = idx;
+
+	return TRUE;
+}
+
+/*
+** ixl_get_tx_head - Retrieve the value from the
+**    location where the HW records its HEAD index
+*/
+static inline u32
+ixl_get_tx_head(struct ixl_queue *que)
+{
+	struct tx_ring  *txr = &que->txr;
+	void *head = &txr->base[que->num_desc];
+	return LE32_TO_CPU(*(volatile __le32 *)head);
+}
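+
+/*
+ * Illustrative note: the head writeback word therefore lives at byte
+ * offset num_desc * sizeof(struct i40e_tx_desc) in the ring's DMA area,
+ * one slot past the last real descriptor; the ring allocation is
+ * presumed to reserve that extra slot.
+ */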
+
+/**********************************************************************
+ *
+ *  Examine each tx_buffer in the used queue. If the hardware is done
+ *  processing the packet then free associated resources. The
+ *  tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+bool
+ixl_txeof(struct ixl_queue *que)
+{
+	struct tx_ring		*txr = &que->txr;
+	u32			first, last, head, done, processed;
+	struct ixl_tx_buf	*buf;
+	struct i40e_tx_desc	*tx_desc, *eop_desc;
+
+
+	mtx_assert(&txr->mtx, MA_OWNED);
+
+#ifdef DEV_NETMAP
+	/* XXX todo: implement moderation */
+	if (netmap_tx_irq(que->vsi->ifp, que->me))
+		return FALSE;
+#endif /* DEV_NETMAP */
+
+	/* These are not the descriptors you seek, move along :) */
+	if (txr->avail == que->num_desc) {
+		que->busy = 0;
+		return FALSE;
+	}
+
+	processed = 0;
+	first = txr->next_to_clean;
+	buf = &txr->buffers[first];
+	tx_desc = (struct i40e_tx_desc *)&txr->base[first];
+	last = buf->eop_index;
+	if (last == -1)
+		return FALSE;
+	eop_desc = (struct i40e_tx_desc *)&txr->base[last];
+
+	/* Get the Head WB value */
+	head = ixl_get_tx_head(que);
+
+	/*
+	** Get the index of the first descriptor
+	** BEYOND the EOP and call that 'done'.
+	** I do this so the comparison in the
+	** inner while loop below can be simple
+	*/
+	if (++last == que->num_desc) last = 0;
+	done = last;
+
+        bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+            BUS_DMASYNC_POSTREAD);
+	/*
+	** The HEAD index of the ring is written in a 
+	** defined location, this rather than a done bit
+	** is what is used to keep track of what must be
+	** 'cleaned'.
+	*/
+	while (first != head) {
+		/* We clean the range of the packet */
+		while (first != done) {
+			++txr->avail;
+			++processed;
+
+			if (buf->m_head) {
+				txr->bytes += /* for ITR adjustment */
+				    buf->m_head->m_pkthdr.len;
+				txr->tx_bytes += /* for TX stats */
+				    buf->m_head->m_pkthdr.len;
+				bus_dmamap_sync(buf->tag,
+				    buf->map,
+				    BUS_DMASYNC_POSTWRITE);
+				bus_dmamap_unload(buf->tag,
+				    buf->map);
+				m_freem(buf->m_head);
+				buf->m_head = NULL;
+				buf->map = NULL;
+			}
+			buf->eop_index = -1;
+
+			if (++first == que->num_desc)
+				first = 0;
+
+			buf = &txr->buffers[first];
+			tx_desc = &txr->base[first];
+		}
+		++txr->packets;
+		/* See if there is more work now */
+		last = buf->eop_index;
+		if (last != -1) {
+			eop_desc = &txr->base[last];
+			/* Get next done point */
+			if (++last == que->num_desc) last = 0;
+			done = last;
+		} else
+			break;
+	}
+	bus_dmamap_sync(txr->dma.tag, txr->dma.map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+	txr->next_to_clean = first;
+
+
+	/*
+	** Hang detection: we know there's work
+	** outstanding or the first return above
+	** would have been taken, so indicate an
+	** unsuccessful pass. In local_timer, if
+	** the value grows too large the queue
+	** will be considered hung. If anything
+	** has been cleaned then reset the state.
+	*/
+	if ((processed == 0) && (que->busy != IXL_QUEUE_HUNG))
+		++que->busy;
+
+	if (processed)
+		que->busy = 1; /* Note this turns off HUNG */
+
+	/*
+	 * If there are no pending descriptors, clear the timeout.
+	 */
+	if (txr->avail == que->num_desc) {
+		que->busy = 0;
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings
+ *   - now keeps its own state, so discards due to resource
+ *     exhaustion are unnecessary; if an mbuf cannot be obtained
+ *     the routine just returns, keeping its placeholder, so it
+ *     can simply be called again later to retry.
+ *
+ **********************************************************************/
+static void
+ixl_refresh_mbufs(struct ixl_queue *que, int limit)
+{
+	struct ixl_vsi		*vsi = que->vsi;
+	struct rx_ring		*rxr = &que->rxr;
+	bus_dma_segment_t	hseg[1];
+	bus_dma_segment_t	pseg[1];
+	struct ixl_rx_buf	*buf;
+	struct mbuf		*mh, *mp;
+	int			i, j, nsegs, error;
+	bool			refreshed = FALSE;
+
+	i = j = rxr->next_refresh;
+	/* Control the loop with one beyond */
+	if (++j == que->num_desc)
+		j = 0;
+
+	while (j != limit) {
+		buf = &rxr->buffers[i];
+		if (rxr->hdr_split == FALSE)
+			goto no_split;
+
+		if (buf->m_head == NULL) {
+			mh = m_gethdr(M_NOWAIT, MT_DATA);
+			if (mh == NULL)
+				goto update;
+		} else
+			mh = buf->m_head;
+
+		mh->m_pkthdr.len = mh->m_len = MHLEN;
+		mh->m_flags |= M_PKTHDR;
+		/* Get the memory mapping */
+		error = bus_dmamap_load_mbuf_sg(rxr->htag,
+		    buf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0) {
+			printf("Refresh mbufs: hdr dmamap load"
+			    " failure - %d\n", error);
+			m_free(mh);
+			buf->m_head = NULL;
+			goto update;
+		}
+		buf->m_head = mh;
+		bus_dmamap_sync(rxr->htag, buf->hmap,
+		    BUS_DMASYNC_PREREAD);
+		rxr->base[i].read.hdr_addr =
+		   htole64(hseg[0].ds_addr);
+
+no_split:
+		if (buf->m_pack == NULL) {
+			mp = m_getjcl(M_NOWAIT, MT_DATA,
+			    M_PKTHDR, rxr->mbuf_sz);
+			if (mp == NULL)
+				goto update;
+		} else
+			mp = buf->m_pack;
+
+		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+		/* Get the memory mapping */
+		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+		    buf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0) {
+			printf("Refresh mbufs: payload dmamap load"
+			    " failure - %d\n", error);
+			m_free(mp);
+			buf->m_pack = NULL;
+			goto update;
+		}
+		buf->m_pack = mp;
+		bus_dmamap_sync(rxr->ptag, buf->pmap,
+		    BUS_DMASYNC_PREREAD);
+		rxr->base[i].read.pkt_addr =
+		   htole64(pseg[0].ds_addr);
+		/* Used only when doing header split */
+		rxr->base[i].read.hdr_addr = 0;
+
+		refreshed = TRUE;
+		/* Next is precalculated */
+		i = j;
+		rxr->next_refresh = i;
+		if (++j == que->num_desc)
+			j = 0;
+	}
+update:
+	if (refreshed) /* Update hardware tail index */
+		wr32(vsi->hw, rxr->tail, rxr->next_refresh);
+	return;
+}
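+
+/*
+ * Illustrative trace of the "one beyond" loop control above: with
+ * num_desc = 1024 and next_refresh = 1023, j starts at 0 (wrapping), so
+ * a limit of 8 refreshes descriptors 1023 and 0 through 6 (eight in
+ * total) before stopping.
+ */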
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per descriptor, the maximum number of rx_buffers
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've defined.
+ *
+ **********************************************************************/
+int
+ixl_allocate_rx_data(struct ixl_queue *que)
+{
+	struct rx_ring		*rxr = &que->rxr;
+	struct ixl_vsi		*vsi = que->vsi;
+	device_t 		dev = vsi->dev;
+	struct ixl_rx_buf 	*buf;
+	int             	i, bsize, error;
+
+	bsize = sizeof(struct ixl_rx_buf) * que->num_desc;
+	if (!(rxr->buffers =
+	    (struct ixl_rx_buf *) malloc(bsize,
+	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
+		device_printf(dev, "Unable to allocate rx_buffer memory\n");
+		error = ENOMEM;
+		return (error);
+	}
+
+	if ((error = bus_dma_tag_create(NULL,	/* parent */
+				   1, 0,	/* alignment, bounds */
+				   BUS_SPACE_MAXADDR,	/* lowaddr */
+				   BUS_SPACE_MAXADDR,	/* highaddr */
+				   NULL, NULL,		/* filter, filterarg */
+				   MSIZE,		/* maxsize */
+				   1,			/* nsegments */
+				   MSIZE,		/* maxsegsize */
+				   0,			/* flags */
+				   NULL,		/* lockfunc */
+				   NULL,		/* lockfuncarg */
+				   &rxr->htag))) {
+		device_printf(dev, "Unable to create RX DMA htag\n");
+		return (error);
+	}
+
+	if ((error = bus_dma_tag_create(NULL,	/* parent */
+				   1, 0,	/* alignment, bounds */
+				   BUS_SPACE_MAXADDR,	/* lowaddr */
+				   BUS_SPACE_MAXADDR,	/* highaddr */
+				   NULL, NULL,		/* filter, filterarg */
+				   MJUM16BYTES,		/* maxsize */
+				   1,			/* nsegments */
+				   MJUM16BYTES,		/* maxsegsize */
+				   0,			/* flags */
+				   NULL,		/* lockfunc */
+				   NULL,		/* lockfuncarg */
+				   &rxr->ptag))) {
+		device_printf(dev, "Unable to create RX DMA ptag\n");
+		return (error);
+	}
+
+	for (i = 0; i < que->num_desc; i++) {
+		buf = &rxr->buffers[i];
+		error = bus_dmamap_create(rxr->htag,
+		    BUS_DMA_NOWAIT, &buf->hmap);
+		if (error) {
+			device_printf(dev, "Unable to create RX head map\n");
+			break;
+		}
+		error = bus_dmamap_create(rxr->ptag,
+		    BUS_DMA_NOWAIT, &buf->pmap);
+		if (error) {
+			device_printf(dev, "Unable to create RX pkt map\n");
+			break;
+		}
+	}
+
+	return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  (Re)Initialize the queue receive ring and its buffers.
+ *
+ **********************************************************************/
+int
+ixl_init_rx_ring(struct ixl_queue *que)
+{
+	struct	rx_ring 	*rxr = &que->rxr;
+	struct ixl_vsi		*vsi = que->vsi;
+#if defined(INET6) || defined(INET)
+	struct ifnet		*ifp = vsi->ifp;
+	struct lro_ctrl		*lro = &rxr->lro;
+#endif
+	struct ixl_rx_buf	*buf;
+	bus_dma_segment_t	pseg[1], hseg[1];
+	int			rsize, nsegs, error = 0;
+#ifdef DEV_NETMAP
+	struct netmap_adapter *na = NA(que->vsi->ifp);
+	struct netmap_slot *slot;
+#endif /* DEV_NETMAP */
+
+	IXL_RX_LOCK(rxr);
+#ifdef DEV_NETMAP
+	/* same as in ixl_init_tx_ring() */
+	slot = netmap_reset(na, NR_RX, que->me, 0);
+#endif /* DEV_NETMAP */
+	/* Clear the ring contents */
+	rsize = roundup2(que->num_desc *
+	    sizeof(union i40e_rx_desc), DBA_ALIGN);
+	bzero((void *)rxr->base, rsize);
+	/* Cleanup any existing buffers */
+	for (int i = 0; i < que->num_desc; i++) {
+		buf = &rxr->buffers[i];
+		if (buf->m_head != NULL) {
+			bus_dmamap_sync(rxr->htag, buf->hmap,
+			    BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(rxr->htag, buf->hmap);
+			buf->m_head->m_flags |= M_PKTHDR;
+			m_freem(buf->m_head);
+		}
+		if (buf->m_pack != NULL) {
+			bus_dmamap_sync(rxr->ptag, buf->pmap,
+			    BUS_DMASYNC_POSTREAD);
+			bus_dmamap_unload(rxr->ptag, buf->pmap);
+			buf->m_pack->m_flags |= M_PKTHDR;
+			m_freem(buf->m_pack);
+		}
+		buf->m_head = NULL;
+		buf->m_pack = NULL;
+	}
+
+	/* header split is off */
+	rxr->hdr_split = FALSE;
+
+	/* Now replenish the mbufs */
+	for (int j = 0; j != que->num_desc; ++j) {
+		struct mbuf	*mh, *mp;
+
+		buf = &rxr->buffers[j];
+#ifdef DEV_NETMAP
+		/*
+		 * In netmap mode, fill the map and set the buffer
+		 * address in the NIC ring, considering the offset
+		 * between the netmap and NIC rings (see comment in
+		 * ixgbe_setup_transmit_ring() ). No need to allocate
+		 * an mbuf, so end the block with a continue;
+		 */
+		if (slot) {
+			int sj = netmap_idx_n2k(&na->rx_rings[que->me], j);
+			uint64_t paddr;
+			void *addr;
+
+			addr = PNMB(na, slot + sj, &paddr);
+			netmap_load_map(na, rxr->dma.tag, buf->pmap, addr);
+			/* Update descriptor and the cached value */
+			rxr->base[j].read.pkt_addr = htole64(paddr);
+			rxr->base[j].read.hdr_addr = 0;
+			continue;
+		}
+#endif /* DEV_NETMAP */
+		/*
+		** Don't allocate mbufs if not
+		** doing header split, it's wasteful
+		*/
+		if (rxr->hdr_split == FALSE)
+			goto skip_head;
+
+		/* First the header */
+		buf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
+		if (buf->m_head == NULL) {
+			error = ENOBUFS;
+			goto fail;
+		}
+		m_adj(buf->m_head, ETHER_ALIGN);
+		mh = buf->m_head;
+		mh->m_len = mh->m_pkthdr.len = MHLEN;
+		mh->m_flags |= M_PKTHDR;
+		/* Get the memory mapping */
+		error = bus_dmamap_load_mbuf_sg(rxr->htag,
+		    buf->hmap, buf->m_head, hseg,
+		    &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0) /* Nothing elegant to do here */
+			goto fail;
+		bus_dmamap_sync(rxr->htag,
+		    buf->hmap, BUS_DMASYNC_PREREAD);
+		/* Update descriptor */
+		rxr->base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+		/* Now the payload cluster */
+		buf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+		    M_PKTHDR, rxr->mbuf_sz);
+		if (buf->m_pack == NULL) {
+			error = ENOBUFS;
+			goto fail;
+		}
+		mp = buf->m_pack;
+		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
+		/* Get the memory mapping */
+		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+		    buf->pmap, mp, pseg,
+		    &nsegs, BUS_DMA_NOWAIT);
+		if (error != 0)
+			goto fail;
+		bus_dmamap_sync(rxr->ptag,
+		    buf->pmap, BUS_DMASYNC_PREREAD);
+		/* Update descriptor */
+		rxr->base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+		rxr->base[j].read.hdr_addr = 0;
+	}
+
+
+	/* Setup our descriptor indices */
+	rxr->next_check = 0;
+	rxr->next_refresh = 0;
+	rxr->lro_enabled = FALSE;
+	rxr->split = 0;
+	rxr->bytes = 0;
+	rxr->discard = FALSE;
+
+	wr32(vsi->hw, rxr->tail, que->num_desc - 1);
+	ixl_flush(vsi->hw);
+
+#if defined(INET6) || defined(INET)
+	/*
+	** Now set up the LRO interface:
+	*/
+	if (ifp->if_capenable & IFCAP_LRO) {
+		int err = tcp_lro_init(lro);
+		if (err) {
+			if_printf(ifp, "queue %d: LRO Initialization failed!\n", que->me);
+			goto fail;
+		}
+		INIT_DBG_IF(ifp, "queue %d: RX Soft LRO Initialized", que->me);
+		rxr->lro_enabled = TRUE;
+		lro->ifp = vsi->ifp;
+	}
+#endif
+
+	bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+fail:
+	IXL_RX_UNLOCK(rxr);
+	return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  Free station receive ring data structures
+ *
+ **********************************************************************/
+void
+ixl_free_que_rx(struct ixl_queue *que)
+{
+	struct rx_ring		*rxr = &que->rxr;
+	struct ixl_rx_buf	*buf;
+
+	INIT_DBG_IF(que->vsi->ifp, "queue %d: begin", que->me);
+
+	/* Cleanup any existing buffers */
+	if (rxr->buffers != NULL) {
+		for (int i = 0; i < que->num_desc; i++) {
+			buf = &rxr->buffers[i];
+			if (buf->m_head != NULL) {
+				bus_dmamap_sync(rxr->htag, buf->hmap,
+				    BUS_DMASYNC_POSTREAD);
+				bus_dmamap_unload(rxr->htag, buf->hmap);
+				buf->m_head->m_flags |= M_PKTHDR;
+				m_freem(buf->m_head);
+			}
+			if (buf->m_pack != NULL) {
+				bus_dmamap_sync(rxr->ptag, buf->pmap,
+				    BUS_DMASYNC_POSTREAD);
+				bus_dmamap_unload(rxr->ptag, buf->pmap);
+				buf->m_pack->m_flags |= M_PKTHDR;
+				m_freem(buf->m_pack);
+			}
+			buf->m_head = NULL;
+			buf->m_pack = NULL;
+			if (buf->hmap != NULL) {
+				bus_dmamap_destroy(rxr->htag, buf->hmap);
+				buf->hmap = NULL;
+			}
+			if (buf->pmap != NULL) {
+				bus_dmamap_destroy(rxr->ptag, buf->pmap);
+				buf->pmap = NULL;
+			}
+		}
+		if (rxr->buffers != NULL) {
+			free(rxr->buffers, M_DEVBUF);
+			rxr->buffers = NULL;
+		}
+	}
+
+	if (rxr->htag != NULL) {
+		bus_dma_tag_destroy(rxr->htag);
+		rxr->htag = NULL;
+	}
+	if (rxr->ptag != NULL) {
+		bus_dma_tag_destroy(rxr->ptag);
+		rxr->ptag = NULL;
+	}
+
+	INIT_DBG_IF(que->vsi->ifp, "queue %d: end", que->me);
+	return;
+}
+
+static __inline void
+ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
+{
+
+#if defined(INET6) || defined(INET)
+        /*
+         * At the moment LRO is only done for IPv4/TCP packets whose TCP
+         * checksum has been computed by hardware, and which carry no
+         * VLAN tag in the Ethernet header.
+         */
+        if (rxr->lro_enabled &&
+            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+                /*
+                 * Fall through and send to the stack when:
+                 *  - LRO is not enabled, or
+                 *  - there are no LRO resources, or
+                 *  - the LRO enqueue fails
+                 */
+                if (rxr->lro.lro_cnt != 0)
+                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+                                return;
+        }
+#endif
+	IXL_RX_UNLOCK(rxr);
+        (*ifp->if_input)(ifp, m);
+	IXL_RX_LOCK(rxr);
+}
+
+
+static __inline void
+ixl_rx_discard(struct rx_ring *rxr, int i)
+{
+	struct ixl_rx_buf	*rbuf;
+
+	rbuf = &rxr->buffers[i];
+
+	if (rbuf->fmp != NULL) {	/* Partial chain? */
+		rbuf->fmp->m_flags |= M_PKTHDR;
+		m_freem(rbuf->fmp);
+		rbuf->fmp = NULL;
+	}
+
+	/*
+	** With advanced descriptors the writeback
+	** clobbers the buffer addrs, so it's easier
+	** to just free the existing mbufs and take
+	** the normal refresh path to get new buffers
+	** and mapping.
+	*/
+	if (rbuf->m_head) {
+		m_free(rbuf->m_head);
+		rbuf->m_head = NULL;
+	}
+ 
+	if (rbuf->m_pack) {
+		m_free(rbuf->m_pack);
+		rbuf->m_pack = NULL;
+	}
+
+	return;
+}
+
+#ifdef RSS
+/*
+** ixl_ptype_to_hash: parse the packet type
+** to determine the appropriate hash.
+*/
+static inline int
+ixl_ptype_to_hash(u8 ptype)
+{
+	struct i40e_rx_ptype_decoded	decoded;
+	u8				ex = 0;
+
+	decoded = decode_rx_desc_ptype(ptype);
+	ex = decoded.outer_frag;
+
+	if (!decoded.known)
+		return M_HASHTYPE_OPAQUE;
+
+	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_L2) 
+		return M_HASHTYPE_OPAQUE;
+
+	/* Note: anything that gets to this point is IP */
+	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) {
+		switch (decoded.inner_prot) {
+			case I40E_RX_PTYPE_INNER_PROT_TCP:
+				if (ex)
+					return M_HASHTYPE_RSS_TCP_IPV6_EX;
+				else
+					return M_HASHTYPE_RSS_TCP_IPV6;
+			case I40E_RX_PTYPE_INNER_PROT_UDP:
+				if (ex)
+					return M_HASHTYPE_RSS_UDP_IPV6_EX;
+				else
+					return M_HASHTYPE_RSS_UDP_IPV6;
+			default:
+				if (ex)
+					return M_HASHTYPE_RSS_IPV6_EX;
+				else
+					return M_HASHTYPE_RSS_IPV6;
+		}
+	}
+	if (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) {
+		switch (decoded.inner_prot) {
+			case I40E_RX_PTYPE_INNER_PROT_TCP:
+				return M_HASHTYPE_RSS_TCP_IPV4;
+			case I40E_RX_PTYPE_INNER_PROT_UDP:
+				if (ex)
+					return M_HASHTYPE_RSS_UDP_IPV4_EX;
+				else
+					return M_HASHTYPE_RSS_UDP_IPV4;
+			default:
+				return M_HASHTYPE_RSS_IPV4;
+		}
+	}
+	/* We should never get here!! */
+	return M_HASHTYPE_OPAQUE;
+}
+#endif /* RSS */
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor ring and passes data that has been
+ *  DMA'd into host memory up to the upper layers.
+ *
+ *  We loop at most count times if count is > 0, or until done if
+ *  count < 0.
+ *
+ *  Return TRUE for more work, FALSE for all clean (as written, this
+ *  implementation always returns FALSE once its pass completes).
+ *********************************************************************/
+bool
+ixl_rxeof(struct ixl_queue *que, int count)
+{
+	struct ixl_vsi		*vsi = que->vsi;
+	struct rx_ring		*rxr = &que->rxr;
+	struct ifnet		*ifp = vsi->ifp;
+#if defined(INET6) || defined(INET)
+	struct lro_ctrl		*lro = &rxr->lro;
+	struct lro_entry	*queued;
+#endif
+	int			i, nextp, processed = 0;
+	union i40e_rx_desc	*cur;
+	struct ixl_rx_buf	*rbuf, *nbuf;
+
+
+	IXL_RX_LOCK(rxr);
+
+#ifdef DEV_NETMAP
+	if (netmap_rx_irq(ifp, que->me, &count)) {
+		IXL_RX_UNLOCK(rxr);
+		return (FALSE);
+	}
+#endif /* DEV_NETMAP */
+
+	for (i = rxr->next_check; count != 0;) {
+		struct mbuf	*sendmp, *mh, *mp;
+		u32		rsc, status, error;
+		u16		hlen, plen, vtag;
+		u64		qword;
+		u8		ptype;
+		bool		eop;
+ 
+		/* Sync the ring. */
+		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+		cur = &rxr->base[i];
+		qword = le64toh(cur->wb.qword1.status_error_len);
+		status = (qword & I40E_RXD_QW1_STATUS_MASK)
+		    >> I40E_RXD_QW1_STATUS_SHIFT;
+		error = (qword & I40E_RXD_QW1_ERROR_MASK)
+		    >> I40E_RXD_QW1_ERROR_SHIFT;
+		plen = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK)
+		    >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+		hlen = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK)
+		    >> I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
+		ptype = (qword & I40E_RXD_QW1_PTYPE_MASK)
+		    >> I40E_RXD_QW1_PTYPE_SHIFT;
+
+		if ((status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) == 0) {
+			++rxr->not_done;
+			break;
+		}
+		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+			break;
+
+		count--;
+		sendmp = NULL;
+		nbuf = NULL;
+		rsc = 0;
+		cur->wb.qword1.status_error_len = 0;
+		rbuf = &rxr->buffers[i];
+		mh = rbuf->m_head;
+		mp = rbuf->m_pack;
+		eop = (status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT));
+		if (status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))
+			vtag = le16toh(cur->wb.qword0.lo_dword.l2tag1);
+		else
+			vtag = 0;
+
+		/*
+		** Make sure bad packets are discarded,
+		** note that only EOP descriptor has valid
+		** error results.
+		*/
+                if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+			rxr->discarded++;
+			ixl_rx_discard(rxr, i);
+			goto next_desc;
+		}
+
+		/* Prefetch the next buffer */
+		if (!eop) {
+			nextp = i + 1;
+			if (nextp == que->num_desc)
+				nextp = 0;
+			nbuf = &rxr->buffers[nextp];
+			prefetch(nbuf);
+		}
+
+		/*
+		** The header mbuf is ONLY used when header 
+		** split is enabled, otherwise we get normal 
+		** behavior, ie, both header and payload
+		** are DMA'd into the payload buffer.
+		**
+		** Rather than using the fmp/lmp global pointers
+		** we now keep the head of a packet chain in the
+		** buffer struct and pass this along from one
+		** descriptor to the next, until we get EOP.
+		*/
+		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
+			if (hlen > IXL_RX_HDR)
+				hlen = IXL_RX_HDR;
+			mh->m_len = hlen;
+			mh->m_flags |= M_PKTHDR;
+			mh->m_next = NULL;
+			mh->m_pkthdr.len = mh->m_len;
+			/* Null buf pointer so it is refreshed */
+			rbuf->m_head = NULL;
+			/*
+			** Check the payload length, this
+			** could be zero if its a small
+			** packet.
+			*/
+			if (plen > 0) {
+				mp->m_len = plen;
+				mp->m_next = NULL;
+				mp->m_flags &= ~M_PKTHDR;
+				mh->m_next = mp;
+				mh->m_pkthdr.len += mp->m_len;
+				/* Null buf pointer so it is refreshed */
+				rbuf->m_pack = NULL;
+				rxr->split++;
+			}
+			/*
+			** Now create the forward
+			** chain so when complete 
+			** we wont have to.
+			*/
+                        if (eop == 0) {
+				/* stash the chain head */
+                                nbuf->fmp = mh;
+				/* Make forward chain */
+                                if (plen)
+                                        mp->m_next = nbuf->m_pack;
+                                else
+                                        mh->m_next = nbuf->m_pack;
+                        } else {
+				/* Singlet, prepare to send */
+                                sendmp = mh;
+                                if (vtag) {
+                                        sendmp->m_pkthdr.ether_vtag = vtag;
+                                        sendmp->m_flags |= M_VLANTAG;
+                                }
+                        }
+		} else {
+			/*
+			** Either no header split, or a
+			** secondary piece of a fragmented
+			** split packet.
+			*/
+			mp->m_len = plen;
+			/*
+			** See if there is a stored head
+			** that determines what we are
+			*/
+			sendmp = rbuf->fmp;
+			rbuf->m_pack = rbuf->fmp = NULL;
+
+			if (sendmp != NULL) /* secondary frag */
+				sendmp->m_pkthdr.len += mp->m_len;
+			else {
+				/* first desc of a non-ps chain */
+				sendmp = mp;
+				sendmp->m_flags |= M_PKTHDR;
+				sendmp->m_pkthdr.len = mp->m_len;
+				if (vtag) {
+					sendmp->m_pkthdr.ether_vtag = vtag;
+					sendmp->m_flags |= M_VLANTAG;
+				}
+                        }
+			/* Pass the head pointer on */
+			if (eop == 0) {
+				nbuf->fmp = sendmp;
+				sendmp = NULL;
+				mp->m_next = nbuf->m_pack;
+			}
+		}
+		++processed;
+		/* Sending this frame? */
+		if (eop) {
+			sendmp->m_pkthdr.rcvif = ifp;
+			/* gather stats */
+			rxr->rx_packets++;
+			rxr->rx_bytes += sendmp->m_pkthdr.len;
+			/* capture data for dynamic ITR adjustment */
+			rxr->packets++;
+			rxr->bytes += sendmp->m_pkthdr.len;
+			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+				ixl_rx_checksum(sendmp, status, error, ptype);
+#ifdef RSS
+			sendmp->m_pkthdr.flowid =
+			    le32toh(cur->wb.qword0.hi_dword.rss);
+			M_HASHTYPE_SET(sendmp, ixl_ptype_to_hash(ptype));
+#else
+			sendmp->m_pkthdr.flowid = que->msix;
+			M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
+#endif
+		}
+next_desc:
+		bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+		/* Advance our pointers to the next descriptor. */
+		if (++i == que->num_desc)
+			i = 0;
+
+		/* Now send to the stack or do LRO */
+		if (sendmp != NULL) {
+			rxr->next_check = i;
+			ixl_rx_input(rxr, ifp, sendmp, ptype);
+			i = rxr->next_check;
+		}
+
+		/* Every 8 descriptors we refresh the mbufs */
+		if (processed == 8) {
+			ixl_refresh_mbufs(que, i);
+			processed = 0;
+		}
+	}
+
+	/* Refresh any remaining buf structs */
+	if (ixl_rx_unrefreshed(que))
+		ixl_refresh_mbufs(que, i);
+
+	rxr->next_check = i;
+
+#if defined(INET6) || defined(INET)
+	/*
+	 * Flush any outstanding LRO work
+	 */
+	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+		SLIST_REMOVE_HEAD(&lro->lro_active, next);
+		tcp_lro_flush(lro, queued);
+	}
+#endif
+
+	IXL_RX_UNLOCK(rxr);
+	return (FALSE);
+}
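+
+/*
+** Sketch of the qword1 decode pattern used in ixl_rxeof() above:
+** mask the field in place, then shift it down to bit 0.  The
+** mask/shift values here are illustrative placeholders, not the
+** real I40E_RXD_QW1_* definitions from i40e_type.h.
+*/
+#define EXAMPLE_QW1_PLEN_SHIFT	38
+#define EXAMPLE_QW1_PLEN_MASK	(0x3FFFULL << EXAMPLE_QW1_PLEN_SHIFT)
+
+static inline u16
+ixl_example_desc_plen(u64 qword)
+{
+	/* Isolate the payload-length field from the descriptor word */
+	return ((qword & EXAMPLE_QW1_PLEN_MASK) >> EXAMPLE_QW1_PLEN_SHIFT);
+}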
+
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of the checksum so that it
+ *  doesn't spend time verifying the checksum again.
+ *
+ *********************************************************************/
+static void
+ixl_rx_checksum(struct mbuf * mp, u32 status, u32 error, u8 ptype)
+{
+	struct i40e_rx_ptype_decoded decoded;
+
+	decoded = decode_rx_desc_ptype(ptype);
+
+	/* Errors? */
+ 	if (error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+	    (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) {
+		mp->m_pkthdr.csum_flags = 0;
+		return;
+	}
+
+	/* IPv6 packets with extension headers likely have a bad csum */
+	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+	    decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+		if (status &
+		    (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) {
+			mp->m_pkthdr.csum_flags = 0;
+			return;
+		}
+
+	/* IP Checksum Good */
+	mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+	mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+
+	if (status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)) {
+		mp->m_pkthdr.csum_flags |= 
+		    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+		mp->m_pkthdr.csum_data |= htons(0xffff);
+	}
+	return;
+}
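+
+/*
+** Illustrative consumer check (this mirrors the LRO gate in
+** ixl_rx_input() above): a packet marked here can skip software
+** verification when
+**
+**	(m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+**	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)
+**
+** with m->m_pkthdr.csum_data holding 0xffff as the pseudo checksum.
+*/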
+
+#if __FreeBSD_version >= 1100000
+uint64_t
+ixl_get_counter(if_t ifp, ift_counter cnt)
+{
+	struct ixl_vsi *vsi;
+
+	vsi = if_getsoftc(ifp);
+
+	switch (cnt) {
+	case IFCOUNTER_IPACKETS:
+		return (vsi->ipackets);
+	case IFCOUNTER_IERRORS:
+		return (vsi->ierrors);
+	case IFCOUNTER_OPACKETS:
+		return (vsi->opackets);
+	case IFCOUNTER_OERRORS:
+		return (vsi->oerrors);
+	case IFCOUNTER_COLLISIONS:
+		/* Collisions are by standard impossible in 40G/10G Ethernet */
+		return (0);
+	case IFCOUNTER_IBYTES:
+		return (vsi->ibytes);
+	case IFCOUNTER_OBYTES:
+		return (vsi->obytes);
+	case IFCOUNTER_IMCASTS:
+		return (vsi->imcasts);
+	case IFCOUNTER_OMCASTS:
+		return (vsi->omcasts);
+	case IFCOUNTER_IQDROPS:
+		return (vsi->iqdrops);
+	case IFCOUNTER_OQDROPS:
+		return (vsi->oqdrops);
+	case IFCOUNTER_NOPROTO:
+		return (vsi->noproto);
+	default:
+		return (if_get_counter_default(ifp, cnt));
+	}
+}
+#endif
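+
+/*
+** Illustrative usage (not part of this commit): on FreeBSD 11+ a
+** driver registers the handler above at attach time so the stack
+** pulls counters on demand, along the lines of
+**
+**	if_setgetcounterfn(ifp, ixl_get_counter);
+**
+** using the stock if_var.h accessor.
+*/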
+


Property changes on: trunk/sys/dev/ixl/ixl_txrx.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/ixlv.h
===================================================================
--- trunk/sys/dev/ixl/ixlv.h	                        (rev 0)
+++ trunk/sys/dev/ixl/ixlv.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,214 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/ixlv.h 292100 2015-12-11 13:08:38Z smh $*/
+
+
+#ifndef _IXLV_H_
+#define _IXLV_H_
+
+#include "ixlv_vc_mgr.h"
+
+#define IXLV_AQ_MAX_ERR		1000
+#define IXLV_MAX_FILTERS	128
+#define IXLV_MAX_QUEUES		16
+#define IXLV_AQ_TIMEOUT		(1 * hz)
+#define IXLV_CALLOUT_TIMO	(hz / 50)	/* 20 msec */
+
+#define IXLV_FLAG_AQ_ENABLE_QUEUES            (u32)(1)
+#define IXLV_FLAG_AQ_DISABLE_QUEUES           (u32)(1 << 1)
+#define IXLV_FLAG_AQ_ADD_MAC_FILTER           (u32)(1 << 2)
+#define IXLV_FLAG_AQ_ADD_VLAN_FILTER          (u32)(1 << 3)
+#define IXLV_FLAG_AQ_DEL_MAC_FILTER           (u32)(1 << 4)
+#define IXLV_FLAG_AQ_DEL_VLAN_FILTER          (u32)(1 << 5)
+#define IXLV_FLAG_AQ_CONFIGURE_QUEUES         (u32)(1 << 6)
+#define IXLV_FLAG_AQ_MAP_VECTORS              (u32)(1 << 7)
+#define IXLV_FLAG_AQ_HANDLE_RESET             (u32)(1 << 8)
+#define IXLV_FLAG_AQ_CONFIGURE_PROMISC        (u32)(1 << 9)
+#define IXLV_FLAG_AQ_GET_STATS                (u32)(1 << 10)
+
+/* printf %b arg */
+#define IXLV_FLAGS \
+    "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \
+    "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
+    "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
+    "\12CONFIGURE_PROMISC\13GET_STATS"
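+
+/*
+** Example (illustrative): the kernel printf %b format prints the raw
+** value (here in hex, per the leading \20) followed by the names of
+** the set bits, so a flags word of
+** (IXLV_FLAG_AQ_ENABLE_QUEUES | IXLV_FLAG_AQ_GET_STATS) renders as
+** "401<ENABLE_QUEUES,GET_STATS>":
+**
+**	device_printf(dev, "aq flags: %b\n", flags, IXLV_FLAGS);
+*/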
+
+/* Hack for compatibility with the 1.0.x Linux PF driver */
+#define I40E_VIRTCHNL_OP_EVENT 17
+
+/* Driver state */
+enum ixlv_state_t {
+	IXLV_START,
+	IXLV_FAILED,
+	IXLV_RESET_REQUIRED,
+	IXLV_RESET_PENDING,
+	IXLV_VERSION_CHECK,
+	IXLV_GET_RESOURCES,
+	IXLV_INIT_READY,
+	IXLV_INIT_START,
+	IXLV_INIT_CONFIG,
+	IXLV_INIT_MAPPING,
+	IXLV_INIT_ENABLE,
+	IXLV_INIT_COMPLETE,
+	IXLV_RUNNING,	
+};
+
+struct ixlv_mac_filter {
+	SLIST_ENTRY(ixlv_mac_filter)  next;
+	u8      macaddr[ETHER_ADDR_LEN];
+	u16     flags;
+};
+SLIST_HEAD(mac_list, ixlv_mac_filter);
+
+struct ixlv_vlan_filter {
+	SLIST_ENTRY(ixlv_vlan_filter)  next;
+	u16     vlan;
+	u16     flags;
+};
+SLIST_HEAD(vlan_list, ixlv_vlan_filter);
+
+/* Software controller structure */
+struct ixlv_sc {
+	struct i40e_hw		hw;
+	struct i40e_osdep	osdep;
+	struct device		*dev;
+
+	struct resource		*pci_mem;
+	struct resource		*msix_mem;
+
+	enum ixlv_state_t	init_state;
+
+	/*
+	 * Interrupt resources
+	 */
+	void			*tag;
+	struct resource 	*res; /* For the AQ */
+
+	struct ifmedia		media;
+	struct callout		timer;
+	int			msix;
+	int			pf_version;
+	int			if_flags;
+
+	bool			link_up;
+	u32			link_speed;
+
+	struct mtx		mtx;
+
+	u32			qbase;
+	u32 			admvec;
+	struct timeout_task	timeout;
+	struct task     	aq_irq;
+	struct task     	aq_sched;
+	struct taskqueue	*tq;
+
+	struct ixl_vsi		vsi;
+
+	/* Filter lists */
+	struct mac_list		*mac_filters;
+	struct vlan_list	*vlan_filters;
+
+	/* Promiscuous mode */
+	u32			promiscuous_flags;
+
+	/* Admin queue task flags */
+	u32			aq_wait_count;
+
+	struct ixl_vc_mgr	vc_mgr;
+	struct ixl_vc_cmd	add_mac_cmd;
+	struct ixl_vc_cmd	del_mac_cmd;
+	struct ixl_vc_cmd	config_queues_cmd;
+	struct ixl_vc_cmd	map_vectors_cmd;
+	struct ixl_vc_cmd	enable_queues_cmd;
+	struct ixl_vc_cmd	add_vlan_cmd;
+	struct ixl_vc_cmd	del_vlan_cmd;
+	struct ixl_vc_cmd	add_multi_cmd;
+	struct ixl_vc_cmd	del_multi_cmd;
+
+	/* Virtual comm channel */
+	struct i40e_virtchnl_vf_resource *vf_res;
+	struct i40e_virtchnl_vsi_resource *vsi_res;
+
+	/* Misc stats maintained by the driver */
+	u64			watchdog_events;
+	u64			admin_irq;
+
+	u8			aq_buffer[IXL_AQ_BUF_SZ];
+};
+
+#define IXLV_CORE_LOCK_ASSERT(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
+/*
+** This checks for an all-zero MAC address, which is likely
+** unless the host administrator has assigned one.
+*/
+static inline bool
+ixlv_check_ether_addr(u8 *addr)
+{
+	bool status = TRUE;
+
+	if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 &&
+	    addr[3] == 0 && addr[4] == 0 && addr[5] == 0))
+		status = FALSE;
+	return (status);
+}
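+
+/*
+** Illustrative use (the fallback below is hypothetical, not from
+** this commit): reject an all-zero address before programming it
+** and fall back to a locally generated one, e.g.
+**
+**	if (!ixlv_check_ether_addr(mac))
+**		arc4rand(mac, ETHER_ADDR_LEN, 0);
+*/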
+
+/*
+** VF Common function prototypes
+*/
+int	ixlv_send_api_ver(struct ixlv_sc *);
+int	ixlv_verify_api_ver(struct ixlv_sc *);
+int	ixlv_send_vf_config_msg(struct ixlv_sc *);
+int	ixlv_get_vf_config(struct ixlv_sc *);
+void	ixlv_init(void *);
+int	ixlv_reinit_locked(struct ixlv_sc *);
+void	ixlv_configure_queues(struct ixlv_sc *);
+void	ixlv_enable_queues(struct ixlv_sc *);
+void	ixlv_disable_queues(struct ixlv_sc *);
+void	ixlv_map_queues(struct ixlv_sc *);
+void	ixlv_enable_intr(struct ixl_vsi *);
+void	ixlv_disable_intr(struct ixl_vsi *);
+void	ixlv_add_ether_filters(struct ixlv_sc *);
+void	ixlv_del_ether_filters(struct ixlv_sc *);
+void	ixlv_request_stats(struct ixlv_sc *);
+void	ixlv_request_reset(struct ixlv_sc *);
+void	ixlv_vc_completion(struct ixlv_sc *,
+	enum i40e_virtchnl_ops, i40e_status, u8 *, u16);
+void	ixlv_add_ether_filter(struct ixlv_sc *);
+void	ixlv_add_vlans(struct ixlv_sc *);
+void	ixlv_del_vlans(struct ixlv_sc *);
+void	ixlv_update_stats_counters(struct ixlv_sc *,
+		    struct i40e_eth_stats *);
+void	ixlv_update_link_status(struct ixlv_sc *);
+
+#endif /* _IXLV_H_ */


Property changes on: trunk/sys/dev/ixl/ixlv.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/ixlv_vc_mgr.h
===================================================================
--- trunk/sys/dev/ixl/ixlv_vc_mgr.h	                        (rev 0)
+++ trunk/sys/dev/ixl/ixlv_vc_mgr.h	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,77 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2014, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/ixlv_vc_mgr.h 274360 2014-11-10 23:56:06Z jfv $*/
+
+#ifndef _IXLV_VC_MGR_H_
+#define _IXLV_VC_MGR_H_
+
+#include <sys/queue.h>
+
+struct ixl_vc_cmd;
+
+typedef void ixl_vc_callback_t(struct ixl_vc_cmd *, void *,
+	enum i40e_status_code);
+
+
+#define	IXLV_VC_CMD_FLAG_BUSY		0x0001
+
+struct ixl_vc_cmd
+{
+	uint32_t request;
+	uint32_t flags;
+
+	ixl_vc_callback_t *callback;
+	void *arg;
+
+	TAILQ_ENTRY(ixl_vc_cmd) next;
+};
+
+struct ixl_vc_mgr
+{
+	struct ixlv_sc *sc;
+	struct ixl_vc_cmd *current;
+	struct callout callout;	
+
+	TAILQ_HEAD(, ixl_vc_cmd) pending;
+};
+
+#define	IXLV_VC_TIMEOUT			(2 * hz)
+
+void	ixl_vc_init_mgr(struct ixlv_sc *, struct ixl_vc_mgr *);
+void	ixl_vc_enqueue(struct ixl_vc_mgr *, struct ixl_vc_cmd *,
+	    uint32_t, ixl_vc_callback_t *, void *);
+void	ixl_vc_flush(struct ixl_vc_mgr *mgr);
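+
+/*
+** Usage sketch (the callback name below is hypothetical): a driver
+** hands a command to the manager, which serializes it behind any
+** pending AQ requests and retries or times it out as needed:
+**
+**	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
+**	    IXLV_FLAG_AQ_ENABLE_QUEUES, example_cmd_done, sc);
+*/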
+
+#endif
+


Property changes on: trunk/sys/dev/ixl/ixlv_vc_mgr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/ixl/ixlvc.c
===================================================================
--- trunk/sys/dev/ixl/ixlvc.c	                        (rev 0)
+++ trunk/sys/dev/ixl/ixlvc.c	2018-05-27 23:00:05 UTC (rev 10060)
@@ -0,0 +1,1134 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2013-2015, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: stable/10/sys/dev/ixl/ixlvc.c 292100 2015-12-11 13:08:38Z smh $*/
+
+/*
+**	Virtual Channel support
+**		These are support functions for communication
+**		between the VF and PF drivers.
+*/
+
+#include "ixl.h"
+#include "ixlv.h"
+#include "i40e_prototype.h"
+
+
+/* busy wait delay in msec */
+#define IXLV_BUSY_WAIT_DELAY 10
+#define IXLV_BUSY_WAIT_COUNT 50
+
+static void	ixl_vc_process_resp(struct ixl_vc_mgr *, uint32_t,
+		    enum i40e_status_code);
+static void	ixl_vc_process_next(struct ixl_vc_mgr *mgr);
+static void	ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr);
+static void	ixl_vc_send_current(struct ixl_vc_mgr *mgr);
+
+#ifdef IXL_DEBUG
+/*
+** Validate VF messages
+*/
+static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
+    u8 *msg, u16 msglen)
+{
+	bool err_msg_format = false;
+	int valid_len;
+
+	/* Validate message length. */
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_VERSION:
+		valid_len = sizeof(struct i40e_virtchnl_version_info);
+		break;
+	case I40E_VIRTCHNL_OP_RESET_VF:
+	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+		valid_len = 0;
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+		valid_len = sizeof(struct i40e_virtchnl_txq_info);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+		valid_len = sizeof(struct i40e_virtchnl_rxq_info);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		valid_len = sizeof(struct i40e_virtchnl_vsi_queue_config_info);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_vsi_queue_config_info *vqc =
+			    (struct i40e_virtchnl_vsi_queue_config_info *)msg;
+			valid_len += (vqc->num_queue_pairs *
+				      sizeof(struct
+					     i40e_virtchnl_queue_pair_info));
+			if (vqc->num_queue_pairs == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		valid_len = sizeof(struct i40e_virtchnl_irq_map_info);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_irq_map_info *vimi =
+			    (struct i40e_virtchnl_irq_map_info *)msg;
+			valid_len += (vimi->num_vectors *
+				      sizeof(struct i40e_virtchnl_vector_map));
+			if (vimi->num_vectors == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		valid_len = sizeof(struct i40e_virtchnl_queue_select);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		valid_len = sizeof(struct i40e_virtchnl_ether_addr_list);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_ether_addr_list *veal =
+			    (struct i40e_virtchnl_ether_addr_list *)msg;
+			valid_len += veal->num_elements *
+			    sizeof(struct i40e_virtchnl_ether_addr);
+			if (veal->num_elements == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		valid_len = sizeof(struct i40e_virtchnl_vlan_filter_list);
+		if (msglen >= valid_len) {
+			struct i40e_virtchnl_vlan_filter_list *vfl =
+			    (struct i40e_virtchnl_vlan_filter_list *)msg;
+			valid_len += vfl->num_elements * sizeof(u16);
+			if (vfl->num_elements == 0)
+				err_msg_format = true;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		valid_len = sizeof(struct i40e_virtchnl_promisc_info);
+		break;
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		valid_len = sizeof(struct i40e_virtchnl_queue_select);
+		break;
+	/* These are always errors coming from the VF. */
+	case I40E_VIRTCHNL_OP_EVENT:
+	case I40E_VIRTCHNL_OP_UNKNOWN:
+	default:
+		return EPERM;
+	}
+	/* few more checks */
+	if ((valid_len != msglen) || (err_msg_format))
+		return EINVAL;
+	else
+		return 0;
+}
+#endif
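+
+/*
+** Worked example (illustrative): for I40E_VIRTCHNL_OP_ADD_VLAN
+** carrying three VLAN IDs, the expected length is
+**
+**	sizeof(struct i40e_virtchnl_vlan_filter_list) + 3 * sizeof(u16)
+**
+** and anything that doesn't match exactly is rejected with EINVAL.
+*/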
+
+/*
+** ixlv_send_pf_msg
+**
+** Send message to PF and print status if failure.
+*/
+static int
+ixlv_send_pf_msg(struct ixlv_sc *sc,
+	enum i40e_virtchnl_ops op, u8 *msg, u16 len)
+{
+	struct i40e_hw	*hw = &sc->hw;
+	device_t	dev = sc->dev;
+	i40e_status	err;
+
+#ifdef IXL_DEBUG
+	/*
+	** Pre-validating messages to the PF
+	*/
+	int val_err;
+	val_err = ixl_vc_validate_vf_msg(sc, op, msg, len);
+	if (val_err)
+		device_printf(dev, "Error validating msg to PF for op %d,"
+		    " msglen %d: error %d\n", op, len, val_err);
+#endif
+
+	err = i40e_aq_send_msg_to_pf(hw, op, I40E_SUCCESS, msg, len, NULL);
+	if (err)
+		device_printf(dev, "Unable to send opcode %d to PF, "
+		    "error %d, aq status %d\n", op, err, hw->aq.asq_last_status);
+	return err;
+}
+
+
+/*
+** ixlv_send_api_ver
+**
+** Send API version admin queue message to the PF. The reply is not checked
+** in this function. Returns 0 if the message was successfully
+** sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+*/
+int
+ixlv_send_api_ver(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_version_info vvi;
+
+	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
+	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;
+
+	return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_VERSION,
+	    (u8 *)&vvi, sizeof(vvi));
+}
+
+/*
+** ixlv_verify_api_ver
+**
+** Compare API versions with the PF. Must be called after admin queue is
+** initialized. Returns 0 if API versions match, EIO if
+** they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+*/
+int
+ixlv_verify_api_ver(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_version_info *pf_vvi;
+	struct i40e_hw *hw = &sc->hw;
+	struct i40e_arq_event_info event;
+	i40e_status err;
+	int retries = 0;
+
+	event.buf_len = IXL_AQ_BUFSZ;
+	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
+	if (!event.msg_buf) {
+		err = ENOMEM;
+		goto out;
+	}
+
+	do {
+		if (++retries > IXLV_AQ_MAX_ERR)
+			goto out_alloc;
+
+		/* NOTE: initial delay is necessary */
+		i40e_msec_delay(100);
+		err = i40e_clean_arq_element(hw, &event, NULL);
+	} while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
+	if (err)
+		goto out_alloc;
+
+	err = (i40e_status)le32toh(event.desc.cookie_low);
+	if (err) {
+		err = EIO;
+		goto out_alloc;
+	}
+
+	if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
+	    I40E_VIRTCHNL_OP_VERSION) {
+		DDPRINTF(sc->dev, "Received unexpected op response: %d\n",
+		    le32toh(event.desc.cookie_high));
+		err = EIO;
+		goto out_alloc;
+	}
+
+	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
+	if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+	    ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+	    (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
+		err = EIO;
+	else
+		sc->pf_version = pf_vvi->minor;
+
+out_alloc:
+	free(event.msg_buf, M_DEVBUF);
+out:
+	return err;
+}
+
+/*
+** ixlv_send_vf_config_msg
+**
+** Send VF configuration request admin queue message to the PF. The reply
+** is not checked in this function. Returns 0 if the message was
+** successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
+*/
+int
+ixlv_send_vf_config_msg(struct ixlv_sc *sc)
+{
+	u32	caps;
+
+	caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+	    I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+	    I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+	if (sc->pf_version)
+		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+				  (u8 *)&caps, sizeof(caps));
+	else
+		return ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+				  NULL, 0);
+}
+
+/*
+** ixlv_get_vf_config
+**
+** Get VF configuration from PF and populate hw structure. Must be called after
+** admin queue is initialized. Busy waits until response is received from PF,
+** with maximum timeout. Response from PF is returned in the buffer for further
+** processing by the caller.
+*/
+int
+ixlv_get_vf_config(struct ixlv_sc *sc)
+{
+	struct i40e_hw	*hw = &sc->hw;
+	device_t	dev = sc->dev;
+	struct i40e_arq_event_info event;
+	u16 len;
+	i40e_status err = 0;
+	u32 retries = 0;
+
+	/* Note this assumes a single VSI */
+	len = sizeof(struct i40e_virtchnl_vf_resource) +
+	    sizeof(struct i40e_virtchnl_vsi_resource);
+	event.buf_len = len;
+	event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
+	if (!event.msg_buf) {
+		err = ENOMEM;
+		goto out;
+	}
+
+	for (;;) {
+		err = i40e_clean_arq_element(hw, &event, NULL);
+		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+			if (++retries <= IXLV_AQ_MAX_ERR)
+				i40e_msec_delay(10);
+		} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
+		    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
+			DDPRINTF(dev, "Received a response from PF,"
+			    " opcode %d, error %d",
+			    le32toh(event.desc.cookie_high),
+			    le32toh(event.desc.cookie_low));
+			retries++;
+			continue;
+		} else {
+			err = (i40e_status)le32toh(event.desc.cookie_low);
+			if (err) {
+				device_printf(dev, "%s: Error returned from PF,"
+				    " opcode %d, error %d\n", __func__,
+				    le32toh(event.desc.cookie_high),
+				    le32toh(event.desc.cookie_low));
+				err = EIO;
+				goto out_alloc;
+			}
+			/* We retrieved the config message, with no errors */
+			break;
+		}
+
+		if (retries > IXLV_AQ_MAX_ERR) {
+			INIT_DBG_DEV(dev, "Did not receive response after %d tries.",
+			    retries);
+			err = ETIMEDOUT;
+			goto out_alloc;
+		}
+	}
+
+	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
+	i40e_vf_parse_hw_config(hw, sc->vf_res);
+
+out_alloc:
+	free(event.msg_buf, M_DEVBUF);
+out:
+	return err;
+}
+
+/*
+** ixlv_configure_queues
+**
+** Request that the PF set up our queues.
+*/
+void
+ixlv_configure_queues(struct ixlv_sc *sc)
+{
+	device_t		dev = sc->dev;
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ixl_queue	*que = vsi->queues;
+	struct tx_ring		*txr;
+	struct rx_ring		*rxr;
+	int			len, pairs;
+
+	struct i40e_virtchnl_vsi_queue_config_info *vqci;
+	struct i40e_virtchnl_queue_pair_info *vqpi;
+
+	pairs = vsi->num_queues;
+	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
+		       (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
+	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!vqci) {
+		device_printf(dev, "%s: unable to allocate memory\n", __func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+	vqci->vsi_id = sc->vsi_res->vsi_id;
+	vqci->num_queue_pairs = pairs;
+	vqpi = vqci->qpair;
+	/* Size check is not needed here - HW max is 16 queue pairs, and we
+	 * can fit info for 31 of them into the AQ buffer before it overflows.
+	 */
+	for (int i = 0; i < pairs; i++, que++, vqpi++) {
+		txr = &que->txr;
+		rxr = &que->rxr;
+		vqpi->txq.vsi_id = vqci->vsi_id;
+		vqpi->txq.queue_id = i;
+		vqpi->txq.ring_len = que->num_desc;
+		vqpi->txq.dma_ring_addr = txr->dma.pa;
+		/* Enable Head writeback */
+		vqpi->txq.headwb_enabled = 1;
+		vqpi->txq.dma_headwb_addr = txr->dma.pa +
+		    (que->num_desc * sizeof(struct i40e_tx_desc));
+
+		vqpi->rxq.vsi_id = vqci->vsi_id;
+		vqpi->rxq.queue_id = i;
+		vqpi->rxq.ring_len = que->num_desc;
+		vqpi->rxq.dma_ring_addr = rxr->dma.pa;
+		vqpi->rxq.max_pkt_size = vsi->max_frame_size;
+		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
+		vqpi->rxq.splithdr_enabled = 0;
+	}
+
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+			   (u8 *)vqci, len);
+	free(vqci, M_DEVBUF);
+}
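+
+/*
+** Worked example (illustrative figures, assuming a 16-byte
+** i40e_tx_desc): with num_desc = 1024 the head-writeback address
+** programmed above is dma.pa + 16 KB, i.e. the first bytes past the
+** last descriptor, so the TX ring allocation is expected to reserve
+** that extra space when head writeback is enabled.
+*/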
+
+/*
+** ixlv_enable_queues
+**
+** Request that the PF enable all of our queues.
+*/
+void
+ixlv_enable_queues(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_queue_select vqs;
+
+	vqs.vsi_id = sc->vsi_res->vsi_id;
+	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
+	vqs.rx_queues = vqs.tx_queues;
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
+}
+
+/*
+** ixlv_disable_queues
+**
+** Request that the PF disable all of our queues.
+*/
+void
+ixlv_disable_queues(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_queue_select vqs;
+
+	vqs.vsi_id = sc->vsi_res->vsi_id;
+	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
+	vqs.rx_queues = vqs.tx_queues;
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+			   (u8 *)&vqs, sizeof(vqs));
+}
+
+/*
+** ixlv_map_queues
+**
+** Request that the PF map queues to interrupt vectors. Misc causes, including
+** admin queue, are always mapped to vector 0.
+*/
+void
+ixlv_map_queues(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_irq_map_info *vm;
+	int 			i, q, len;
+	struct ixl_vsi		*vsi = &sc->vsi;
+	struct ixl_queue	*que = vsi->queues;
+
+	/* How many queue vectors; the adminq uses one */
+	q = sc->msix - 1;
+
+	len = sizeof(struct i40e_virtchnl_irq_map_info) +
+	      (sc->msix * sizeof(struct i40e_virtchnl_vector_map));
+	vm = malloc(len, M_DEVBUF, M_NOWAIT);
+	if (!vm) {
+		printf("%s: unable to allocate memory\n", __func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+
+	vm->num_vectors = sc->msix;
+	/* Queue vectors first */
+	for (i = 0; i < q; i++, que++) {
+		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
+		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
+		vm->vecmap[i].txq_map = (1 << que->me);
+		vm->vecmap[i].rxq_map = (1 << que->me);
+		vm->vecmap[i].rxitr_idx = 0;
+		vm->vecmap[i].txitr_idx = 0;
+	}
+
+	/* Misc vector last - this is only for AdminQ messages */
+	vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
+	vm->vecmap[i].vector_id = 0;
+	vm->vecmap[i].txq_map = 0;
+	vm->vecmap[i].rxq_map = 0;
+	vm->vecmap[i].rxitr_idx = 0;
+	vm->vecmap[i].txitr_idx = 0;
+
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+	    (u8 *)vm, len);
+	free(vm, M_DEVBUF);
+}
+
+/*
+** Scan the Filter List looking for vlans that need
+** to be added, then create the data to hand to the AQ
+** for handling.
+*/
+void
+ixlv_add_vlans(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_vlan_filter_list	*v;
+	struct ixlv_vlan_filter *f, *ftmp;
+	device_t	dev = sc->dev;
+	int		len, i = 0, cnt = 0;
+
+	/* Get count of VLAN filters to add */
+	SLIST_FOREACH(f, sc->vlan_filters, next) {
+		if (f->flags & IXL_FILTER_ADD)
+			cnt++;
+	}
+
+	if (!cnt) {  /* no work... */
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
+		    I40E_SUCCESS);
+		return;
+	}
+
+	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+	      (cnt * sizeof(u16));
+
+	if (len > IXL_AQ_BUF_SZ) {
+		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
+			__func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+
+	v = malloc(len, M_DEVBUF, M_NOWAIT);
+	if (!v) {
+		device_printf(dev, "%s: unable to allocate memory\n",
+			__func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+
+	v->vsi_id = sc->vsi_res->vsi_id;
+	v->num_elements = cnt;
+
+	/* Scan the filter array */
+	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
+		if (f->flags & IXL_FILTER_ADD) {
+			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
+			f->flags = IXL_FILTER_USED;
+			i++;
+		}
+		if (i == cnt)
+			break;
+	}
+	// ERJ: Should this be taken out?
+	if (i == 0) { /* Should not happen... */
+		device_printf(dev, "%s: i == 0?\n", __func__);
+		free(v, M_DEVBUF);
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
+		    I40E_SUCCESS);
+		return;
+	}
+
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
+	free(v, M_DEVBUF);
+	/* add stats? */
+}
+
+/*
+** Scan the Filter Table looking for vlans that need
+** to be removed, then create the data to hand to the AQ
+** for handling.
+*/
+void
+ixlv_del_vlans(struct ixlv_sc *sc)
+{
+	device_t	dev = sc->dev;
+	struct i40e_virtchnl_vlan_filter_list *v;
+	struct ixlv_vlan_filter *f, *ftmp;
+	int len, i = 0, cnt = 0;
+
+	/* Get count of VLAN filters to delete */
+	SLIST_FOREACH(f, sc->vlan_filters, next) {
+		if (f->flags & IXL_FILTER_DEL)
+			cnt++;
+	}
+
+	if (!cnt) {  /* no work... */
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
+		    I40E_SUCCESS);
+		return;
+	}
+
+	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
+	      (cnt * sizeof(u16));
+
+	if (len > IXL_AQ_BUF_SZ) {
+		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
+			__func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+
+	v = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (!v) {
+		device_printf(dev, "%s: unable to allocate memory\n",
+			__func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+
+	v->vsi_id = sc->vsi_res->vsi_id;
+	v->num_elements = cnt;
+
+	/* Scan the filter array */
+	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
+		if (f->flags & IXL_FILTER_DEL) {
+			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
+			i++;
+			SLIST_REMOVE(sc->vlan_filters, f, ixlv_vlan_filter, next);
+			free(f, M_DEVBUF);
+		}
+		if (i == cnt)
+			break;
+	}
+	// ERJ: Take this out?
+	if (i == 0) { /* Should not happen... */
+		device_printf(dev, "%s: i == 0?\n", __func__);
+		free(v, M_DEVBUF);
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
+		    I40E_SUCCESS);
+		return;
+	}
+
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
+	free(v, M_DEVBUF);
+	/* add stats? */
+}
+
+
+/*
+** This routine takes additions to the vsi filter
+** table and creates an Admin Queue call to create
+** the filters in the hardware.
+*/
+void
+ixlv_add_ether_filters(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_ether_addr_list *a;
+	struct ixlv_mac_filter	*f;
+	device_t			dev = sc->dev;
+	int				len, j = 0, cnt = 0;
+
+	/* Get count of MAC addresses to add */
+	SLIST_FOREACH(f, sc->mac_filters, next) {
+		if (f->flags & IXL_FILTER_ADD)
+			cnt++;
+	}
+	if (cnt == 0) { /* Should not happen... */
+		DDPRINTF(dev, "cnt == 0, exiting...");
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
+		    I40E_SUCCESS);
+		return;
+	}
+
+	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));
+
+	a = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (a == NULL) {
+		device_printf(dev, "%s: Failed to get memory for "
+		    "virtchnl_ether_addr_list\n", __func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+	a->vsi_id = sc->vsi.id;
+	a->num_elements = cnt;
+
+	/* Scan the filter array */
+	SLIST_FOREACH(f, sc->mac_filters, next) {
+		if (f->flags & IXL_FILTER_ADD) {
+			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
+			f->flags &= ~IXL_FILTER_ADD;
+			j++;
+
+			DDPRINTF(dev, "ADD: " MAC_FORMAT,
+			    MAC_FORMAT_ARGS(f->macaddr));
+		}
+		if (j == cnt)
+			break;
+	}
+	DDPRINTF(dev, "len %d, j %d, cnt %d",
+	    len, j, cnt);
+	ixlv_send_pf_msg(sc,
+	    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, (u8 *)a, len);
+	/* add stats? */
+	free(a, M_DEVBUF);
+	return;
+}
+
+/*
+** This routine takes filters flagged for deletion in the
+** sc MAC filter list and creates an Admin Queue call
+** to delete those filters in the hardware.
+*/
+void
+ixlv_del_ether_filters(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_ether_addr_list *d;
+	device_t			dev = sc->dev;
+	struct ixlv_mac_filter	*f, *f_temp;
+	int				len, j = 0, cnt = 0;
+
+	/* Get count of MAC addresses to delete */
+	SLIST_FOREACH(f, sc->mac_filters, next) {
+		if (f->flags & IXL_FILTER_DEL)
+			cnt++;
+	}
+	if (cnt == 0) {
+		DDPRINTF(dev, "cnt == 0, exiting...");
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
+		    I40E_SUCCESS);
+		return;
+	}
+
+	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
+	    (cnt * sizeof(struct i40e_virtchnl_ether_addr));
+
+	d = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
+	if (d == NULL) {
+		device_printf(dev, "%s: Failed to get memory for "
+		    "virtchnl_ether_addr_list\n", __func__);
+		ixl_vc_schedule_retry(&sc->vc_mgr);
+		return;
+	}
+	d->vsi_id = sc->vsi.id;
+	d->num_elements = cnt;
+
+	/* Scan the filter array */
+	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
+		if (f->flags & IXL_FILTER_DEL) {
+			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
+			DDPRINTF(dev, "DEL: " MAC_FORMAT,
+			    MAC_FORMAT_ARGS(f->macaddr));
+			j++;
+			SLIST_REMOVE(sc->mac_filters, f, ixlv_mac_filter, next);
+			free(f, M_DEVBUF);
+		}
+		if (j == cnt)
+			break;
+	}
+	ixlv_send_pf_msg(sc,
+	    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, (u8 *)d, len);
+	/* add stats? */
+	free(d, M_DEVBUF);
+	return;
+}
+
+/*
+** ixlv_request_reset
+** Request that the PF reset this VF. No response is expected.
+*/
+void
+ixlv_request_reset(struct ixlv_sc *sc)
+{
+	/*
+	** Set the reset status to "in progress" before
+	** the request, this avoids any possibility of
+	** a mistaken early detection of completion.
+	*/
+	wr32(&sc->hw, I40E_VFGEN_RSTAT, I40E_VFR_INPROGRESS);
+	ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+}
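+
+/*
+** Sketch of how a caller might then wait for the reset to land by
+** polling VFGEN_RSTAT (the loop is illustrative; the macro names are
+** assumed from i40e_register.h / i40e_type.h):
+**
+**	u32 st;
+**	for (int i = 0; i < 100; i++) {
+**		i40e_msec_delay(10);
+**		st = rd32(&sc->hw, I40E_VFGEN_RSTAT) &
+**		    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+**		if (st == I40E_VFR_COMPLETED || st == I40E_VFR_VFACTIVE)
+**			break;
+**	}
+*/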
+
+/*
+** ixlv_request_stats
+** Request the statistics for this VF's VSI from PF.
+*/
+void
+ixlv_request_stats(struct ixlv_sc *sc)
+{
+	struct i40e_virtchnl_queue_select vqs;
+	int error = 0;
+
+	vqs.vsi_id = sc->vsi_res->vsi_id;
+	/* Low priority, we don't need to error check */
+	error = ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_GET_STATS,
+	    (u8 *)&vqs, sizeof(vqs));
+#ifdef IXL_DEBUG
+	if (error)
+		device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
+#endif
+}
+
+/*
+** Updates driver's stats counters with VSI stats returned from PF.
+*/
+void
+ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
+{
+	struct ixl_vsi *vsi = &sc->vsi;
+	uint64_t tx_discards;
+
+	tx_discards = es->tx_discards;
+	for (int i = 0; i < vsi->num_queues; i++)
+		tx_discards += sc->vsi.queues[i].txr.br->br_drops;
+
+	/* Update ifnet stats */
+	IXL_SET_IPACKETS(vsi, es->rx_unicast +
+	                   es->rx_multicast +
+			   es->rx_broadcast);
+	IXL_SET_OPACKETS(vsi, es->tx_unicast +
+	                   es->tx_multicast +
+			   es->tx_broadcast);
+	IXL_SET_IBYTES(vsi, es->rx_bytes);
+	IXL_SET_OBYTES(vsi, es->tx_bytes);
+	IXL_SET_IMCASTS(vsi, es->rx_multicast);
+	IXL_SET_OMCASTS(vsi, es->tx_multicast);
+
+	IXL_SET_OERRORS(vsi, es->tx_errors);
+	IXL_SET_IQDROPS(vsi, es->rx_discards);
+	IXL_SET_OQDROPS(vsi, tx_discards);
+	IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+	IXL_SET_COLLISIONS(vsi, 0);
+
+	vsi->eth_stats = *es;
+}
+
+/*
+** ixlv_vc_completion
+**
+** Asynchronous completion function for admin queue messages. Rather than busy
+** wait, we fire off our requests and assume that no errors will be returned.
+** This function handles the reply messages.
+*/
+void
+ixlv_vc_completion(struct ixlv_sc *sc,
+    enum i40e_virtchnl_ops v_opcode,
+    i40e_status v_retval, u8 *msg, u16 msglen)
+{
+	device_t	dev = sc->dev;
+	struct ixl_vsi	*vsi = &sc->vsi;
+
+	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
+		struct i40e_virtchnl_pf_event *vpe =
+			(struct i40e_virtchnl_pf_event *)msg;
+
+		switch (vpe->event) {
+		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
+#ifdef IXL_DEBUG
+			device_printf(dev, "Link change: status %d, speed %d\n",
+			    vpe->event_data.link_event.link_status,
+			    vpe->event_data.link_event.link_speed);
+#endif
+			sc->link_up =
+				vpe->event_data.link_event.link_status;
+			sc->link_speed =
+				vpe->event_data.link_event.link_speed;
+			ixlv_update_link_status(sc);
+			break;
+		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
+			device_printf(dev, "PF initiated reset!\n");
+			sc->init_state = IXLV_RESET_PENDING;
+			ixlv_init(sc);
+			break;
+		default:
+			device_printf(dev, "%s: Unknown event %d from AQ\n",
+				__func__, vpe->event);
+			break;
+		}
+
+		return;
+	}
+
+	/* Catch-all error response */
+	if (v_retval) {
+		device_printf(dev,
+		    "%s: AQ returned error %d to our request %d!\n",
+		    __func__, v_retval, v_opcode);
+	}
+
+#ifdef IXL_DEBUG
+	if (v_opcode != I40E_VIRTCHNL_OP_GET_STATS)
+		DDPRINTF(dev, "opcode %d", v_opcode);
+#endif
+
+	switch (v_opcode) {
+	case I40E_VIRTCHNL_OP_GET_STATS:
+		ixlv_update_stats_counters(sc, (struct i40e_eth_stats *)msg);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_MAC_FILTER,
+		    v_retval);
+		if (v_retval) {
+			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
+			device_printf(dev, "WARNING: Device may not receive traffic!\n");
+		}
+		break;
+	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_MAC_FILTER,
+		    v_retval);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_PROMISC,
+		    v_retval);
+		break;
+	case I40E_VIRTCHNL_OP_ADD_VLAN:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
+		    v_retval);
+		break;
+	case I40E_VIRTCHNL_OP_DEL_VLAN:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
+		    v_retval);
+		break;
+	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ENABLE_QUEUES,
+		    v_retval);
+		if (v_retval == 0) {
+			/* Update link status */
+			ixlv_update_link_status(sc);
+			/* Turn on all interrupts */
+			ixlv_enable_intr(vsi);
+			/* And inform the stack we're ready */
+			vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+			vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+		}
+		break;
+	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DISABLE_QUEUES,
+		    v_retval);
+		if (v_retval == 0) {
+			/* Turn off all interrupts */
+			ixlv_disable_intr(vsi);
+			/* Tell the stack that the interface is no longer active */
+			vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+		}
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIGURE_QUEUES,
+		    v_retval);
+		break;
+	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+		ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
+		    v_retval);
+		break;
+	default:
+		device_printf(dev,
+		    "%s: Received unexpected message %d from PF.\n",
+		    __func__, v_opcode);
+		break;
+	}
+	return;
+}
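+
+/*
+** The switch above pairs each virtchnl opcode with the IXLV_FLAG_AQ_*
+** request flag used to enqueue it, which lets ixl_vc_process_resp() match
+** a reply to the command currently at the head of the manager's queue.
+** GET_STATS is the exception: it is sent directly by ixlv_request_stats()
+** and never passes through the command manager.
+*/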
+
+static void
+ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
+{
+
+	switch (request) {
+	case IXLV_FLAG_AQ_MAP_VECTORS:
+		ixlv_map_queues(sc);
+		break;
+
+	case IXLV_FLAG_AQ_ADD_MAC_FILTER:
+		ixlv_add_ether_filters(sc);
+		break;
+
+	case IXLV_FLAG_AQ_ADD_VLAN_FILTER:
+		ixlv_add_vlans(sc);
+		break;
+
+	case IXLV_FLAG_AQ_DEL_MAC_FILTER:
+		ixlv_del_ether_filters(sc);
+		break;
+
+	case IXLV_FLAG_AQ_DEL_VLAN_FILTER:
+		ixlv_del_vlans(sc);
+		break;
+
+	case IXLV_FLAG_AQ_CONFIGURE_QUEUES:
+		ixlv_configure_queues(sc);
+		break;
+
+	case IXLV_FLAG_AQ_DISABLE_QUEUES:
+		ixlv_disable_queues(sc);
+		break;
+
+	case IXLV_FLAG_AQ_ENABLE_QUEUES:
+		ixlv_enable_queues(sc);
+		break;
+	}
+}
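+
+/*
+** Command manager overview (as implemented below): callers queue requests
+** with ixl_vc_enqueue(); ixl_vc_process_next() pulls the head of the
+** pending list and transmits it via ixl_vc_send_current(), which also arms
+** a timeout callout. A matching reply cancels the callout in
+** ixl_vc_process_resp() and fires the command's callback; if no reply
+** arrives, the command completes with I40E_ERR_TIMEOUT. Only one command
+** is ever outstanding at a time.
+*/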
+
+void
+ixl_vc_init_mgr(struct ixlv_sc *sc, struct ixl_vc_mgr *mgr)
+{
+	mgr->sc = sc;
+	mgr->current = NULL;
+	TAILQ_INIT(&mgr->pending);
+	callout_init_mtx(&mgr->callout, &sc->mtx, 0);
+}
+
+static void
+ixl_vc_process_completion(struct ixl_vc_mgr *mgr, enum i40e_status_code err)
+{
+	struct ixl_vc_cmd *cmd;
+
+	cmd = mgr->current;
+	mgr->current = NULL;
+	cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
+
+	cmd->callback(cmd, cmd->arg, err);
+	ixl_vc_process_next(mgr);
+}
+
+static void
+ixl_vc_process_resp(struct ixl_vc_mgr *mgr, uint32_t request,
+    enum i40e_status_code err)
+{
+	struct ixl_vc_cmd *cmd;
+
+	cmd = mgr->current;
+	if (cmd == NULL || cmd->request != request)
+		return;
+
+	callout_stop(&mgr->callout);
+	ixl_vc_process_completion(mgr, err);
+}
+
+static void
+ixl_vc_cmd_timeout(void *arg)
+{
+	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
+
+	IXLV_CORE_LOCK_ASSERT(mgr->sc);
+	ixl_vc_process_completion(mgr, I40E_ERR_TIMEOUT);
+}
+
+static void
+ixl_vc_cmd_retry(void *arg)
+{
+	struct ixl_vc_mgr *mgr = (struct ixl_vc_mgr *)arg;
+
+	IXLV_CORE_LOCK_ASSERT(mgr->sc);
+	ixl_vc_send_current(mgr);
+}
+
+static void
+ixl_vc_send_current(struct ixl_vc_mgr *mgr)
+{
+	struct ixl_vc_cmd *cmd;
+
+	cmd = mgr->current;
+	ixl_vc_send_cmd(mgr->sc, cmd->request);
+	callout_reset(&mgr->callout, IXLV_VC_TIMEOUT, ixl_vc_cmd_timeout, mgr);
+}
+
+static void
+ixl_vc_process_next(struct ixl_vc_mgr *mgr)
+{
+	struct ixl_vc_cmd *cmd;
+
+	if (mgr->current != NULL)
+		return;
+
+	if (TAILQ_EMPTY(&mgr->pending))
+		return;
+
+	cmd = TAILQ_FIRST(&mgr->pending);
+	TAILQ_REMOVE(&mgr->pending, cmd, next);
+
+	mgr->current = cmd;
+	ixl_vc_send_current(mgr);
+}
+
+static void
+ixl_vc_schedule_retry(struct ixl_vc_mgr *mgr)
+{
+
+	callout_reset(&mgr->callout, howmany(hz, 100), ixl_vc_cmd_retry, mgr);
+}
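+
+/*
+** The retry callout fires ixl_vc_cmd_retry() after howmany(hz, 100) ticks,
+** i.e. roughly 10 ms, which simply retransmits the current command. The
+** longer IXLV_VC_TIMEOUT used by ixl_vc_send_current() bounds how long any
+** single transmission waits for a reply before failing with
+** I40E_ERR_TIMEOUT.
+*/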
+
+void
+ixl_vc_enqueue(struct ixl_vc_mgr *mgr, struct ixl_vc_cmd *cmd,
+	    uint32_t req, ixl_vc_callback_t *callback, void *arg)
+{
+	IXLV_CORE_LOCK_ASSERT(mgr->sc);
+
+	if (cmd->flags & IXLV_VC_CMD_FLAG_BUSY) {
+		if (mgr->current == cmd)
+			mgr->current = NULL;
+		else
+			TAILQ_REMOVE(&mgr->pending, cmd, next);
+	}
+
+	cmd->request = req;
+	cmd->callback = callback;
+	cmd->arg = arg;
+	cmd->flags |= IXLV_VC_CMD_FLAG_BUSY;
+	TAILQ_INSERT_TAIL(&mgr->pending, cmd, next);
+
+	ixl_vc_process_next(mgr);
+}
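+
+/*
+** Sketch of a typical caller (the command field and callback names here
+** are illustrative only, not taken from this file):
+**
+**	ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd,
+**	    IXLV_FLAG_AQ_ENABLE_QUEUES, ixlv_enable_queues_done, sc);
+**
+** The cmd structure must remain valid until its callback runs. Enqueueing
+** a command that is already busy removes the earlier instance first, so a
+** given cmd is never on the list twice.
+*/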
+
+void
+ixl_vc_flush(struct ixl_vc_mgr *mgr)
+{
+	struct ixl_vc_cmd *cmd;
+
+	IXLV_CORE_LOCK_ASSERT(mgr->sc);
+	KASSERT(TAILQ_EMPTY(&mgr->pending) || mgr->current != NULL,
+	    ("ixlv: pending commands waiting but no command in progress"));
+
+	cmd = mgr->current;
+	if (cmd != NULL) {
+		mgr->current = NULL;
+		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
+		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
+	}
+
+	while ((cmd = TAILQ_FIRST(&mgr->pending)) != NULL) {
+		TAILQ_REMOVE(&mgr->pending, cmd, next);
+		cmd->flags &= ~IXLV_VC_CMD_FLAG_BUSY;
+		cmd->callback(cmd, cmd->arg, I40E_ERR_ADAPTER_STOPPED);
+	}
+
+	callout_stop(&mgr->callout);
+}
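+
+/*
+** ixl_vc_flush() is the teardown path: the in-flight command and every
+** pending command complete immediately with I40E_ERR_ADAPTER_STOPPED, so
+** no callback is left waiting when the interface is being stopped.
+*/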
+


Property changes on: trunk/sys/dev/ixl/ixlvc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property

