[Midnightbsd-cvs] src [6559] U trunk/sys/dev/ixgbe: update ixgbe(4) based on freebsd 9.2

laffer1 at midnightbsd.org
Sat Dec 28 09:30:04 EST 2013


Revision: 6559
          http://svnweb.midnightbsd.org/src/?rev=6559
Author:   laffer1
Date:     2013-12-28 09:30:04 -0500 (Sat, 28 Dec 2013)
Log Message:
-----------
update ixgbe(4) based on freebsd 9.2

Modified Paths:
--------------
    trunk/sys/dev/ixgbe/LICENSE
    trunk/sys/dev/ixgbe/README
    trunk/sys/dev/ixgbe/ixgbe.c
    trunk/sys/dev/ixgbe/ixgbe.h
    trunk/sys/dev/ixgbe/ixgbe_82598.c
    trunk/sys/dev/ixgbe/ixgbe_82598.h
    trunk/sys/dev/ixgbe/ixgbe_82599.c
    trunk/sys/dev/ixgbe/ixgbe_82599.h
    trunk/sys/dev/ixgbe/ixgbe_api.c
    trunk/sys/dev/ixgbe/ixgbe_api.h
    trunk/sys/dev/ixgbe/ixgbe_common.c
    trunk/sys/dev/ixgbe/ixgbe_common.h
    trunk/sys/dev/ixgbe/ixgbe_mbx.c
    trunk/sys/dev/ixgbe/ixgbe_mbx.h
    trunk/sys/dev/ixgbe/ixgbe_osdep.h
    trunk/sys/dev/ixgbe/ixgbe_phy.c
    trunk/sys/dev/ixgbe/ixgbe_phy.h
    trunk/sys/dev/ixgbe/ixgbe_type.h
    trunk/sys/dev/ixgbe/ixgbe_vf.c
    trunk/sys/dev/ixgbe/ixgbe_vf.h
    trunk/sys/dev/ixgbe/ixgbe_x540.c
    trunk/sys/dev/ixgbe/ixgbe_x540.h
    trunk/sys/dev/ixgbe/ixv.c
    trunk/sys/dev/ixgbe/ixv.h

Added Paths:
-----------
    trunk/sys/dev/ixgbe/ixgbe_dcb.c
    trunk/sys/dev/ixgbe/ixgbe_dcb.h
    trunk/sys/dev/ixgbe/ixgbe_dcb_82598.c
    trunk/sys/dev/ixgbe/ixgbe_dcb_82598.h
    trunk/sys/dev/ixgbe/ixgbe_dcb_82599.c
    trunk/sys/dev/ixgbe/ixgbe_dcb_82599.h

Property Changed:
----------------
    trunk/sys/dev/ixgbe/LICENSE
    trunk/sys/dev/ixgbe/README
    trunk/sys/dev/ixgbe/ixgbe.c
    trunk/sys/dev/ixgbe/ixgbe.h
    trunk/sys/dev/ixgbe/ixgbe_82598.c
    trunk/sys/dev/ixgbe/ixgbe_82598.h
    trunk/sys/dev/ixgbe/ixgbe_82599.c
    trunk/sys/dev/ixgbe/ixgbe_82599.h
    trunk/sys/dev/ixgbe/ixgbe_api.c
    trunk/sys/dev/ixgbe/ixgbe_api.h
    trunk/sys/dev/ixgbe/ixgbe_common.c
    trunk/sys/dev/ixgbe/ixgbe_common.h
    trunk/sys/dev/ixgbe/ixgbe_mbx.c
    trunk/sys/dev/ixgbe/ixgbe_mbx.h
    trunk/sys/dev/ixgbe/ixgbe_osdep.h
    trunk/sys/dev/ixgbe/ixgbe_phy.c
    trunk/sys/dev/ixgbe/ixgbe_phy.h
    trunk/sys/dev/ixgbe/ixgbe_type.h
    trunk/sys/dev/ixgbe/ixgbe_vf.c
    trunk/sys/dev/ixgbe/ixgbe_vf.h
    trunk/sys/dev/ixgbe/ixgbe_x540.c
    trunk/sys/dev/ixgbe/ixgbe_x540.h
    trunk/sys/dev/ixgbe/ixv.c
    trunk/sys/dev/ixgbe/ixv.h

Modified: trunk/sys/dev/ixgbe/LICENSE
===================================================================
--- trunk/sys/dev/ixgbe/LICENSE	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/LICENSE	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,6 @@
 /******************************************************************************
 
-  Copyright (c) 2001-2011, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,4 +30,4 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/LICENSE 248287 2013-03-14 21:39:39Z jfv $*/


Property changes on: trunk/sys/dev/ixgbe/LICENSE
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
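
The README update that follows recommends raising hw.intr_storm_threshold when
the kernel logs interrupt-storm warnings for this adapter. As a minimal
illustration, the same tuning can be done from C with sysctlbyname(3); in
practice sysctl(8) or an /etc/sysctl.conf entry is the usual route, and the
9000 value here is simply the figure the README suggests.

#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int newval = 9000;		/* threshold suggested in the README */
	int oldval;
	size_t oldlen = sizeof(oldval);

	/* Requires root; fetches the current value and installs the new one. */
	if (sysctlbyname("hw.intr_storm_threshold", &oldval, &oldlen,
	    &newval, sizeof(newval)) == -1)
		err(1, "sysctlbyname");
	printf("hw.intr_storm_threshold: %d -> %d\n", oldval, newval);
	return (0);
}
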
Modified: trunk/sys/dev/ixgbe/README
===================================================================
--- trunk/sys/dev/ixgbe/README	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/README	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,8 +1,8 @@
 FreeBSD Driver for Intel(R) Ethernet 10 Gigabit PCI Express Server Adapters
 ============================================================================
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/README 252898 2013-07-06 21:38:55Z jfv $*/
 
-November 12, 2010
+Jun 18, 2013
 
 
 Contents
@@ -18,8 +18,8 @@
 Overview
 ========
 
-This file describes the FreeBSD* driver for the Intel(R) Ethernet 10 Gigabit 
-Family of Adapters.  Driver has been developed for use with FreeBSD 7.2 or later.
+This file describes the FreeBSD* driver for the
+Intel(R) Ethernet 10 Gigabit Family of Adapters.
 
 For questions related to hardware requirements, refer to the documentation
 supplied with your Intel 10GbE adapter.  All hardware requirements listed
@@ -42,7 +42,7 @@
 Intel optics and/or the direct attach cables listed below.
 
 When 82599-based SFP+ devices are connected back to back, they should be set to
-the same Speed setting via Ethtool. Results may vary if you mix speed settings. 
+the same Speed setting. Results may vary if you mix speed settings. 
  
 Supplier    Type                                             Part Numbers
 
@@ -70,7 +70,12 @@
 Avago	   DUAL RATE 1G/10G SFP+ LR (No Bail)	             AFCT-701SDZ-IN1
 Finistar   1000BASE-T SFP                                    FCLF8522P2BTL
 Avago      1000BASE-T SFP                                    ABCU-5710RZ
-		
+
+NOTE: As of driver version 2.5.13 it is possible to allow the operation
+of unsupported modules by setting the static variable 'allow_unsupported_sfp'
+to TRUE and rebuilding the driver. If problems occur, please ensure that they
+can be reproduced with fully supported optics first.
+
 82599-based adapters support all passive and active limiting direct attach 
 cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
 
@@ -224,14 +229,7 @@
   When there is a choice run on a 64bit OS rather than 32, it makes a 
   significant difference in improvement.
   
-  The default scheduler SCHED_4BSD is not smart about SMP locality issues. 
-  Significant improvement can be achieved by switching to the ULE scheduler.
-
-  This is done by changing the entry in the config file from SCHED_4BSD to 
-  SCHED_ULE. Note that this is only advisable on FreeBSD 7, on 6.X there have
-  been stability problems with ULE.
-
-  The interface can generate high number of interrupts. To avoid running 
+  The interface can generate a high number of interrupts. To avoid running 
   into the limit set by the kernel, adjust hw.intr_storm_threshold 
   setting using sysctl:
  
@@ -242,14 +240,12 @@
        hw.intr_storm_threshold=9000
 
   If you still see Interrupt Storm detected messages, increase the limit to a
-  higher number.
+  higher number, or the detection can be disabled by setting it to 0.
 
-  Best throughput results are seen with a large MTU; use 9000 if possible. 
+  The default number of descriptors is 2048; increasing or decreasing it
+  may improve performance in some workloads, but change carefully.
 
-  The default number of descriptors is 1024, increasing this to 2K or even 
-  4K may improve performance in some workloads, but change carefully.
 
-
 Known Limitations
 =================
 
@@ -284,7 +280,7 @@
   ----------------------------------------------------------
   Some PCI-E x8 slots are actually configured as x4 slots. These slots have 
   insufficient bandwidth for full 10Gbe line rate with dual port 10GbE devices.
-  The driver can detect this situation and will write the following message in
+  The driver will detect this situation and will write the following message in
   the system log: "PCI-Express bandwidth available for this card is not 
   sufficient for optimal performance. For optimal performance a x8 PCI-Express 
   slot is required."


Property changes on: trunk/sys/dev/ixgbe/README
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
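
The ixgbe.c diff that follows reworks ixgbe_mq_start_locked() around the drbr
peek/advance/putback idiom on FreeBSD >= 901504, instead of the older
dequeue-and-requeue logic. A schematic kernel-side sketch of that idiom is
below; my_encap() is a hypothetical stand-in for the driver's ixgbe_xmit(),
and the fragment is meant to show the ordering of the buf_ring calls rather
than be a drop-in routine.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/buf_ring.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>

static int my_encap(struct mbuf **);	/* hypothetical, plays the role of ixgbe_xmit() */

static int
my_drain_ring(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *next;
	int err = 0;

	while ((next = drbr_peek(ifp, br)) != NULL) {
		if ((err = my_encap(&next)) != 0) {
			if (next == NULL)
				drbr_advance(ifp, br);		/* mbuf was consumed/freed */
			else
				drbr_putback(ifp, br, next);	/* leave it at the head, retry later */
			break;
		}
		drbr_advance(ifp, br);		/* commit: frame accepted, drop it from the ring */
		ETHER_BPF_MTAP(ifp, next);	/* hand a copy to any BPF listeners */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
	}
	return (err);
}
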
Modified: trunk/sys/dev/ixgbe/ixgbe.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,13 +31,11 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe.c 254573 2013-08-20 17:50:30Z jfv $*/
 
-#ifdef HAVE_KERNEL_OPTION_HEADERS
+
 #include "opt_inet.h"
 #include "opt_inet6.h"
-#endif
-
 #include "ixgbe.h"
 
 /*********************************************************************
@@ -47,7 +46,7 @@
 /*********************************************************************
  *  Driver version
  *********************************************************************/
-char ixgbe_driver_version[] = "2.4.8";
+char ixgbe_driver_version[] = "2.5.15";
 
 /*********************************************************************
  *  PCI Device ID Table
@@ -83,7 +82,7 @@
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, 0, 0, 0},
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
-	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, 0, 0, 0},
+	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, 0, 0, 0},
 	{IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
 	/* required last entry */
 	{0, 0, 0, 0, 0}
@@ -104,14 +103,15 @@
 static int      ixgbe_attach(device_t);
 static int      ixgbe_detach(device_t);
 static int      ixgbe_shutdown(device_t);
+#ifdef IXGBE_LEGACY_TX
 static void     ixgbe_start(struct ifnet *);
 static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
-#if __FreeBSD_version >= 800000
+#else /* ! IXGBE_LEGACY_TX */
 static int	ixgbe_mq_start(struct ifnet *, struct mbuf *);
-static int	ixgbe_mq_start_locked(struct ifnet *,
-                    struct tx_ring *, struct mbuf *);
+static int	ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *);
 static void	ixgbe_qflush(struct ifnet *);
-#endif
+static void	ixgbe_deferred_mq_start(void *, int);
+#endif /* IXGBE_LEGACY_TX */
 static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
 static void	ixgbe_init(void *);
 static void	ixgbe_init_locked(struct adapter *);
@@ -120,6 +120,7 @@
 static int      ixgbe_media_change(struct ifnet *);
 static void     ixgbe_identify_hardware(struct adapter *);
 static int      ixgbe_allocate_pci_resources(struct adapter *);
+static void	ixgbe_get_slot_info(struct ixgbe_hw *);
 static int      ixgbe_allocate_msix(struct adapter *);
 static int      ixgbe_allocate_legacy(struct adapter *);
 static int	ixgbe_allocate_queues(struct adapter *);
@@ -147,8 +148,8 @@
 static void     ixgbe_enable_intr(struct adapter *);
 static void     ixgbe_disable_intr(struct adapter *);
 static void     ixgbe_update_stats_counters(struct adapter *);
-static bool	ixgbe_txeof(struct tx_ring *);
-static bool	ixgbe_rxeof(struct ix_queue *, int);
+static void	ixgbe_txeof(struct tx_ring *);
+static bool	ixgbe_rxeof(struct ix_queue *);
 static void	ixgbe_rx_checksum(u32, struct mbuf *, u32);
 static void     ixgbe_set_promisc(struct adapter *);
 static void     ixgbe_set_multi(struct adapter *);
@@ -161,10 +162,10 @@
 static int	ixgbe_dma_malloc(struct adapter *, bus_size_t,
 		    struct ixgbe_dma_alloc *, int);
 static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
-static void	ixgbe_add_rx_process_limit(struct adapter *, const char *,
-		    const char *, int *, int);
-static bool	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
-static bool	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static int	ixgbe_tx_ctx_setup(struct tx_ring *,
+		    struct mbuf *, u32 *, u32 *);
+static int	ixgbe_tso_setup(struct tx_ring *,
+		    struct mbuf *, u32 *, u32 *);
 static void	ixgbe_set_ivar(struct adapter *, u8, u8, s8);
 static void	ixgbe_configure_ivars(struct adapter *);
 static u8 *	ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
@@ -179,6 +180,9 @@
 static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
 		    struct mbuf *, u32);
 
+static void	ixgbe_enable_rx_drop(struct adapter *);
+static void	ixgbe_disable_rx_drop(struct adapter *);
+
 /* Support for pluggable optic modules */
 static bool	ixgbe_sfp_probe(struct adapter *);
 static void	ixgbe_setup_optics(struct adapter *);
@@ -201,6 +205,9 @@
 static void	ixgbe_reinit_fdir(void *, int);
 #endif
 
+/* Missing shared code prototype */
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+
 /*********************************************************************
  *  FreeBSD Device Interface Entry Points
  *********************************************************************/
@@ -211,7 +218,7 @@
 	DEVMETHOD(device_attach, ixgbe_attach),
 	DEVMETHOD(device_detach, ixgbe_detach),
 	DEVMETHOD(device_shutdown, ixgbe_shutdown),
-	{0, 0}
+	DEVMETHOD_END
 };
 
 static driver_t ixgbe_driver = {
@@ -241,9 +248,13 @@
 TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
 
 /* How many packets rxeof tries to clean at a time */
-static int ixgbe_rx_process_limit = 128;
+static int ixgbe_rx_process_limit = 256;
 TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
 
+/* How many packets txeof tries to clean at a time */
+static int ixgbe_tx_process_limit = 256;
+TUNABLE_INT("hw.ixgbe.tx_process_limit", &ixgbe_tx_process_limit);
+
 /*
 ** Smart speed setting, default to on
 ** this only works as a compile option
@@ -261,15 +272,6 @@
 TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
 
 /*
- * Header split: this causes the hardware to DMA
- * the header into a separate mbuf from the payload,
- * it can be a performance win in some workloads, but
- * in others it actually hurts, its off by default. 
- */
-static int ixgbe_header_split = FALSE;
-TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
-
-/*
  * Number of Queues, can be set to 0,
  * it then autoconfigures based on the
  * number of cpus with a max of 8. This
@@ -290,6 +292,28 @@
 static int ixgbe_rxd = PERFORM_RXD;
 TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
 
+/*
+** Defining this on will allow the use
+** of unsupported SFP+ modules, note that
+** doing so you are on your own :)
+*/
+static int allow_unsupported_sfp = FALSE;
+TUNABLE_INT("hw.ixgbe.unsupported_sfp", &allow_unsupported_sfp);
+
+/*
+** HW RSC control: 
+**  this feature only works with
+**  IPv4, and only on 82599 and later.
+**  Also this will cause IP forwarding to
+**  fail and that can't be controlled by
+**  the stack as LRO can. For all these
+**  reasons I've deemed it best to leave
+**  this off and not bother with a tuneable
+**  interface, this would need to be compiled
+**  to enable.
+*/
+static bool ixgbe_rsc_enable = FALSE;
+
 /* Keep running tab on them for sanity check */
 static int ixgbe_total_ports;
 
@@ -399,11 +423,6 @@
 
 	INIT_DEBUGOUT("ixgbe_attach: begin");
 
-	if (resource_disabled("ixgbe", device_get_unit(dev))) {
-		device_printf(dev, "Disabled by device hint\n");
-		return (ENXIO);
-	}
-
 	/* Allocate, clear, and link in our adapter structure */
 	adapter = device_get_softc(dev);
 	adapter->dev = adapter->osdep.dev = dev;
@@ -498,6 +517,7 @@
 	}
 
 	/* Initialize the shared code */
+	hw->allow_unsupported_sfp = allow_unsupported_sfp;
 	error = ixgbe_init_shared_code(hw);
 	if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
 		/*
@@ -536,7 +556,6 @@
 	case IXGBE_ERR_SFP_NOT_SUPPORTED:
 		device_printf(dev,"Unsupported SFP+ Module\n");
 		error = EIO;
-		device_printf(dev,"Hardware Initialization Failure\n");
 		goto err_late;
 	case IXGBE_ERR_SFP_NOT_PRESENT:
 		device_printf(dev,"No SFP+ Module found\n");
@@ -559,11 +578,6 @@
 	if (ixgbe_setup_interface(dev, adapter) != 0)
 		goto err_late;
 
-	/* Sysctl for limiting the amount of work done in the taskqueue */
-	ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
-	    "max number of rx packets to process", &adapter->rx_process_limit,
-	    ixgbe_rx_process_limit);
-
 	/* Initialize statistics */
 	ixgbe_update_stats_counters(adapter);
 
@@ -573,24 +587,13 @@
 	adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
 	    ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
 
-        /* Print PCIE bus type/speed/width info */
-	ixgbe_get_bus_info(hw);
-	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
-	    ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
-	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
-	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
-	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
-	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
-	    ("Unknown"));
+        /*
+	** Check PCIE slot type/speed/width
+	*/
+	ixgbe_get_slot_info(hw);
 
-	if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
-	    (hw->bus.speed == ixgbe_bus_speed_2500)) {
-		device_printf(dev, "PCI-Express bandwidth available"
-		    " for this card\n     is not sufficient for"
-		    " optimal performance.\n");
-		device_printf(dev, "For optimal performance a x8 "
-		    "PCIE, or x4 PCIE 2 slot is required.\n");
-        }
+	/* Set an initial default flow control value */
+	adapter->fc =  ixgbe_fc_full;
 
 	/* let hardware know driver is loaded */
 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -631,6 +634,7 @@
 {
 	struct adapter *adapter = device_get_softc(dev);
 	struct ix_queue *que = adapter->queues;
+	struct tx_ring *txr = adapter->tx_rings;
 	u32	ctrl_ext;
 
 	INIT_DEBUGOUT("ixgbe_detach: begin");
@@ -645,8 +649,11 @@
 	ixgbe_stop(adapter);
 	IXGBE_CORE_UNLOCK(adapter);
 
-	for (int i = 0; i < adapter->num_queues; i++, que++) {
+	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
 		if (que->tq) {
+#ifndef IXGBE_LEGACY_TX
+			taskqueue_drain(que->tq, &txr->txq_task);
+#endif
 			taskqueue_drain(que->tq, &que->que_task);
 			taskqueue_free(que->tq);
 		}
@@ -708,6 +715,7 @@
 }
 
 
+#ifdef IXGBE_LEGACY_TX
 /*********************************************************************
  *  Transmit entry point
  *
@@ -726,17 +734,14 @@
 
 	IXGBE_TX_LOCK_ASSERT(txr);
 
-	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
-	    IFF_DRV_RUNNING)
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 		return;
 	if (!adapter->link_active)
 		return;
 
 	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
-		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE) {
-			txr->queue_status |= IXGBE_QUEUE_DEPLETED;
+		if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
 			break;
-                }
 
 		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
 		if (m_head == NULL)
@@ -745,8 +750,6 @@
 		if (ixgbe_xmit(txr, &m_head)) {
 			if (m_head != NULL)
 				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
-			if (txr->tx_avail <= IXGBE_QUEUE_MIN_FREE)
-				txr->queue_status |= IXGBE_QUEUE_DEPLETED;
 			break;
 		}
 		/* Send a copy of the frame to the BPF listener */
@@ -779,7 +782,8 @@
 	return;
 }
 
-#if __FreeBSD_version >= 800000
+#else /* ! IXGBE_LEGACY_TX */
+
 /*
 ** Multiqueue Transmit driver
 **
@@ -790,7 +794,7 @@
 	struct adapter	*adapter = ifp->if_softc;
 	struct ix_queue	*que;
 	struct tx_ring	*txr;
-	int 		i = 0, err = 0;
+	int 		i, err = 0;
 
 	/* Which queue to use */
 	if ((m->m_flags & M_FLOWID) != 0)
@@ -801,68 +805,63 @@
 	txr = &adapter->tx_rings[i];
 	que = &adapter->queues[i];
 
-	if (((txr->queue_status & IXGBE_QUEUE_DEPLETED) == 0) &&
-	    IXGBE_TX_TRYLOCK(txr)) {
-		err = ixgbe_mq_start_locked(ifp, txr, m);
+	err = drbr_enqueue(ifp, txr->br, m);
+	if (err)
+		return (err);
+	if (IXGBE_TX_TRYLOCK(txr)) {
+		err = ixgbe_mq_start_locked(ifp, txr);
 		IXGBE_TX_UNLOCK(txr);
-	} else {
-		err = drbr_enqueue(ifp, txr->br, m);
-		taskqueue_enqueue(que->tq, &que->que_task);
-	}
+	} else
+		taskqueue_enqueue(que->tq, &txr->txq_task);
 
 	return (err);
 }
 
 static int
-ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr)
 {
 	struct adapter  *adapter = txr->adapter;
         struct mbuf     *next;
-        int             enqueued, err = 0;
+        int             enqueued = 0, err = 0;
 
 	if (((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ||
-	    (txr->queue_status == IXGBE_QUEUE_DEPLETED) ||
-	    adapter->link_active == 0) {
-		if (m != NULL)
-			err = drbr_enqueue(ifp, txr->br, m);
-		return (err);
-	}
+	    adapter->link_active == 0)
+		return (ENETDOWN);
 
-	enqueued = 0;
-	if (m == NULL) {
-		next = drbr_dequeue(ifp, txr->br);
-	} else if (drbr_needs_enqueue(ifp, txr->br)) {
-		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
-			return (err);
-		next = drbr_dequeue(ifp, txr->br);
-	} else
-		next = m;
-
 	/* Process the queue */
+#if __FreeBSD_version < 901504
+	next = drbr_dequeue(ifp, txr->br);
 	while (next != NULL) {
 		if ((err = ixgbe_xmit(txr, &next)) != 0) {
 			if (next != NULL)
 				err = drbr_enqueue(ifp, txr->br, next);
+#else
+	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
+		if ((err = ixgbe_xmit(txr, &next)) != 0) {
+			if (next == NULL) {
+				drbr_advance(ifp, txr->br);
+			} else {
+				drbr_putback(ifp, txr->br, next);
+			}
+#endif
 			break;
 		}
+#if __FreeBSD_version >= 901504
+		drbr_advance(ifp, txr->br);
+#endif
 		enqueued++;
-		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
 		/* Send a copy of the frame to the BPF listener */
 		ETHER_BPF_MTAP(ifp, next);
 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
 			break;
-		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
-			ixgbe_txeof(txr);
-		if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
-			txr->queue_status |= IXGBE_QUEUE_DEPLETED;
-			break;
-		}
+#if __FreeBSD_version < 901504
 		next = drbr_dequeue(ifp, txr->br);
+#endif
 	}
 
 	if (enqueued > 0) {
 		/* Set watchdog on */
-		txr->queue_status |= IXGBE_QUEUE_WORKING;
+		txr->queue_status = IXGBE_QUEUE_WORKING;
 		txr->watchdog_time = ticks;
 	}
 
@@ -873,6 +872,22 @@
 }
 
 /*
+ * Called from a taskqueue to drain queued transmit packets.
+ */
+static void
+ixgbe_deferred_mq_start(void *arg, int pending)
+{
+	struct tx_ring *txr = arg;
+	struct adapter *adapter = txr->adapter;
+	struct ifnet *ifp = adapter->ifp;
+
+	IXGBE_TX_LOCK(txr);
+	if (!drbr_empty(ifp, txr->br))
+		ixgbe_mq_start_locked(ifp, txr);
+	IXGBE_TX_UNLOCK(txr);
+}
+
+/*
 ** Flush all ring buffers
 */
 static void
@@ -890,7 +905,7 @@
 	}
 	if_qflush(ifp);
 }
-#endif /* __FreeBSD_version >= 800000 */
+#endif /* IXGBE_LEGACY_TX */
 
 /*********************************************************************
  *  Ioctl entry point
@@ -905,6 +920,7 @@
 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
 {
 	struct adapter	*adapter = ifp->if_softc;
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct ifreq	*ifr = (struct ifreq *) data;
 #if defined(INET) || defined(INET6)
 	struct ifaddr *ifa = (struct ifaddr *)data;
@@ -992,6 +1008,8 @@
 			ifp->if_capenable ^= IFCAP_HWCSUM;
 		if (mask & IFCAP_TSO4)
 			ifp->if_capenable ^= IFCAP_TSO4;
+		if (mask & IFCAP_TSO6)
+			ifp->if_capenable ^= IFCAP_TSO6;
 		if (mask & IFCAP_LRO)
 			ifp->if_capenable ^= IFCAP_LRO;
 		if (mask & IFCAP_VLAN_HWTAGGING)
@@ -1008,7 +1026,22 @@
 		VLAN_CAPABILITIES(ifp);
 		break;
 	}
-
+	case SIOCGI2C:
+	{
+		struct ixgbe_i2c_req	i2c;
+		IOCTL_DEBUGOUT("ioctl: SIOCGI2C (Get I2C Data)");
+		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+		if (error)
+			break;
+		if ((i2c.dev_addr != 0xA0) || (i2c.dev_addr != 0xA2)){
+			error = EINVAL;
+			break;
+		}
+		hw->phy.ops.read_i2c_byte(hw, i2c.offset,
+		    i2c.dev_addr, i2c.data);
+		error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+		break;
+	}
 	default:
 		IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
 		error = ether_ioctl(ifp, command, data);
@@ -1040,7 +1073,7 @@
 	u32		rxdctl, rxctrl;
 
 	mtx_assert(&adapter->core_mtx, MA_OWNED);
-	INIT_DEBUGOUT("ixgbe_init: begin");
+	INIT_DEBUGOUT("ixgbe_init_locked: begin");
 	hw->adapter_stopped = FALSE;
 	ixgbe_stop_adapter(hw);
         callout_stop(&adapter->timer);
@@ -1056,7 +1089,7 @@
 
 	/* Set the various hardware offload abilities */
 	ifp->if_hwassist = 0;
-	if (ifp->if_capenable & IFCAP_TSO4)
+	if (ifp->if_capenable & IFCAP_TSO)
 		ifp->if_hwassist |= CSUM_TSO;
 	if (ifp->if_capenable & IFCAP_TXCSUM) {
 		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
@@ -1081,7 +1114,7 @@
 
 	/*
 	** Determine the correct mbuf pool
-	** for doing jumbo/headersplit
+	** for doing jumbo frames
 	*/
 	if (adapter->max_frame_size <= 2048)
 		adapter->rx_mbuf_sz = MCLBYTES;
@@ -1145,7 +1178,7 @@
 		 * from the Intel linux driver 3.8.21.
 		 * Prefetching enables tx line rate even with 1 queue.
 		 */
-		txdctl |= (16 << 0) | (1 << 8);
+		txdctl |= (32 << 0) | (1 << 8);
 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
 	}
 
@@ -1277,7 +1310,7 @@
 			tmp = IXGBE_LOW_DV(frame);
 		hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
 		
-		adapter->fc = hw->fc.requested_mode = ixgbe_fc_full;
+		hw->fc.requested_mode = adapter->fc;
 		hw->fc.pause_time = IXGBE_FC_PAUSE;
 		hw->fc.send_xon = TRUE;
 	}
@@ -1289,7 +1322,6 @@
 
 	/* Now inform the stack we're ready */
 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
-	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 
 	return;
 }
@@ -1352,23 +1384,6 @@
 	}
 }
 
-static inline void
-ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
-{
-	u32 mask;
-
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-		mask = (IXGBE_EIMS_RTX_QUEUE & queues);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-	} else {
-		mask = (queues & 0xFFFFFFFF);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
-		mask = (queues >> 32);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
-	}
-}
-
-
 static void
 ixgbe_handle_que(void *context, int pending)
 {
@@ -1379,25 +1394,24 @@
 	bool		more;
 
 	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-		more = ixgbe_rxeof(que, adapter->rx_process_limit);
+		more = ixgbe_rxeof(que);
 		IXGBE_TX_LOCK(txr);
 		ixgbe_txeof(txr);
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 		if (!drbr_empty(ifp, txr->br))
-			ixgbe_mq_start_locked(ifp, txr, NULL);
+			ixgbe_mq_start_locked(ifp, txr);
 #else
 		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
 			ixgbe_start_locked(txr, ifp);
 #endif
 		IXGBE_TX_UNLOCK(txr);
-		if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
-			taskqueue_enqueue(que->tq, &que->que_task);
-			return;
-		}
 	}
 
 	/* Reenable this interrupt */
-	ixgbe_enable_queue(adapter, que->msix);
+	if (que->res != NULL)
+		ixgbe_enable_queue(adapter, que->msix);
+	else
+		ixgbe_enable_intr(adapter);
 	return;
 }
 
@@ -1414,9 +1428,10 @@
 	struct ix_queue *que = arg;
 	struct adapter	*adapter = que->adapter;
 	struct ixgbe_hw	*hw = &adapter->hw;
+	struct ifnet    *ifp = adapter->ifp;
 	struct 		tx_ring *txr = adapter->tx_rings;
-	bool		more_tx, more_rx;
-	u32       	reg_eicr, loop = MAX_LOOP;
+	bool		more;
+	u32       	reg_eicr;
 
 
 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
@@ -1427,17 +1442,19 @@
 		return;
 	}
 
-	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+	more = ixgbe_rxeof(que);
 
 	IXGBE_TX_LOCK(txr);
-	do {
-		more_tx = ixgbe_txeof(txr);
-	} while (loop-- && more_tx);
+	ixgbe_txeof(txr);
+#ifdef IXGBE_LEGACY_TX
+	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+		ixgbe_start_locked(txr, ifp);
+#else
+	if (!drbr_empty(ifp, txr->br))
+		ixgbe_mq_start_locked(ifp, txr);
+#endif
 	IXGBE_TX_UNLOCK(txr);
 
-	if (more_rx || more_tx)
-		taskqueue_enqueue(que->tq, &que->que_task);
-
 	/* Check for fan failure */
 	if ((hw->phy.media_type == ixgbe_media_type_copper) &&
 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
@@ -1450,7 +1467,10 @@
 	if (reg_eicr & IXGBE_EICR_LSC)
 		taskqueue_enqueue(adapter->tq, &adapter->link_task);
 
-	ixgbe_enable_intr(adapter);
+	if (more)
+		taskqueue_enqueue(que->tq, &que->que_task);
+	else
+		ixgbe_enable_intr(adapter);
 	return;
 }
 
@@ -1465,29 +1485,30 @@
 {
 	struct ix_queue	*que = arg;
 	struct adapter  *adapter = que->adapter;
+	struct ifnet    *ifp = adapter->ifp;
 	struct tx_ring	*txr = que->txr;
 	struct rx_ring	*rxr = que->rxr;
-	bool		more_tx, more_rx;
+	bool		more;
 	u32		newitr = 0;
 
+	/* Protect against spurious interrupts */
+	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+		return;
+
 	ixgbe_disable_queue(adapter, que->msix);
 	++que->irqs;
 
-	more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+	more = ixgbe_rxeof(que);
 
 	IXGBE_TX_LOCK(txr);
-	more_tx = ixgbe_txeof(txr);
-	/*
-	** Make certain that if the stack 
-	** has anything queued the task gets
-	** scheduled to handle it.
-	*/
-#if __FreeBSD_version < 800000
-	if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+	ixgbe_txeof(txr);
+#ifdef IXGBE_LEGACY_TX
+	if (!IFQ_DRV_IS_EMPTY(ifp->if_snd))
+		ixgbe_start_locked(txr, ifp);
 #else
-	if (!drbr_empty(adapter->ifp, txr->br))
+	if (!drbr_empty(ifp, txr->br))
+		ixgbe_mq_start_locked(ifp, txr);
 #endif
-		more_tx = 1;
 	IXGBE_TX_UNLOCK(txr);
 
 	/* Do AIM now? */
@@ -1541,9 +1562,9 @@
         rxr->packets = 0;
 
 no_calc:
-	if (more_tx || more_rx)
+	if (more)
 		taskqueue_enqueue(que->tq, &que->que_task);
-	else /* Reenable this interrupt */
+	else
 		ixgbe_enable_queue(adapter, que->msix);
 	return;
 }
@@ -1560,6 +1581,8 @@
 
 	/* First get the cause */
 	reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+	/* Be sure the queue bits are not cleared */
+	reg_eicr &= ~IXGBE_EICR_RTX_QUEUE;
 	/* Clear interrupt with write */
 	IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
 
@@ -1605,11 +1628,11 @@
 
 	/* Check for over temp condition */
 	if ((hw->mac.type == ixgbe_mac_X540) &&
-	    (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
+	    (reg_eicr & IXGBE_EICR_TS)) {
                 device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
 		    "PHY IS SHUT DOWN!!\n");
                 device_printf(adapter->dev, "System shutdown required\n");
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
 	}
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
@@ -1648,7 +1671,7 @@
 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
 			break;
 		case IXGBE_LINK_SPEED_1GB_FULL:
-			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
 			break;
 		case IXGBE_LINK_SPEED_10GB_FULL:
 			ifmr->ifm_active |= adapter->optics | IFM_FDX;
@@ -1707,9 +1730,9 @@
 {
 	struct adapter  *adapter = txr->adapter;
 	u32		olinfo_status = 0, cmd_type_len;
-	u32		paylen = 0;
 	int             i, j, error, nsegs;
-	int		first, last = 0;
+	int		first;
+	bool		remap = TRUE;
 	struct mbuf	*m_head;
 	bus_dma_segment_t segs[adapter->num_segs];
 	bus_dmamap_t	map;
@@ -1737,75 +1760,59 @@
 	/*
 	 * Map the packet for DMA.
 	 */
+retry:
 	error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
 	    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
 
-	if (error == EFBIG) {
+	if (__predict_false(error)) {
 		struct mbuf *m;
 
-		m = m_defrag(*m_headp, M_DONTWAIT);
-		if (m == NULL) {
-			adapter->mbuf_defrag_failed++;
-			m_freem(*m_headp);
-			*m_headp = NULL;
-			return (ENOBUFS);
-		}
-		*m_headp = m;
-
-		/* Try it again */
-		error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
-		    *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
-
-		if (error == ENOMEM) {
-			adapter->no_tx_dma_setup++;
+		switch (error) {
+		case EFBIG:
+			/* Try it again? - one try */
+			if (remap == TRUE) {
+				remap = FALSE;
+				m = m_defrag(*m_headp, M_NOWAIT);
+				if (m == NULL) {
+					adapter->mbuf_defrag_failed++;
+					m_freem(*m_headp);
+					*m_headp = NULL;
+					return (ENOBUFS);
+				}
+				*m_headp = m;
+				goto retry;
+			} else
+				return (error);
+		case ENOMEM:
+			txr->no_tx_dma_setup++;
 			return (error);
-		} else if (error != 0) {
-			adapter->no_tx_dma_setup++;
+		default:
+			txr->no_tx_dma_setup++;
 			m_freem(*m_headp);
 			*m_headp = NULL;
 			return (error);
 		}
-	} else if (error == ENOMEM) {
-		adapter->no_tx_dma_setup++;
-		return (error);
-	} else if (error != 0) {
-		adapter->no_tx_dma_setup++;
-		m_freem(*m_headp);
-		*m_headp = NULL;
-		return (error);
 	}
 
 	/* Make certain there are enough descriptors */
 	if (nsegs > txr->tx_avail - 2) {
 		txr->no_desc_avail++;
-		error = ENOBUFS;
-		goto xmit_fail;
+		bus_dmamap_unload(txr->txtag, map);
+		return (ENOBUFS);
 	}
 	m_head = *m_headp;
 
 	/*
 	** Set up the appropriate offload context
-	** this becomes the first descriptor of 
-	** a packet.
+	** this will consume the first descriptor
 	*/
-	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
-		if (ixgbe_tso_setup(txr, m_head, &paylen)) {
-			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
-			olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
-			olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
-			olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
-			++adapter->tso_tx;
-		} else
-			return (ENXIO);
-	} else if (ixgbe_tx_ctx_setup(txr, m_head))
-		olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
+	if (__predict_false(error)) {
+		if (error == ENOBUFS)
+			*m_headp = NULL;
+		return (error);
+	}
 
-#ifdef IXGBE_IEEE1588
-        /* This is changing soon to an mtag detection */
-        if (we detect this mbuf has a TSTAMP mtag)
-                cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
-#endif
-
 #ifdef IXGBE_FDIR
 	/* Do the flow director magic */
 	if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
@@ -1816,10 +1823,6 @@
 		}
 	}
 #endif
-        /* Record payload length */
-	if (paylen == 0)
-        	olinfo_status |= m_head->m_pkthdr.len <<
-		    IXGBE_ADVTXD_PAYLEN_SHIFT;
 
 	i = txr->next_avail_desc;
 	for (j = 0; j < nsegs; j++) {
@@ -1835,13 +1838,9 @@
 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
 		    cmd_type_len |seglen);
 		txd->read.olinfo_status = htole32(olinfo_status);
-		last = i; /* descriptor that will get completion IRQ */
 
-		if (++i == adapter->num_tx_desc)
+		if (++i == txr->num_desc)
 			i = 0;
-
-		txbuf->m_head = NULL;
-		txbuf->eop_index = -1;
 	}
 
 	txd->read.cmd_type_len |=
@@ -1850,14 +1849,19 @@
 	txr->next_avail_desc = i;
 
 	txbuf->m_head = m_head;
-	/* Swap the dma map between the first and last descriptor */
+	/*
+	** Here we swap the map so the last descriptor,
+	** which gets the completion interrupt has the
+	** real map, and the first descriptor gets the
+	** unused map from this descriptor.
+	*/
 	txr->tx_buffers[first].map = txbuf->map;
 	txbuf->map = map;
 	bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
 
-        /* Set the index of the descriptor that will be marked done */
+        /* Set the EOP descriptor that will be marked done */
         txbuf = &txr->tx_buffers[first];
-	txbuf->eop_index = last;
+	txbuf->eop = txd;
 
         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
             BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1870,10 +1874,6 @@
 
 	return (0);
 
-xmit_fail:
-	bus_dmamap_unload(txr->txtag, txbuf->map);
-	return (error);
-
 }
 
 static void
@@ -1881,10 +1881,34 @@
 {
 	u_int32_t       reg_rctl;
 	struct ifnet   *ifp = adapter->ifp;
+	int		mcnt = 0;
 
 	reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
 	reg_rctl &= (~IXGBE_FCTRL_UPE);
-	reg_rctl &= (~IXGBE_FCTRL_MPE);
+	if (ifp->if_flags & IFF_ALLMULTI)
+		mcnt = MAX_NUM_MULTICAST_ADDRESSES;
+	else {
+		struct	ifmultiaddr *ifma;
+#if __FreeBSD_version < 800000
+		IF_ADDR_LOCK(ifp);
+#else
+		if_maddr_rlock(ifp);
+#endif
+		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+			if (ifma->ifma_addr->sa_family != AF_LINK)
+				continue;
+			if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
+				break;
+			mcnt++;
+		}
+#if __FreeBSD_version < 800000
+		IF_ADDR_UNLOCK(ifp);
+#else
+		if_maddr_runlock(ifp);
+#endif
+	}
+	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
+		reg_rctl &= (~IXGBE_FCTRL_MPE);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
 
 	if (ifp->if_flags & IFF_PROMISC) {
@@ -1923,18 +1947,6 @@
 	bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
 	    MAX_NUM_MULTICAST_ADDRESSES);
 
-	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	if (ifp->if_flags & IFF_PROMISC)
-		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	else if (ifp->if_flags & IFF_ALLMULTI) {
-		fctrl |= IXGBE_FCTRL_MPE;
-		fctrl &= ~IXGBE_FCTRL_UPE;
-	} else
-		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-	
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
-
 #if __FreeBSD_version < 800000
 	IF_ADDR_LOCK(ifp);
 #else
@@ -1943,6 +1955,8 @@
 	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
 		if (ifma->ifma_addr->sa_family != AF_LINK)
 			continue;
+		if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
+			break;
 		bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
 		    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
 		    IXGBE_ETH_LENGTH_OF_ADDRESS);
@@ -1954,10 +1968,25 @@
 	if_maddr_runlock(ifp);
 #endif
 
-	update_ptr = mta;
-	ixgbe_update_mc_addr_list(&adapter->hw,
-	    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	if (ifp->if_flags & IFF_PROMISC)
+		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
+	    ifp->if_flags & IFF_ALLMULTI) {
+		fctrl |= IXGBE_FCTRL_MPE;
+		fctrl &= ~IXGBE_FCTRL_UPE;
+	} else
+		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+	
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
 
+	if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
+		update_ptr = mta;
+		ixgbe_update_mc_addr_list(&adapter->hw,
+		    update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+	}
+
 	return;
 }
 
@@ -1992,13 +2021,11 @@
 {
 	struct adapter	*adapter = arg;
 	device_t	dev = adapter->dev;
-	struct ifnet	*ifp = adapter->ifp;
 	struct ix_queue *que = adapter->queues;
 	struct tx_ring	*txr = adapter->tx_rings;
-	int		hung, busy, paused;
+	int		hung = 0, paused = 0;
 
 	mtx_assert(&adapter->core_mtx, MA_OWNED);
-	hung = busy = paused = 0;
 
 	/* Check for pluggable optics */
 	if (adapter->sfp_probe)
@@ -2017,30 +2044,20 @@
 
 	/*
 	** Check the TX queues status
-	**      - central locked handling of OACTIVE
 	**      - watchdog only if all queues show hung
 	*/          
 	for (int i = 0; i < adapter->num_queues; i++, que++, txr++) {
-		if ((txr->queue_status & IXGBE_QUEUE_HUNG) &&
+		if ((txr->queue_status == IXGBE_QUEUE_HUNG) &&
 		    (paused == 0))
 			++hung;
-		if (txr->queue_status & IXGBE_QUEUE_DEPLETED)
-			++busy;
-		if ((txr->queue_status & IXGBE_QUEUE_IDLE) == 0)
-			taskqueue_enqueue(que->tq, &que->que_task);
+		else if (txr->queue_status == IXGBE_QUEUE_WORKING)
+			taskqueue_enqueue(que->tq, &txr->txq_task);
         }
 	/* Only truely watchdog if all queues show hung */
         if (hung == adapter->num_queues)
                 goto watchdog;
-	/* Only turn off the stack flow when ALL are depleted */
-        if (busy == adapter->num_queues)
-                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
-        else if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) &&
-            (busy < adapter->num_queues))
-                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
 
 out:
-	ixgbe_rearm_queues(adapter, adapter->que_mask);
 	callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
 	return;
 
@@ -2066,7 +2083,6 @@
 ixgbe_update_link_status(struct adapter *adapter)
 {
 	struct ifnet	*ifp = adapter->ifp;
-	struct tx_ring *txr = adapter->tx_rings;
 	device_t dev = adapter->dev;
 
 
@@ -2087,9 +2103,6 @@
 				device_printf(dev,"Link is Down\n");
 			if_link_state_change(ifp, LINK_STATE_DOWN);
 			adapter->link_active = FALSE;
-			for (int i = 0; i < adapter->num_queues;
-			    i++, txr++)
-				txr->queue_status = IXGBE_QUEUE_IDLE;
 		}
 	}
 
@@ -2120,15 +2133,19 @@
 
 	/* Let the stack know...*/
 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
-	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 
 	ixgbe_reset_hw(hw);
 	hw->adapter_stopped = FALSE;
 	ixgbe_stop_adapter(hw);
-	/* Turn off the laser */
-	if (hw->phy.multispeed_fiber)
-		ixgbe_disable_tx_laser(hw);
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		ixgbe_stop_mac_link_on_d3_82599(hw);
+	/* Turn off the laser - noop with no optics */
+	ixgbe_disable_tx_laser(hw);
 
+	/* Update the stack */
+	adapter->link_up = FALSE;
+       	ixgbe_update_link_status(adapter);
+
 	/* reprogram the RAR[0] in case user changed it. */
 	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
 
@@ -2179,7 +2196,7 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int		layer;
-	
+
 	layer = ixgbe_get_supported_physical_layer(hw);
 
 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) {
@@ -2192,6 +2209,11 @@
 		return;
 	}
 
+	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
+		adapter->optics = IFM_1000_SX;
+		return;
+	}
+
 	if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
 	    IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) {
 		adapter->optics = IFM_10G_LR;
@@ -2227,9 +2249,12 @@
 static int
 ixgbe_allocate_legacy(struct adapter *adapter)
 {
-	device_t dev = adapter->dev;
+	device_t	dev = adapter->dev;
 	struct		ix_queue *que = adapter->queues;
-	int error, rid = 0;
+#ifndef IXGBE_LEGACY_TX
+	struct tx_ring		*txr = adapter->tx_rings;
+#endif
+	int		error, rid = 0;
 
 	/* MSI RID at 1 */
 	if (adapter->msix == 1)
@@ -2248,6 +2273,9 @@
 	 * Try allocating a fast interrupt and the associated deferred
 	 * processing contexts.
 	 */
+#ifndef IXGBE_LEGACY_TX
+	TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
+#endif
 	TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
 	que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
             taskqueue_thread_enqueue, &que->tq);
@@ -2294,9 +2322,10 @@
 {
 	device_t        dev = adapter->dev;
 	struct 		ix_queue *que = adapter->queues;
+	struct  	tx_ring *txr = adapter->tx_rings;
 	int 		error, rid, vector = 0;
 
-	for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+	for (int i = 0; i < adapter->num_queues; i++, vector++, que++, txr++) {
 		rid = vector + 1;
 		que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
 		    RF_SHAREABLE | RF_ACTIVE);
@@ -2326,6 +2355,9 @@
 		if (adapter->num_queues > 1)
 			bus_bind_intr(dev, que->res, i);
 
+#ifndef IXGBE_LEGACY_TX
+		TASK_INIT(&txr->txq_task, 0, ixgbe_deferred_mq_start, txr);
+#endif
 		TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
 		que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
 		    taskqueue_thread_enqueue, &que->tq);
@@ -2384,15 +2416,18 @@
 		goto msi;
 
 	/* First try MSI/X */
+	msgs = pci_msix_count(dev); 
+	if (msgs == 0)
+		goto msi;
 	rid = PCIR_BAR(MSIX_82598_BAR);
 	adapter->msix_mem = bus_alloc_resource_any(dev,
 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
-       	if (!adapter->msix_mem) {
+       	if (adapter->msix_mem == NULL) {
 		rid += 4;	/* 82599 maps in higher BAR */
 		adapter->msix_mem = bus_alloc_resource_any(dev,
 		    SYS_RES_MEMORY, &rid, RF_ACTIVE);
 	}
-       	if (!adapter->msix_mem) {
+       	if (adapter->msix_mem == NULL) {
 		/* May not be enabled */
 		device_printf(adapter->dev,
 		    "Unable to map MSIX table \n");
@@ -2399,14 +2434,6 @@
 		goto msi;
 	}
 
-	msgs = pci_msix_count(dev); 
-	if (msgs == 0) { /* system has msix disabled */
-		bus_release_resource(dev, SYS_RES_MEMORY,
-		    rid, adapter->msix_mem);
-		adapter->msix_mem = NULL;
-		goto msi;
-	}
-
 	/* Figure out a reasonable auto config value */
 	queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
 
@@ -2428,21 +2455,33 @@
 		    "MSIX Configuration Problem, "
 		    "%d vectors but %d queues wanted!\n",
 		    msgs, want);
-		return (0); /* Will go to Legacy setup */
+		goto msi;
 	}
-	if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+	if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
                	device_printf(adapter->dev,
 		    "Using MSIX interrupts with %d vectors\n", msgs);
 		adapter->num_queues = queues;
 		return (msgs);
 	}
+	/*
+	** If MSIX alloc failed or provided us with
+	** less than needed, free and fall through to MSI
+	*/
+	pci_release_msi(dev);
+
 msi:
-       	msgs = pci_msi_count(dev);
-       	if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+       	if (adapter->msix_mem != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    rid, adapter->msix_mem);
+		adapter->msix_mem = NULL;
+	}
+       	msgs = 1;
+       	if (pci_alloc_msi(dev, &msgs) == 0) {
                	device_printf(adapter->dev,"Using an MSI interrupt\n");
-	else
-               	device_printf(adapter->dev,"Using a Legacy interrupt\n");
-	return (msgs);
+		return (msgs);
+	}
+	device_printf(adapter->dev,"Using a Legacy interrupt\n");
+	return (0);
 }
 
 
@@ -2564,17 +2603,24 @@
 		return (-1);
 	}
 	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+#if __FreeBSD_version < 1000025
 	ifp->if_baudrate = 1000000000;
+#else
+	if_initbaudrate(ifp, IF_Gbps(10));
+#endif
 	ifp->if_init = ixgbe_init;
 	ifp->if_softc = adapter;
 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 	ifp->if_ioctl = ixgbe_ioctl;
-	ifp->if_start = ixgbe_start;
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 	ifp->if_transmit = ixgbe_mq_start;
 	ifp->if_qflush = ixgbe_qflush;
+#else
+	ifp->if_start = ixgbe_start;
+	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 2);
+	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 2;
+	IFQ_SET_READY(&ifp->if_snd);
 #endif
-	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
 
 	ether_ifattach(ifp, adapter->hw.mac.addr);
 
@@ -2586,16 +2632,14 @@
 	 */
 	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
 
-	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
+	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWCSUM;
 	ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+	ifp->if_capabilities |= IFCAP_LRO;
 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
 			     |  IFCAP_VLAN_HWTSO
 			     |  IFCAP_VLAN_MTU;
 	ifp->if_capenable = ifp->if_capabilities;
 
-	/* Don't enable LRO by default */
-	ifp->if_capabilities |= IFCAP_LRO;
-
 	/*
 	** Don't turn this on by default, if vlans are
 	** created on another pseudo device (eg. lagg)
@@ -2644,7 +2688,7 @@
 			taskqueue_enqueue(adapter->tq, &adapter->mod_task);
 	} else {
 		if (hw->mac.ops.check_link)
-			err = ixgbe_check_link(hw, &autoneg,
+			err = ixgbe_check_link(hw, &adapter->link_speed,
 			    &adapter->link_up, FALSE);
 		if (err)
 			goto out;
@@ -2655,8 +2699,8 @@
 		if (err)
 			goto out;
 		if (hw->mac.ops.setup_link)
-                	err = hw->mac.ops.setup_link(hw, autoneg,
-			    negotiate, adapter->link_up);
+                	err = hw->mac.ops.setup_link(hw,
+			    autoneg, adapter->link_up);
 	}
 out:
 	return;
@@ -2795,6 +2839,7 @@
 		txr = &adapter->tx_rings[i];
 		txr->adapter = adapter;
 		txr->me = i;
+		txr->num_desc = adapter->num_tx_desc;
 
 		/* Initialize the TX side lock */
 		snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
@@ -2818,7 +2863,7 @@
 			error = ENOMEM;
 			goto err_tx_desc;
         	}
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 		/* Allocate a buf ring */
 		txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
 		    M_WAITOK, &txr->tx_mtx);
@@ -2841,6 +2886,7 @@
 		/* Set up some basics */
 		rxr->adapter = adapter;
 		rxr->me = i;
+		rxr->num_desc = adapter->num_rx_desc;
 
 		/* Initialize the RX side lock */
 		snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
@@ -2986,7 +3032,7 @@
 
 	/* Free any existing tx buffers. */
         txbuf = txr->tx_buffers;
-	for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+	for (i = 0; i < txr->num_desc; i++, txbuf++) {
 		if (txbuf->m_head != NULL) {
 			bus_dmamap_sync(txr->txtag, txbuf->map,
 			    BUS_DMASYNC_POSTWRITE);
@@ -3010,8 +3056,8 @@
 			netmap_load_map(txr->txtag, txbuf->map, NMB(slot + si));
 		}
 #endif /* DEV_NETMAP */
-		/* Clear the EOP index */
-		txbuf->eop_index = -1;
+		/* Clear the EOP descriptor pointer */
+		txbuf->eop = NULL;
         }
 
 #ifdef IXGBE_FDIR
@@ -3065,7 +3111,7 @@
 		       (tdba & 0x00000000ffffffffULL));
 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
-		    adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
+		    adapter->num_tx_desc * sizeof(union ixgbe_adv_tx_desc));
 
 		/* Setup the HW Tx Head and Tail descriptor pointers */
 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
@@ -3075,6 +3121,9 @@
 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
 		txr->queue_status = IXGBE_QUEUE_IDLE;
 
+		/* Set the processing limit */
+		txr->process_limit = ixgbe_tx_process_limit;
+
 		/* Disable Head Writeback */
 		switch (hw->mac.type) {
 		case ixgbe_mac_82598EB:
@@ -3149,7 +3198,7 @@
 	struct ixgbe_tx_buf *tx_buffer;
 	int             i;
 
-	INIT_DEBUGOUT("free_transmit_ring: begin");
+	INIT_DEBUGOUT("ixgbe_free_transmit_ring: begin");
 
 	if (txr->tx_buffers == NULL)
 		return;
@@ -3176,7 +3225,7 @@
 			tx_buffer->map = NULL;
 		}
 	}
-#if __FreeBSD_version >= 800000
+#ifdef IXGBE_LEGACY_TX
 	if (txr->br != NULL)
 		buf_ring_free(txr->br, M_DEVBUF);
 #endif
@@ -3193,43 +3242,49 @@
 
 /*********************************************************************
  *
- *  Advanced Context Descriptor setup for VLAN or CSUM
+ *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
  *
  **********************************************************************/
 
-static bool
-ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+static int
+ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
+    u32 *cmd_type_len, u32 *olinfo_status)
 {
-	struct adapter *adapter = txr->adapter;
 	struct ixgbe_adv_tx_context_desc *TXD;
-	struct ixgbe_tx_buf        *tx_buffer;
-	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
 	struct ether_vlan_header *eh;
 	struct ip *ip;
 	struct ip6_hdr *ip6;
-	int  ehdrlen, ip_hlen = 0;
+	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+	int	ehdrlen, ip_hlen = 0;
 	u16	etype;
 	u8	ipproto = 0;
-	bool	offload = TRUE;
-	int ctxd = txr->next_avail_desc;
-	u16 vtag = 0;
+	int	offload = TRUE;
+	int	ctxd = txr->next_avail_desc;
+	u16	vtag = 0;
 
+	/* First check if TSO is to be used */
+	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
+		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
 
 	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
 		offload = FALSE;
 
-	tx_buffer = &txr->tx_buffers[ctxd];
+	/* Indicate the whole packet as payload when not doing TSO */
+       	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+	/* Now ready a context descriptor */
 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 
 	/*
 	** In advanced descriptors the vlan tag must 
-	** be placed into the descriptor itself.
+	** be placed into the context descriptor. Hence
+	** we need to make one even if not doing offloads.
 	*/
 	if (mp->m_flags & M_VLANTAG) {
 		vtag = htole16(mp->m_pkthdr.ether_vtag);
 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
-	} else if (offload == FALSE)
-		return FALSE;
+	} else if (offload == FALSE) /* ... no offload to do */
+		return (0);
 
 	/*
 	 * Determine where frame payload starts.
@@ -3258,6 +3313,7 @@
 		case ETHERTYPE_IPV6:
 			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
 			ip_hlen = sizeof(struct ip6_hdr);
+			/* XXX-BZ this will go badly in case of ext hdrs. */
 			ipproto = ip6->ip6_nxt;
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
 			break;
@@ -3291,22 +3347,22 @@
 			break;
 	}
 
+	if (offload) /* For the TX descriptor setup */
+		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
 	/* Now copy bits into descriptor */
-	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
-	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
+	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
 	TXD->seqnum_seed = htole32(0);
 	TXD->mss_l4len_idx = htole32(0);
 
-	tx_buffer->m_head = NULL;
-	tx_buffer->eop_index = -1;
-
 	/* We've consumed the first desc, adjust counters */
-	if (++ctxd == adapter->num_tx_desc)
+	if (++ctxd == txr->num_desc)
 		ctxd = 0;
 	txr->next_avail_desc = ctxd;
 	--txr->tx_avail;
 
-        return (offload);
+        return (0);
 }
 
 /**********************************************************************
@@ -3315,18 +3371,22 @@
  *  adapters using advanced tx descriptors
  *
  **********************************************************************/
-static bool
-ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
+static int
+ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp,
+    u32 *cmd_type_len, u32 *olinfo_status)
 {
-	struct adapter *adapter = txr->adapter;
 	struct ixgbe_adv_tx_context_desc *TXD;
-	struct ixgbe_tx_buf        *tx_buffer;
 	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-	u32 mss_l4len_idx = 0;
-	u16 vtag = 0;
-	int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
+	u32 mss_l4len_idx = 0, paylen;
+	u16 vtag = 0, eh_type;
+	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
 	struct ether_vlan_header *eh;
+#ifdef INET6
+	struct ip6_hdr *ip6;
+#endif
+#ifdef INET
 	struct ip *ip;
+#endif
 	struct tcphdr *th;
 
 
@@ -3335,32 +3395,56 @@
 	 * Jump over vlan headers if already present
 	 */
 	eh = mtod(mp, struct ether_vlan_header *);
-	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
+	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
-	else
+		eh_type = eh->evl_proto;
+	} else {
 		ehdrlen = ETHER_HDR_LEN;
+		eh_type = eh->evl_encap_proto;
+	}
 
-        /* Ensure we have at least the IP+TCP header in the first mbuf. */
-        if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
-		return FALSE;
+	switch (ntohs(eh_type)) {
+#ifdef INET6
+	case ETHERTYPE_IPV6:
+		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+		/* XXX-BZ For now we do not pretend to support ext. hdrs. */
+		if (ip6->ip6_nxt != IPPROTO_TCP)
+			return (ENXIO);
+		ip_hlen = sizeof(struct ip6_hdr);
+		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
+		th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
+		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+		break;
+#endif
+#ifdef INET
+	case ETHERTYPE_IP:
+		ip = (struct ip *)(mp->m_data + ehdrlen);
+		if (ip->ip_p != IPPROTO_TCP)
+			return (ENXIO);
+		ip->ip_sum = 0;
+		ip_hlen = ip->ip_hl << 2;
+		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+		th->th_sum = in_pseudo(ip->ip_src.s_addr,
+		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+		/* Tell transmit desc to also do IPv4 checksum. */
+		*olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+		break;
+#endif
+	default:
+		panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
+		    __func__, ntohs(eh_type));
+		break;
+	}
 
 	ctxd = txr->next_avail_desc;
-	tx_buffer = &txr->tx_buffers[ctxd];
 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
 
-	ip = (struct ip *)(mp->m_data + ehdrlen);
-	if (ip->ip_p != IPPROTO_TCP)
-		return FALSE;   /* 0 */
-	ip->ip_sum = 0;
-	ip_hlen = ip->ip_hl << 2;
-	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
-	th->th_sum = in_pseudo(ip->ip_src.s_addr,
-	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
 	tcp_hlen = th->th_off << 2;
-	hdrlen = ehdrlen + ip_hlen + tcp_hlen;
 
 	/* This is used in the transmit desc in encap */
-	*paylen = mp->m_pkthdr.len - hdrlen;
+	paylen = mp->m_pkthdr.len - ehdrlen - ip_hlen - tcp_hlen;
 
 	/* VLAN MACLEN IPLEN */
 	if (mp->m_flags & M_VLANTAG) {
@@ -3370,15 +3454,13 @@
 
 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= ip_hlen;
-	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
 
 	/* ADV DTYPE TUCMD */
 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
 	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-	type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
-	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
 
-
 	/* MSS L4LEN IDX */
 	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
 	mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
@@ -3385,15 +3467,17 @@
 	TXD->mss_l4len_idx = htole32(mss_l4len_idx);
 
 	TXD->seqnum_seed = htole32(0);
-	tx_buffer->m_head = NULL;
-	tx_buffer->eop_index = -1;
 
-	if (++ctxd == adapter->num_tx_desc)
+	if (++ctxd == txr->num_desc)
 		ctxd = 0;
 
 	txr->tx_avail--;
 	txr->next_avail_desc = ctxd;
-	return TRUE;
+	*cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+	*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+	*olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+	++txr->tso_tx;
+	return (0);
 }
 
 #ifdef IXGBE_FDIR
@@ -3479,14 +3563,15 @@
  *  tx_buffer is put back on the free queue.
  *
  **********************************************************************/
-static bool
+static void
 ixgbe_txeof(struct tx_ring *txr)
 {
-	struct adapter	*adapter = txr->adapter;
-	struct ifnet	*ifp = adapter->ifp;
-	u32	first, last, done, processed;
-	struct ixgbe_tx_buf *tx_buffer;
-	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
+	struct adapter		*adapter = txr->adapter;
+	struct ifnet		*ifp = adapter->ifp;
+	u32			work, processed = 0;
+	u16			limit = txr->process_limit;
+	struct ixgbe_tx_buf	*buf;
+	union ixgbe_adv_tx_desc *txd;
 
 	mtx_assert(&txr->tx_mtx, MA_OWNED);
 
@@ -3494,9 +3579,7 @@
 	if (ifp->if_capenable & IFCAP_NETMAP) {
 		struct netmap_adapter *na = NA(ifp);
 		struct netmap_kring *kring = &na->tx_rings[txr->me];
-
-		tx_desc = (struct ixgbe_legacy_tx_desc *)txr->tx_base;
-
+		txd = txr->tx_base;
 		bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 		    BUS_DMASYNC_POSTREAD);
 		/*
@@ -3519,97 +3602,100 @@
 		 */
 		if (!netmap_mitigate ||
 		    (kring->nr_kflags < kring->nkr_num_slots &&
-		     tx_desc[kring->nr_kflags].upper.fields.status & IXGBE_TXD_STAT_DD)) {
-			kring->nr_kflags = kring->nkr_num_slots;
-			selwakeuppri(&na->tx_rings[txr->me].si, PI_NET);
-			IXGBE_TX_UNLOCK(txr);
-			IXGBE_CORE_LOCK(adapter);
-			selwakeuppri(&na->tx_si, PI_NET);
-			IXGBE_CORE_UNLOCK(adapter);
-			IXGBE_TX_LOCK(txr);
+		    txd[kring->nr_kflags].wb.status & IXGBE_TXD_STAT_DD)) {
+			netmap_tx_irq(ifp, txr->me |
+			    (NETMAP_LOCKED_ENTER|NETMAP_LOCKED_EXIT));
 		}
-		return FALSE;
+		return;
 	}
 #endif /* DEV_NETMAP */
 
-	if (txr->tx_avail == adapter->num_tx_desc) {
+	if (txr->tx_avail == txr->num_desc) {
 		txr->queue_status = IXGBE_QUEUE_IDLE;
-		return FALSE;
+		return;
 	}
 
-	processed = 0;
-	first = txr->next_to_clean;
-	tx_buffer = &txr->tx_buffers[first];
-	/* For cleanup we just use legacy struct */
-	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
-	last = tx_buffer->eop_index;
-	if (last == -1)
-		return FALSE;
-	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
-
-	/*
-	** Get the index of the first descriptor
-	** BEYOND the EOP and call that 'done'.
-	** I do this so the comparison in the
-	** inner while loop below can be simple
-	*/
-	if (++last == adapter->num_tx_desc) last = 0;
-	done = last;
-
+	/* Get work starting point */
+	work = txr->next_to_clean;
+	buf = &txr->tx_buffers[work];
+	txd = &txr->tx_base[work];
+	work -= txr->num_desc; /* The distance to ring end */
         bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
             BUS_DMASYNC_POSTREAD);
-	/*
-	** Only the EOP descriptor of a packet now has the DD
-	** bit set, this is what we look for...
-	*/
-	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
-		/* We clean the range of the packet */
-		while (first != done) {
-			tx_desc->upper.data = 0;
-			tx_desc->lower.data = 0;
-			tx_desc->buffer_addr = 0;
-			++txr->tx_avail;
-			++processed;
 
-			if (tx_buffer->m_head) {
+	do {
+		union ixgbe_adv_tx_desc *eop = buf->eop;
+		if (eop == NULL) /* No work */
+			break;
+
+		if ((eop->wb.status & IXGBE_TXD_STAT_DD) == 0)
+			break;	/* I/O not complete */
+
+		if (buf->m_head) {
+			txr->bytes +=
+			    buf->m_head->m_pkthdr.len;
+			bus_dmamap_sync(txr->txtag,
+			    buf->map,
+			    BUS_DMASYNC_POSTWRITE);
+			bus_dmamap_unload(txr->txtag,
+			    buf->map);
+			m_freem(buf->m_head);
+			buf->m_head = NULL;
+			buf->map = NULL;
+		}
+		buf->eop = NULL;
+		++txr->tx_avail;
+
+		/* We clean the range if multi segment */
+		while (txd != eop) {
+			++txd;
+			++buf;
+			++work;
+			/* wrap the ring? */
+			if (__predict_false(!work)) {
+				work -= txr->num_desc;
+				buf = txr->tx_buffers;
+				txd = txr->tx_base;
+			}
+			if (buf->m_head) {
 				txr->bytes +=
-				    tx_buffer->m_head->m_pkthdr.len;
+				    buf->m_head->m_pkthdr.len;
 				bus_dmamap_sync(txr->txtag,
-				    tx_buffer->map,
+				    buf->map,
 				    BUS_DMASYNC_POSTWRITE);
 				bus_dmamap_unload(txr->txtag,
-				    tx_buffer->map);
-				m_freem(tx_buffer->m_head);
-				tx_buffer->m_head = NULL;
-				tx_buffer->map = NULL;
+				    buf->map);
+				m_freem(buf->m_head);
+				buf->m_head = NULL;
+				buf->map = NULL;
 			}
-			tx_buffer->eop_index = -1;
-			txr->watchdog_time = ticks;
+			++txr->tx_avail;
+			buf->eop = NULL;
 
-			if (++first == adapter->num_tx_desc)
-				first = 0;
-
-			tx_buffer = &txr->tx_buffers[first];
-			tx_desc =
-			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
 		}
 		++txr->packets;
+		++processed;
 		++ifp->if_opackets;
-		/* See if there is more work now */
-		last = tx_buffer->eop_index;
-		if (last != -1) {
-			eop_desc =
-			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
-			/* Get next done point */
-			if (++last == adapter->num_tx_desc) last = 0;
-			done = last;
-		} else
-			break;
-	}
+		txr->watchdog_time = ticks;
+
+		/* Try the next packet */
+		++txd;
+		++buf;
+		++work;
+		/* reset with a wrap */
+		if (__predict_false(!work)) {
+			work -= txr->num_desc;
+			buf = txr->tx_buffers;
+			txd = txr->tx_base;
+		}
+		prefetch(txd);
+	} while (__predict_true(--limit));
+
 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
-	txr->next_to_clean = first;
+	work += txr->num_desc;
+	txr->next_to_clean = work;
 
 	/*
 	** Watchdog calculation, we know there's
@@ -3620,16 +3706,10 @@
 	if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
 		txr->queue_status = IXGBE_QUEUE_HUNG;
 
-	/* With a minimum free clear the depleted state bit.  */
-	if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD)
-		txr->queue_status &= ~IXGBE_QUEUE_DEPLETED;
-
-	if (txr->tx_avail == adapter->num_tx_desc) {
+	if (txr->tx_avail == txr->num_desc)
 		txr->queue_status = IXGBE_QUEUE_IDLE;
-		return (FALSE);
-	}
 
-	return TRUE;
+	return;
 }
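
The rewritten ixgbe_txeof() walks the ring with `work` held as a negative offset from the ring end, so the wrap test collapses to a simple zero check and the absolute index is recovered by adding num_desc back before storing next_to_clean. A small standalone sketch of that arithmetic:

/*
 * Sketch of the negative-offset ring walk used by the new ixgbe_txeof().
 * 'pos' plays the role of 'work': it runs from -num_desc toward 0 and
 * hits 0 exactly when the index wraps back to slot 0.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t
advance(int32_t *pos, uint16_t num_desc)
{
	if (++(*pos) == 0)			/* wrapped: reset offset */
		*pos -= num_desc;
	return (uint16_t)(*pos + num_desc);	/* absolute ring index */
}

int
main(void)
{
	uint16_t num_desc = 4;
	int32_t pos = 2 - num_desc;	/* start cleaning at index 2 */

	assert(advance(&pos, num_desc) == 3);
	assert(advance(&pos, num_desc) == 0);	/* wrap to ring start */
	assert(advance(&pos, num_desc) == 1);
	return (0);
}
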
 
 /*********************************************************************
@@ -3645,80 +3725,60 @@
 ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
 {
 	struct adapter		*adapter = rxr->adapter;
-	bus_dma_segment_t	hseg[1];
-	bus_dma_segment_t	pseg[1];
+	bus_dma_segment_t	seg[1];
 	struct ixgbe_rx_buf	*rxbuf;
-	struct mbuf		*mh, *mp;
+	struct mbuf		*mp;
 	int			i, j, nsegs, error;
 	bool			refreshed = FALSE;
 
 	i = j = rxr->next_to_refresh;
 	/* Control the loop with one beyond */
-	if (++j == adapter->num_rx_desc)
+	if (++j == rxr->num_desc)
 		j = 0;
 
 	while (j != limit) {
 		rxbuf = &rxr->rx_buffers[i];
-		if (rxr->hdr_split == FALSE)
-			goto no_split;
-
-		if (rxbuf->m_head == NULL) {
-			mh = m_gethdr(M_DONTWAIT, MT_DATA);
-			if (mh == NULL)
+		if (rxbuf->buf == NULL) {
+			mp = m_getjcl(M_NOWAIT, MT_DATA,
+			    M_PKTHDR, rxr->mbuf_sz);
+			if (mp == NULL)
 				goto update;
+			if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+				m_adj(mp, ETHER_ALIGN);
 		} else
-			mh = rxbuf->m_head;
+			mp = rxbuf->buf;
 
-		mh->m_pkthdr.len = mh->m_len = MHLEN;
-		mh->m_len = MHLEN;
-		mh->m_flags |= M_PKTHDR;
-		/* Get the memory mapping */
-		error = bus_dmamap_load_mbuf_sg(rxr->htag,
-		    rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
-		if (error != 0) {
-			printf("Refresh mbufs: hdr dmamap load"
-			    " failure - %d\n", error);
-			m_free(mh);
-			rxbuf->m_head = NULL;
-			goto update;
-		}
-		rxbuf->m_head = mh;
-		bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-		    BUS_DMASYNC_PREREAD);
-		rxr->rx_base[i].read.hdr_addr =
-		    htole64(hseg[0].ds_addr);
+		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
 
-no_split:
-		if (rxbuf->m_pack == NULL) {
-			mp = m_getjcl(M_DONTWAIT, MT_DATA,
-			    M_PKTHDR, adapter->rx_mbuf_sz);
-			if (mp == NULL)
+		/* If we're dealing with an mbuf that was copied rather
+		 * than replaced, there's no need to go through busdma.
+		 */
+		if ((rxbuf->flags & IXGBE_RX_COPY) == 0) {
+			/* Get the memory mapping */
+			error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+			    rxbuf->pmap, mp, seg, &nsegs, BUS_DMA_NOWAIT);
+			if (error != 0) {
+				printf("Refresh mbufs: payload dmamap load"
+				    " failure - %d\n", error);
+				m_free(mp);
+				rxbuf->buf = NULL;
 				goto update;
-		} else
-			mp = rxbuf->m_pack;
-
-		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
-		/* Get the memory mapping */
-		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
-		    rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
-		if (error != 0) {
-			printf("Refresh mbufs: payload dmamap load"
-			    " failure - %d\n", error);
-			m_free(mp);
-			rxbuf->m_pack = NULL;
-			goto update;
+			}
+			rxbuf->buf = mp;
+			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+			    BUS_DMASYNC_PREREAD);
+			rxbuf->addr = rxr->rx_base[i].read.pkt_addr =
+			    htole64(seg[0].ds_addr);
+		} else {
+			rxr->rx_base[i].read.pkt_addr = rxbuf->addr;
+			rxbuf->flags &= ~IXGBE_RX_COPY;
 		}
-		rxbuf->m_pack = mp;
-		bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
-		    BUS_DMASYNC_PREREAD);
-		rxr->rx_base[i].read.pkt_addr =
-		    htole64(pseg[0].ds_addr);
 
 		refreshed = TRUE;
 		/* Next is precalculated */
 		i = j;
 		rxr->next_to_refresh = i;
-		if (++j == adapter->num_rx_desc)
+		if (++j == rxr->num_desc)
 			j = 0;
 	}
 update:
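
Note on the cached address used above: the advanced RX descriptor is a union, so the hardware's write-back format overwrites the programmed pkt_addr. Caching the bus address in rxbuf->addr when the buffer is first mapped is what lets the IXGBE_RX_COPY branch re-arm the slot without another bus_dmamap_load pass.
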
@@ -3744,7 +3804,7 @@
 	struct ixgbe_rx_buf 	*rxbuf;
 	int             	i, bsize, error;
 
-	bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
+	bsize = sizeof(struct ixgbe_rx_buf) * rxr->num_desc;
 	if (!(rxr->rx_buffers =
 	    (struct ixgbe_rx_buf *) malloc(bsize,
 	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
@@ -3758,22 +3818,6 @@
 				   BUS_SPACE_MAXADDR,	/* lowaddr */
 				   BUS_SPACE_MAXADDR,	/* highaddr */
 				   NULL, NULL,		/* filter, filterarg */
-				   MSIZE,		/* maxsize */
-				   1,			/* nsegments */
-				   MSIZE,		/* maxsegsize */
-				   0,			/* flags */
-				   NULL,		/* lockfunc */
-				   NULL,		/* lockfuncarg */
-				   &rxr->htag))) {
-		device_printf(dev, "Unable to create RX DMA tag\n");
-		goto fail;
-	}
-
-	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
-				   1, 0,	/* alignment, bounds */
-				   BUS_SPACE_MAXADDR,	/* lowaddr */
-				   BUS_SPACE_MAXADDR,	/* highaddr */
-				   NULL, NULL,		/* filter, filterarg */
 				   MJUM16BYTES,		/* maxsize */
 				   1,			/* nsegments */
 				   MJUM16BYTES,		/* maxsegsize */
@@ -3785,18 +3829,12 @@
 		goto fail;
 	}
 
-	for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) {
+	for (i = 0; i < rxr->num_desc; i++, rxbuf++) {
 		rxbuf = &rxr->rx_buffers[i];
-		error = bus_dmamap_create(rxr->htag,
-		    BUS_DMA_NOWAIT, &rxbuf->hmap);
-		if (error) {
-			device_printf(dev, "Unable to create RX head map\n");
-			goto fail;
-		}
 		error = bus_dmamap_create(rxr->ptag,
 		    BUS_DMA_NOWAIT, &rxbuf->pmap);
 		if (error) {
-			device_printf(dev, "Unable to create RX pkt map\n");
+			device_printf(dev, "Unable to create RX dma map\n");
 			goto fail;
 		}
 	}
@@ -3826,6 +3864,10 @@
  *  for an RX ring, this is toggled by the LRO capability
  *  even though it is transparent to the stack.
  *
+ *  NOTE: since this HW feature only works with IPV4 and 
+ *        our testing has shown soft LRO to be as effective,
+ *        I have decided to disable this by default.
+ *
  **********************************************************************/
 static void
 ixgbe_setup_hw_rsc(struct rx_ring *rxr)
@@ -3834,6 +3876,13 @@
 	struct	ixgbe_hw	*hw = &adapter->hw;
 	u32			rscctrl, rdrxctl;
 
+	/* If turning LRO/RSC off we need to disable it */
+	if ((adapter->ifp->if_capenable & IFCAP_LRO) == 0) {
+		rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+		rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+		return;
+	}
+
 	rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
 	rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
 #ifdef DEV_NETMAP /* crcstrip is optional in netmap */
@@ -3849,11 +3898,11 @@
 	** Limit the total number of descriptors that
 	** can be combined, so it does not exceed 64K
 	*/
-	if (adapter->rx_mbuf_sz == MCLBYTES)
+	if (rxr->mbuf_sz == MCLBYTES)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-	else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
+	else if (rxr->mbuf_sz == MJUMPAGESIZE)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-	else if (adapter->rx_mbuf_sz == MJUM9BYTES)
+	else if (rxr->mbuf_sz == MJUM9BYTES)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
 	else  /* Using 16K cluster */
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
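
The MAXDESC choice above keeps the largest possible RSC aggregation under the 64KB limit of the descriptor length field. Assuming the usual FreeBSD cluster sizes and the 1/4/8/16 encodings of the MAXDESC field (both assumptions, not taken from this hunk): 16 x 2KB (MCLBYTES) = 32KB, 8 x 4KB (MJUMPAGESIZE) = 32KB, and 4 x 9KB (MJUM9BYTES) = 36KB, while 4 x 16KB would already reach 64KB, so a 16KB cluster falls back to a single descriptor per aggregation.
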
@@ -3876,29 +3925,20 @@
 static void     
 ixgbe_free_receive_ring(struct rx_ring *rxr)
 { 
-	struct  adapter         *adapter;
 	struct ixgbe_rx_buf       *rxbuf;
 	int i;
 
-	adapter = rxr->adapter;
-	for (i = 0; i < adapter->num_rx_desc; i++) {
+	for (i = 0; i < rxr->num_desc; i++) {
 		rxbuf = &rxr->rx_buffers[i];
-		if (rxbuf->m_head != NULL) {
-			bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-			    BUS_DMASYNC_POSTREAD);
-			bus_dmamap_unload(rxr->htag, rxbuf->hmap);
-			rxbuf->m_head->m_flags |= M_PKTHDR;
-			m_freem(rxbuf->m_head);
-		}
-		if (rxbuf->m_pack != NULL) {
+		if (rxbuf->buf != NULL) {
 			bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 			    BUS_DMASYNC_POSTREAD);
 			bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
-			rxbuf->m_pack->m_flags |= M_PKTHDR;
-			m_freem(rxbuf->m_pack);
+			rxbuf->buf->m_flags |= M_PKTHDR;
+			m_freem(rxbuf->buf);
+			rxbuf->buf = NULL;
+			rxbuf->flags = 0;
 		}
-		rxbuf->m_head = NULL;
-		rxbuf->m_pack = NULL;
 	}
 }
 
@@ -3915,7 +3955,7 @@
 	struct ifnet		*ifp;
 	device_t		dev;
 	struct ixgbe_rx_buf	*rxbuf;
-	bus_dma_segment_t	pseg[1], hseg[1];
+	bus_dma_segment_t	seg[1];
 	struct lro_ctrl		*lro = &rxr->lro;
 	int			rsize, nsegs, error = 0;
 #ifdef DEV_NETMAP
@@ -3936,17 +3976,15 @@
 	rsize = roundup2(adapter->num_rx_desc *
 	    sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
 	bzero((void *)rxr->rx_base, rsize);
+	/* Cache the size */
+	rxr->mbuf_sz = adapter->rx_mbuf_sz;
 
 	/* Free current RX buffer structs and their mbufs */
 	ixgbe_free_receive_ring(rxr);
 
-	/* Configure header split? */
-	if (ixgbe_header_split)
-		rxr->hdr_split = TRUE;
-
 	/* Now replenish the mbufs */
-	for (int j = 0; j != adapter->num_rx_desc; ++j) {
-		struct mbuf	*mh, *mp;
+	for (int j = 0; j != rxr->num_desc; ++j) {
+		struct mbuf	*mp;
 
 		rxbuf = &rxr->rx_buffers[j];
 #ifdef DEV_NETMAP
@@ -3964,59 +4002,32 @@
 
 			addr = PNMB(slot + sj, &paddr);
 			netmap_load_map(rxr->ptag, rxbuf->pmap, addr);
-			/* Update descriptor */
+			/* Update descriptor and the cached value */
 			rxr->rx_base[j].read.pkt_addr = htole64(paddr);
+			rxbuf->addr = htole64(paddr);
 			continue;
 		}
 #endif /* DEV_NETMAP */
-		/*
-		** Don't allocate mbufs if not
-		** doing header split, its wasteful
-		*/ 
-		if (rxr->hdr_split == FALSE)
-			goto skip_head;
-
-		/* First the header */
-		rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
-		if (rxbuf->m_head == NULL) {
-			error = ENOBUFS;
-			goto fail;
-		}
-		m_adj(rxbuf->m_head, ETHER_ALIGN);
-		mh = rxbuf->m_head;
-		mh->m_len = mh->m_pkthdr.len = MHLEN;
-		mh->m_flags |= M_PKTHDR;
-		/* Get the memory mapping */
-		error = bus_dmamap_load_mbuf_sg(rxr->htag,
-		    rxbuf->hmap, rxbuf->m_head, hseg,
-		    &nsegs, BUS_DMA_NOWAIT);
-		if (error != 0) /* Nothing elegant to do here */
-			goto fail;
-		bus_dmamap_sync(rxr->htag,
-		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
-		/* Update descriptor */
-		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
-
-skip_head:
-		/* Now the payload cluster */
-		rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+		rxbuf->flags = 0; 
+		rxbuf->buf = m_getjcl(M_NOWAIT, MT_DATA,
 		    M_PKTHDR, adapter->rx_mbuf_sz);
-		if (rxbuf->m_pack == NULL) {
+		if (rxbuf->buf == NULL) {
 			error = ENOBUFS;
                         goto fail;
 		}
-		mp = rxbuf->m_pack;
-		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+		mp = rxbuf->buf;
+		mp->m_pkthdr.len = mp->m_len = rxr->mbuf_sz;
 		/* Get the memory mapping */
 		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
-		    rxbuf->pmap, mp, pseg,
+		    rxbuf->pmap, mp, seg,
 		    &nsegs, BUS_DMA_NOWAIT);
 		if (error != 0)
                         goto fail;
 		bus_dmamap_sync(rxr->ptag,
 		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
-		/* Update descriptor */
-		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+		/* Update the descriptor and the cached value */
+		rxr->rx_base[j].read.pkt_addr = htole64(seg[0].ds_addr);
+		rxbuf->addr = htole64(seg[0].ds_addr);
 	}
 
 
@@ -4024,7 +4035,7 @@
 	rxr->next_to_check = 0;
 	rxr->next_to_refresh = 0;
 	rxr->lro_enabled = FALSE;
-	rxr->rx_split_packets = 0;
+	rxr->rx_copies = 0;
 	rxr->rx_bytes = 0;
 	rxr->discard = FALSE;
 	rxr->vtag_strip = FALSE;
@@ -4034,12 +4045,8 @@
 
 	/*
 	** Now set up the LRO interface:
-	** 82598 uses software LRO, the
-	** 82599 and X540 use a hardware assist.
 	*/
-	if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
-	    (ifp->if_capenable & IFCAP_RXCSUM) &&
-	    (ifp->if_capenable & IFCAP_LRO))
+	if (ixgbe_rsc_enable)
 		ixgbe_setup_hw_rsc(rxr);
 	else if (ifp->if_capenable & IFCAP_LRO) {
 		int err = tcp_lro_init(lro);
@@ -4158,19 +4165,15 @@
 		srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 		srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 		srrctl |= bufsz;
-		if (rxr->hdr_split) {
-			/* Use a standard mbuf for the header */
-			srrctl |= ((IXGBE_RX_HDR <<
-			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
-			    & IXGBE_SRRCTL_BSIZEHDR_MASK);
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-		} else
-			srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
 
 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+
+		/* Set the processing limit */
+		rxr->process_limit = ixgbe_rx_process_limit;
 	}
 
 	if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
@@ -4241,6 +4244,8 @@
 {
 	struct rx_ring *rxr = adapter->rx_rings;
 
+	INIT_DEBUGOUT("ixgbe_free_receive_structures: begin");
+
 	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
 		struct lro_ctrl		*lro = &rxr->lro;
 		ixgbe_free_receive_buffers(rxr);
@@ -4265,32 +4270,20 @@
 	struct adapter		*adapter = rxr->adapter;
 	struct ixgbe_rx_buf	*rxbuf;
 
-	INIT_DEBUGOUT("free_receive_structures: begin");
+	INIT_DEBUGOUT("ixgbe_free_receive_buffers: begin");
 
 	/* Cleanup any existing buffers */
 	if (rxr->rx_buffers != NULL) {
 		for (int i = 0; i < adapter->num_rx_desc; i++) {
 			rxbuf = &rxr->rx_buffers[i];
-			if (rxbuf->m_head != NULL) {
-				bus_dmamap_sync(rxr->htag, rxbuf->hmap,
-				    BUS_DMASYNC_POSTREAD);
-				bus_dmamap_unload(rxr->htag, rxbuf->hmap);
-				rxbuf->m_head->m_flags |= M_PKTHDR;
-				m_freem(rxbuf->m_head);
-			}
-			if (rxbuf->m_pack != NULL) {
+			if (rxbuf->buf != NULL) {
 				bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
 				    BUS_DMASYNC_POSTREAD);
 				bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
-				rxbuf->m_pack->m_flags |= M_PKTHDR;
-				m_freem(rxbuf->m_pack);
+				rxbuf->buf->m_flags |= M_PKTHDR;
+				m_freem(rxbuf->buf);
 			}
-			rxbuf->m_head = NULL;
-			rxbuf->m_pack = NULL;
-			if (rxbuf->hmap != NULL) {
-				bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
-				rxbuf->hmap = NULL;
-			}
+			rxbuf->buf = NULL;
 			if (rxbuf->pmap != NULL) {
 				bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
 				rxbuf->pmap = NULL;
@@ -4302,10 +4295,6 @@
 		}
 	}
 
-	if (rxr->htag != NULL) {
-		bus_dma_tag_destroy(rxr->htag);
-		rxr->htag = NULL;
-	}
 	if (rxr->ptag != NULL) {
 		bus_dma_tag_destroy(rxr->ptag);
 		rxr->ptag = NULL;
@@ -4319,15 +4308,17 @@
 {
                  
         /*
-         * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
+         * ATM LRO is only for IP/TCP packets and TCP checksum of the packet
          * should be computed by hardware. Also it should not have VLAN tag in
-         * ethernet header.
+         * ethernet header.  In case of IPv6 we do not yet support ext. hdrs.
          */
         if (rxr->lro_enabled &&
             (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
             (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
-            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
-            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
+            ((ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) ||
+            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+            (IXGBE_RXDADV_PKTTYPE_IPV6 | IXGBE_RXDADV_PKTTYPE_TCP)) &&
             (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
             (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
                 /*
@@ -4365,16 +4356,13 @@
 	** the normal refresh path to get new buffers
 	** and mapping.
 	*/
-	if (rbuf->m_head) {
-		m_free(rbuf->m_head);
-		rbuf->m_head = NULL;
+	if (rbuf->buf) {
+		m_free(rbuf->buf);
+		rbuf->buf = NULL;
 	}
+
+	rbuf->flags = 0;
  
-	if (rbuf->m_pack) {
-		m_free(rbuf->m_pack);
-		rbuf->m_pack = NULL;
-	}
-
 	return;
 }
 
@@ -4391,7 +4379,7 @@
  *  Return TRUE for more work, FALSE for all clean.
  *********************************************************************/
 static bool
-ixgbe_rxeof(struct ix_queue *que, int count)
+ixgbe_rxeof(struct ix_queue *que)
 {
 	struct adapter		*adapter = que->adapter;
 	struct rx_ring		*rxr = que->rxr;
@@ -4400,6 +4388,7 @@
 	struct lro_entry	*queued;
 	int			i, nextp, processed = 0;
 	u32			staterr = 0;
+	u16			count = rxr->process_limit;
 	union ixgbe_adv_rx_desc	*cur;
 	struct ixgbe_rx_buf	*rbuf, *nbuf;
 
@@ -4406,28 +4395,15 @@
 	IXGBE_RX_LOCK(rxr);
 
 #ifdef DEV_NETMAP
-	if (ifp->if_capenable & IFCAP_NETMAP) {
-		/*
-		 * Same as the txeof routine: only wakeup clients on intr.
-		 * NKR_PENDINTR in nr_kflags is used to implement interrupt
-		 * mitigation (ixgbe_rxsync() will not look for new packets
-		 * unless NKR_PENDINTR is set).
-		 */
-		struct netmap_adapter *na = NA(ifp);
-
-		na->rx_rings[rxr->me].nr_kflags |= NKR_PENDINTR;
-		selwakeuppri(&na->rx_rings[rxr->me].si, PI_NET);
-		IXGBE_RX_UNLOCK(rxr);
-		IXGBE_CORE_LOCK(adapter);
-		selwakeuppri(&na->rx_si, PI_NET);
-		IXGBE_CORE_UNLOCK(adapter);
+	/* Same as the txeof routine: wakeup clients on intr. */
+	if (netmap_rx_irq(ifp, rxr->me | NETMAP_LOCKED_ENTER, &processed))
 		return (FALSE);
-	}
 #endif /* DEV_NETMAP */
+
 	for (i = rxr->next_to_check; count != 0;) {
-		struct mbuf	*sendmp, *mh, *mp;
+		struct mbuf	*sendmp, *mp;
 		u32		rsc, ptype;
-		u16		hlen, plen, hdr;
+		u16		len;
 		u16		vtag = 0;
 		bool		eop;
  
@@ -4449,23 +4425,16 @@
 		rsc = 0;
 		cur->wb.upper.status_error = 0;
 		rbuf = &rxr->rx_buffers[i];
-		mh = rbuf->m_head;
-		mp = rbuf->m_pack;
+		mp = rbuf->buf;
 
-		plen = le16toh(cur->wb.upper.length);
+		len = le16toh(cur->wb.upper.length);
 		ptype = le32toh(cur->wb.lower.lo_dword.data) &
 		    IXGBE_RXDADV_PKTTYPE_MASK;
-		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
 
-		/* Process vlan info */
-		if ((rxr->vtag_strip) && (staterr & IXGBE_RXD_STAT_VP))
-			vtag = le16toh(cur->wb.upper.vlan);
-
 		/* Make sure bad packets are discarded */
 		if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
 		    (rxr->discard)) {
-			ifp->if_ierrors++;
 			rxr->rx_discarded++;
 			if (eop)
 				rxr->discard = FALSE;
@@ -4509,102 +4478,57 @@
 			prefetch(nbuf);
 		}
 		/*
-		** The header mbuf is ONLY used when header 
-		** split is enabled, otherwise we get normal 
-		** behavior, ie, both header and payload
-		** are DMA'd into the payload buffer.
-		**
 		** Rather than using the fmp/lmp global pointers
 		** we now keep the head of a packet chain in the
 		** buffer struct and pass this along from one
 		** descriptor to the next, until we get EOP.
 		*/
-		if (rxr->hdr_split && (rbuf->fmp == NULL)) {
-			/* This must be an initial descriptor */
-			hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-			    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-			if (hlen > IXGBE_RX_HDR)
-				hlen = IXGBE_RX_HDR;
-			mh->m_len = hlen;
-			mh->m_flags |= M_PKTHDR;
-			mh->m_next = NULL;
-			mh->m_pkthdr.len = mh->m_len;
-			/* Null buf pointer so it is refreshed */
-			rbuf->m_head = NULL;
-			/*
-			** Check the payload length, this
-			** could be zero if its a small
-			** packet.
-			*/
-			if (plen > 0) {
-				mp->m_len = plen;
-				mp->m_next = NULL;
-				mp->m_flags &= ~M_PKTHDR;
-				mh->m_next = mp;
-				mh->m_pkthdr.len += mp->m_len;
-				/* Null buf pointer so it is refreshed */
-				rbuf->m_pack = NULL;
-				rxr->rx_split_packets++;
-			}
-			/*
-			** Now create the forward
-			** chain so when complete 
-			** we wont have to.
-			*/
-                        if (eop == 0) {
-				/* stash the chain head */
-                                nbuf->fmp = mh;
-				/* Make forward chain */
-                                if (plen)
-                                        mp->m_next = nbuf->m_pack;
-                                else
-                                        mh->m_next = nbuf->m_pack;
-                        } else {
-				/* Singlet, prepare to send */
-                                sendmp = mh;
-				/* If hardware handled vtag */
-                                if (vtag) {
-                                        sendmp->m_pkthdr.ether_vtag = vtag;
-                                        sendmp->m_flags |= M_VLANTAG;
-                                }
-                        }
+		mp->m_len = len;
+		/*
+		** See if there is a stored head
+		** that determines what we are
+		*/
+		sendmp = rbuf->fmp;
+		if (sendmp != NULL) {  /* secondary frag */
+			rbuf->buf = rbuf->fmp = NULL;
+			mp->m_flags &= ~M_PKTHDR;
+			sendmp->m_pkthdr.len += mp->m_len;
 		} else {
 			/*
-			** Either no header split, or a
-			** secondary piece of a fragmented
-			** split packet.
-			*/
-			mp->m_len = plen;
-			/*
-			** See if there is a stored head
-			** that determines what we are
-			*/
-			sendmp = rbuf->fmp;
-			rbuf->m_pack = rbuf->fmp = NULL;
-
-			if (sendmp != NULL) {  /* secondary frag */
-				mp->m_flags &= ~M_PKTHDR;
-				sendmp->m_pkthdr.len += mp->m_len;
-			} else {
-				/* first desc of a non-ps chain */
-				sendmp = mp;
-				sendmp->m_flags |= M_PKTHDR;
-				sendmp->m_pkthdr.len = mp->m_len;
-				if (staterr & IXGBE_RXD_STAT_VP) {
-					sendmp->m_pkthdr.ether_vtag = vtag;
-					sendmp->m_flags |= M_VLANTAG;
+			 * Optimize.  This might be a small packet,
+			 * maybe just a TCP ACK.  Do a fast copy that
+			 * is cache aligned into a new mbuf, and
+			 * leave the old mbuf+cluster for re-use.
+			 */
+			if (eop && len <= IXGBE_RX_COPY_LEN) {
+				sendmp = m_gethdr(M_NOWAIT, MT_DATA);
+				if (sendmp != NULL) {
+					sendmp->m_data +=
+					    IXGBE_RX_COPY_ALIGN;
+					ixgbe_bcopy(mp->m_data,
+					    sendmp->m_data, len);
+					sendmp->m_len = len;
+					rxr->rx_copies++;
+					rbuf->flags |= IXGBE_RX_COPY;
 				}
-                        }
-			/* Pass the head pointer on */
-			if (eop == 0) {
-				nbuf->fmp = sendmp;
-				sendmp = NULL;
-				mp->m_next = nbuf->m_pack;
 			}
+			if (sendmp == NULL) {
+				rbuf->buf = rbuf->fmp = NULL;
+				sendmp = mp;
+			}
+
+			/* first desc of a non-ps chain */
+			sendmp->m_flags |= M_PKTHDR;
+			sendmp->m_pkthdr.len = mp->m_len;
 		}
 		++processed;
-		/* Sending this frame? */
-		if (eop) {
+
+		/* Pass the head pointer on */
+		if (eop == 0) {
+			nbuf->fmp = sendmp;
+			sendmp = NULL;
+			mp->m_next = nbuf->buf;
+		} else { /* Sending this frame */
 			sendmp->m_pkthdr.rcvif = ifp;
 			ifp->if_ipackets++;
 			rxr->rx_packets++;
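
For a frame that fits one descriptor and is no longer than IXGBE_RX_COPY_LEN, the new receive path above copies it into a fresh header mbuf at a cache-friendly offset and leaves the original cluster armed in the ring (flagged IXGBE_RX_COPY so the refresh path skips remapping). A standalone sketch of that decision, with the constants assumed from ixgbe.h:

/*
 * Hypothetical standalone sketch of the small-frame copy decision in
 * ixgbe_rxeof(); the constants mirror ixgbe.h but are assumptions here.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define RX_COPY_LEN	160				/* max frame worth copying */
#define MHLEN_EXAMPLE	168				/* typical mbuf header data area */
#define RX_COPY_ALIGN	(MHLEN_EXAMPLE - RX_COPY_LEN)	/* 8-byte offset */

/* Copy a small, single-descriptor frame; return true if the original
 * cluster can stay in the ring (caller sets the IXGBE_RX_COPY flag). */
static bool
try_rx_copy(const char *cluster, size_t len, bool eop,
    char *hdr_mbuf_data /* MHLEN_EXAMPLE bytes */)
{
	if (!eop || len > RX_COPY_LEN)
		return false;		/* hand the cluster up instead */
	memcpy(hdr_mbuf_data + RX_COPY_ALIGN, cluster, len);
	return true;
}

int
main(void)
{
	char cluster[2048] = { 0x45 };	/* pretend received frame */
	char hdr[MHLEN_EXAMPLE];

	return try_rx_copy(cluster, 64, true, hdr) ? 0 : 1;
}
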
@@ -4611,6 +4535,14 @@
 			/* capture data for AIM */
 			rxr->bytes += sendmp->m_pkthdr.len;
 			rxr->rx_bytes += sendmp->m_pkthdr.len;
+			/* Process vlan info */
+			if ((rxr->vtag_strip) &&
+			    (staterr & IXGBE_RXD_STAT_VP))
+				vtag = le16toh(cur->wb.upper.vlan);
+			if (vtag) {
+				sendmp->m_pkthdr.ether_vtag = vtag;
+				sendmp->m_flags |= M_VLANTAG;
+			}
 			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
 				ixgbe_rx_checksum(staterr, sendmp, ptype);
 #if __FreeBSD_version >= 800000
@@ -4623,7 +4555,7 @@
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
 		/* Advance our pointers to the next descriptor. */
-		if (++i == adapter->num_rx_desc)
+		if (++i == rxr->num_desc)
 			i = 0;
 
 		/* Now send to the stack or do LRO */
@@ -4657,15 +4589,12 @@
 	IXGBE_RX_UNLOCK(rxr);
 
 	/*
-	** We still have cleaning to do?
-	** Schedule another interrupt if so.
+	** Still have cleaning to do?
 	*/
-	if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
-		ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
+	if ((staterr & IXGBE_RXD_STAT_DD) != 0)
 		return (TRUE);
-	}
-
-	return (FALSE);
+	else
+		return (FALSE);
 }
 
 
@@ -4820,22 +4749,37 @@
 static void
 ixgbe_enable_intr(struct adapter *adapter)
 {
-	struct ixgbe_hw *hw = &adapter->hw;
-	struct ix_queue *que = adapter->queues;
-	u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+	struct ixgbe_hw	*hw = &adapter->hw;
+	struct ix_queue	*que = adapter->queues;
+	u32		mask, fwsm;
 
-
+	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
 	/* Enable Fan Failure detection */
 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
 		    mask |= IXGBE_EIMS_GPI_SDP1;
-	else {
-		    mask |= IXGBE_EIMS_ECC;
-		    mask |= IXGBE_EIMS_GPI_SDP0;
-		    mask |= IXGBE_EIMS_GPI_SDP1;
-		    mask |= IXGBE_EIMS_GPI_SDP2;
+
+	switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82599EB:
+			mask |= IXGBE_EIMS_ECC;
+			mask |= IXGBE_EIMS_GPI_SDP0;
+			mask |= IXGBE_EIMS_GPI_SDP1;
+			mask |= IXGBE_EIMS_GPI_SDP2;
 #ifdef IXGBE_FDIR
-		    mask |= IXGBE_EIMS_FLOW_DIR;
+			mask |= IXGBE_EIMS_FLOW_DIR;
 #endif
+			break;
+		case ixgbe_mac_X540:
+			mask |= IXGBE_EIMS_ECC;
+			/* Detect if Thermal Sensor is enabled */
+			fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+			if (fwsm & IXGBE_FWSM_TS_ENABLED)
+				mask |= IXGBE_EIMS_TS;
+#ifdef IXGBE_FDIR
+			mask |= IXGBE_EIMS_FLOW_DIR;
+#endif
+		/* falls through */
+		default:
+			break;
 	}
 
 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
@@ -4899,6 +4843,111 @@
 }
 
 /*
+** Get the width and transaction speed of
+** the slot this adapter is plugged into.
+*/
+static void
+ixgbe_get_slot_info(struct ixgbe_hw *hw)
+{
+	device_t		dev = ((struct ixgbe_osdep *)hw->back)->dev;
+	struct ixgbe_mac_info	*mac = &hw->mac;
+	u16			link;
+	u32			offset;
+
+	/* For most devices simply call the shared code routine */
+	if (hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) {
+		ixgbe_get_bus_info(hw);
+		goto display;
+	}
+
+	/*
+	** For the Quad port adapter we need to parse back
+	** up the PCI tree to find the speed of the expansion
+	** slot into which this adapter is plugged. A bit more work.
+	*/
+	dev = device_get_parent(device_get_parent(dev));
+#ifdef IXGBE_DEBUG
+	device_printf(dev, "parent pcib = %x,%x,%x\n",
+	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+#endif
+	dev = device_get_parent(device_get_parent(dev));
+#ifdef IXGBE_DEBUG
+	device_printf(dev, "slot pcib = %x,%x,%x\n",
+	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
+#endif
+	/* Now get the PCI Express Capabilities offset */
+	pci_find_cap(dev, PCIY_EXPRESS, &offset);
+	/* ...and read the Link Status Register */
+	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
+	switch (link & IXGBE_PCI_LINK_WIDTH) {
+	case IXGBE_PCI_LINK_WIDTH_1:
+		hw->bus.width = ixgbe_bus_width_pcie_x1;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_2:
+		hw->bus.width = ixgbe_bus_width_pcie_x2;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_4:
+		hw->bus.width = ixgbe_bus_width_pcie_x4;
+		break;
+	case IXGBE_PCI_LINK_WIDTH_8:
+		hw->bus.width = ixgbe_bus_width_pcie_x8;
+		break;
+	default:
+		hw->bus.width = ixgbe_bus_width_unknown;
+		break;
+	}
+
+	switch (link & IXGBE_PCI_LINK_SPEED) {
+	case IXGBE_PCI_LINK_SPEED_2500:
+		hw->bus.speed = ixgbe_bus_speed_2500;
+		break;
+	case IXGBE_PCI_LINK_SPEED_5000:
+		hw->bus.speed = ixgbe_bus_speed_5000;
+		break;
+	case IXGBE_PCI_LINK_SPEED_8000:
+		hw->bus.speed = ixgbe_bus_speed_8000;
+		break;
+	default:
+		hw->bus.speed = ixgbe_bus_speed_unknown;
+		break;
+	}
+
+	mac->ops.set_lan_id(hw);
+
+display:
+	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
+	    ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s":
+	    (hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s":
+	    (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s":"Unknown"),
+	    (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
+	    (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
+	    (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+	    ("Unknown"));
+
+	if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
+	    ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
+	    (hw->bus.speed == ixgbe_bus_speed_2500))) {
+		device_printf(dev, "PCI-Express bandwidth available"
+		    " for this card\n     is not sufficient for"
+		    " optimal performance.\n");
+		device_printf(dev, "For optimal performance a x8 "
+		    "PCIE, or x4 PCIE Gen2 slot is required.\n");
+        }
+	if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
+	    ((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
+	    (hw->bus.speed < ixgbe_bus_speed_8000))) {
+		device_printf(dev, "PCI-Express bandwidth available"
+		    " for this card\n     is not sufficient for"
+		    " optimal performance.\n");
+		device_printf(dev, "For optimal performance a x8 "
+		    "PCIE Gen3 slot is required.\n");
+        }
+
+	return;
+}
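
The 82599 quad-port SFP adapter reports meaningful slot information only two bridges up the PCI tree, so the new ixgbe_get_slot_info() walks to the grandparent pcib and decodes the standard PCIe Link Status register itself. A minimal standalone sketch of that decode, using the standard field layout rather than the driver's IXGBE_PCI_LINK_* masks:

/*
 * Decoding a PCIe Link Status register value: current speed in bits 3:0,
 * negotiated width in bits 9:4.  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void
decode_link_status(uint16_t link)
{
	static const char *speeds[] = { "?", "2.5GT/s", "5.0GT/s", "8.0GT/s" };
	unsigned speed = link & 0xF;
	unsigned width = (link >> 4) & 0x3F;

	printf("PCIe: %s x%u\n", speed <= 3 ? speeds[speed] : "?", width);
}

int
main(void)
{
	decode_link_status(0x1043);	/* example: prints "PCIe: 8.0GT/s x4" */
	return (0);
}
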
+
+
+/*
 ** Setup the correct IVAR register for a particular MSIX interrupt
 **   (yes this is all very magic and confusing :)
 **  - entry is the register array entry
@@ -5063,7 +5112,7 @@
 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
 		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
 	if (hw->mac.ops.setup_link)
-		hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
+		hw->mac.ops.setup_link(hw, autoneg, TRUE);
 	return;
 }
 
@@ -5107,6 +5156,11 @@
 	adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
 	adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
 
+	/*
+	** Note: these are for the 8 possible traffic classes,
+	**	 which are unused in the current implementation,
+	**	 therefore only class 0 should read real data.
+	*/
 	for (int i = 0; i < 8; i++) {
 		u32 mp;
 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
@@ -5116,13 +5170,20 @@
         	adapter->stats.mpc[i] += mp;
 		/* Running comprehensive total for stats display */
 		total_missed_rx += adapter->stats.mpc[i];
-		if (hw->mac.type == ixgbe_mac_82598EB)
+		if (hw->mac.type == ixgbe_mac_82598EB) {
 			adapter->stats.rnbc[i] +=
 			    IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+			adapter->stats.qbtc[i] +=
+			    IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+			adapter->stats.qbrc[i] +=
+			    IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+			adapter->stats.pxonrxc[i] +=
+		    	    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+		} else
+			adapter->stats.pxonrxc[i] +=
+		    	    IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
 		adapter->stats.pxontxc[i] +=
 		    IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
-		adapter->stats.pxonrxc[i] +=
-		    IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
 		adapter->stats.pxofftxc[i] +=
 		    IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
 		adapter->stats.pxoffrxc[i] +=
@@ -5133,12 +5194,6 @@
 	for (int i = 0; i < 16; i++) {
 		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
 		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
-		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-		adapter->stats.qbrc[i] += 
-		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
-		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
-		adapter->stats.qbtc[i] +=
-		    ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
 		adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 	}
 	adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
@@ -5231,11 +5286,12 @@
 	ifp->if_ibytes = adapter->stats.gorc;
 	ifp->if_obytes = adapter->stats.gotc;
 	ifp->if_imcasts = adapter->stats.mprc;
+	ifp->if_omcasts = adapter->stats.mptc;
 	ifp->if_collisions = 0;
 
 	/* Rx Errors */
-	ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
-		adapter->stats.rlec;
+	ifp->if_iqdrops = total_missed_rx;
+	ifp->if_ierrors = adapter->stats.crcerrs + adapter->stats.rlec;
 }
 
 /** ixgbe_sysctl_tdh_handler - Handler function
@@ -5368,15 +5424,9 @@
 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
 			CTLFLAG_RD, &adapter->mbuf_defrag_failed,
 			"m_defrag() failed");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
-			CTLFLAG_RD, &adapter->no_tx_dma_setup,
-			"Driver tx dma failure in xmit");
 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
 			CTLFLAG_RD, &adapter->watchdog_events,
 			"Watchdog timeouts");
-	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
-			CTLFLAG_RD, &adapter->tso_tx,
-			"TSO");
 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
 			CTLFLAG_RD, &adapter->link_irq,
 			"Link MSIX IRQ Handled");
@@ -5403,6 +5453,12 @@
 				CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
 				ixgbe_sysctl_tdt_handler, "IU",
 				"Transmit Descriptor Tail");
+		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tso_tx",
+				CTLFLAG_RD, &txr->tso_tx,
+				"TSO");
+		SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "no_tx_dma_setup",
+				CTLFLAG_RD, &txr->no_tx_dma_setup,
+				"Driver tx dma failure in xmit");
 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
 				CTLFLAG_RD, &txr->no_desc_avail,
 				"Queue No Descriptor Available");
@@ -5438,6 +5494,9 @@
 		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
 				CTLFLAG_RD, &rxr->rx_bytes,
 				"Queue Bytes Received");
+		SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_copies",
+				CTLFLAG_RD, &rxr->rx_copies,
+				"Copied RX Frames");
 		SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
 				CTLFLAG_RD, &lro->lro_queued, 0,
 				"LRO Queued");
@@ -5473,16 +5532,18 @@
 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
 			CTLFLAG_RD, &stats->rlec,
 			"Receive Length Errors");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
+
+	/* Flow Control stats */
+	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
 			CTLFLAG_RD, &stats->lxontxc,
 			"Link XON Transmitted");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
+	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
 			CTLFLAG_RD, &stats->lxonrxc,
 			"Link XON Received");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
+	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
 			CTLFLAG_RD, &stats->lxofftxc,
 			"Link XOFF Transmitted");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
+	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
 			CTLFLAG_RD, &stats->lxoffrxc,
 			"Link XOFF Received");
 
@@ -5582,29 +5643,6 @@
 	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
 			CTLFLAG_RD, &stats->ptc1522,
 			"1024-1522 byte frames transmitted");
-
-	/* FC Stats */
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
-		CTLFLAG_RD, &stats->fccrc,
-		"FC CRC Errors");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
-		CTLFLAG_RD, &stats->fclast,
-		"FC Last Error");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
-		CTLFLAG_RD, &stats->fcoerpdc,
-		"FCoE Packets Dropped");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
-		CTLFLAG_RD, &stats->fcoeprc,
-		"FCoE Packets Received");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
-		CTLFLAG_RD, &stats->fcoeptc,
-		"FCoE Packets Transmitted");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
-		CTLFLAG_RD, &stats->fcoedwrc,
-		"FCoE DWords Received");
-	SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
-		CTLFLAG_RD, &stats->fcoedwtc,
-		"FCoE DWords Transmitted");
 }
 
 /*
@@ -5635,10 +5673,17 @@
 		case ixgbe_fc_tx_pause:
 		case ixgbe_fc_full:
 			adapter->hw.fc.requested_mode = adapter->fc;
+			if (adapter->num_queues > 1)
+				ixgbe_disable_rx_drop(adapter);
 			break;
 		case ixgbe_fc_none:
+			adapter->hw.fc.requested_mode = ixgbe_fc_none;
+			if (adapter->num_queues > 1)
+				ixgbe_enable_rx_drop(adapter);
+			break;
 		default:
-			adapter->hw.fc.requested_mode = ixgbe_fc_none;
+			adapter->fc = last;
+			return (EINVAL);
 	}
 	/* Don't autoneg if forcing a value */
 	adapter->hw.fc.disable_fc_autoneg = TRUE;
@@ -5646,16 +5691,6 @@
 	return error;
 }
 
-static void
-ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
-        const char *description, int *limit, int value)
-{
-        *limit = value;
-        SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
-            SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
-            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
-}
-
 /*
 ** Control link advertise speed:
 **	1 - advertise only 1G
@@ -5677,7 +5712,7 @@
 	last = adapter->advertise;
 
 	error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
-	if ((error) || (adapter->advertise == -1))
+	if ((error) || (req->newptr == NULL))
 		return (error);
 
 	if (adapter->advertise == last) /* no change */
@@ -5685,11 +5720,11 @@
 
 	if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
             (hw->phy.multispeed_fiber)))
-		return (error);
+		return (EINVAL);
 
 	if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
 		device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
-		return (error);
+		return (EINVAL);
 	}
 
 	if (adapter->advertise == 1)
@@ -5699,11 +5734,13 @@
 	else if (adapter->advertise == 3)
                 speed = IXGBE_LINK_SPEED_1GB_FULL |
 			IXGBE_LINK_SPEED_10GB_FULL;
-	else /* bogus value */
-		return (error);
+	else {	/* bogus value */
+		adapter->advertise = last;
+		return (EINVAL);
+	}
 
 	hw->mac.autotry_restart = TRUE;
-	hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
+	hw->mac.ops.setup_link(hw, speed, TRUE);
 
 	return (error);
 }
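
In short, the advertise handler above now accepts only 1 (1Gb), 2 (100Mb, valid on X540 copper only) or 3 (1Gb + 10Gb). An out-of-range value restores the previous setting and returns EINVAL, and requesting a change on a port that is neither copper nor multispeed fiber (or 100Mb on anything but X540) now also fails with EINVAL rather than returning success without doing anything.
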
@@ -5711,6 +5748,7 @@
 /*
 ** Thermal Shutdown Trigger
 **   - cause a Thermal Overtemp IRQ
+**   - this now requires the firmware to enable it
 */
 static int
 ixgbe_set_thermal_test(SYSCTL_HANDLER_ARGS)
@@ -5735,3 +5773,34 @@
 
 	return (0);
 }
+
+/*
+** Enable the hardware to drop packets when the buffer is
+** full. This is useful when multiqueue, so that no single

+** queue being full stalls the entire RX engine. We only
+** enable this when Multiqueue AND when Flow Control is 
+** disabled.
+*/
+static void
+ixgbe_enable_rx_drop(struct adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+
+	for (int i = 0; i < adapter->num_queues; i++) {
+        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+        	srrctl |= IXGBE_SRRCTL_DROP_EN;
+        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+	}
+}
+
+static void
+ixgbe_disable_rx_drop(struct adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+
+	for (int i = 0; i < adapter->num_queues; i++) {
+        	u32 srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+        	srrctl &= ~IXGBE_SRRCTL_DROP_EN;
+        	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+	}
+}


Property changes on: trunk/sys/dev/ixgbe/ixgbe.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 
 #ifndef _IXGBE_H_
@@ -39,7 +40,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 #include <sys/buf_ring.h>
 #endif
 #include <sys/mbuf.h>
@@ -89,10 +90,6 @@
 #include <sys/smp.h>
 #include <machine/smp.h>
 
-#ifdef IXGBE_IEEE1588
-#include <sys/ieee1588.h>
-#endif
-
 #include "ixgbe_api.h"
 
 /* Tunables */
@@ -154,6 +151,19 @@
 #define IXGBE_FC_HI		0x20000
 #define IXGBE_FC_LO		0x10000
 
+/*
+ * Used for optimizing small rx mbufs.  Effort is made to keep the copy
+ * small and aligned for the CPU L1 cache.
+ * 
+ * MHLEN is typically 168 bytes, giving us 8-byte alignment.  Getting
+ * 32 byte alignment needed for the fast bcopy results in 8 bytes being
+ * wasted.  Getting 64 byte alignment, which _should_ be ideal for
+ * modern Intel CPUs, results in 40 bytes wasted and a significant drop
+ * in observed efficiency of the optimization, 97.9% -> 81.8%.
+ */
+#define IXGBE_RX_COPY_LEN	160
+#define IXGBE_RX_COPY_ALIGN	(MHLEN - IXGBE_RX_COPY_LEN)
+
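+
A quick check of the arithmetic above: with MHLEN at its typical 168 bytes, IXGBE_RX_COPY_ALIGN = MHLEN - IXGBE_RX_COPY_LEN = 168 - 160 = 8, so the receive path advances m_data by 8 bytes before the bcopy, matching the 8 wasted bytes mentioned in the comment.
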
 /* Keep older OS drivers building... */
 #if !defined(SYSCTL_ADD_UQUAD)
 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
@@ -185,11 +195,10 @@
 #define IXGBE_VFTA_SIZE			128
 #define IXGBE_BR_SIZE			4096
 #define IXGBE_QUEUE_MIN_FREE		32
-#define IXGBE_QUEUE_IDLE		1
-#define IXGBE_QUEUE_WORKING		2
-#define IXGBE_QUEUE_HUNG		4
-#define IXGBE_QUEUE_DEPLETED		8
 
+/* IOCTL define to gather SFP+ Diagnostic data */
+#define SIOCGI2C	SIOCGIFGENERIC
+
 /* Offload bits in mbuf flag */
 #if __FreeBSD_version >= 800000
 #define CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
@@ -197,16 +206,6 @@
 #define CSUM_OFFLOAD		(CSUM_IP|CSUM_TCP|CSUM_UDP)
 #endif
 
-/* For 6.X code compatibility */
-#if !defined(ETHER_BPF_MTAP)
-#define ETHER_BPF_MTAP		BPF_MTAP
-#endif
-
-#if __FreeBSD_version < 700000
-#define CSUM_TSO		0
-#define IFCAP_TSO4		0
-#endif
-
 /*
  * Interrupt Moderation parameters 
  */
@@ -215,6 +214,7 @@
 #define IXGBE_BULK_LATENCY	1200
 #define IXGBE_LINK_ITR		2000
 
+
 /*
  *****************************************************************************
  * vendor_info_array
@@ -233,18 +233,27 @@
 } ixgbe_vendor_info_t;
 
 
+/* This is used to get SFP+ module data */
+struct ixgbe_i2c_req {
+        u8 dev_addr;
+        u8 offset;
+        u8 len;
+        u8 data[8];
+};
+
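
The new SIOCGI2C define (an alias for SIOCGIFGENERIC) together with struct ixgbe_i2c_req gives userland a way to read raw SFP+ module data. A hedged sketch of how a tool might issue the request, assuming the driver exchanges the structure through ifr_data; the 0xA0/0xA2 device addresses are the standard SFF-8472 ID and diagnostics pages, and "ix0" is only an example interface name:

/*
 * Hypothetical userland sketch of a SIOCGI2C request; error handling is
 * deliberately minimal and the copy-through-ifr_data path is assumed.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct ixgbe_i2c_req {			/* mirrors the driver definition */
	u_char dev_addr;
	u_char offset;
	u_char len;
	u_char data[8];
};

int
main(void)
{
	struct ifreq ifr;
	struct ixgbe_i2c_req i2c;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&i2c, 0, sizeof(i2c));
	strlcpy(ifr.ifr_name, "ix0", sizeof(ifr.ifr_name));
	i2c.dev_addr = 0xA0;		/* SFF-8472 base ID page */
	i2c.offset = 0;			/* start of the ID fields */
	i2c.len = 8;
	ifr.ifr_data = (caddr_t)&i2c;

	if (ioctl(s, SIOCGIFGENERIC, &ifr) == 0)
		printf("first ID byte: 0x%02x\n", i2c.data[0]);
	close(s);
	return (0);
}
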
 struct ixgbe_tx_buf {
-	u32		eop_index;
+	union ixgbe_adv_tx_desc	*eop;
 	struct mbuf	*m_head;
 	bus_dmamap_t	map;
 };
 
 struct ixgbe_rx_buf {
-	struct mbuf	*m_head;
-	struct mbuf	*m_pack;
+	struct mbuf	*buf;
 	struct mbuf	*fmp;
-	bus_dmamap_t	hmap;
 	bus_dmamap_t	pmap;
+	u_int		flags;
+#define IXGBE_RX_COPY	0x01
+	uint64_t	addr;
 };
 
 /*
@@ -285,19 +294,26 @@
         struct adapter		*adapter;
 	struct mtx		tx_mtx;
 	u32			me;
-	int			queue_status;
 	int			watchdog_time;
 	union ixgbe_adv_tx_desc	*tx_base;
+	struct ixgbe_tx_buf	*tx_buffers;
 	struct ixgbe_dma_alloc	txdma;
-	u32			next_avail_desc;
-	u32			next_to_clean;
-	struct ixgbe_tx_buf	*tx_buffers;
 	volatile u16		tx_avail;
+	u16			next_avail_desc;
+	u16			next_to_clean;
+	u16			process_limit;
+	u16			num_desc;
+	enum {
+	    IXGBE_QUEUE_IDLE,
+	    IXGBE_QUEUE_WORKING,
+	    IXGBE_QUEUE_HUNG,
+	}			queue_status;
 	u32			txd_cmd;
 	bus_dma_tag_t		txtag;
 	char			mtx_name[16];
-#if __FreeBSD_version >= 800000
+#ifndef IXGBE_LEGACY_TX
 	struct buf_ring		*br;
+	struct task		txq_task;
 #endif
 #ifdef IXGBE_FDIR
 	u16			atr_sample;
@@ -306,6 +322,9 @@
 	u32			bytes;  /* used for AIM */
 	u32			packets;
 	/* Soft Stats */
+	unsigned long   	tso_tx;
+	unsigned long   	no_tx_map_avail;
+	unsigned long   	no_tx_dma_setup;
 	u64			no_desc_avail;
 	u64			total_packets;
 };
@@ -322,15 +341,16 @@
 	struct ixgbe_dma_alloc	rxdma;
 	struct lro_ctrl		lro;
 	bool			lro_enabled;
-	bool			hdr_split;
 	bool			hw_rsc;
 	bool			discard;
 	bool			vtag_strip;
-        u32			next_to_refresh;
-        u32 			next_to_check;
+        u16			next_to_refresh;
+        u16 			next_to_check;
+	u16			num_desc;
+	u16			mbuf_sz;
+	u16			process_limit;
 	char			mtx_name[16];
 	struct ixgbe_rx_buf	*rx_buffers;
-	bus_dma_tag_t		htag;
 	bus_dma_tag_t		ptag;
 
 	u32			bytes; /* Used for AIM calc */
@@ -338,7 +358,7 @@
 
 	/* Soft stats */
 	u64			rx_irq;
-	u64			rx_split_packets;
+	u64			rx_copies;
 	u64			rx_packets;
 	u64 			rx_bytes;
 	u64 			rx_discarded;
@@ -426,7 +446,7 @@
 	 *	Allocated at run time, an array of rings.
 	 */
 	struct tx_ring		*tx_rings;
-	int			num_tx_desc;
+	u32			num_tx_desc;
 
 	/*
 	 * Receive rings:
@@ -433,27 +453,25 @@
 	 *	Allocated at run time, an array of rings.
 	 */
 	struct rx_ring		*rx_rings;
-	int			num_rx_desc;
 	u64			que_mask;
-	u32			rx_process_limit;
+	u32			num_rx_desc;
 
 	/* Multicast array memory */
 	u8			*mta;
 
+
 	/* Misc stats maintained by the driver */
 	unsigned long   	dropped_pkts;
 	unsigned long   	mbuf_defrag_failed;
 	unsigned long   	mbuf_header_failed;
 	unsigned long   	mbuf_packet_failed;
-	unsigned long   	no_tx_map_avail;
-	unsigned long   	no_tx_dma_setup;
 	unsigned long   	watchdog_events;
-	unsigned long   	tso_tx;
 	unsigned long		link_irq;
 
 	struct ixgbe_hw_stats 	stats;
 };
 
+
 /* Precision Time Sync (IEEE 1588) defines */
 #define ETHERTYPE_IEEE1588      0x88F7
 #define PICOSECS_PER_TICK       20833
@@ -476,6 +494,10 @@
 #define IXGBE_CORE_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->core_mtx, MA_OWNED)
 #define IXGBE_TX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
 
+/* For backward compatibility */
+#if !defined(PCIER_LINK_STA)
+#define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA
+#endif
 
 static inline bool
 ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -512,12 +534,10 @@
 static inline u16
 ixgbe_rx_unrefreshed(struct rx_ring *rxr)
 {       
-	struct adapter  *adapter = rxr->adapter;
-        
 	if (rxr->next_to_check > rxr->next_to_refresh)
 		return (rxr->next_to_check - rxr->next_to_refresh - 1);
 	else
-		return ((adapter->num_rx_desc + rxr->next_to_check) -
+		return ((rxr->num_desc + rxr->next_to_check) -
 		    rxr->next_to_refresh - 1);
 }       
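
For example, with a hypothetical num_desc of 1024, next_to_check = 10 and next_to_refresh = 1000, the first branch does not apply and the routine reports (1024 + 10) - 1000 - 1 = 33 descriptors still waiting to be refreshed.
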
 


Property changes on: trunk/sys/dev/ixgbe/ixgbe.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_82598.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_82598.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_82598.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_82598.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 #include "ixgbe_type.h"
 #include "ixgbe_82598.h"
@@ -49,11 +50,9 @@
 				      bool link_up_wait_to_complete);
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
 				      ixgbe_link_speed speed,
-				      bool autoneg,
 				      bool autoneg_wait_to_complete);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
 					 ixgbe_link_speed speed,
-					 bool autoneg,
 					 bool autoneg_wait_to_complete);
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
@@ -60,7 +59,8 @@
 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
 				  u32 headroom, int strategy);
-
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+					u8 *sff8472_data);
 /**
  *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
  *  @hw: pointer to the HW structure
@@ -155,6 +155,7 @@
 
 	/* SFP+ Module */
 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+	phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
 
 	/* Link */
 	mac->ops.check_link = &ixgbe_check_mac_link_82598;
@@ -166,6 +167,8 @@
 	/* Manageability interface */
 	mac->ops.set_fw_drv_ver = NULL;
 
+	mac->ops.get_rtrup2tc = NULL;
+
 	return ret_val;
 }
 
@@ -712,15 +715,15 @@
  *  ixgbe_setup_mac_link_82598 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
-				      ixgbe_link_speed speed, bool autoneg,
+				      ixgbe_link_speed speed,
 				      bool autoneg_wait_to_complete)
 {
+	bool autoneg = FALSE;
 	s32 status = IXGBE_SUCCESS;
 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
 	u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
@@ -766,7 +769,6 @@
  *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
  *
  *  Sets the link speed in the AUTOC register in the MAC and restarts link.
@@ -773,7 +775,6 @@
  **/
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
 					 ixgbe_link_speed speed,
-					 bool autoneg,
 					 bool autoneg_wait_to_complete)
 {
 	s32 status;
@@ -781,7 +782,7 @@
 	DEBUGFUNC("ixgbe_setup_copper_link_82598");
 
 	/* Setup the PHY according to input speed */
-	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+	status = hw->phy.ops.setup_link_speed(hw, speed,
 					      autoneg_wait_to_complete);
 	/* Set up MAC */
 	ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -1102,24 +1103,34 @@
 }
 
 /**
- *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
  *  @hw: pointer to hardware structure
- *  @byte_offset: EEPROM byte offset to read
+ *  @dev_addr: address to read from
+ *  @byte_offset: byte offset to read from dev_addr
  *  @eeprom_data: value read
  *
  *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
  **/
-s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
-				u8 *eeprom_data)
+static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
+				    u8 byte_offset, u8 *eeprom_data)
 {
 	s32 status = IXGBE_SUCCESS;
 	u16 sfp_addr = 0;
 	u16 sfp_data = 0;
 	u16 sfp_stat = 0;
+	u16 gssr;
 	u32 i;
 
-	DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
+	DEBUGFUNC("ixgbe_read_i2c_phy_82598");
 
+	if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+		gssr = IXGBE_GSSR_PHY1_SM;
+	else
+		gssr = IXGBE_GSSR_PHY0_SM;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+		return IXGBE_ERR_SWFW_SYNC;
+
 	if (hw->phy.type == ixgbe_phy_nl) {
 		/*
 		 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
@@ -1126,19 +1137,19 @@
 		 * 0xC30D. These registers are used to talk to the SFP+
 		 * module's EEPROM through the SDA/SCL (I2C) interface.
 		 */
-		sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+		sfp_addr = (dev_addr << 8) + byte_offset;
 		sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
-		hw->phy.ops.write_reg(hw,
-				      IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
-				      IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-				      sfp_addr);
+		hw->phy.ops.write_reg_mdi(hw,
+					  IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+					  IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+					  sfp_addr);
 
 		/* Poll status */
 		for (i = 0; i < 100; i++) {
-			hw->phy.ops.read_reg(hw,
-					     IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
-					     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
-					     &sfp_stat);
+			hw->phy.ops.read_reg_mdi(hw,
+						IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+						IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+						&sfp_stat);
 			sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
 			if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
 				break;
@@ -1152,20 +1163,50 @@
 		}
 
 		/* Read data */
-		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
-				     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+		hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+					IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
 
 		*eeprom_data = (u8)(sfp_data >> 8);
 	} else {
 		status = IXGBE_ERR_PHY;
-		goto out;
 	}
 
 out:
+	hw->mac.ops.release_swfw_sync(hw, gssr);
 	return status;
 }
 
 /**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+				u8 *eeprom_data)
+{
+	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
+					byte_offset, eeprom_data);
+}
+
+/**
+ *  ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
+					u8 *sff8472_data)
+{
+	return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
+					byte_offset, sff8472_data);
+}
+
+/**
  *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
  *  @hw: pointer to hardware structure
  *


Property changes on: trunk/sys/dev/ixgbe/ixgbe_82598.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
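
The 82598 change above folds the old EEPROM-only read path into ixgbe_read_i2c_phy_82598(), which takes the I2C device address as a parameter so the SFP serial-ID page and the SFF-8472 diagnostics page share one MDIO-proxied transaction, and which now brackets that transaction with the per-LAN PHY semaphore. The standalone sketch below models only the wrapper pattern: the stub names (fake_i2c_read, DEV_ADDR_*) and the dummy data are invented for illustration, and the 0xA0/0xA2 device addresses mirror the driver's EEPROM/SFF-8472 constants but should be treated as assumptions here.

#include <stdint.h>
#include <stdio.h>

#define DEV_ADDR_EEPROM   0xA0  /* SFP serial-ID page (assumed to mirror IXGBE_I2C_EEPROM_DEV_ADDR)   */
#define DEV_ADDR_SFF8472  0xA2  /* SFF-8472 diagnostics (assumed to mirror IXGBE_I2C_EEPROM_DEV_ADDR2) */

/* Stand-in for the real MDIO-proxied I2C transaction.  A real implementation
 * would compose (dev_addr << 8) | offset, write it to the PHY's SDA/SCL
 * address register, poll the status register, then read the data register,
 * as the hunk above does; here we just fabricate a value. */
static int fake_i2c_read(uint8_t dev_addr, uint8_t offset, uint8_t *data)
{
    *data = (uint8_t)(dev_addr ^ offset);
    return 0;
}

/* One shared helper; the two public entry points differ only in dev_addr. */
static int read_i2c_phy(uint8_t dev_addr, uint8_t offset, uint8_t *data)
{
    return fake_i2c_read(dev_addr, offset, data);
}

static int read_i2c_eeprom(uint8_t offset, uint8_t *data)
{
    return read_i2c_phy(DEV_ADDR_EEPROM, offset, data);
}

static int read_i2c_sff8472(uint8_t offset, uint8_t *data)
{
    return read_i2c_phy(DEV_ADDR_SFF8472, offset, data);
}

int main(void)
{
    uint8_t id, diag;

    read_i2c_eeprom(0x00, &id);     /* SFP identifier byte                    */
    read_i2c_sff8472(0x60, &diag);  /* e.g. temperature MSB in SFF-8472 page  */
    printf("id=0x%02x diag=0x%02x\n", id, diag);
    return 0;
}
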
Modified: trunk/sys/dev/ixgbe/ixgbe_82598.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_82598.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_82598.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
   Copyright (c) 2001-2012, Intel Corporation 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_82598.h 246069 2013-01-29 13:49:08Z pfg $*/
 
 #ifndef _IXGBE_82598_H_
 #define _IXGBE_82598_H_


Property changes on: trunk/sys/dev/ixgbe/ixgbe_82598.h
___________________________________________________________________
Deleted: svn:executable
## -1 +0,0 ##
-*
\ No newline at end of property
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_82599.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_82599.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_82599.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_82599.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 #include "ixgbe_type.h"
 #include "ixgbe_82599.h"
@@ -40,7 +41,6 @@
 
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 					 ixgbe_link_speed speed,
-					 bool autoneg,
 					 bool autoneg_wait_to_complete);
 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
@@ -48,6 +48,25 @@
 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
 					  u16 words, u16 *data);
 
+static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
+{
+	u32 fwsm, manc, factps;
+
+	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
+		return FALSE;
+
+	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
+	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
+		return FALSE;
+
+	factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+	if (factps & IXGBE_FACTPS_MNGCG)
+		return FALSE;
+
+	return TRUE;
+}
+
 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
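
ixgbe_mng_enabled(), added in the hunk above, treats manageability firmware as active only when all three checks pass: FWSM reports pass-through mode, MANC has TCO receive enabled, and FACTPS does not report manageability clock gating. Below is a minimal standalone model of that predicate with the register reads replaced by a struct of sampled values; the bit masks are written from memory of the driver headers and should be treated as assumptions. The cached result (hw->mng_fw_enabled) is what later gates the laser-control hooks and the D3 link-disable path in this same file.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Masks assumed for illustration; the real values live in ixgbe_type.h. */
#define FWSM_MODE_MASK   0xE
#define FWSM_FW_MODE_PT  0x4
#define MANC_RCV_TCO_EN  (1u << 17)
#define FACTPS_MNGCG     (1u << 29)

struct fake_hw { uint32_t fwsm, manc, factps; };

static bool mng_enabled(const struct fake_hw *hw)
{
    if ((hw->fwsm & FWSM_MODE_MASK) != FWSM_FW_MODE_PT)
        return false;               /* FW not in pass-through mode  */
    if (!(hw->manc & MANC_RCV_TCO_EN))
        return false;               /* FW not receiving TCO traffic */
    if (hw->factps & FACTPS_MNGCG)
        return false;               /* manageability clock gated    */
    return true;
}

int main(void)
{
    struct fake_hw hw = { .fwsm = 0x4, .manc = MANC_RCV_TCO_EN, .factps = 0 };

    printf("MNG FW enabled: %s\n", mng_enabled(&hw) ? "yes" : "no");
    return 0;
}
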
@@ -54,8 +73,12 @@
 
 	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
 
-	/* enable the laser control functions for SFP+ fiber */
-	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+	/*
+	 * enable the laser control functions for SFP+ fiber
+	 * and MNG not enabled
+	 */
+	if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+	    !hw->mng_fw_enabled) {
 		mac->ops.disable_tx_laser =
 				       &ixgbe_disable_tx_laser_multispeed_fiber;
 		mac->ops.enable_tx_laser =
@@ -135,9 +158,8 @@
 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
 	s32 ret_val = IXGBE_SUCCESS;
-	u32 reg_anlp1 = 0;
-	u32 i = 0;
 	u16 list_offset, data_offset, data_value;
+	bool got_lock = FALSE;
 
 	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
 
@@ -159,11 +181,13 @@
 			goto setup_sfp_out;
 		}
 
-		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+		if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+			goto setup_sfp_err;
 		while (data_value != 0xffff) {
 			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
 			IXGBE_WRITE_FLUSH(hw);
-			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+			if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+				goto setup_sfp_err;
 		}
 
 		/* Release the semaphore */
@@ -171,32 +195,52 @@
 		/* Delay obtaining semaphore again to allow FW access */
 		msec_delay(hw->eeprom.semaphore_delay);
 
-		/* Now restart DSP by setting Restart_AN and clearing LMS */
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
-				IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
-				IXGBE_AUTOC_AN_RESTART));
+		/* Need SW/FW semaphore around AUTOC writes if LESM on,
+		 * likewise reset_pipeline requires lock as it also writes
+		 * AUTOC.
+		 */
+		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (ret_val != IXGBE_SUCCESS) {
+				ret_val = IXGBE_ERR_SWFW_SYNC;
+				goto setup_sfp_out;
+			}
 
-		/* Wait for AN to leave state 0 */
-		for (i = 0; i < 10; i++) {
-			msec_delay(4);
-			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
-			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
-				break;
+			got_lock = TRUE;
 		}
-		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+
+		/* Restart DSP and set SFI mode */
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
+				IXGBE_AUTOC_LMS_10G_SERIAL));
+		hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+		ret_val = ixgbe_reset_pipeline_82599(hw);
+
+		if (got_lock) {
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
+			got_lock = FALSE;
+		}
+
+		if (ret_val) {
 			DEBUGOUT("sfp module setup not complete\n");
 			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
 			goto setup_sfp_out;
 		}
 
-		/* Restart DSP by setting Restart_AN and return to SFI mode */
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
-				IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
-				IXGBE_AUTOC_AN_RESTART));
 	}
 
 setup_sfp_out:
 	return ret_val;
+
+setup_sfp_err:
+	/* Release the semaphore */
+	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+	/* Delay obtaining semaphore again to allow FW access */
+	msec_delay(hw->eeprom.semaphore_delay);
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", data_offset);
+	return IXGBE_ERR_PHY;
 }
 
 /**
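
The setup-SFP hunk above introduces a locking idiom that recurs throughout this commit (start_mac_link, setup_mac_link, reset_hw and setup_fc all repeat it): when LESM firmware is enabled, take the MAC CSR SW/FW semaphore before writing AUTOC or calling ixgbe_reset_pipeline_82599() (which also writes AUTOC), track the acquisition in a local got_lock flag, and release it on every exit path. A minimal standalone sketch of that idiom follows; the helper names, the error codes and the placeholder AUTOC value are invented, not driver API.

#include <stdbool.h>
#include <stdio.h>

enum { OK = 0, ERR_SWFW_SYNC = -1 };

/* Stubs standing in for the real semaphore and register operations. */
static bool lesm_fw_enabled(void)     { return true; }
static int  acquire_mac_csr_sem(void) { puts("sem: acquired"); return OK; }
static void release_mac_csr_sem(void) { puts("sem: released"); }
static void write_autoc(unsigned v)   { printf("AUTOC <- 0x%x\n", v); }
static int  reset_pipeline(void)      { puts("pipeline reset"); return OK; }

static int update_autoc_locked(unsigned new_autoc)
{
    bool got_lock = false;
    int ret;

    /* AUTOC is shared with LESM firmware, so serialize access to it. */
    if (lesm_fw_enabled()) {
        if (acquire_mac_csr_sem() != OK)
            return ERR_SWFW_SYNC;
        got_lock = true;
    }

    write_autoc(new_autoc);
    ret = reset_pipeline();   /* also touches AUTOC, hence the same lock */

    if (got_lock)
        release_mac_csr_sem();
    return ret;
}

int main(void)
{
    return update_autoc_locked(0x12345678 /* placeholder AUTOC value */) == OK ? 0 : 1;
}
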
@@ -216,7 +260,7 @@
 
 	DEBUGFUNC("ixgbe_init_ops_82599");
 
-	ret_val = ixgbe_init_phy_ops_generic(hw);
+	ixgbe_init_phy_ops_generic(hw);
 	ret_val = ixgbe_init_ops_generic(hw);
 
 	/* PHY */
@@ -282,6 +326,11 @@
 	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
 
 
+	mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+
+	/* Cache if MNG FW is up */
+	hw->mng_fw_enabled = ixgbe_mng_enabled(hw);
+
 	return ret_val;
 }
 
@@ -289,13 +338,13 @@
  *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
  *  @hw: pointer to hardware structure
  *  @speed: pointer to link speed
- *  @negotiation: TRUE when autoneg or autotry is enabled
+ *  @autoneg: TRUE when autoneg or autotry is enabled
  *
  *  Determines the link capabilities by reading the AUTOC register.
  **/
 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
 				      ixgbe_link_speed *speed,
-				      bool *negotiation)
+				      bool *autoneg)
 {
 	s32 status = IXGBE_SUCCESS;
 	u32 autoc = 0;
@@ -309,7 +358,7 @@
 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
 	    hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*negotiation = TRUE;
+		*autoneg = TRUE;
 		goto out;
 	}
 
@@ -326,22 +375,22 @@
 	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
 	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*negotiation = FALSE;
+		*autoneg = FALSE;
 		break;
 
 	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		*negotiation = FALSE;
+		*autoneg = FALSE;
 		break;
 
 	case IXGBE_AUTOC_LMS_1G_AN:
 		*speed = IXGBE_LINK_SPEED_1GB_FULL;
-		*negotiation = TRUE;
+		*autoneg = TRUE;
 		break;
 
 	case IXGBE_AUTOC_LMS_10G_SERIAL:
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
-		*negotiation = FALSE;
+		*autoneg = FALSE;
 		break;
 
 	case IXGBE_AUTOC_LMS_KX4_KX_KR:
@@ -353,7 +402,7 @@
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
 		if (autoc & IXGBE_AUTOC_KX_SUPP)
 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		*negotiation = TRUE;
+		*autoneg = TRUE;
 		break;
 
 	case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
@@ -364,12 +413,12 @@
 			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
 		if (autoc & IXGBE_AUTOC_KX_SUPP)
 			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
-		*negotiation = TRUE;
+		*autoneg = TRUE;
 		break;
 
 	case IXGBE_AUTOC_LMS_SGMII_1G_100M:
 		*speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
-		*negotiation = FALSE;
+		*autoneg = FALSE;
 		break;
 
 	default:
@@ -381,7 +430,7 @@
 	if (hw->phy.multispeed_fiber) {
 		*speed |= IXGBE_LINK_SPEED_10GB_FULL |
 			  IXGBE_LINK_SPEED_1GB_FULL;
-		*negotiation = TRUE;
+		*autoneg = TRUE;
 	}
 
 out:
@@ -424,6 +473,7 @@
 	case IXGBE_DEV_ID_82599_SFP_FCOE:
 	case IXGBE_DEV_ID_82599_SFP_EM:
 	case IXGBE_DEV_ID_82599_SFP_SF2:
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
 	case IXGBE_DEV_ID_82599EN_SFP:
 		media_type = ixgbe_media_type_fiber;
 		break;
@@ -433,6 +483,10 @@
 	case IXGBE_DEV_ID_82599_T3_LOM:
 		media_type = ixgbe_media_type_copper;
 		break;
+	case IXGBE_DEV_ID_82599_BYPASS:
+		media_type = ixgbe_media_type_fiber_fixed;
+		hw->phy.multispeed_fiber = TRUE;
+		break;
 	default:
 		media_type = ixgbe_media_type_unknown;
 		break;
@@ -442,6 +496,29 @@
 }
 
 /**
+ *  ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables link during D3 power down sequence.
+ *
+ **/
+void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+	u32 autoc2_reg;
+	u16 ee_ctrl_2 = 0;
+
+	DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599");
+	ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
+
+	if (!hw->mng_fw_enabled && !hw->wol_enabled &&
+		ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
+		autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+		autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+	}
+}
+
+/**
  *  ixgbe_start_mac_link_82599 - Setup MAC link settings
  *  @hw: pointer to hardware structure
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
@@ -456,17 +533,32 @@
 	u32 links_reg;
 	u32 i;
 	s32 status = IXGBE_SUCCESS;
+	bool got_lock = FALSE;
 
 	DEBUGFUNC("ixgbe_start_mac_link_82599");
 
 
+	/*  reset_pipeline requires us to hold this lock as it writes to
+	 *  AUTOC.
+	 */
+	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		status = hw->mac.ops.acquire_swfw_sync(hw,
+						       IXGBE_GSSR_MAC_CSR_SM);
+		if (status != IXGBE_SUCCESS)
+			goto out;
+
+		got_lock = TRUE;
+	}
+
 	/* Restart link */
-	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
-	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	ixgbe_reset_pipeline_82599(hw);
 
+	if (got_lock)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
 	/* Only poll for autoneg to complete if specified to do so */
 	if (autoneg_wait_to_complete) {
+		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
 		     IXGBE_AUTOC_LMS_KX4_KX_KR ||
 		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -490,6 +582,7 @@
 	/* Add delay to filter out noises during initial link setup */
 	msec_delay(50);
 
+out:
 	return status;
 }
 
@@ -555,16 +648,84 @@
 }
 
 /**
+ *  ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ *  @hw: pointer to hardware structure
+ *  @speed: link speed to set
+ *
+ *  We set the module speed differently for fixed fiber.  For other
+ *  multi-speed devices we don't have an error value, so if we
+ *  detect an error here we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+					ixgbe_link_speed speed)
+{
+	s32 status;
+	u8 rs, eeprom_data;
+
+	switch (speed) {
+	case IXGBE_LINK_SPEED_10GB_FULL:
+		/* one bit mask same as setting on */
+		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+		break;
+	case IXGBE_LINK_SPEED_1GB_FULL:
+		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+		break;
+	default:
+		DEBUGOUT("Invalid fixed module speed\n");
+		return;
+	}
+
+	/* Set RS0 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
+		goto out;
+	}
+
+	/* Set RS1 */
+	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					   IXGBE_I2C_EEPROM_DEV_ADDR2,
+					   &eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
+		goto out;
+	}
+
+	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+					    eeprom_data);
+	if (status) {
+		DEBUGOUT("Failed to write Rx Rate Select RS1\n");
+		goto out;
+	}
+out:
+	return;
+}
+
+/**
  *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-				     ixgbe_link_speed speed, bool autoneg,
+				     ixgbe_link_speed speed,
 				     bool autoneg_wait_to_complete)
 {
 	s32 status = IXGBE_SUCCESS;
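
ixgbe_set_fiber_fixed_speed(), added in the hunk above, selects 1G or 10G on a fixed ("bypass") fiber module by rewriting the soft rate-select bits in the module's SFF-8472 control bytes (RS0 in the optional status/control byte, RS1 in the extended one) instead of toggling the SDP5 pin used by ordinary multispeed SFP+ modules. The sketch below illustrates the SFF-8472 mechanism against a fake A2h page: the byte offsets and bit values mirror the driver's constants but are assumptions here, and the read-modify-write clears the select field and ORs the requested bit in, so treat it as an illustration of the idea rather than the driver's exact arithmetic.

#include <stdint.h>
#include <stdio.h>

#define SFF8472_OSCB        0x6E  /* optional status/control byte (RS0), assumed offset */
#define SFF8472_ESCB        0x76  /* extended status/control byte (RS1), assumed offset */
#define SOFT_RS_SELECT_MASK 0x08
#define SOFT_RS_SELECT_10G  0x08
#define SOFT_RS_SELECT_1G   0x00

static uint8_t fake_a2_page[256];  /* stand-in for the module's A2h page */

static int i2c_read(uint8_t off, uint8_t *v)  { *v = fake_a2_page[off]; return 0; }
static int i2c_write(uint8_t off, uint8_t v)  { fake_a2_page[off] = v;  return 0; }

/* Clear the rate-select field, then OR in the requested value. */
static int set_rate_select(uint8_t offset, uint8_t rs)
{
    uint8_t v;

    if (i2c_read(offset, &v))
        return -1;
    v = (uint8_t)((v & ~SOFT_RS_SELECT_MASK) | rs);
    return i2c_write(offset, v);
}

static int set_fixed_fiber_speed(int ten_gig)
{
    uint8_t rs = ten_gig ? SOFT_RS_SELECT_10G : SOFT_RS_SELECT_1G;

    if (set_rate_select(SFF8472_OSCB, rs))    /* RS0 */
        return -1;
    return set_rate_select(SFF8472_ESCB, rs); /* RS1 */
}

int main(void)
{
    set_fixed_fiber_speed(1);
    printf("OSCB=0x%02x ESCB=0x%02x\n",
           fake_a2_page[SFF8472_OSCB], fake_a2_page[SFF8472_ESCB]);
    return 0;
}
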
@@ -573,13 +734,12 @@
 	u32 speedcnt = 0;
 	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
 	u32 i = 0;
-	bool link_up = FALSE;
-	bool negotiation;
+	bool autoneg, link_up = FALSE;
 
 	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
 
 	/* Mask off requested but non-supported speeds */
-	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
 	if (status != IXGBE_SUCCESS)
 		return status;
 
@@ -602,9 +762,14 @@
 			goto out;
 
 		/* Set the module link speed */
-		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-		IXGBE_WRITE_FLUSH(hw);
+		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+			ixgbe_set_fiber_fixed_speed(hw,
+						    IXGBE_LINK_SPEED_10GB_FULL);
+		} else {
+			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			IXGBE_WRITE_FLUSH(hw);
+		}
 
 		/* Allow module to change analog characteristics (1G->10G) */
 		msec_delay(40);
@@ -611,7 +776,6 @@
 
 		status = ixgbe_setup_mac_link_82599(hw,
 						    IXGBE_LINK_SPEED_10GB_FULL,
-						    autoneg,
 						    autoneg_wait_to_complete);
 		if (status != IXGBE_SUCCESS)
 			return status;
@@ -653,10 +817,15 @@
 			goto out;
 
 		/* Set the module link speed */
-		esdp_reg &= ~IXGBE_ESDP_SDP5;
-		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
-		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
-		IXGBE_WRITE_FLUSH(hw);
+		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
+			ixgbe_set_fiber_fixed_speed(hw,
+						    IXGBE_LINK_SPEED_1GB_FULL);
+		} else {
+			esdp_reg &= ~IXGBE_ESDP_SDP5;
+			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+			IXGBE_WRITE_FLUSH(hw);
+		}
 
 		/* Allow module to change analog characteristics (10G->1G) */
 		msec_delay(40);
@@ -663,7 +832,6 @@
 
 		status = ixgbe_setup_mac_link_82599(hw,
 						    IXGBE_LINK_SPEED_1GB_FULL,
-						    autoneg,
 						    autoneg_wait_to_complete);
 		if (status != IXGBE_SUCCESS)
 			return status;
@@ -690,7 +858,7 @@
 	 */
 	if (speedcnt > 1)
 		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
-			highest_link_speed, autoneg, autoneg_wait_to_complete);
+			highest_link_speed, autoneg_wait_to_complete);
 
 out:
 	/* Set autoneg_advertised value based on input link speed */
@@ -709,13 +877,12 @@
  *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Implements the Intel SmartSpeed algorithm.
  **/
 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-				    ixgbe_link_speed speed, bool autoneg,
+				    ixgbe_link_speed speed,
 				    bool autoneg_wait_to_complete)
 {
 	s32 status = IXGBE_SUCCESS;
@@ -748,7 +915,7 @@
 	/* First, try to get link with full advertisement */
 	hw->phy.smart_speed_active = FALSE;
 	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
-		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+		status = ixgbe_setup_mac_link_82599(hw, speed,
 						    autoneg_wait_to_complete);
 		if (status != IXGBE_SUCCESS)
 			goto out;
@@ -783,7 +950,7 @@
 
 	/* Turn SmartSpeed on to disable KR support */
 	hw->phy.smart_speed_active = TRUE;
-	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+	status = ixgbe_setup_mac_link_82599(hw, speed,
 					    autoneg_wait_to_complete);
 	if (status != IXGBE_SUCCESS)
 		goto out;
@@ -808,7 +975,7 @@
 
 	/* We didn't get link.  Turn SmartSpeed back off. */
 	hw->phy.smart_speed_active = FALSE;
-	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+	status = ixgbe_setup_mac_link_82599(hw, speed,
 					    autoneg_wait_to_complete);
 
 out:
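
The SmartSpeed hunks above only drop the now-redundant autoneg argument, but the surrounding context shows the algorithm's three phases: try the full advertisement (KR included) a few times, then set smart_speed_active to advertise without KR and retry, and finally restore the full advertisement if neither attempt linked. The schematic below captures that control flow with the MAC link setup and the link poll stubbed out; the retry count and return codes are placeholders.

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 3

/* Stubs for the MAC-level operations used by the real routine. */
static int  setup_mac_link(bool smart_speed_active) { (void)smart_speed_active; return 0; }
static bool poll_for_link(void)                     { return false; /* pretend no link */ }

static int setup_link_smartspeed(void)
{
    int j;

    /* Phase 1: full advertisement (KR included). */
    for (j = 0; j < MAX_RETRIES; j++) {
        if (setup_mac_link(false))
            return -1;
        if (poll_for_link())
            return 0;
    }

    /* Phase 2: SmartSpeed on, i.e. advertise without KR. */
    if (setup_mac_link(true))
        return -1;
    if (poll_for_link())
        return 0;

    /* Phase 3: no link either way; restore the full advertisement. */
    return setup_mac_link(false);
}

int main(void)
{
    printf("smartspeed result: %d\n", setup_link_smartspeed());
    return 0;
}
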
@@ -822,32 +989,30 @@
  *  ixgbe_setup_mac_link_82599 - Set MAC link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
-			       ixgbe_link_speed speed, bool autoneg,
+			       ixgbe_link_speed speed,
 			       bool autoneg_wait_to_complete)
 {
+	bool autoneg = FALSE;
 	s32 status = IXGBE_SUCCESS;
-	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	u32 autoc, pma_pmd_1g, link_mode, start_autoc;
 	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-	u32 start_autoc = autoc;
 	u32 orig_autoc = 0;
-	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
-	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
 	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
 	u32 links_reg;
 	u32 i;
 	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+	bool got_lock = FALSE;
 
 	DEBUGFUNC("ixgbe_setup_mac_link_82599");
 
 	/* Check to see if speed passed in is supported. */
 	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
-	if (status != IXGBE_SUCCESS)
+	if (status)
 		goto out;
 
 	speed &= link_capabilities;
@@ -859,21 +1024,27 @@
 
 	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
 	if (hw->mac.orig_link_settings_stored)
-		orig_autoc = hw->mac.orig_autoc;
+		autoc = hw->mac.orig_autoc;
 	else
-		orig_autoc = autoc;
+		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 
+	orig_autoc = autoc;
+	start_autoc = hw->mac.cached_autoc;
+	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+
 	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
 	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
 		/* Set KX4/KX/KR support according to speed requested */
 		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
-		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
 			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
 				autoc |= IXGBE_AUTOC_KX4_SUPP;
 			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
 			    (hw->phy.smart_speed_active == FALSE))
 				autoc |= IXGBE_AUTOC_KR_SUPP;
+		}
 		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 			autoc |= IXGBE_AUTOC_KX_SUPP;
 	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
@@ -899,10 +1070,32 @@
 	}
 
 	if (autoc != start_autoc) {
+		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
+		 * likewise reset_pipeline requires us to hold this lock as
+		 * it also writes to AUTOC.
+		 */
+		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			status = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (status != IXGBE_SUCCESS) {
+				status = IXGBE_ERR_SWFW_SYNC;
+				goto out;
+			}
+
+			got_lock = TRUE;
+		}
+
 		/* Restart link */
-		autoc |= IXGBE_AUTOC_AN_RESTART;
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+		hw->mac.cached_autoc = autoc;
+		ixgbe_reset_pipeline_82599(hw);
 
+		if (got_lock) {
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
+			got_lock = FALSE;
+		}
+
 		/* Only poll for autoneg to complete if specified to do so */
 		if (autoneg_wait_to_complete) {
 			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
@@ -936,7 +1129,6 @@
  *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
  *
  *  Restarts link on PHY and MAC based on settings passed in.
@@ -943,7 +1135,6 @@
  **/
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
 					 ixgbe_link_speed speed,
-					 bool autoneg,
 					 bool autoneg_wait_to_complete)
 {
 	s32 status;
@@ -951,7 +1142,7 @@
 	DEBUGFUNC("ixgbe_setup_copper_link_82599");
 
 	/* Setup the PHY according to input speed */
-	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+	status = hw->phy.ops.setup_link_speed(hw, speed,
 					      autoneg_wait_to_complete);
 	/* Set up MAC */
 	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -971,7 +1162,8 @@
 {
 	ixgbe_link_speed link_speed;
 	s32 status;
-	u32 ctrl, i, autoc, autoc2;
+	u32 ctrl, i, autoc2;
+	u32 curr_lms;
 	bool link_up = FALSE;
 
 	DEBUGFUNC("ixgbe_reset_hw_82599");
@@ -1005,6 +1197,13 @@
 	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
 		hw->phy.ops.reset(hw);
 
+	/* remember AUTOC from before we reset */
+	if (hw->mac.cached_autoc)
+		curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
+	else
+		curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
+					  IXGBE_AUTOC_LMS_MASK;
+
 mac_reset_top:
 	/*
 	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
@@ -1053,17 +1252,60 @@
 	 * stored off yet.  Otherwise restore the stored original
 	 * values since the reset operation sets back to defaults.
 	 */
-	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+
+	/* Enable link if disabled in NVM */
+	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
 	if (hw->mac.orig_link_settings_stored == FALSE) {
-		hw->mac.orig_autoc = autoc;
+		hw->mac.orig_autoc = hw->mac.cached_autoc;
 		hw->mac.orig_autoc2 = autoc2;
 		hw->mac.orig_link_settings_stored = TRUE;
 	} else {
-		if (autoc != hw->mac.orig_autoc)
-			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
-					IXGBE_AUTOC_AN_RESTART));
 
+		/* If MNG FW is running on a multi-speed device that
+		 * doesn't autoneg without driver support, we need to
+		 * leave LMS in the state it was in before the MAC reset.
+		 * Likewise if we support WoL we don't want to change the
+		 * LMS state.
+		 */
+		if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) ||
+		    hw->wol_enabled)
+			hw->mac.orig_autoc =
+				(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
+				curr_lms;
+
+		if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
+			/* Need SW/FW semaphore around AUTOC writes if LESM is
+			 * on, likewise reset_pipeline requires us to hold
+			 * this lock as it also writes to AUTOC.
+			 */
+			bool got_lock = FALSE;
+			if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+				status = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+				if (status != IXGBE_SUCCESS) {
+					status = IXGBE_ERR_SWFW_SYNC;
+					goto reset_hw_out;
+				}
+
+				got_lock = TRUE;
+			}
+
+			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+			hw->mac.cached_autoc = hw->mac.orig_autoc;
+			ixgbe_reset_pipeline_82599(hw);
+
+			if (got_lock)
+				hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
+		}
+
 		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
 		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
 			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
@@ -1167,7 +1409,7 @@
 		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
 				   IXGBE_FDIRCTRL_INIT_DONE)
 			break;
-		usec_delay(10);
+		msec_delay(1);
 	}
 	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
 		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
@@ -2097,7 +2339,7 @@
 {
 	s32 status = IXGBE_ERR_EEPROM_VERSION;
 	u16 fw_offset, fw_ptp_cfg_offset;
-	u16 fw_version = 0;
+	u16 fw_version;
 
 	DEBUGFUNC("ixgbe_verify_fw_version_82599");
 
@@ -2108,22 +2350,37 @@
 	}
 
 	/* get the offset to the Firmware Module block */
-	hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+	if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed", IXGBE_FW_PTR);
+		return IXGBE_ERR_EEPROM_VERSION;
+	}
 
 	if ((fw_offset == 0) || (fw_offset == 0xFFFF))
 		goto fw_version_out;
 
 	/* get the offset to the Pass Through Patch Configuration block */
-	hw->eeprom.ops.read(hw, (fw_offset +
+	if (hw->eeprom.ops.read(hw, (fw_offset +
 				 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
-				 &fw_ptp_cfg_offset);
+				 &fw_ptp_cfg_offset)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      fw_offset +
+			      IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR);
+		return IXGBE_ERR_EEPROM_VERSION;
+	}
 
 	if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
 		goto fw_version_out;
 
 	/* get the firmware version */
-	hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
-			    IXGBE_FW_PATCH_VERSION_4), &fw_version);
+	if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+			    IXGBE_FW_PATCH_VERSION_4), &fw_version)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4);
+		return IXGBE_ERR_EEPROM_VERSION;
+	}
 
 	if (fw_version > 0x5)
 		status = IXGBE_SUCCESS;
@@ -2242,4 +2499,55 @@
 	return ret_val;
 }
 
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ *  @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+	s32 ret_val;
+	u32 anlp1_reg = 0;
+	u32 i, autoc_reg, autoc2_reg;
 
+	/* Enable link if disabled in NVM */
+	autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
+		autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+		IXGBE_WRITE_FLUSH(hw);
+	}
+
+	autoc_reg = hw->mac.cached_autoc;
+	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+	/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+	/* Wait for AN to leave state 0 */
+	for (i = 0; i < 10; i++) {
+		msec_delay(4);
+		anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+		if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+			break;
+	}
+
+	if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+		DEBUGOUT("auto negotiation not completed\n");
+		ret_val = IXGBE_ERR_RESET_FAILED;
+		goto reset_pipeline_out;
+	}
+
+	ret_val = IXGBE_SUCCESS;
+
+reset_pipeline_out:
+	/* Write AUTOC register with original LMS field and Restart_AN */
+	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+	IXGBE_WRITE_FLUSH(hw);
+
+	return ret_val;
+}
+
+
+


Property changes on: trunk/sys/dev/ixgbe/ixgbe_82599.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
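
ixgbe_reset_pipeline_82599(), added at the end of ixgbe_82599.c above, centralizes what the old code did inline: write AUTOC with Restart_AN set and the LMS field deliberately changed (XOR with the 1G-AN encoding), wait via ANLP1 for the autoneg state machine to leave state 0, then write the original AUTOC value back. The standalone model below mirrors that ordering against a fake register file; the masks, the poll budget and the simulated state machine are simplified stand-ins, not the real register layout.

#include <stdint.h>
#include <stdio.h>

/* Fake register file; the masks are simplified stand-ins. */
#define AN_RESTART    (1u << 12)
#define LMS_TOGGLE    (1u << 14)
#define AN_STATE_MASK 0xFu

static uint32_t reg_autoc;
static uint32_t reg_anlp1;
static int      poll_count;

static uint32_t read_anlp1(void)
{
    /* Pretend the AN state machine leaves state 0 after a few polls. */
    if (++poll_count >= 3)
        reg_anlp1 |= 0x1;
    return reg_anlp1;
}

static int reset_pipeline(void)
{
    uint32_t saved = reg_autoc | AN_RESTART;
    int i, ok = 0;

    /* 1. Kick AN with a deliberately different LMS encoding. */
    reg_autoc = saved ^ LMS_TOGGLE;

    /* 2. Wait for the AN state machine to leave state 0. */
    for (i = 0; i < 10; i++) {
        if (read_anlp1() & AN_STATE_MASK) {
            ok = 1;
            break;
        }
    }

    /* 3. Restore the original LMS value, again with Restart_AN set. */
    reg_autoc = saved;

    return ok ? 0 : -1;
}

int main(void)
{
    printf("pipeline reset: %s\n", reset_pipeline() == 0 ? "ok" : "failed");
    return 0;
}
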
Modified: trunk/sys/dev/ixgbe/ixgbe_82599.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_82599.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_82599.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_82599.h 248287 2013-03-14 21:39:39Z jfv $*/
 
 #ifndef _IXGBE_82599_H_
 #define _IXGBE_82599_H_
@@ -42,15 +43,15 @@
 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
-					  ixgbe_link_speed speed, bool autoneg,
+					  ixgbe_link_speed speed,
 					  bool autoneg_wait_to_complete);
 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
-				    ixgbe_link_speed speed, bool autoneg,
+				    ixgbe_link_speed speed,
 				    bool autoneg_wait_to_complete);
 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
 			       bool autoneg_wait_to_complete);
 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			       bool autoneg, bool autoneg_wait_to_complete);
+			       bool autoneg_wait_to_complete);
 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
@@ -61,5 +62,4 @@
 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 #endif /* _IXGBE_82599_H_ */


Property changes on: trunk/sys/dev/ixgbe/ixgbe_82599.h
___________________________________________________________________
Deleted: svn:executable
## -1 +0,0 ##
-*
\ No newline at end of property
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_api.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_api.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_api.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,12 +31,25 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_api.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 #include "ixgbe_api.h"
 #include "ixgbe_common.h"
 
 /**
+ * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
+{
+	if (hw->mac.ops.get_rtrup2tc)
+		hw->mac.ops.get_rtrup2tc(hw, map);
+}
+
+/**
  *  ixgbe_init_shared_code - Initialize the shared code
  *  @hw: pointer to hardware structure
  *
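
The new ixgbe_dcb_get_rtrup2tc() entry point follows the driver's usual optional-ops pattern: the shared API layer forwards the call only if the MAC-specific init installed the hook (the 82599 init earlier in this commit wires it to ixgbe_dcb_get_rtrup2tc_generic). A tiny standalone sketch of that NULL-guarded dispatch is below; the structure and function names are invented for the example.

#include <stdio.h>

struct mac_ops {
    void (*get_rtrup2tc)(unsigned char *map);   /* optional hook */
};

static void rtrup2tc_82599(unsigned char *map)
{
    for (int i = 0; i < 8; i++)
        map[i] = (unsigned char)i;              /* pretend UP i maps to TC i */
}

/* Shared API layer: forward only if the MAC installed the hook. */
static void get_rtrup2tc(const struct mac_ops *ops, unsigned char *map)
{
    if (ops->get_rtrup2tc)
        ops->get_rtrup2tc(map);
}

int main(void)
{
    struct mac_ops ops_82599 = { .get_rtrup2tc = rtrup2tc_82599 };
    struct mac_ops ops_82598 = { 0 };           /* no DCB rtrup2tc hook */
    unsigned char map[8] = { 0 };

    get_rtrup2tc(&ops_82599, map);
    get_rtrup2tc(&ops_82598, map);              /* silently a no-op */
    printf("UP0 -> TC%u\n", (unsigned)map[0]);
    return 0;
}
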
@@ -93,53 +107,62 @@
 
 	DEBUGFUNC("ixgbe_set_mac_type\n");
 
-	if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
-		switch (hw->device_id) {
-		case IXGBE_DEV_ID_82598:
-		case IXGBE_DEV_ID_82598_BX:
-		case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-		case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-		case IXGBE_DEV_ID_82598AT:
-		case IXGBE_DEV_ID_82598AT2:
-		case IXGBE_DEV_ID_82598EB_CX4:
-		case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-		case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-		case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-		case IXGBE_DEV_ID_82598EB_XF_LR:
-		case IXGBE_DEV_ID_82598EB_SFP_LOM:
-			hw->mac.type = ixgbe_mac_82598EB;
-			break;
-		case IXGBE_DEV_ID_82599_KX4:
-		case IXGBE_DEV_ID_82599_KX4_MEZZ:
-		case IXGBE_DEV_ID_82599_XAUI_LOM:
-		case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
-		case IXGBE_DEV_ID_82599_KR:
-		case IXGBE_DEV_ID_82599_SFP:
-		case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
-		case IXGBE_DEV_ID_82599_SFP_FCOE:
-		case IXGBE_DEV_ID_82599_SFP_EM:
-		case IXGBE_DEV_ID_82599_SFP_SF2:
-		case IXGBE_DEV_ID_82599EN_SFP:
-		case IXGBE_DEV_ID_82599_CX4:
-		case IXGBE_DEV_ID_82599_T3_LOM:
-			hw->mac.type = ixgbe_mac_82599EB;
-			break;
-		case IXGBE_DEV_ID_82599_VF:
-			hw->mac.type = ixgbe_mac_82599_vf;
-			break;
-		case IXGBE_DEV_ID_X540_VF:
-			hw->mac.type = ixgbe_mac_X540_vf;
-			break;
-		case IXGBE_DEV_ID_X540T:
-		case IXGBE_DEV_ID_X540T1:
-			hw->mac.type = ixgbe_mac_X540;
-			break;
-		default:
-			ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
-			break;
-		}
-	} else {
+	if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) {
+		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+			     "Unsupported vendor id: %x", hw->vendor_id);
+		return IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+	}
+
+	switch (hw->device_id) {
+	case IXGBE_DEV_ID_82598:
+	case IXGBE_DEV_ID_82598_BX:
+	case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+	case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+	case IXGBE_DEV_ID_82598AT:
+	case IXGBE_DEV_ID_82598AT2:
+	case IXGBE_DEV_ID_82598EB_CX4:
+	case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+	case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+	case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+	case IXGBE_DEV_ID_82598EB_XF_LR:
+	case IXGBE_DEV_ID_82598EB_SFP_LOM:
+		hw->mac.type = ixgbe_mac_82598EB;
+		break;
+	case IXGBE_DEV_ID_82599_KX4:
+	case IXGBE_DEV_ID_82599_KX4_MEZZ:
+	case IXGBE_DEV_ID_82599_XAUI_LOM:
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+	case IXGBE_DEV_ID_82599_KR:
+	case IXGBE_DEV_ID_82599_SFP:
+	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+	case IXGBE_DEV_ID_82599_SFP_FCOE:
+	case IXGBE_DEV_ID_82599_SFP_EM:
+	case IXGBE_DEV_ID_82599_SFP_SF2:
+	case IXGBE_DEV_ID_82599_SFP_SF_QP:
+	case IXGBE_DEV_ID_82599EN_SFP:
+	case IXGBE_DEV_ID_82599_CX4:
+	case IXGBE_DEV_ID_82599_BYPASS:
+	case IXGBE_DEV_ID_82599_T3_LOM:
+		hw->mac.type = ixgbe_mac_82599EB;
+		break;
+	case IXGBE_DEV_ID_82599_VF:
+	case IXGBE_DEV_ID_82599_VF_HV:
+		hw->mac.type = ixgbe_mac_82599_vf;
+		break;
+	case IXGBE_DEV_ID_X540_VF:
+	case IXGBE_DEV_ID_X540_VF_HV:
+		hw->mac.type = ixgbe_mac_X540_vf;
+		break;
+	case IXGBE_DEV_ID_X540T:
+	case IXGBE_DEV_ID_X540_BYPASS:
+		hw->mac.type = ixgbe_mac_X540;
+		break;
+	default:
 		ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+			     "Unsupported device id: %x",
+			     hw->device_id);
+		break;
 	}
 
 	DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
@@ -507,16 +530,14 @@
  *  ixgbe_setup_phy_link_speed - Set auto advertise
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *
  *  Sets the auto advertised capabilities
  **/
 s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			       bool autoneg,
 			       bool autoneg_wait_to_complete)
 {
 	return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
-			       autoneg, autoneg_wait_to_complete),
+			       autoneg_wait_to_complete),
 			       IXGBE_NOT_IMPLEMENTED);
 }
 
@@ -576,17 +597,15 @@
  *  ixgbe_setup_link - Set link speed
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *
  *  Configures link settings.  Restarts the link.
  *  Performs autonegotiation if needed.
  **/
 s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-		     bool autoneg,
 		     bool autoneg_wait_to_complete)
 {
 	return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
-			       autoneg, autoneg_wait_to_complete),
+			       autoneg_wait_to_complete),
 			       IXGBE_NOT_IMPLEMENTED);
 }
 
@@ -999,6 +1018,8 @@
 }
 
 
+
+
 /**
  *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
  *  @hw: pointer to hardware structure


Property changes on: trunk/sys/dev/ixgbe/ixgbe_api.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_api.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_api.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_api.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_api.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 #ifndef _IXGBE_API_H_
 #define _IXGBE_API_H_
@@ -37,6 +38,8 @@
 
 #include "ixgbe_type.h"
 
+void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map);
+
 s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
 
 extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
@@ -72,13 +75,12 @@
 			 bool *link_up);
 s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
 			       ixgbe_link_speed speed,
-			       bool autoneg,
 			       bool autoneg_wait_to_complete);
 void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
 void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
 void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
 s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-		     bool autoneg, bool autoneg_wait_to_complete);
+		     bool autoneg_wait_to_complete);
 s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		     bool *link_up, bool link_up_wait_to_complete);
 s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
@@ -135,6 +137,7 @@
 s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
 s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
 s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
+s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -159,6 +162,7 @@
 					  union ixgbe_atr_input *mask);
 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
 				     union ixgbe_atr_hash_dword common);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
 s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
 			u8 *data);
 s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,


Property changes on: trunk/sys/dev/ixgbe/ixgbe_api.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_common.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_common.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_common.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,10 +31,12 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_common.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
 #include "ixgbe_api.h"
 
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
@@ -134,33 +137,63 @@
 	mac->ops.get_link_capabilities = NULL;
 	mac->ops.setup_link = NULL;
 	mac->ops.check_link = NULL;
+	mac->ops.dmac_config = NULL;
+	mac->ops.dmac_update_tcs = NULL;
+	mac->ops.dmac_config_tcs = NULL;
 
 	return IXGBE_SUCCESS;
 }
 
 /**
- *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
- *  control
- *  @hw: pointer to hardware structure
+ * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
+ * of flow control
+ * @hw: pointer to hardware structure
  *
- *  There are several phys that do not support autoneg flow control. This
- *  function check the device id to see if the associated phy supports
- *  autoneg flow control.
+ * This function returns TRUE if the device supports flow control
+ * autonegotiation, and FALSE if it does not.
+ *
  **/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
 {
+	bool supported = FALSE;
+	ixgbe_link_speed speed;
+	bool link_up;
 
 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
 
-	switch (hw->device_id) {
-	case IXGBE_DEV_ID_X540T:
-	case IXGBE_DEV_ID_X540T1:
-		return IXGBE_SUCCESS;
-	case IXGBE_DEV_ID_82599_T3_LOM:
-		return IXGBE_SUCCESS;
+	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_fixed:
+	case ixgbe_media_type_fiber:
+		hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+		/* if link is down, assume supported */
+		if (link_up)
+			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+				TRUE : FALSE;
+		else
+			supported = TRUE;
+		break;
+	case ixgbe_media_type_backplane:
+		supported = TRUE;
+		break;
+	case ixgbe_media_type_copper:
+		/* only some copper devices support flow control autoneg */
+		switch (hw->device_id) {
+		case IXGBE_DEV_ID_82599_T3_LOM:
+		case IXGBE_DEV_ID_X540T:
+		case IXGBE_DEV_ID_X540_BYPASS:
+			supported = TRUE;
+			break;
+		default:
+			supported = FALSE;
+		}
 	default:
-		return IXGBE_ERR_FC_NOT_SUPPORTED;
+		break;
 	}
+
+	ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+		      "Device %x does not support flow control autoneg",
+		      hw->device_id);
+	return supported;
 }
 
 /**
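
The rewritten ixgbe_device_supports_autoneg_fc() above switches from a device-ID whitelist to a media-type decision: backplane always supports flow-control autonegotiation, fiber supports it when the link is down or is up at 1 Gb/s (the fiber FC autoneg path runs over the 1G PCS registers, as the hunks further down show), and copper keeps a short device-ID list. A standalone sketch of that decision table follows with the link query stubbed; the enum and flag names are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

enum media { MEDIA_FIBER, MEDIA_FIBER_FIXED, MEDIA_BACKPLANE, MEDIA_COPPER, MEDIA_OTHER };

/* Stub: report the current link state; speed_1g only matters when up is true. */
static void check_link(bool *up, bool *speed_1g) { *up = true; *speed_1g = true; }

static bool supports_fc_autoneg(enum media m, bool copper_whitelisted)
{
    bool up, speed_1g;

    switch (m) {
    case MEDIA_FIBER:
    case MEDIA_FIBER_FIXED:
        check_link(&up, &speed_1g);
        /* A link already up at 10G cannot renegotiate FC over the 1G PCS;
         * if the link is down, assume support. */
        return up ? speed_1g : true;
    case MEDIA_BACKPLANE:
        return true;
    case MEDIA_COPPER:
        /* only a few copper devices (e.g. T3 LOM, X540T) qualify */
        return copper_whitelisted;
    default:
        return false;
    }
}

int main(void)
{
    printf("fiber: %d, backplane: %d, copper(other): %d\n",
           supports_fc_autoneg(MEDIA_FIBER, false),
           supports_fc_autoneg(MEDIA_BACKPLANE, false),
           supports_fc_autoneg(MEDIA_COPPER, false));
    return 0;
}
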
@@ -174,6 +207,7 @@
 	s32 ret_val = IXGBE_SUCCESS;
 	u32 reg = 0, reg_bp = 0;
 	u16 reg_cu = 0;
+	bool got_lock = FALSE;
 
 	DEBUGFUNC("ixgbe_setup_fc");
 
@@ -182,7 +216,8 @@
 	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
 	 */
 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
-		DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
 		goto out;
 	}
@@ -200,6 +235,7 @@
 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
 	 */
 	switch (hw->phy.media_type) {
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber:
 	case ixgbe_media_type_backplane:
 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -268,7 +304,8 @@
 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
 		break;
 	default:
-		DEBUGOUT("Flow control param set incorrectly\n");
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+			     "Flow control param set incorrectly\n");
 		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
@@ -297,9 +334,30 @@
 	 */
 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
+		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
+		 * LESM is on, likewise reset_pipeline requires the lock as
+		 * it also writes AUTOC.
+		 */
+		if ((hw->mac.type == ixgbe_mac_82599EB) &&
+		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (ret_val != IXGBE_SUCCESS) {
+				ret_val = IXGBE_ERR_SWFW_SYNC;
+				goto out;
+			}
+			got_lock = TRUE;
+		}
+
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+		if (hw->mac.type == ixgbe_mac_82599EB)
+			ixgbe_reset_pipeline_82599(hw);
+
+		if (got_lock)
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
-		    (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+		    (ixgbe_device_supports_autoneg_fc(hw))) {
 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
 	}
@@ -680,6 +738,195 @@
 }
 
 /**
+ *  ixgbe_read_pba_raw
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @max_pba_block_size: PBA block size limit
+ *  @pba: pointer to output PBA structure
+ *
+ *  Reads PBA from EEPROM image when eeprom_buf is not NULL.
+ *  Reads PBA from physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+		       u32 eeprom_buf_size, u16 max_pba_block_size,
+		       struct ixgbe_pba *pba)
+{
+	s32 ret_val;
+	u16 pba_block_size;
+
+	if (pba == NULL)
+		return IXGBE_ERR_PARAM;
+
+	if (eeprom_buf == NULL) {
+		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+						     &pba->word[0]);
+		if (ret_val)
+			return ret_val;
+	} else {
+		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+		} else {
+			return IXGBE_ERR_PARAM;
+		}
+	}
+
+	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+		if (pba->pba_block == NULL)
+			return IXGBE_ERR_PARAM;
+
+		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
+						   eeprom_buf_size,
+						   &pba_block_size);
+		if (ret_val)
+			return ret_val;
+
+		if (pba_block_size > max_pba_block_size)
+			return IXGBE_ERR_PARAM;
+
+		if (eeprom_buf == NULL) {
+			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
+							     pba_block_size,
+							     pba->pba_block);
+			if (ret_val)
+				return ret_val;
+		} else {
+			if (eeprom_buf_size > (u32)(pba->word[1] +
+					      pba->pba_block[0])) {
+				memcpy(pba->pba_block,
+				       &eeprom_buf[pba->word[1]],
+				       pba_block_size * sizeof(u16));
+			} else {
+				return IXGBE_ERR_PARAM;
+			}
+		}
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_pba_raw
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @pba: pointer to PBA structure
+ *
+ *  Writes PBA to EEPROM image when eeprom_buf is not NULL.
+ *  Writes PBA to physical EEPROM device when eeprom_buf is NULL.
+ *
+ **/
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			u32 eeprom_buf_size, struct ixgbe_pba *pba)
+{
+	s32 ret_val;
+
+	if (pba == NULL)
+		return IXGBE_ERR_PARAM;
+
+	if (eeprom_buf == NULL) {
+		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+						      &pba->word[0]);
+		if (ret_val)
+			return ret_val;
+	} else {
+		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
+			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
+		} else {
+			return IXGBE_ERR_PARAM;
+		}
+	}
+
+	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
+		if (pba->pba_block == NULL)
+			return IXGBE_ERR_PARAM;
+
+		if (eeprom_buf == NULL) {
+			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
+							      pba->pba_block[0],
+							      pba->pba_block);
+			if (ret_val)
+				return ret_val;
+		} else {
+			if (eeprom_buf_size > (u32)(pba->word[1] +
+					      pba->pba_block[0])) {
+				memcpy(&eeprom_buf[pba->word[1]],
+				       pba->pba_block,
+				       pba->pba_block[0] * sizeof(u16));
+			} else {
+				return IXGBE_ERR_PARAM;
+			}
+		}
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_pba_block_size
+ *  @hw: pointer to the HW structure
+ *  @eeprom_buf: optional pointer to EEPROM image
+ *  @eeprom_buf_size: size of EEPROM image in words
+ *  @pba_data_size: pointer to output variable
+ *
+ *  Returns the size of the PBA block in words. Function operates on EEPROM
+ *  image if the eeprom_buf pointer is not NULL otherwise it accesses physical
+ *  EEPROM device.
+ *
+ **/
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			     u32 eeprom_buf_size, u16 *pba_block_size)
+{
+	s32 ret_val;
+	u16 pba_word[2];
+	u16 length;
+
+	DEBUGFUNC("ixgbe_get_pba_block_size");
+
+	if (eeprom_buf == NULL) {
+		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
+						     &pba_word[0]);
+		if (ret_val)
+			return ret_val;
+	} else {
+		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
+			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
+			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
+		} else {
+			return IXGBE_ERR_PARAM;
+		}
+	}
+
+	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
+		if (eeprom_buf == NULL) {
+			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
+						      &length);
+			if (ret_val)
+				return ret_val;
+		} else {
+			if (eeprom_buf_size > pba_word[1])
+				length = eeprom_buf[pba_word[1] + 0];
+			else
+				return IXGBE_ERR_PARAM;
+		}
+
+		if (length == 0xFFFF || length == 0)
+			return IXGBE_ERR_PBA_SECTION;
+	} else {
+		/* PBA number in legacy format, there is no PBA Block. */
+		length = 0;
+	}
+
+	if (pba_block_size != NULL)
+		*pba_block_size = length;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
  *  ixgbe_get_mac_addr_generic - Generic get MAC address
  *  @hw: pointer to hardware structure
  *  @mac_addr: Adapter MAC address
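
The PBA helpers added above support both on-NVM layouts: a legacy two-word PBA number, and a guarded layout where word 0 is IXGBE_PBANUM_PTR_GUARD and word 1 points at a length-prefixed block. The sketch below decodes a PBA from an in-memory EEPROM image in the same spirit; the pointer offsets and the guard value mirror the driver's constants but are assumptions here, and the bounds checks are simplified.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PBANUM0_PTR      0x15    /* assumed word offsets of the PBA header */
#define PBANUM1_PTR      0x16
#define PBANUM_PTR_GUARD 0xFAFA  /* assumed guard value */

/* Decode a PBA from an in-memory EEPROM image: legacy two-word number,
 * or guard word plus a pointer to a length-prefixed block. */
static int read_pba_from_image(const uint16_t *img, size_t words,
                               uint16_t out_words[2],
                               uint16_t *block, uint16_t max_block)
{
    uint16_t ptr, len;

    if (words <= PBANUM1_PTR)
        return -1;
    out_words[0] = img[PBANUM0_PTR];
    out_words[1] = img[PBANUM1_PTR];

    if (out_words[0] != PBANUM_PTR_GUARD)
        return 0;                        /* legacy format, no block */

    ptr = out_words[1];
    if (ptr >= words)
        return -1;
    len = img[ptr];                      /* first word is the block length */
    if (len == 0 || len == 0xFFFF || len > max_block || ptr + len > words)
        return -1;
    memcpy(block, &img[ptr], len * sizeof(uint16_t));
    return len;
}

int main(void)
{
    uint16_t img[64] = { 0 };
    uint16_t hdr[2], block[8];

    img[PBANUM0_PTR] = PBANUM_PTR_GUARD;
    img[PBANUM1_PTR] = 0x20;             /* block lives at word 0x20 */
    img[0x20] = 3;                       /* length-prefixed block    */
    img[0x21] = 0x1234;
    img[0x22] = 0x5678;

    int n = read_pba_from_image(img, 64, hdr, block, 8);
    printf("block words: %d\n", n);
    return 0;
}
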
@@ -709,23 +956,18 @@
 }
 
 /**
- *  ixgbe_get_bus_info_generic - Generic set PCI bus info
+ *  ixgbe_set_pci_config_data_generic - Generic store PCI bus info
  *  @hw: pointer to hardware structure
+ *  @link_status: the link status returned by the PCI config space
  *
- *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ *  Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
  **/
-s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
-	u16 link_status;
 
-	DEBUGFUNC("ixgbe_get_bus_info_generic");
-
 	hw->bus.type = ixgbe_bus_type_pci_express;
 
-	/* Get the negotiated link width and speed from PCI config space */
-	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
-
 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
 	case IXGBE_PCI_LINK_WIDTH_1:
 		hw->bus.width = ixgbe_bus_width_pcie_x1;
@@ -760,7 +1002,26 @@
 	}
 
 	mac->ops.set_lan_id(hw);
+}
 
+/**
+ *  ixgbe_get_bus_info_generic - Generic set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Gets the PCI bus info (speed, width, type) then calls helper function to
+ *  store this data within the ixgbe_hw structure.
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+	u16 link_status;
+
+	DEBUGFUNC("ixgbe_get_bus_info_generic");
+
+	/* Get the negotiated link width and speed from PCI config space */
+	link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+	ixgbe_set_pci_config_data_generic(hw, link_status);
+
 	return IXGBE_SUCCESS;
 }
 
@@ -1259,16 +1520,18 @@
 
 	if (words == 0) {
 		status = IXGBE_ERR_INVALID_ARGUMENT;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
 		goto out;
 	}
 
 	if (offset >= hw->eeprom.word_size) {
 		status = IXGBE_ERR_EEPROM;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
 		goto out;
 	}
 
 	for (i = 0; i < words; i++) {
-		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
 		       IXGBE_EEPROM_RW_REG_START;
 
 		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
@@ -1365,11 +1628,13 @@
 
 	if (words == 0) {
 		status = IXGBE_ERR_INVALID_ARGUMENT;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
 		goto out;
 	}
 
 	if (offset >= hw->eeprom.word_size) {
 		status = IXGBE_ERR_EEPROM;
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
 		goto out;
 	}
 
@@ -1438,6 +1703,11 @@
 		}
 		usec_delay(5);
 	}
+
+	if (i == IXGBE_EERD_EEWR_ATTEMPTS)
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "EEPROM read/write done polling timed out");
+
 	return status;
 }
 
@@ -1573,14 +1843,15 @@
 		 * was not granted because we don't have access to the EEPROM
 		 */
 		if (i >= timeout) {
-			DEBUGOUT("SWESMBI Software EEPROM semaphore "
-				 "not granted.\n");
+			ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			    "SWESMBI Software EEPROM semaphore not granted.\n");
 			ixgbe_release_eeprom_semaphore(hw);
 			status = IXGBE_ERR_EEPROM;
 		}
 	} else {
-		DEBUGOUT("Software semaphore SMBI between device drivers "
-			 "not granted.\n");
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Software semaphore SMBI between device drivers "
+			     "not granted.\n");
 	}
 
 	return status;
@@ -2004,7 +2275,8 @@
 
 	/* Make sure we are using a valid rar index range */
 	if (index >= rar_entries) {
-		DEBUGOUT1("RAR index %d is out of range.\n", index);
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", index);
 		return IXGBE_ERR_INVALID_ARGUMENT;
 	}
 
@@ -2053,7 +2325,8 @@
 
 	/* Make sure we are using a valid rar index range */
 	if (index >= rar_entries) {
-		DEBUGOUT1("RAR index %d is out of range.\n", index);
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", index);
 		return IXGBE_ERR_INVALID_ARGUMENT;
 	}
 
@@ -2496,7 +2769,8 @@
 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
 		break;
 	default:
-		DEBUGOUT("Flow control param set incorrectly\n");
+		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
+			     "Flow control param set incorrectly\n");
 		ret_val = IXGBE_ERR_CONFIG;
 		goto out;
 		break;
@@ -2557,8 +2831,13 @@
 static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
 			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
 {
-	if ((!(adv_reg)) ||  (!(lp_reg)))
+	if ((!(adv_reg)) ||  (!(lp_reg))) {
+		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
+			     "Local or link partner's advertised flow control "
+			     "settings are NULL. Local: %x, link partner: %x\n",
+			     adv_reg, lp_reg);
 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
+	}
 
 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
 		/*
@@ -2609,8 +2888,11 @@
 
 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
-	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
+	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Auto-Negotiation did not complete or timed out");
 		goto out;
+	}
 
 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2642,13 +2924,19 @@
 	 * - we are 82599 and link partner is not AN enabled
 	 */
 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
-	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
+	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Auto-Negotiation did not complete");
 		goto out;
+	}
 
 	if (hw->mac.type == ixgbe_mac_82599EB) {
 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
-		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
+		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+			ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+				     "Link partner is not AN enabled");
 			goto out;
+		}
 	}
 	/*
 	 * Read the 10g AN autoc and LP ability registers and resolve
@@ -2710,15 +2998,21 @@
 	 * - FC autoneg is disabled, or if
 	 * - link is not up.
 	 */
-	if (hw->fc.disable_fc_autoneg)
+	if (hw->fc.disable_fc_autoneg) {
+		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+			     "Flow control autoneg is disabled");
 		goto out;
+	}
 
 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
-	if (!link_up)
+	if (!link_up) {
+		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
 		goto out;
+	}
 
 	switch (hw->phy.media_type) {
 	/* Autoneg flow control on fiber adapters */
+	case ixgbe_media_type_fiber_fixed:
 	case ixgbe_media_type_fiber:
 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
 			ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2731,7 +3025,7 @@
 
 	/* Autoneg flow control on copper adapters */
 	case ixgbe_media_type_copper:
-		if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
+		if (ixgbe_device_supports_autoneg_fc(hw))
 			ret_val = ixgbe_fc_autoneg_copper(hw);
 		break;
 
@@ -2748,6 +3042,53 @@
 	}
 }
 
+/*
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec.  Never return less than
+ * 800 = 80 millisec.
+ */
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+	s16 devctl2;
+	u32 pollcnt;
+
+	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+	switch (devctl2) {
+	case IXGBE_PCIDEVCTRL2_65_130ms:
+		pollcnt = 1300;		/* 130 millisec */
+		break;
+	case IXGBE_PCIDEVCTRL2_260_520ms:
+		pollcnt = 5200;		/* 520 millisec */
+		break;
+	case IXGBE_PCIDEVCTRL2_1_2s:
+		pollcnt = 20000;	/* 2 sec */
+		break;
+	case IXGBE_PCIDEVCTRL2_4_8s:
+		pollcnt = 80000;	/* 8 sec */
+		break;
+	case IXGBE_PCIDEVCTRL2_17_34s:
+		pollcnt = 34000;	/* 34 sec */
+		break;
+	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
+	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
+	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
+	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
+	default:
+		pollcnt = 800;		/* 80 millisec minimum */
+		break;
+	}
+
+	/* add 10% to spec maximum */
+	return (pollcnt * 11) / 10;
+}
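For reference, a minimal standalone sketch of the poll-count math above, assuming
only the 100-microsecond poll unit and the 80 ms floor visible in this hunk (the
PCIe register encodings themselves are not reproduced):

	#include <stdio.h>

	/* spec maximum in milliseconds -> number of 100 us polls, plus 10% */
	static unsigned int polls_for_timeout_ms(unsigned int max_ms)
	{
		unsigned int pollcnt = max_ms * 10;	/* 100 us units */

		if (pollcnt < 800)			/* never below 80 ms */
			pollcnt = 800;
		return (pollcnt * 11) / 10;		/* add 10% headroom */
	}

	int main(void)
	{
		/* the 65-130 ms range: 1300 polls becomes 1430, i.e. 143 ms */
		printf("%u\n", polls_for_timeout_ms(130));
		return 0;
	}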
+
 /**
  *  ixgbe_disable_pcie_master - Disable PCI-express master access
  *  @hw: pointer to hardware structure
@@ -2760,7 +3101,7 @@
 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
 	s32 status = IXGBE_SUCCESS;
-	u32 i;
+	u32 i, poll;
 
 	DEBUGFUNC("ixgbe_disable_pcie_master");
 
@@ -2767,7 +3108,7 @@
 	/* Always set this bit to ensure any future transactions are blocked */
 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
 
-	/* Exit if master requets are blocked */
+	/* Exit if master requests are blocked */
 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
 		goto out;
 
@@ -2793,7 +3134,8 @@
 	 * Before proceeding, make sure that the PCIe block does not have
 	 * transactions pending.
 	 */
-	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+	poll = ixgbe_pcie_timeout_poll(hw);
+	for (i = 0; i < poll; i++) {
 		usec_delay(100);
 		if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
 		    IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
@@ -2800,7 +3142,8 @@
 			goto out;
 	}
 
-	DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
+	ERROR_REPORT1(IXGBE_ERROR_POLLING,
+		     "PCIe transaction pending bit also did not clear.\n");
 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
 
 out:
@@ -2817,44 +3160,41 @@
  **/
 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 {
-	u32 gssr;
+	u32 gssr = 0;
 	u32 swmask = mask;
 	u32 fwmask = mask << 5;
-	s32 timeout = 200;
+	u32 timeout = 200;
+	u32 i;
 
 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
 
-	while (timeout) {
+	for (i = 0; i < timeout; i++) {
 		/*
-		 * SW EEPROM semaphore bit is used for access to all
-		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
+		 * SW NVM semaphore bit is used for access to all
+		 * SW_FW_SYNC bits (not just NVM)
 		 */
 		if (ixgbe_get_eeprom_semaphore(hw))
 			return IXGBE_ERR_SWFW_SYNC;
 
 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
-		if (!(gssr & (fwmask | swmask)))
-			break;
-
-		/*
-		 * Firmware currently using resource (fwmask) or other software
-		 * thread currently using resource (swmask)
-		 */
-		ixgbe_release_eeprom_semaphore(hw);
-		msec_delay(5);
-		timeout--;
+		if (!(gssr & (fwmask | swmask))) {
+			gssr |= swmask;
+			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+			ixgbe_release_eeprom_semaphore(hw);
+			return IXGBE_SUCCESS;
+		} else {
+			/* Resource is currently in use by FW or SW */
+			ixgbe_release_eeprom_semaphore(hw);
+			msec_delay(5);
+		}
 	}
 
-	if (!timeout) {
-		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
-		return IXGBE_ERR_SWFW_SYNC;
-	}
+	/* If time expired clear the bits holding the lock and retry */
+	if (gssr & (fwmask | swmask))
+		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
 
-	gssr |= swmask;
-	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
-
-	ixgbe_release_eeprom_semaphore(hw);
-	return IXGBE_SUCCESS;
+	msec_delay(5);
+	return IXGBE_ERR_SWFW_SYNC;
 }
 
 /**
@@ -2965,6 +3305,7 @@
 	bool link_up = 0;
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 ret_val = IXGBE_SUCCESS;
 
 	DEBUGFUNC("ixgbe_blink_led_start_generic");
 
@@ -2975,10 +3316,29 @@
 	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
 
 	if (!link_up) {
+		/* Need the SW/FW semaphore around AUTOC writes if 82599 and
+		 * LESM is on.
+		 */
+		bool got_lock = FALSE;
+		if ((hw->mac.type == ixgbe_mac_82599EB) &&
+		    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+			ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+							IXGBE_GSSR_MAC_CSR_SM);
+			if (ret_val != IXGBE_SUCCESS) {
+				ret_val = IXGBE_ERR_SWFW_SYNC;
+				goto out;
+			}
+			got_lock = TRUE;
+		}
+
 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 		autoc_reg |= IXGBE_AUTOC_FLU;
 		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 		IXGBE_WRITE_FLUSH(hw);
+
+		if (got_lock)
+			hw->mac.ops.release_swfw_sync(hw,
+						      IXGBE_GSSR_MAC_CSR_SM);
 		msec_delay(10);
 	}
 
@@ -2987,7 +3347,8 @@
 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 	IXGBE_WRITE_FLUSH(hw);
 
-	return IXGBE_SUCCESS;
+out:
+	return ret_val;
 }
 
 /**
@@ -2999,8 +3360,23 @@
 {
 	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+	s32 ret_val = IXGBE_SUCCESS;
+	bool got_lock = FALSE;
 
 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
+	/* Need the SW/FW semaphore around AUTOC writes if 82599 and
+	 * LESM is on.
+	 */
+	if ((hw->mac.type == ixgbe_mac_82599EB) &&
+	    ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+		ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+						IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val != IXGBE_SUCCESS) {
+			ret_val = IXGBE_ERR_SWFW_SYNC;
+			goto out;
+		}
+		got_lock = TRUE;
+	}
 
 
 	autoc_reg &= ~IXGBE_AUTOC_FLU;
@@ -3007,6 +3383,12 @@
 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
 
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		ixgbe_reset_pipeline_82599(hw);
+
+	if (got_lock)
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
 	led_reg &= ~IXGBE_LED_BLINK(index);
 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
@@ -3013,7 +3395,8 @@
 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
 	IXGBE_WRITE_FLUSH(hw);
 
-	return IXGBE_SUCCESS;
+out:
+	return ret_val;
 }
 
 /**
@@ -3028,6 +3411,8 @@
 static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
 					 u16 *san_mac_offset)
 {
+	s32 ret_val;
+
 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
 
 	/*
@@ -3034,9 +3419,15 @@
 	 * First read the EEPROM pointer to see if the MAC addresses are
 	 * available.
 	 */
-	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+				      san_mac_offset);
+	if (ret_val) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom at offset %d failed",
+			      IXGBE_SAN_MAC_ADDR_PTR);
+	}
 
-	return IXGBE_SUCCESS;
+	return ret_val;
 }
 
 /**
@@ -3053,6 +3444,7 @@
 {
 	u16 san_mac_data, san_mac_offset;
 	u8 i;
+	s32 ret_val;
 
 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
 
@@ -3060,18 +3452,9 @@
 	 * First read the EEPROM pointer to see if the MAC addresses are
 	 * available.  If they're not, no point in calling set_lan_id() here.
 	 */
-	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
-
-	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-		/*
-		 * No addresses available in this EEPROM.  It's not an
-		 * error though, so just wipe the local address and return.
-		 */
-		for (i = 0; i < 6; i++)
-			san_mac_addr[i] = 0xFF;
-
+	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
 		goto san_mac_addr_out;
-	}
 
 	/* make sure we know which port we need to program */
 	hw->mac.ops.set_lan_id(hw);
@@ -3079,13 +3462,27 @@
 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
 	for (i = 0; i < 3; i++) {
-		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+					      &san_mac_data);
+		if (ret_val) {
+			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+				      "eeprom read at offset %d failed",
+				      san_mac_offset);
+			goto san_mac_addr_out;
+		}
 		san_mac_addr[i * 2] = (u8)(san_mac_data);
 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
 		san_mac_offset++;
 	}
+	return IXGBE_SUCCESS;
 
 san_mac_addr_out:
+	/*
+	 * No addresses available in this EEPROM.  It's not an
+	 * error though, so just wipe the local address and return.
+	 */
+	for (i = 0; i < 6; i++)
+		san_mac_addr[i] = 0xFF;
 	return IXGBE_SUCCESS;
 }
 
@@ -3098,7 +3495,7 @@
  **/
 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
 {
-	s32 status = IXGBE_SUCCESS;
+	s32 ret_val;
 	u16 san_mac_data, san_mac_offset;
 	u8 i;
 
@@ -3105,13 +3502,10 @@
 	DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
 
 	/* Look for SAN mac address pointer.  If not defined, return */
-	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
+		return IXGBE_ERR_NO_SAN_ADDR_PTR;
 
-	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
-		status = IXGBE_ERR_NO_SAN_ADDR_PTR;
-		goto san_mac_addr_out;
-	}
-
 	/* Make sure we know which port we need to write */
 	hw->mac.ops.set_lan_id(hw);
 	/* Apply the port offset to the address offset */
@@ -3125,8 +3519,7 @@
 		san_mac_offset++;
 	}
 
-san_mac_addr_out:
-	return status;
+	return IXGBE_SUCCESS;
 }
 
 /**
@@ -3253,7 +3646,8 @@
 
 	/* Make sure we are using a valid rar index range */
 	if (rar >= rar_entries) {
-		DEBUGOUT1("RAR index %d is out of range.\n", rar);
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", rar);
 		return IXGBE_ERR_INVALID_ARGUMENT;
 	}
 
@@ -3302,7 +3696,8 @@
 
 	/* Make sure we are using a valid rar index range */
 	if (rar >= rar_entries) {
-		DEBUGOUT1("RAR index %d is out of range.\n", rar);
+		ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
+			     "RAR index %d is out of range.\n", rar);
 		return IXGBE_ERR_INVALID_ARGUMENT;
 	}
 
@@ -3401,7 +3796,8 @@
 		if (first_empty_slot)
 			regindex = first_empty_slot;
 		else {
-			DEBUGOUT("No space in VLVF.\n");
+			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+				     "No space in VLVF.\n");
 			regindex = IXGBE_ERR_NO_SPACE;
 		}
 	}
@@ -3691,8 +4087,9 @@
 	*wwpn_prefix = 0xFFFF;
 
 	/* check if alternative SAN MAC is supported */
-	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
-			    &alt_san_mac_blk_offset);
+	offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+	if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+		goto wwn_prefix_err;
 
 	if ((alt_san_mac_blk_offset == 0) ||
 	    (alt_san_mac_blk_offset == 0xFFFF))
@@ -3700,19 +4097,29 @@
 
 	/* check capability in alternative san mac address block */
 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
-	hw->eeprom.ops.read(hw, offset, &caps);
+	if (hw->eeprom.ops.read(hw, offset, &caps))
+		goto wwn_prefix_err;
 	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
 		goto wwn_prefix_out;
 
 	/* get the corresponding prefix for WWNN/WWPN */
 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
-	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+	if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed", offset);
+	}
 
 	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
-	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+	if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+		goto wwn_prefix_err;
 
 wwn_prefix_out:
 	return IXGBE_SUCCESS;
+
+wwn_prefix_err:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", offset);
+	return IXGBE_SUCCESS;
 }
 
 /**
@@ -3882,7 +4289,7 @@
  *  Calculates the checksum for some buffer on a specified length.  The
  *  checksum calculated is returned.
  **/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
 {
 	u32 i;
 	u8 sum = 0;
@@ -3908,8 +4315,8 @@
  *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
  *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
  **/
-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
-					u32 length)
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+				 u32 length)
 {
 	u32 hicr, i, bi;
 	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
@@ -4158,3 +4565,21 @@
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 }
 
+
+/**
+ * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
+ * @hw: pointer to hardware structure
+ * @map: pointer to u8 arr for returning map
+ *
+ * Read the rtrup2tc HW register and resolve its content into map
+ **/
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
+{
+	u32 reg, i;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+		map[i] = IXGBE_RTRUP2TC_UP_MASK &
+			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
+	return;
+}
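The decode above is a plain mask-and-shift walk over eight per-priority fields.  A
standalone sketch, assuming the usual 3-bit field layout (IXGBE_RTRUP2TC_UP_SHIFT
of 3 and a mask of 0x7; neither value appears in this hunk, so treat them as an
assumption) and an arbitrary register value:

	#include <stdio.h>

	#define UP_SHIFT 3	/* assumed bits per user priority */
	#define UP_MASK  0x7
	#define MAX_UP   8	/* IXGBE_DCB_MAX_USER_PRIORITY */

	int main(void)
	{
		unsigned int reg = 0x00fac688;	/* example value only */
		unsigned char map[MAX_UP];

		for (int i = 0; i < MAX_UP; i++)
			map[i] = UP_MASK & (reg >> (i * UP_SHIFT));

		for (int i = 0; i < MAX_UP; i++)
			printf("UP %d -> TC %u\n", i, map[i]);
		return 0;
	}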


Property changes on: trunk/sys/dev/ixgbe/ixgbe_common.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_common.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_common.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_common.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_common.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 #ifndef _IXGBE_COMMON_H_
 #define _IXGBE_COMMON_H_
@@ -41,9 +42,16 @@
 		IXGBE_WRITE_REG(hw, reg, (u32) value); \
 		IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
 	} while (0)
+#if !defined(NO_READ_PBA_RAW) || !defined(NO_WRITE_PBA_RAW)
+struct ixgbe_pba {
+	u16 word[2];
+	u16 *pba_block;
+};
+#endif
 
+void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map);
+
 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
-
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -52,8 +60,16 @@
 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
 				  u32 pba_num_size);
+s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+		       u32 eeprom_buf_size, u16 max_pba_block_size,
+		       struct ixgbe_pba *pba);
+s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			u32 eeprom_buf_size, struct ixgbe_pba *pba);
+s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
+			     u32 eeprom_buf_size, u16 *pba_block_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status);
 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
 
@@ -96,6 +112,7 @@
 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw);
 
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
 void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
 
 s32 ixgbe_validate_mac_addr(u8 *mac_addr);
@@ -137,5 +154,12 @@
 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 				 u8 build, u8 ver);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+				 u32 length);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+
+extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
+
 #endif /* IXGBE_COMMON */


Property changes on: trunk/sys/dev/ixgbe/ixgbe_common.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Added: trunk/sys/dev/ixgbe/ixgbe_dcb.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_dcb.c	                        (rev 0)
+++ trunk/sys/dev/ixgbe/ixgbe_dcb.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -0,0 +1,719 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_dcb.c 252898 2013-07-06 21:38:55Z jfv $*/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_calculate_tc_credits - This calculates the IEEE traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups so this is much simplified from the CEE case.
+ */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max,
+				   int max_frame_size)
+{
+	int min_percent = 100;
+	int min_credit, multiplier;
+	int i;
+
+	min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+			IXGBE_DCB_CREDIT_QUANTUM;
+
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if (bw[i] < min_percent && bw[i])
+			min_percent = bw[i];
+	}
+
+	multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the hw credits for each TC */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL);
+
+		if (val < min_credit)
+			val = min_credit;
+		refill[i] = (u16)val;
+
+		max[i] = bw[i] ? (bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit;
+	}
+
+	return 0;
+}
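A quick worked example of the credit math above, using the constants from
ixgbe_dcb.h (64-byte quantum, refill cap 200, max credit 400): with
max_frame_size = 9000 and bw = {20, 30, 50, 0, 0, 0, 0, 0}, min_credit =
(4500 + 63) / 64 = 71 quanta and the smallest non-zero percentage is 20, so
multiplier = 71 / 20 + 1 = 4.  That yields refill = {80, 120, 200, 71, ...}
(unused classes are floored at min_credit, and the 50% class hits the 200 cap
exactly) and max = {80, 120, 200, 71, ...}.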
+
+/**
+ * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits
+ * @ixgbe_dcb_config: Struct containing DCB settings.
+ * @direction: Configuring either Tx or Rx.
+ *
+ * This function calculates the credits allocated to each traffic class.
+ * It should be called only after the rules are checked by
+ * ixgbe_dcb_check_config_cee().
+ */
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw,
+				   struct ixgbe_dcb_config *dcb_config,
+				   u32 max_frame_size, u8 direction)
+{
+	struct ixgbe_dcb_tc_path *p;
+	u32 min_multiplier	= 0;
+	u16 min_percent		= 100;
+	s32 ret_val =		IXGBE_SUCCESS;
+	/* Initialization values default for Tx settings */
+	u32 min_credit		= 0;
+	u32 credit_refill	= 0;
+	u32 credit_max		= 0;
+	u16 link_percentage	= 0;
+	u8  bw_percent		= 0;
+	u8  i;
+
+	if (dcb_config == NULL) {
+		ret_val = IXGBE_ERR_CONFIG;
+		goto out;
+	}
+
+	min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) /
+		     IXGBE_DCB_CREDIT_QUANTUM;
+
+	/* Find smallest link percentage */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+		link_percentage = p->bwg_percent;
+
+		link_percentage = (link_percentage * bw_percent) / 100;
+
+		if (link_percentage && link_percentage < min_percent)
+			min_percent = link_percentage;
+	}
+
+	/*
+	 * The ratio between traffic classes will control the bandwidth
+	 * percentages seen on the wire. To calculate this ratio we use
+	 * a multiplier. The refill credits must be larger than the max
+	 * frame size, so here we find the smallest multiplier that keeps
+	 * every class's refill credits above the max frame size.
+	 */
+	min_multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the link percentage for each TC first */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		p = &dcb_config->tc_config[i].path[direction];
+		bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+
+		link_percentage = p->bwg_percent;
+		/* Must be careful of integer division for very small nums */
+		link_percentage = (link_percentage * bw_percent) / 100;
+		if (p->bwg_percent > 0 && link_percentage == 0)
+			link_percentage = 1;
+
+		/* Save link_percentage for reference */
+		p->link_percent = (u8)link_percentage;
+
+		/* Calculate credit refill ratio using multiplier */
+		credit_refill = min(link_percentage * min_multiplier,
+				    (u32)IXGBE_DCB_MAX_CREDIT_REFILL);
+		p->data_credits_refill = (u16)credit_refill;
+
+		/* Calculate maximum credit for the TC */
+		credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100;
+
+		/*
+		 * Adjustment based on rule checking, if the percentage
+		 * of a TC is too small, the maximum credit may not be
+		 * enough to send out a jumbo frame in data plane arbitration.
+		 */
+		if (credit_max && (credit_max < min_credit))
+			credit_max = min_credit;
+
+		if (direction == IXGBE_DCB_TX_CONFIG) {
+			/*
+			 * Adjustment based on rule checking, if the
+			 * percentage of a TC is too small, the maximum
+			 * credit may not be enough to send out a TSO
+			 * packet in descriptor plane arbitration.
+			 */
+			if (credit_max && (credit_max <
+			    IXGBE_DCB_MIN_TSO_CREDIT)
+			    && (hw->mac.type == ixgbe_mac_82598EB))
+				credit_max = IXGBE_DCB_MIN_TSO_CREDIT;
+
+			dcb_config->tc_config[i].desc_credits_max =
+								(u16)credit_max;
+		}
+
+		p->data_credits_max = (u16)credit_max;
+	}
+
+out:
+	return ret_val;
+}
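Continuing the same numbers for the CEE variant: a TC that gets 50% of a
bandwidth group which itself holds 40% of the link has link_percentage =
(50 * 40) / 100 = 20; with min_credit = 71 and min_percent = 20 the
multiplier is 71 / 20 + 1 = 4, so data_credits_refill = min(20 * 4, 200) = 80
and data_credits_max = (20 * 400) / 100 = 80, already above the 71-quanta
floor.  On 82598 the Tx descriptor path would additionally bump that maximum
to the 513-credit TSO minimum.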
+
+/**
+ * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info
+ * @cfg: dcb configuration to unpack into hardware consumable fields
+ * @map: user priority to traffic class map
+ * @pfc_up: u8 to store user priority PFC bitmask
+ *
+ * This unpacks the dcb configuration PFC info which is stored per
+ * traffic class into a 8bit user priority bitmask that can be
+ * consumed by hardware routines. The priority to tc map must be
+ * updated before calling this routine to use current up-to maps.
+ */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int up;
+
+	/*
+	 * If the TC for this user priority has PFC enabled then set the
+	 * matching bit in 'pfc_up' to reflect that PFC is enabled.
+	 */
+	for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) {
+		if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled)
+			*pfc_up |= 1 << up;
+	}
+}
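As a small worked example of the unpack: with map = {0, 0, 1, 1, 2, 2, 3, 3}
and PFC enabled only on TC1 and TC3, the loop sets the bits for user
priorities 2, 3, 6 and 7, so *pfc_up ends up as 0xCC.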
+
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction,
+			     u16 *refill)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		refill[tc] = tc_config[tc].path[direction].data_credits_refill;
+}
+
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		max[tc] = tc_config[tc].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction,
+			    u8 *bwgid)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		bwgid[tc] = tc_config[tc].path[direction].bwg_id;
+}
+
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction,
+			   u8 *tsa)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	int tc;
+
+	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++)
+		tsa[tc] = tc_config[tc].path[direction].tsa;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+	struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0];
+	u8 prio_mask = 1 << up;
+	u8 tc = cfg->num_tcs.pg_tcs;
+
+	/* If tc is 0 then DCB is likely not enabled or supported */
+	if (!tc)
+		goto out;
+
+	/*
+	 * Test from maximum TC to 1 and report the first match we find.  If
+	 * we find no match, we can assume the TC is 0, since a TC must
+	 * be set for every user priority.
+	 */
+	for (tc--; tc; tc--) {
+		if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+			break;
+	}
+out:
+	return tc;
+}
+
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
+			      u8 *map)
+{
+	u8 up;
+
+	for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++)
+		map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
+}
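For example, with num_tcs.pg_tcs = 4 and TC2's up_to_tc_bitmap for the chosen
direction set to 0x30 (user priorities 4 and 5), a lookup of UP 4 builds
prio_mask = 0x10, starts scanning at tc = 3, misses there (assuming TC3 does
not also claim UP 4), matches at tc = 2 and returns 2; a priority found in no
bitmap falls through to TC 0.  ixgbe_dcb_unpack_map_cee() simply repeats that
lookup for all eight priorities to fill map[].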
+
+/**
+ * ixgbe_dcb_check_config_cee - Check DCB rules for DCB settings
+ * @dcb_config: Pointer to DCB config structure
+ *
+ * This function checks DCB rules for DCB settings.
+ * The following rules are checked:
+ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%.
+ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth
+ *    Group must total 100.
+ * 3. A Traffic Class should not be set to both Link Strict Priority
+ *    and Group Strict Priority.
+ * 4. Link strict Bandwidth Groups can only have link strict traffic classes
+ *    with zero bandwidth.
+ */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config)
+{
+	struct ixgbe_dcb_tc_path *p;
+	s32 ret_val = IXGBE_SUCCESS;
+	u8 i, j, bw = 0, bw_id;
+	u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP];
+	bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP];
+
+	memset(bw_sum, 0, sizeof(bw_sum));
+	memset(link_strict, 0, sizeof(link_strict));
+
+	/* First Tx, then Rx */
+	for (i = 0; i < 2; i++) {
+		/* Check each traffic class for rule violation */
+		for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) {
+			p = &dcb_config->tc_config[j].path[i];
+
+			bw = p->bwg_percent;
+			bw_id = p->bwg_id;
+
+			if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) {
+				ret_val = IXGBE_ERR_CONFIG;
+				goto err_config;
+			}
+			if (p->tsa == ixgbe_dcb_tsa_strict) {
+				link_strict[i][bw_id] = TRUE;
+				/* Link strict should have zero bandwidth */
+				if (bw) {
+					ret_val = IXGBE_ERR_CONFIG;
+					goto err_config;
+				}
+			} else if (!bw) {
+				/*
+				 * Traffic classes without link strict
+				 * should have non-zero bandwidth.
+				 */
+				ret_val = IXGBE_ERR_CONFIG;
+				goto err_config;
+			}
+			bw_sum[i][bw_id] += bw;
+		}
+
+		bw = 0;
+
+		/* Check each bandwidth group for rule violation */
+		for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) {
+			bw += dcb_config->bw_percentage[i][j];
+			/*
+			 * Sum of bandwidth percentages of all traffic classes
+			 * within a Bandwidth Group must total 100 except for
+			 * link strict group (zero bandwidth).
+			 */
+			if (link_strict[i][j]) {
+				if (bw_sum[i][j]) {
+					/*
+					 * Link strict group should have zero
+					 * bandwidth.
+					 */
+					ret_val = IXGBE_ERR_CONFIG;
+					goto err_config;
+				}
+			} else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT &&
+				   bw_sum[i][j] != 0) {
+				ret_val = IXGBE_ERR_CONFIG;
+				goto err_config;
+			}
+		}
+
+		if (bw != IXGBE_DCB_BW_PERCENT) {
+			ret_val = IXGBE_ERR_CONFIG;
+			goto err_config;
+		}
+	}
+
+err_config:
+	DEBUGOUT2("DCB error code %d while checking %s settings.\n",
+		  ret_val, (i == IXGBE_DCB_TX_CONFIG) ? "Tx" : "Rx");
+
+	return ret_val;
+}
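Concretely: a TC marked link strict (tsa == ixgbe_dcb_tsa_strict) with a
non-zero bwg_percent fails the check, as does a non-strict bandwidth group
whose member percentages sum to, say, 90 instead of 100, or a set of group
percentages that do not add up to IXGBE_DCB_BW_PERCENT; every such path
returns IXGBE_ERR_CONFIG.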
+
+/**
+ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+			   u8 tc_count)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats,
+			    u8 tc_count)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw,
+				struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY]	= { 0 };
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]	= { 0 };
+
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid,
+							tsa, map);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw,
+				     struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+							     bwgid, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+							     bwgid, tsa);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw,
+				     struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+							     bwgid, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+							     bwgid, tsa,
+							     map);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_cee - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw,
+			 struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 pfc_en;
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+	ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_tc_stats_82598(hw);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+/**
+ * ixgbe_dcb_hw_config_cee - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw,
+			struct ixgbe_dcb_config *dcb_config)
+{
+	s32 ret = IXGBE_NOT_IMPLEMENTED;
+	u8 pfc_en;
+	u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
+	u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+
+	/* Unpack CEE standard containers */
+	ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max_cee(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa);
+	ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map);
+
+	hw->mac.ops.setup_rxpba(hw, dcb_config->num_tcs.pg_tcs,
+				0, dcb_config->rx_pba_cfg);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed,
+						refill, max, bwgid, tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ixgbe_dcb_config_82599(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed,
+						refill, max, bwgid,
+						tsa, map);
+
+		ixgbe_dcb_config_tc_stats_82599(hw, dcb_config);
+		break;
+#endif
+	default:
+		break;
+	}
+
+	if (!ret && dcb_config->pfc_mode_enable) {
+		ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
+		ret = ixgbe_dcb_config_pfc(hw, pfc_en, map);
+	}
+
+	return ret;
+}
+
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	int ret = IXGBE_ERR_PARAM;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map);
+		break;
+#endif
+	default:
+		break;
+	}
+	return ret;
+}
+
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max,
+			    u8 *bwg_id, u8 *tsa, u8 *map)
+{
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+						       tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+						       tsa);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+#if !defined(NO_82599_SUPPORT) || !defined(NO_X540_SUPPORT)
+		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
+						  tsa, map);
+		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+						       tsa);
+		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+						       tsa, map);
+		break;
+#endif
+	default:
+		break;
+	}
+	return 0;
+}

Added: trunk/sys/dev/ixgbe/ixgbe_dcb.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_dcb.h	                        (rev 0)
+++ trunk/sys/dev/ixgbe/ixgbe_dcb.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -0,0 +1,177 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_dcb.h 252898 2013-07-06 21:38:55Z jfv $*/
+
+#ifndef _IXGBE_DCB_H_
+#define _IXGBE_DCB_H_
+
+
+#include "ixgbe_type.h"
+
+/* DCB defines */
+/* DCB credit calculation defines */
+#define IXGBE_DCB_CREDIT_QUANTUM	64
+#define IXGBE_DCB_MAX_CREDIT_REFILL	200   /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_TSO_SIZE		(32 * 1024) /* Max TSO pkt size in DCB*/
+#define IXGBE_DCB_MAX_CREDIT		(2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+
+/* 513 for 32KB TSO packet */
+#define IXGBE_DCB_MIN_TSO_CREDIT	\
+	((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1)
+
+/* DCB configuration defines */
+#define IXGBE_DCB_MAX_USER_PRIORITY	8
+#define IXGBE_DCB_MAX_BW_GROUP		8
+#define IXGBE_DCB_BW_PERCENT		100
+
+#define IXGBE_DCB_TX_CONFIG		0
+#define IXGBE_DCB_RX_CONFIG		1
+
+/* DCB capability defines */
+#define IXGBE_DCB_PG_SUPPORT	0x00000001
+#define IXGBE_DCB_PFC_SUPPORT	0x00000002
+#define IXGBE_DCB_BCN_SUPPORT	0x00000004
+#define IXGBE_DCB_UP2TC_SUPPORT	0x00000008
+#define IXGBE_DCB_GSP_SUPPORT	0x00000010
+
+struct ixgbe_dcb_support {
+	u32 capabilities; /* DCB capabilities */
+
+	/* Each bit represents a number of TCs configurable in the hw.
+	 * If 8 traffic classes can be configured, the value is 0x80. */
+	u8 traffic_classes;
+	u8 pfc_traffic_classes;
+};
+
+enum ixgbe_dcb_tsa {
+	ixgbe_dcb_tsa_ets = 0,
+	ixgbe_dcb_tsa_group_strict_cee,
+	ixgbe_dcb_tsa_strict
+};
+
+/* Traffic class bandwidth allocation per direction */
+struct ixgbe_dcb_tc_path {
+	u8 bwg_id; /* Bandwidth Group (BWG) ID */
+	u8 bwg_percent; /* % of BWG's bandwidth */
+	u8 link_percent; /* % of link bandwidth */
+	u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */
+	u16 data_credits_refill; /* Credit refill amount in 64B granularity */
+	u16 data_credits_max; /* Max credits for a configured packet buffer
+			       * in 64B granularity.*/
+	enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */
+};
+
+enum ixgbe_dcb_pfc {
+	ixgbe_dcb_pfc_disabled = 0,
+	ixgbe_dcb_pfc_enabled,
+	ixgbe_dcb_pfc_enabled_txonly,
+	ixgbe_dcb_pfc_enabled_rxonly
+};
+
+/* Traffic class configuration */
+struct ixgbe_dcb_tc_config {
+	struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */
+	enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */
+
+	u16 desc_credits_max; /* For Tx Descriptor arbitration */
+	u8 tc; /* Traffic class (TC) */
+};
+
+enum ixgbe_dcb_pba {
+	/* PBA[0-7] each use 64KB FIFO */
+	ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL,
+	/* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
+	ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED
+};
+
+struct ixgbe_dcb_num_tcs {
+	u8 pg_tcs;
+	u8 pfc_tcs;
+};
+
+struct ixgbe_dcb_config {
+	struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS];
+	struct ixgbe_dcb_support support;
+	struct ixgbe_dcb_num_tcs num_tcs;
+	u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */
+	bool pfc_mode_enable;
+	bool round_robin_enable;
+
+	enum ixgbe_dcb_pba rx_pba_cfg;
+
+	u32 dcb_cfg_version; /* Not used...OS-specific? */
+	u32 link_speed; /* For bandwidth allocation validation purpose */
+	bool vt_mode;
+};
+
+/* DCB driver APIs */
+
+/* DCB rule checking */
+s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *);
+
+/* DCB credits calculation */
+s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int);
+s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *,
+				       struct ixgbe_dcb_config *, u32, u8);
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *);
+s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *,
+					 struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *,
+					 struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *,
+				    struct ixgbe_dcb_config *);
+
+/* DCB unpack routines */
+void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *);
+void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+#endif /* _IXGBE_DCB_H_ */

Added: trunk/sys/dev/ixgbe/ixgbe_dcb_82598.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_dcb_82598.c	                        (rev 0)
+++ trunk/sys/dev/ixgbe/ixgbe_dcb_82598.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -0,0 +1,360 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_dcb_82598.c 252898 2013-07-06 21:38:55Z jfv $*/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82598.h"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw,
+				 struct ixgbe_hw_stats *stats,
+				 u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_tc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	/* Statistics pertaining to each traffic class */
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Transmitted Packets */
+		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+		/* Transmitted Bytes */
+		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc));
+		/* Received Packets */
+		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+		/* Received Bytes */
+		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc));
+
+#if 0
+		/* Can we get rid of these??  Consequently, getting rid
+		 * of the tc_stats structure.
+		 */
+		tc_stats_array[up]->in_overflow_discards = 0;
+		tc_stats_array[up]->out_overflow_discards = 0;
+#endif
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to report statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw,
+				  struct ixgbe_hw_stats *stats,
+				  u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_pfc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Priority XOFF Transmitted */
+		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+		/* Priority XOFF Received */
+		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Rx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill,
+				      u16 *max, u8 *tsa)
+{
+	u32 reg = 0;
+	u32 credit_refill = 0;
+	u32 credit_max = 0;
+	u8 i = 0;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA;
+	IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	/* Enable Arbiter */
+	reg &= ~IXGBE_RMCS_ARBDIS;
+	/* Enable Receive Recycle within the BWG */
+	reg |= IXGBE_RMCS_RRM;
+	/* Enable Deficit Fixed Priority arbitration*/
+	reg |= IXGBE_RMCS_DFP;
+
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max = max[i];
+
+		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RT2CR_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
+	}
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+	reg |= IXGBE_RDRXCTL_RDMTS_1_2;
+	reg |= IXGBE_RDRXCTL_MPBEN;
+	reg |= IXGBE_RDRXCTL_MCEN;
+	IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+	/* Make sure there are enough descriptors before arbitration */
+	reg &= ~IXGBE_RXCTRL_DMBYPS;
+	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82598 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+					   u16 *refill, u16 *max, u8 *bwg_id,
+					   u8 *tsa)
+{
+	u32 reg, max_credits;
+	u8 i;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+
+	/* Enable arbiter */
+	reg &= ~IXGBE_DPMCS_ARBDIS;
+	reg |= IXGBE_DPMCS_TSOEF;
+
+	/* Configure Max TSO packet size 34KB including payload and headers */
+	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
+
+	IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_TDTQ2TCCR_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_TDTQ2TCCR_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82598 - Config Tx data arbiter
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Tx Data Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+					   u16 *refill, u16 *max, u8 *bwg_id,
+					   u8 *tsa)
+{
+	u32 reg;
+	u8 i;
+
+	reg = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
+	/* Enable Data Plane Arbiter */
+	reg &= ~IXGBE_PDPMCS_ARBDIS;
+	/* Enable DFP and Transmit Recycle Mode */
+	reg |= (IXGBE_PDPMCS_TPPAC | IXGBE_PDPMCS_TRM);
+
+	IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_TDPT2TCCR_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_TDPT2TCCR_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
+	}
+
+	/* Enable Tx packet buffer division */
+	reg = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
+	reg |= IXGBE_DTXCTL_ENDBUBD;
+	IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82598 - Config priority flow control
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure Priority Flow Control for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
+{
+	u32 fcrtl, reg;
+	u8 i;
+
+	/* Enable Transmit Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+	reg &= ~IXGBE_RMCS_TFCE_802_3X;
+	reg |= IXGBE_RMCS_TFCE_PRIORITY;
+	IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg);
+
+	/* Enable Receive Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+	reg &= ~(IXGBE_FCTRL_RPFCE | IXGBE_FCTRL_RFCE);
+
+	if (pfc_en)
+		reg |= IXGBE_FCTRL_RPFCE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
+
+	/* Configure PFC Tx thresholds per TC */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		if (!(pfc_en & (1 << i))) {
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
+			continue;
+		}
+
+		fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+		reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
+	}
+
+	/* Configure pause time */
+	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82598 - Configure traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+{
+	u32 reg = 0;
+	u8 i = 0;
+	u8 j = 0;
+
+	/* Receive Queues stats setting -  8 queues per statistics reg */
+	for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i));
+		reg |= ((0x1010101) * j);
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+		reg = IXGBE_READ_REG(hw, IXGBE_RQSMR(i + 1));
+		reg |= ((0x1010101) * j);
+		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg);
+	}
+	/* Transmit Queues stats setting -  4 queues per statistics reg*/
+	for (i = 0; i < 8; i++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i));
+		reg |= ((0x1010101) * i);
+		IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82598 - Config and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: current link speed
+ * @refill: refill credits per traffic class
+ * @max: max credits per traffic class
+ * @bwg_id: bandwidth group index per traffic class
+ * @tsa: transmission selection algorithm per traffic class
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed,
+			      u16 *refill, u16 *max, u8 *bwg_id,
+			      u8 *tsa)
+{
+	ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa);
+	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,
+					       tsa);
+	ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,
+					       tsa);
+	ixgbe_dcb_config_tc_stats_82598(hw);
+
+	return IXGBE_SUCCESS;
+}
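
For readers following the arbiter setup above: each per-TC register written by ixgbe_dcb_config_tx_desc_arbiter_82598() packs the refill credits, max credit limit, bandwidth group index and strict-priority flags into a single 32-bit value. A minimal standalone sketch of that packing (constants copied from ixgbe_dcb_82598.h below; the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Constants as defined in ixgbe_dcb_82598.h below. */
#define IXGBE_TDTQ2TCCR_MCL_SHIFT   12
#define IXGBE_TDTQ2TCCR_BWG_SHIFT   9
#define IXGBE_TDTQ2TCCR_GSP         0x40000000
#define IXGBE_TDTQ2TCCR_LSP         0x80000000

/* Hypothetical helper mirroring the loop body of
 * ixgbe_dcb_config_tx_desc_arbiter_82598() above. */
static uint32_t
pack_tdtq2tccr(uint16_t refill, uint16_t max_credits, uint8_t bwg_id,
    int group_strict, int link_strict)
{
    uint32_t reg;

    reg = (uint32_t)max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
    reg |= refill;
    reg |= (uint32_t)bwg_id << IXGBE_TDTQ2TCCR_BWG_SHIFT;
    if (group_strict)
        reg |= IXGBE_TDTQ2TCCR_GSP;
    if (link_strict)
        reg |= IXGBE_TDTQ2TCCR_LSP;
    return (reg);
}

int
main(void)
{
    /* Example: 64 refill credits, max credit limit 512, bandwidth group 2. */
    printf("TDTQ2TCCR = 0x%08x\n", pack_tdtq2tccr(64, 512, 2, 0, 0));
    return (0);
}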

Added: trunk/sys/dev/ixgbe/ixgbe_dcb_82598.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_dcb_82598.h	                        (rev 0)
+++ trunk/sys/dev/ixgbe/ixgbe_dcb_82598.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -0,0 +1,101 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_dcb_82598.h 252898 2013-07-06 21:38:55Z jfv $*/
+
+#ifndef _IXGBE_DCB_82598_H_
+#define _IXGBE_DCB_82598_H_
+
+/* DCB register definitions */
+
+#define IXGBE_DPMCS_MTSOS_SHIFT	16
+#define IXGBE_DPMCS_TDPAC	0x00000001 /* 0 Round Robin,
+					    * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_DPMCS_TRM		0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_DPMCS_ARBDIS	0x00000040 /* DCB arbiter disable */
+#define IXGBE_DPMCS_TSOEF	0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */
+
+#define IXGBE_RUPPBMR_MQA	0x80000000 /* Enable UP to queue mapping */
+
+#define IXGBE_RT2CR_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RT2CR_LSP		0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
+					    * buffers enable */
+#define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
+					    * (RSS) enable */
+
+#define IXGBE_TDTQ2TCCR_MCL_SHIFT	12
+#define IXGBE_TDTQ2TCCR_BWG_SHIFT	9
+#define IXGBE_TDTQ2TCCR_GSP	0x40000000
+#define IXGBE_TDTQ2TCCR_LSP	0x80000000
+
+#define IXGBE_TDPT2TCCR_MCL_SHIFT	12
+#define IXGBE_TDPT2TCCR_BWG_SHIFT	9
+#define IXGBE_TDPT2TCCR_GSP	0x40000000
+#define IXGBE_TDPT2TCCR_LSP	0x80000000
+
+#define IXGBE_PDPMCS_TPPAC	0x00000020 /* 0 Round Robin,
+					    * 1 DFP - Deficit Fixed Priority */
+#define IXGBE_PDPMCS_ARBDIS	0x00000040 /* Arbiter disable */
+#define IXGBE_PDPMCS_TRM	0x00000100 /* Transmit Recycle Mode enable */
+
+#define IXGBE_DTXCTL_ENDBUBD	0x00000004 /* Enable DBU buffer division */
+
+#define IXGBE_TXPBSIZE_40KB	0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB	0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB	0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB	0x00014000 /* 80KB Packet Buffer */
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *);
+s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *,
+				 struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *,
+				  struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *);
+#endif /* _IXGBE_DCB_82598_H_ */
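
A note on the 0x01010101 multiplier used by ixgbe_dcb_config_tc_stats_82598() above (and by the 82599 variant later in this commit): each RQSMR/TQSMR register holds one byte-wide statistics-counter index per queue, so multiplying the counter index by 0x01010101 replicates it into all four byte lanes of the register. A minimal sketch of the idea (the helper name is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Replicate a byte-wide stat-counter index into all four queue fields
 * of an RQSMR/TQSMR-style register, as done with the 0x01010101
 * multiplier in the traffic-class statistics setup above. */
static uint32_t
stat_map_for_counter(uint8_t counter)
{
    return (0x01010101u * counter);
}

int
main(void)
{
    /* All four queues covered by this register report to counter 2. */
    printf("RQSMR = 0x%08x\n", stat_map_for_counter(2));
    return (0);
}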

Added: trunk/sys/dev/ixgbe/ixgbe_dcb_82599.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_dcb_82599.c	                        (rev 0)
+++ trunk/sys/dev/ixgbe/ixgbe_dcb_82599.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -0,0 +1,587 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_dcb_82599.c 252898 2013-07-06 21:38:55Z jfv $*/
+
+
+#include "ixgbe_type.h"
+#include "ixgbe_dcb.h"
+#include "ixgbe_dcb_82599.h"
+
+/**
+ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to return statistics for.
+ *
+ * This function returns the status data for each of the Traffic Classes in use.
+ */
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
+				 struct ixgbe_hw_stats *stats,
+				 u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_tc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	/* Statistics pertaining to each traffic class */
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Transmitted Packets */
+		stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
+		/* Transmitted Bytes (read low first to prevent missed carry) */
+		stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
+		stats->qbtc[tc] +=
+			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
+		/* Received Packets */
+		stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
+		/* Received Bytes (read low first to prevent missed carry) */
+		stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
+		stats->qbrc[tc] +=
+			(((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);
+
+		/* Received Dropped Packet */
+		stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
+ * @hw: pointer to hardware structure
+ * @stats: pointer to statistics structure
+ * @tc_count: Number of traffic classes to return statistics for.
+ *
+ * This function returns the CBFC status data for each of the Traffic Classes.
+ */
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
+				  struct ixgbe_hw_stats *stats,
+				  u8 tc_count)
+{
+	int tc;
+
+	DEBUGFUNC("dcb_get_pfc_stats");
+
+	if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
+		return IXGBE_ERR_PARAM;
+
+	for (tc = 0; tc < tc_count; tc++) {
+		/* Priority XOFF Transmitted */
+		stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
+		/* Priority XOFF Received */
+		stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits per traffic class
+ * @max: max credits per traffic class
+ * @bwg_id: bandwidth group index per traffic class
+ * @tsa: transmission selection algorithm per traffic class
+ * @map: user priority to traffic class mapping
+ *
+ * Configure Rx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+				      u16 *max, u8 *bwg_id, u8 *tsa,
+				      u8 *map)
+{
+	u32 reg = 0;
+	u32 credit_refill = 0;
+	u32 credit_max = 0;
+	u8  i = 0;
+
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; WSP)
+	 */
+	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+	/*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+		reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
+
+	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		credit_refill = refill[i];
+		credit_max = max[i];
+		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
+
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RTRPT4C_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
+	}
+
+	/*
+	 * Configure Rx packet plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
+	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits per traffic class
+ * @max: max credits per traffic class
+ * @bwg_id: bandwidth group index per traffic class
+ * @tsa: transmission selection algorithm per traffic class
+ *
+ * Configure Tx Descriptor Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+					   u16 *max, u8 *bwg_id, u8 *tsa)
+{
+	u32 reg, max_credits;
+	u8  i;
+
+	/* Clear the per-Tx queue credits; we use per-TC instead */
+	for (i = 0; i < 128; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
+	}
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		max_credits = max[i];
+		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_RTTDT2C_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RTTDT2C_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
+	}
+
+	/*
+	 * Configure Tx descriptor plane (recycle mode; WSP) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
+ * @hw: pointer to hardware structure
+ * @refill: refill credits per traffic class
+ * @max: max credits per traffic class
+ * @bwg_id: bandwidth group index per traffic class
+ * @tsa: transmission selection algorithm per traffic class
+ * @map: user priority to traffic class mapping
+ *
+ * Configure Tx Packet Arbiter and credits for each traffic class.
+ */
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
+					   u16 *max, u8 *bwg_id, u8 *tsa,
+					   u8 *map)
+{
+	u32 reg;
+	u8 i;
+
+	/*
+	 * Disable the arbiter before changing parameters
+	 * (always enable recycle mode; SP; arb delay)
+	 */
+	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
+	      IXGBE_RTTPCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+	/*
+	 * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
+	 * corresponding bits set for the UPs that need to be mapped to that
+	 * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
+	 * up_to_tc_bitmap value for that TC will be 11000000 in binary.
+	 */
+	reg = 0;
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
+		reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
+
+	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);
+
+	/* Configure traffic class credits and priority */
+	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
+
+		if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
+			reg |= IXGBE_RTTPT2C_GSP;
+
+		if (tsa[i] == ixgbe_dcb_tsa_strict)
+			reg |= IXGBE_RTTPT2C_LSP;
+
+		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
+	}
+
+	/*
+	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
+	 * enable arbiter
+	 */
+	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
+	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
+	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
+ * @hw: pointer to hardware structure
+ * @pfc_en: enabled pfc bitmask
+ * @map: priority to tc assignments indexed by priority
+ *
+ * Configure Priority Flow Control (PFC) for each traffic class.
+ */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
+{
+	u32 i, j, fcrtl, reg;
+	u8 max_tc = 0;
+
+	/* Enable Transmit Priority Flow Control */
+	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);
+
+	/* Enable Receive Priority Flow Control */
+	reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+	reg |= IXGBE_MFLCN_DPF;
+
+	/*
+	 * X540 supports per TC Rx priority flow control.  So
+	 * clear all TCs and only enable those that should be
+	 * enabled.
+	 */
+	reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
+
+	if (hw->mac.type == ixgbe_mac_X540)
+		reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
+	if (pfc_en)
+		reg |= IXGBE_MFLCN_RPFCE;
+
+	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
+
+	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
+		if (map[i] > max_tc)
+			max_tc = map[i];
+	}
+
+	/* Configure PFC Tx thresholds per TC */
+	for (i = 0; i <= max_tc; i++) {
+		int enabled = 0;
+
+		for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
+			if ((map[j] == i) && (pfc_en & (1 << j))) {
+				enabled = 1;
+				break;
+			}
+		}
+
+		if (enabled) {
+			reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
+		} else {
+			reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
+			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
+	}
+
+	for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
+	}
+
+	/* Configure pause time (2 TCs per register) */
+	reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
+	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
+		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
+
+	/* Configure flow control refresh threshold value */
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
+ * @hw: pointer to hardware structure
+ *
+ * Configure queue statistics registers; all queues belonging to the same
+ * traffic class use a single set of queue statistics counters.
+ */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
+				    struct ixgbe_dcb_config *dcb_config)
+{
+	u32 reg = 0;
+	u8  i   = 0;
+	u8 tc_count = 8;
+	bool vt_mode = FALSE;
+
+	if (dcb_config != NULL) {
+		tc_count = dcb_config->num_tcs.pg_tcs;
+		vt_mode = dcb_config->vt_mode;
+	}
+
+	if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
+		return IXGBE_ERR_PARAM;
+
+	if (tc_count == 8 && vt_mode == FALSE) {
+		/*
+		 * Receive Queues stats setting
+		 * 32 RQSMR registers, each configuring 4 queues.
+		 *
+		 * Set all 16 queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++) {
+			reg = 0x01010101 * (i / 4);
+			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+		}
+		/*
+		 * Transmit Queues stats setting
+		 * 32 TQSM registers, each controlling 4 queues.
+		 *
+		 * Set all queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 * Tx queues are allocated non-uniformly to TCs:
+		 * 32, 32, 16, 16, 8, 8, 8, 8.
+		 */
+		for (i = 0; i < 32; i++) {
+			if (i < 8)
+				reg = 0x00000000;
+			else if (i < 16)
+				reg = 0x01010101;
+			else if (i < 20)
+				reg = 0x02020202;
+			else if (i < 24)
+				reg = 0x03030303;
+			else if (i < 26)
+				reg = 0x04040404;
+			else if (i < 28)
+				reg = 0x05050505;
+			else if (i < 30)
+				reg = 0x06060606;
+			else
+				reg = 0x07070707;
+			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+		}
+	} else if (tc_count == 4 && vt_mode == FALSE) {
+		/*
+		 * Receive Queues stats setting
+		 * 32 RQSMR registers, each configuring 4 queues.
+		 *
+		 * Set all 16 queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++) {
+			if (i % 8 > 3)
+				/* In 4 TC mode, odd 16-queue ranges are
+				 * not used.
+				 */
+				continue;
+			reg = 0x01010101 * (i / 8);
+			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
+		}
+		/*
+		 * Transmit Queues stats setting
+		 * 32 TQSM registers, each controlling 4 queues.
+		 *
+		 * Set all queues of each TC to the same stat
+		 * with TC 'n' going to stat 'n'.
+		 * Tx queues are allocated non-uniformly to TCs:
+		 * 64, 32, 16, 16.
+		 */
+		for (i = 0; i < 32; i++) {
+			if (i < 16)
+				reg = 0x00000000;
+			else if (i < 24)
+				reg = 0x01010101;
+			else if (i < 28)
+				reg = 0x02020202;
+			else
+				reg = 0x03030303;
+			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
+		}
+	} else if (tc_count == 4 && vt_mode == TRUE) {
+		/*
+		 * Receive Queues stats setting
+		 * 32 RQSMR registers, each configuring 4 queues.
+		 *
+		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+		 * pool. Set all 32 queues of each TC across pools to the same
+		 * stat with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
+		/*
+		 * Transmit Queues stats setting
+		 * 32 TQSM registers, each controlling 4 queues.
+		 *
+		 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
+		 * pool. Set all 32 queues of each TC across pools to the same
+		 * stat with TC 'n' going to stat 'n'.
+		 */
+		for (i = 0; i < 32; i++)
+			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
+	}
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_config_82599 - Configure general DCB parameters
+ * @hw: pointer to hardware structure
+ * @dcb_config: pointer to ixgbe_dcb_config structure
+ *
+ * Configure general DCB parameters.
+ */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
+			   struct ixgbe_dcb_config *dcb_config)
+{
+	u32 reg;
+	u32 q;
+
+	/* Disable the Tx desc arbiter so that MTQC can be changed */
+	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	reg |= IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
+	if (dcb_config->num_tcs.pg_tcs == 8) {
+		/* Enable DCB for Rx with 8 TCs */
+		switch (reg & IXGBE_MRQC_MRQE_MASK) {
+		case 0:
+		case IXGBE_MRQC_RT4TCEN:
+			/* RSS disabled cases */
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RT8TCEN;
+			break;
+		case IXGBE_MRQC_RSSEN:
+		case IXGBE_MRQC_RTRSS4TCEN:
+			/* RSS enabled cases */
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RTRSS8TCEN;
+			break;
+		default:
+			/*
+			 * Unsupported value, assume stale data,
+			 * overwrite with no RSS
+			 */
+			ASSERT(0);
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RT8TCEN;
+		}
+	}
+	if (dcb_config->num_tcs.pg_tcs == 4) {
+		/* We support both VT-on and VT-off with 4 TCs. */
+		if (dcb_config->vt_mode)
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_VMDQRT4TCEN;
+		else
+			reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
+			      IXGBE_MRQC_RTRSS4TCEN;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+	/* Enable DCB for Tx with 8 TCs */
+	if (dcb_config->num_tcs.pg_tcs == 8)
+		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+	else {
+		/* We support both VT-on and VT-off with 4 TCs. */
+		reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		if (dcb_config->vt_mode)
+			reg |= IXGBE_MTQC_VT_ENA;
+	}
+	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+
+	/* Disable drop for all queues */
+	for (q = 0; q < 128; q++)
+		IXGBE_WRITE_REG(hw, IXGBE_QDE,
+				(IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+
+	/* Enable the Tx desc arbiter */
+	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+	reg &= ~IXGBE_RTTDCS_ARBDIS;
+	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+
+	/* Enable Security TX Buffer IFG for DCB */
+	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+	reg |= IXGBE_SECTX_DCB;
+	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
+ * @hw: pointer to hardware structure
+ * @link_speed: current link speed
+ * @refill: refill credits per traffic class
+ * @max: max credits per traffic class
+ * @bwg_id: bandwidth group index per traffic class
+ * @tsa: transmission selection algorithm per traffic class
+ * @map: user priority to traffic class mapping
+ *
+ * Configure dcb settings and enable dcb mode.
+ */
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
+			      u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
+			      u8 *map)
+{
+
+	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
+					  map);
+	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
+					       tsa);
+	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
+					       tsa, map);
+
+	return IXGBE_SUCCESS;
+}
+
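
The receive and transmit data arbiters above build the RTRUP2TC/RTTUP2TC user-priority map by giving every user priority a 3-bit traffic-class field (IXGBE_RTRUP2TC_UP_SHIFT from ixgbe_dcb_82599.h below). A minimal standalone sketch of that packing; the helper name is hypothetical and the priority count of 8 is an assumption of the sketch:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_USER_PRIORITY    8   /* assumed value of IXGBE_DCB_MAX_USER_PRIORITY */
#define IXGBE_RTRUP2TC_UP_SHIFT     3   /* from ixgbe_dcb_82599.h below */

/* Hypothetical helper mirroring the UP-to-TC packing done in
 * ixgbe_dcb_config_rx_arbiter_82599() above. */
static uint32_t
pack_rtrup2tc(const uint8_t map[SKETCH_MAX_USER_PRIORITY])
{
    uint32_t reg = 0;
    int i;

    for (i = 0; i < SKETCH_MAX_USER_PRIORITY; i++)
        reg |= (uint32_t)map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT);
    return (reg);
}

int
main(void)
{
    /* Priorities 0-3 -> TC 0, 4-5 -> TC 1, 6-7 -> TC 2. */
    uint8_t map[SKETCH_MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 2, 2 };

    printf("RTRUP2TC = 0x%08x\n", pack_rtrup2tc(map));
    return (0);
}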

Added: trunk/sys/dev/ixgbe/ixgbe_dcb_82599.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_dcb_82599.h	                        (rev 0)
+++ trunk/sys/dev/ixgbe/ixgbe_dcb_82599.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -0,0 +1,155 @@
+/* $MidnightBSD$ */
+/******************************************************************************
+
+  Copyright (c) 2001-2013, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_dcb_82599.h 252898 2013-07-06 21:38:55Z jfv $*/
+
+#ifndef _IXGBE_DCB_82599_H_
+#define _IXGBE_DCB_82599_H_
+
+/* DCB register definitions */
+#define IXGBE_RTTDCS_TDPAC	0x00000001 /* 0 Round Robin,
+					    * 1 WSP - Weighted Strict Priority
+					    */
+#define IXGBE_RTTDCS_VMPAC	0x00000002 /* 0 Round Robin,
+					    * 1 WRR - Weighted Round Robin
+					    */
+#define IXGBE_RTTDCS_TDRM	0x00000010 /* Transmit Recycle Mode */
+#define IXGBE_RTTDCS_BDPM	0x00400000 /* Bypass Data Pipe - must clear! */
+#define IXGBE_RTTDCS_BPBFSM	0x00800000 /* Bypass PB Free Space - must
+					     * clear!
+					     */
+#define IXGBE_RTTDCS_SPEED_CHG	0x80000000 /* Link speed change */
+
+/* Receive UP2TC mapping */
+#define IXGBE_RTRUP2TC_UP_SHIFT	3
+#define IXGBE_RTRUP2TC_UP_MASK	7
+/* Transmit UP2TC mapping */
+#define IXGBE_RTTUP2TC_UP_SHIFT	3
+
+#define IXGBE_RTRPT4C_MCL_SHIFT	12 /* Offset to Max Credit Limit setting */
+#define IXGBE_RTRPT4C_BWG_SHIFT	9  /* Offset to BWG index */
+#define IXGBE_RTRPT4C_GSP	0x40000000 /* GSP enable bit */
+#define IXGBE_RTRPT4C_LSP	0x80000000 /* LSP enable bit */
+
+#define IXGBE_RDRXCTL_MPBEN	0x00000010 /* DMA config for multiple packet
+					    * buffers enable
+					    */
+#define IXGBE_RDRXCTL_MCEN	0x00000040 /* DMA config for multiple cores
+					    * (RSS) enable
+					    */
+
+/* RTRPCS Bit Masks */
+#define IXGBE_RTRPCS_RRM	0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RTRPCS_RAC	0x00000004
+#define IXGBE_RTRPCS_ARBDIS	0x00000040 /* Arbitration disable bit */
+
+/* RTTDT2C Bit Masks */
+#define IXGBE_RTTDT2C_MCL_SHIFT	12
+#define IXGBE_RTTDT2C_BWG_SHIFT	9
+#define IXGBE_RTTDT2C_GSP	0x40000000
+#define IXGBE_RTTDT2C_LSP	0x80000000
+
+#define IXGBE_RTTPT2C_MCL_SHIFT	12
+#define IXGBE_RTTPT2C_BWG_SHIFT	9
+#define IXGBE_RTTPT2C_GSP	0x40000000
+#define IXGBE_RTTPT2C_LSP	0x80000000
+
+/* RTTPCS Bit Masks */
+#define IXGBE_RTTPCS_TPPAC	0x00000020 /* 0 Round Robin,
+					    * 1 SP - Strict Priority
+					    */
+#define IXGBE_RTTPCS_ARBDIS	0x00000040 /* Arbiter disable */
+#define IXGBE_RTTPCS_TPRM	0x00000100 /* Transmit Recycle Mode enable */
+#define IXGBE_RTTPCS_ARBD_SHIFT	22
+#define IXGBE_RTTPCS_ARBD_DCB	0x4 /* Arbitration delay in DCB mode */
+
+#define IXGBE_TXPBTHRESH_DCB	0xA /* THRESH value for DCB mode */
+
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB		0x00001F00 /* DCB TX Buffer SEC IFG */
+
+/* BCN register definitions */
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT	14
+#define IXGBE_RTTBCNRC_RS_ENA		0x80000000
+
+#define IXGBE_RTTBCNCR_MNG_CMTGI	0x00000001
+#define IXGBE_RTTBCNCR_MGN_BCNA_MODE	0x00000002
+#define IXGBE_RTTBCNCR_RSV7_11_SHIFT	5
+#define IXGBE_RTTBCNCR_G		0x00000400
+#define IXGBE_RTTBCNCR_I		0x00000800
+#define IXGBE_RTTBCNCR_H		0x00001000
+#define IXGBE_RTTBCNCR_VER_SHIFT	14
+#define IXGBE_RTTBCNCR_CMT_ETH_SHIFT	16
+
+#define IXGBE_RTTBCNACL_SMAC_L_SHIFT	16
+
+#define IXGBE_RTTBCNTG_BCNA_MODE	0x80000000
+
+#define IXGBE_RTTBCNRTT_TS_SHIFT	3
+#define IXGBE_RTTBCNRTT_TXQ_IDX_SHIFT	16
+
+#define IXGBE_RTTBCNRD_BCN_CLEAR_ALL	0x00000002
+#define IXGBE_RTTBCNRD_DRIFT_FAC_SHIFT	2
+#define IXGBE_RTTBCNRD_DRIFT_INT_SHIFT	16
+#define IXGBE_RTTBCNRD_DRIFT_ENA	0x80000000
+
+
+/* DCB driver APIs */
+
+/* DCB PFC */
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *);
+
+/* DCB stats */
+s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *,
+				    struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *,
+				 struct ixgbe_hw_stats *, u8);
+s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *,
+				  struct ixgbe_hw_stats *, u8);
+
+/* DCB config arbiters */
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *);
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *,
+					   u8 *, u8 *, u8 *);
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *,
+				      u8 *, u8 *);
+
+/* DCB initialization */
+s32 ixgbe_dcb_config_82599(struct ixgbe_hw *,
+			   struct ixgbe_dcb_config *);
+
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *,
+			      u8 *, u8 *);
+#endif /* _IXGBE_DCB_82599_H_ */
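
The PFC threshold loop in ixgbe_dcb_config_pfc_82599() above only programs real water marks for a traffic class when at least one user priority mapped to it has its bit set in pfc_en. A minimal sketch of that check (names are hypothetical; the priority count of 8 is an assumption):

#include <stdint.h>
#include <stdio.h>

/* Returns non-zero when at least one user priority mapped to traffic
 * class 'tc' has priority flow control enabled, mirroring the inner
 * loop of ixgbe_dcb_config_pfc_82599() above. */
static int
tc_has_pfc(uint8_t tc, uint8_t pfc_en, const uint8_t map[8])
{
    int j;

    for (j = 0; j < 8; j++) {
        if (map[j] == tc && (pfc_en & (1u << j)))
            return (1);
    }
    return (0);
}

int
main(void)
{
    /* Priorities 0-3 -> TC 0, 4-7 -> TC 1; PFC enabled on priority 5 only. */
    uint8_t map[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };

    printf("TC0 PFC: %d, TC1 PFC: %d\n",
        tc_has_pfc(0, 0x20, map), tc_has_pfc(1, 0x20, map));
    return (0);
}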

Modified: trunk/sys/dev/ixgbe/ixgbe_mbx.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_mbx.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_mbx.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
   Copyright (c) 2001-2012, Intel Corporation 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_mbx.c 235528 2012-05-17 00:06:54Z jfv $*/
 
 #include "ixgbe_type.h"
 #include "ixgbe_mbx.h"


Property changes on: trunk/sys/dev/ixgbe/ixgbe_mbx.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_mbx.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_mbx.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_mbx.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_mbx.h 248287 2013-03-14 21:39:39Z jfv $*/
 
 #ifndef _IXGBE_MBX_H_
 #define _IXGBE_MBX_H_
@@ -84,9 +85,21 @@
 #define IXGBE_VF_SET_MAC_ADDR	0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST	0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN	0x04 /* VF requests PF to set VLAN */
+
+/* mailbox API, version 1.0 VF requests */
 #define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
 #define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
 
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES	0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES	1	/* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES	2	/* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN	3	/* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE	4	/* Default queue offset */
+
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN	4
 /* word in permanent address message with the current multicast type */
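
The new mailbox API 1.1 request IXGBE_VF_GET_QUEUES returns its data at the word indices defined above. A hypothetical VF-side sketch of unpacking such a reply once it has been read out of the mailbox (the structure, function and example values are illustrative, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* GET_QUEUES return data indices, as added to ixgbe_mbx.h above. */
#define IXGBE_VF_TX_QUEUES	1
#define IXGBE_VF_RX_QUEUES	2
#define IXGBE_VF_TRANS_VLAN	3
#define IXGBE_VF_DEF_QUEUE	4

struct vf_queue_info {
    uint32_t num_tx_queues;
    uint32_t num_rx_queues;
    uint32_t port_vlan;
    uint32_t default_queue;
};

/* Word 0 of msg[] holds the message header/ack; the payload follows. */
static void
parse_get_queues_reply(const uint32_t *msg, struct vf_queue_info *qi)
{
    qi->num_tx_queues = msg[IXGBE_VF_TX_QUEUES];
    qi->num_rx_queues = msg[IXGBE_VF_RX_QUEUES];
    qi->port_vlan     = msg[IXGBE_VF_TRANS_VLAN];
    qi->default_queue = msg[IXGBE_VF_DEF_QUEUE];
}

int
main(void)
{
    uint32_t msg[5] = { 0, 4, 4, 0, 0 };    /* example reply */
    struct vf_queue_info qi;

    parse_get_queues_reply(msg, &qi);
    printf("tx=%u rx=%u\n", qi.num_tx_queues, qi.num_rx_queues);
    return (0);
}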


Property changes on: trunk/sys/dev/ixgbe/ixgbe_mbx.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_osdep.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_osdep.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_osdep.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_osdep.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 #ifndef _IXGBE_OS_H_
 #define _IXGBE_OS_H_
@@ -69,14 +70,26 @@
 	#define DEBUGOUT1(S,A)      printf(S "\n",A)
 	#define DEBUGOUT2(S,A,B)    printf(S "\n",A,B)
 	#define DEBUGOUT3(S,A,B,C)  printf(S "\n",A,B,C)
+	#define DEBUGOUT4(S,A,B,C,D)  printf(S "\n",A,B,C,D)
+	#define DEBUGOUT5(S,A,B,C,D,E)  printf(S "\n",A,B,C,D,E)
+	#define DEBUGOUT6(S,A,B,C,D,E,F)  printf(S "\n",A,B,C,D,E,F)
 	#define DEBUGOUT7(S,A,B,C,D,E,F,G)  printf(S "\n",A,B,C,D,E,F,G)
+	#define ERROR_REPORT1(S,A)      printf(S "\n",A)
+	#define ERROR_REPORT2(S,A,B)    printf(S "\n",A,B)
+	#define ERROR_REPORT3(S,A,B,C)  printf(S "\n",A,B,C)
 #else
 	#define DEBUGOUT(S)
 	#define DEBUGOUT1(S,A)
 	#define DEBUGOUT2(S,A,B)
 	#define DEBUGOUT3(S,A,B,C)
+	#define DEBUGOUT4(S,A,B,C,D)
+	#define DEBUGOUT5(S,A,B,C,D,E)
 	#define DEBUGOUT6(S,A,B,C,D,E,F)
 	#define DEBUGOUT7(S,A,B,C,D,E,F,G)
+
+	#define ERROR_REPORT1(S,A)
+	#define ERROR_REPORT2(S,A,B)
+	#define ERROR_REPORT3(S,A,B,C)
 #endif
 
 #define FALSE               0
@@ -86,6 +99,9 @@
 #define CMD_MEM_WRT_INVALIDATE          0x0010  /* BIT_4 */
 #define PCI_COMMAND_REGISTER            PCIR_COMMAND
 
+/* Shared code dropped this define. */
+#define IXGBE_INTEL_VENDOR_ID		0x8086
+
 /* Bunch of defines for shared code bogosity */
 #define UNREFERENCED_PARAMETER(_p)
 #define UNREFERENCED_1PARAMETER(_p)
@@ -104,6 +120,7 @@
 typedef uint8_t		u8;
 typedef int8_t		s8;
 typedef uint16_t	u16;
+typedef int16_t		s16;
 typedef uint32_t	u32;
 typedef int32_t		s32;
 typedef uint64_t	u64;
@@ -143,6 +160,25 @@
 #define prefetch(x)
 #endif
 
+/*
+ * Optimized bcopy thanks to Luigi Rizzo's investigative work.  Assumes
+ * non-overlapping regions and 32-byte padding on both src and dst.
+ */
+static __inline int
+ixgbe_bcopy(void *_src, void *_dst, int l)
+{
+	uint64_t *src = _src;
+	uint64_t *dst = _dst;
+
+	for (; l > 0; l -= 32) {
+		*dst++ = *src++;
+		*dst++ = *src++;
+		*dst++ = *src++;
+		*dst++ = *src++;
+	}
+	return (0);
+}
+
 struct ixgbe_osdep
 {
 	bus_space_tag_t    mem_bus_space_tag;
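
The new ixgbe_bcopy() above copies in 32-byte chunks (four 64-bit words per iteration), so callers must supply non-overlapping buffers padded to a multiple of 32 bytes and suitably aligned for 64-bit loads and stores. A self-contained sketch of a safe call; the loop is reproduced here only so the example compiles on its own, and the buffer sizes are illustrative:

#include <stdint.h>

/* Same loop as the ixgbe_bcopy() added above. */
static inline int
bcopy32_sketch(void *_src, void *_dst, int l)
{
    uint64_t *src = _src;
    uint64_t *dst = _dst;

    for (; l > 0; l -= 32) {
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
    }
    return (0);
}

int
main(void)
{
    /* A 40-byte copy still touches 64 bytes, so both buffers are
     * declared as 64-byte (eight uint64_t) arrays. */
    uint64_t src[8] = { 1, 2, 3, 4, 5 };
    uint64_t dst[8] = { 0 };

    bcopy32_sketch(src, dst, 40);
    return ((int)(dst[4] != src[4]));
}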


Property changes on: trunk/sys/dev/ixgbe/ixgbe_osdep.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_phy.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_phy.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_phy.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_phy.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 #include "ixgbe_api.h"
 #include "ixgbe_common.h"
@@ -47,6 +48,8 @@
 static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
 static bool ixgbe_get_i2c_data(u32 *i2cctl);
+static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+					  u8 *sff8472_data);
 
 /**
  *  ixgbe_init_phy_ops_generic - Inits PHY function ptrs
@@ -65,6 +68,8 @@
 	phy->ops.reset = &ixgbe_reset_phy_generic;
 	phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
 	phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+	phy->ops.read_reg_mdi = &ixgbe_read_phy_reg_mdi;
+	phy->ops.write_reg_mdi = &ixgbe_write_phy_reg_mdi;
 	phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
 	phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
 	phy->ops.check_link = NULL;
@@ -71,6 +76,7 @@
 	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
 	phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
 	phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
+	phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic;
 	phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
 	phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
 	phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
@@ -122,8 +128,11 @@
 			}
 		}
 		/* clear value if nothing found */
-		if (status != IXGBE_SUCCESS)
+		if (status != IXGBE_SUCCESS) {
 			hw->phy.addr = 0;
+			ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
+				     "Could not identify valid PHY address");
+		}
 	} else {
 		status = IXGBE_SUCCESS;
 	}
@@ -262,7 +271,8 @@
 
 	if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
 		status = IXGBE_ERR_RESET_FAILED;
-		DEBUGOUT("PHY reset polling failed to complete.\n");
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "PHY reset polling failed to complete.\n");
 	}
 
 out:
@@ -270,7 +280,87 @@
 }
 
 /**
+ *  ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register
+ *  without the SWFW lock
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: 5 bit device type
+ *  @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+		       u16 *phy_data)
+{
+	u32 i, data, command;
+
+	/* Setup and write the address cycle command */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle completed.
+	 * The MDI Command bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/*
+	 * Address cycle complete, setup and write the read
+	 * command
+	 */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+	/*
+	 * Check every 10 usec to see if the address cycle
+	 * completed. The MDI Command bit will clear when the
+	 * operation is complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
+
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
+
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
+
+	/*
+	 * Read operation is complete.  Get the data
+	 * from MSRWD
+	 */
+	data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+	data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+	*phy_data = (u16)(data);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
  *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ *  using the SWFW lock - this function is needed in most cases
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit address of PHY register to read
  *  @phy_data: Pointer to read data from PHY register
@@ -278,10 +368,7 @@
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 			       u32 device_type, u16 *phy_data)
 {
-	u32 command;
-	u32 i;
-	u32 data;
-	s32 status = IXGBE_SUCCESS;
+	s32 status;
 	u16 gssr;
 
 	DEBUGFUNC("ixgbe_read_phy_reg_generic");
@@ -291,85 +378,94 @@
 	else
 		gssr = IXGBE_GSSR_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+		status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+						phy_data);
+		hw->mac.ops.release_swfw_sync(hw, gssr);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
+	}
 
-	if (status == IXGBE_SUCCESS) {
-		/* Setup and write the address cycle command */
-		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+	return status;
+}
 
-		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+/**
+ *  ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ *  without SWFW lock
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 5 bit device type
+ *  @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+				u32 device_type, u16 phy_data)
+{
+	u32 i, command;
 
-		/*
-		 * Check every 10 usec to see if the address cycle completed.
-		 * The MDI Command bit will clear when the operation is
-		 * complete
-		 */
-		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-			usec_delay(10);
+	/* Put the data in the MDI single read and write data register */
+	IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
 
-			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+	/* Setup and write the address cycle command */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
 
-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-				break;
-		}
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-			DEBUGOUT("PHY address command did not complete.\n");
-			status = IXGBE_ERR_PHY;
-		}
+	/*
+	 * Check every 10 usec to see if the address cycle completed.
+	 * The MDI Command bit will clear when the operation is
+	 * complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
 
-		if (status == IXGBE_SUCCESS) {
-			/*
-			 * Address cycle complete, setup and write the read
-			 * command
-			 */
-			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
 
-			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n");
+		return IXGBE_ERR_PHY;
+	}
 
-			/*
-			 * Check every 10 usec to see if the address cycle
-			 * completed. The MDI Command bit will clear when the
-			 * operation is complete
-			 */
-			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-				usec_delay(10);
+	/*
+	 * Address cycle complete, setup and write the write
+	 * command
+	 */
+	command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+		   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+		   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+		   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
 
-				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+	IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
 
-				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-					break;
-			}
+	/*
+	 * Check every 10 usec to see if the address cycle
+	 * completed. The MDI Command bit will clear when the
+	 * operation is complete
+	 */
+	for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+		usec_delay(10);
 
-			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-				DEBUGOUT("PHY read command didn't complete\n");
-				status = IXGBE_ERR_PHY;
-			} else {
-				/*
-				 * Read operation is complete.  Get the data
-				 * from MSRWD
-				 */
-				data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
-				data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
-				*phy_data = (u16)(data);
-			}
-		}
+		command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+		if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+			break;
+	}
 
-		hw->mac.ops.release_swfw_sync(hw, gssr);
+	if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+		ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n");
+		return IXGBE_ERR_PHY;
 	}
 
-	return status;
+	return IXGBE_SUCCESS;
 }
 
 /**
  *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ *  using SWFW lock - this function is needed in most cases
  *  @hw: pointer to hardware structure
  *  @reg_addr: 32 bit PHY register to write
  *  @device_type: 5 bit device type
@@ -378,9 +474,7 @@
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 				u32 device_type, u16 phy_data)
 {
-	u32 command;
-	u32 i;
-	s32 status = IXGBE_SUCCESS;
+	s32 status;
 	u16 gssr;
 
 	DEBUGFUNC("ixgbe_write_phy_reg_generic");
@@ -390,73 +484,12 @@
 	else
 		gssr = IXGBE_GSSR_PHY0_SM;
 
-	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+	if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
+		status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+						 phy_data);
+		hw->mac.ops.release_swfw_sync(hw, gssr);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
-
-	if (status == IXGBE_SUCCESS) {
-		/* Put the data in the MDI single read and write data register*/
-		IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
-		/* Setup and write the address cycle command */
-		command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-			   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-			   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-			   (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
-		IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-		/*
-		 * Check every 10 usec to see if the address cycle completed.
-		 * The MDI Command bit will clear when the operation is
-		 * complete
-		 */
-		for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-			usec_delay(10);
-
-			command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-			if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-				break;
-		}
-
-		if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-			DEBUGOUT("PHY address cmd didn't complete\n");
-			status = IXGBE_ERR_PHY;
-		}
-
-		if (status == IXGBE_SUCCESS) {
-			/*
-			 * Address cycle complete, setup and write the write
-			 * command
-			 */
-			command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
-				   (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
-				   (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
-				   (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
-			IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
-			/*
-			 * Check every 10 usec to see if the address cycle
-			 * completed. The MDI Command bit will clear when the
-			 * operation is complete
-			 */
-			for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
-				usec_delay(10);
-
-				command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
-				if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
-					break;
-			}
-
-			if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
-				DEBUGOUT("PHY address cmd didn't complete\n");
-				status = IXGBE_ERR_PHY;
-			}
-		}
-
-		hw->mac.ops.release_swfw_sync(hw, gssr);
 	}
 
 	return status;
@@ -553,7 +586,8 @@
 
 	if (time_out == max_time_out) {
 		status = IXGBE_ERR_LINK_SETUP;
-		DEBUGOUT("ixgbe_setup_phy_link_generic: time out");
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "PHY autonegotiation time out");
 	}
 
 	return status;
@@ -563,14 +597,12 @@
  *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  **/
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 				       ixgbe_link_speed speed,
-				       bool autoneg,
 				       bool autoneg_wait_to_complete)
 {
-	UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
+	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
 
 	DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
 
@@ -862,6 +894,8 @@
 		 * Read control word from PHY init contents offset
 		 */
 		ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+		if (ret_val)
+			goto err_eeprom;
 		control = (eword & IXGBE_CONTROL_MASK_NL) >>
 			   IXGBE_CONTROL_SHIFT_NL;
 		edata = eword & IXGBE_DATA_MASK_NL;
@@ -874,10 +908,16 @@
 		case IXGBE_DATA_NL:
 			DEBUGOUT("DATA:\n");
 			data_offset++;
-			hw->eeprom.ops.read(hw, data_offset++,
-					    &phy_offset);
+			ret_val = hw->eeprom.ops.read(hw, data_offset,
+						      &phy_offset);
+			if (ret_val)
+				goto err_eeprom;
+			data_offset++;
 			for (i = 0; i < edata; i++) {
-				hw->eeprom.ops.read(hw, data_offset, &eword);
+				ret_val = hw->eeprom.ops.read(hw, data_offset,
+							      &eword);
+				if (ret_val)
+					goto err_eeprom;
 				hw->phy.ops.write_reg(hw, phy_offset,
 						      IXGBE_TWINAX_DEV, eword);
 				DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
@@ -909,6 +949,11 @@
 
 out:
 	return ret_val;
+
+err_eeprom:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", data_offset);
+	return IXGBE_ERR_PHY;
 }
 
 /**
@@ -969,9 +1014,7 @@
 					     IXGBE_SFF_IDENTIFIER,
 					     &identifier);
 
-	if (status == IXGBE_ERR_SWFW_SYNC ||
-	    status == IXGBE_ERR_I2C ||
-	    status == IXGBE_ERR_SFP_NOT_PRESENT)
+	if (status != IXGBE_SUCCESS)
 		goto err_read_i2c_eeprom;
 
 	/* LAN ID is needed for sfp_type determination */
@@ -985,9 +1028,7 @@
 						     IXGBE_SFF_1GBE_COMP_CODES,
 						     &comp_codes_1g);
 
-		if (status == IXGBE_ERR_SWFW_SYNC ||
-		    status == IXGBE_ERR_I2C ||
-		    status == IXGBE_ERR_SFP_NOT_PRESENT)
+		if (status != IXGBE_SUCCESS)
 			goto err_read_i2c_eeprom;
 
 		status = hw->phy.ops.read_i2c_eeprom(hw,
@@ -994,17 +1035,13 @@
 						     IXGBE_SFF_10GBE_COMP_CODES,
 						     &comp_codes_10g);
 
-		if (status == IXGBE_ERR_SWFW_SYNC ||
-		    status == IXGBE_ERR_I2C ||
-		    status == IXGBE_ERR_SFP_NOT_PRESENT)
+		if (status != IXGBE_SUCCESS)
 			goto err_read_i2c_eeprom;
 		status = hw->phy.ops.read_i2c_eeprom(hw,
 						     IXGBE_SFF_CABLE_TECHNOLOGY,
 						     &cable_tech);
 
-		if (status == IXGBE_ERR_SWFW_SYNC ||
-		    status == IXGBE_ERR_I2C ||
-		    status == IXGBE_ERR_SFP_NOT_PRESENT)
+		if (status != IXGBE_SUCCESS)
 			goto err_read_i2c_eeprom;
 
 		 /* ID Module
@@ -1102,9 +1139,7 @@
 						    IXGBE_SFF_VENDOR_OUI_BYTE0,
 						    &oui_bytes[0]);
 
-			if (status == IXGBE_ERR_SWFW_SYNC ||
-			    status == IXGBE_ERR_I2C ||
-			    status == IXGBE_ERR_SFP_NOT_PRESENT)
+			if (status != IXGBE_SUCCESS)
 				goto err_read_i2c_eeprom;
 
 			status = hw->phy.ops.read_i2c_eeprom(hw,
@@ -1111,9 +1146,7 @@
 						    IXGBE_SFF_VENDOR_OUI_BYTE1,
 						    &oui_bytes[1]);
 
-			if (status == IXGBE_ERR_SWFW_SYNC ||
-			    status == IXGBE_ERR_I2C ||
-			    status == IXGBE_ERR_SFP_NOT_PRESENT)
+			if (status != IXGBE_SUCCESS)
 				goto err_read_i2c_eeprom;
 
 			status = hw->phy.ops.read_i2c_eeprom(hw,
@@ -1120,9 +1153,7 @@
 						    IXGBE_SFF_VENDOR_OUI_BYTE2,
 						    &oui_bytes[2]);
 
-			if (status == IXGBE_ERR_SWFW_SYNC ||
-			    status == IXGBE_ERR_I2C ||
-			    status == IXGBE_ERR_SFP_NOT_PRESENT)
+			if (status != IXGBE_SUCCESS)
 				goto err_read_i2c_eeprom;
 
 			vendor_oui =
@@ -1172,7 +1203,7 @@
 		if (comp_codes_10g == 0 &&
 		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
 		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0  ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
 		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
 			hw->phy.type = ixgbe_phy_sfp_unsupported;
 			status = IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1187,10 +1218,10 @@
 
 		ixgbe_get_device_caps(hw, &enforce_sfp);
 		if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
-		    !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
-		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
-		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0)  ||
-		      (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
+		    !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+		      hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
 			/* Make sure we're a supported PHY type */
 			if (hw->phy.type == ixgbe_phy_sfp_intel) {
 				status = IXGBE_SUCCESS;
@@ -1275,7 +1306,12 @@
 		sfp_type = ixgbe_sfp_type_srlr_core1;
 
 	/* Read offset to PHY init contents */
-	hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+	if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			      "eeprom read at offset %d failed",
+			      IXGBE_PHY_INIT_OFFSET_NL);
+		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+	}
 
 	if ((!*list_offset) || (*list_offset == 0xFFFF))
 		return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
@@ -1287,12 +1323,14 @@
 	 * Find the matching SFP ID in the EEPROM
 	 * and program the init sequence
 	 */
-	hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+	if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+		goto err_phy;
 
 	while (sfp_id != IXGBE_PHY_INIT_END_NL) {
 		if (sfp_id == sfp_type) {
 			(*list_offset)++;
-			hw->eeprom.ops.read(hw, *list_offset, data_offset);
+			if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+				goto err_phy;
 			if ((!*data_offset) || (*data_offset == 0xFFFF)) {
 				DEBUGOUT("SFP+ module not supported\n");
 				return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1302,7 +1340,7 @@
 		} else {
 			(*list_offset) += 2;
 			if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
-				return IXGBE_ERR_PHY;
+				goto err_phy;
 		}
 	}
 
@@ -1312,6 +1350,11 @@
 	}
 
 	return IXGBE_SUCCESS;
+
+err_phy:
+	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+		      "eeprom read at offset %d failed", *list_offset);
+	return IXGBE_ERR_PHY;
 }
 
 /**
@@ -1333,6 +1376,22 @@
 }
 
 /**
+ *  ixgbe_read_i2c_sff8472_generic - Reads 8 bit word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset at address 0xA2
+ *  @sff8472_data: value read
+ *
+ *  Performs byte read operation to SFP module's SFF-8472 data over I2C
+ **/
+static s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
+					  u8 *sff8472_data)
+{
+	return hw->phy.ops.read_i2c_byte(hw, byte_offset,
+					 IXGBE_I2C_EEPROM_DEV_ADDR2,
+					 sff8472_data);
+}
+
+/**
  *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
  *  @hw: pointer to hardware structure
  *  @byte_offset: EEPROM byte offset to write
@@ -1425,9 +1484,9 @@
 		break;
 
 fail:
+		ixgbe_i2c_bus_clear(hw);
 		hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 		msec_delay(100);
-		ixgbe_i2c_bus_clear(hw);
 		retry++;
 		if (retry < max_retry)
 			DEBUGOUT("I2C byte read error - Retrying.\n");
@@ -1661,7 +1720,8 @@
 	}
 
 	if (ack == 1) {
-		DEBUGOUT("I2C ack was not received.\n");
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "I2C ack was not received.\n");
 		status = IXGBE_ERR_I2C;
 	}
 
@@ -1731,7 +1791,8 @@
 		usec_delay(IXGBE_I2C_T_LOW);
 	} else {
 		status = IXGBE_ERR_I2C;
-		DEBUGOUT1("I2C data was not set to %X\n", data);
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			     "I2C data was not set to %X\n", data);
 	}
 
 	return status;
@@ -1815,7 +1876,9 @@
 	*i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
 	if (data != ixgbe_get_i2c_data(i2cctl)) {
 		status = IXGBE_ERR_I2C;
-		DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
+			     "Error - I2C data was not set to %X.\n",
+			     data);
 	}
 
 	return status;
@@ -1902,6 +1965,7 @@
 		goto out;
 
 	status = IXGBE_ERR_OVERTEMP;
+	ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature");
 out:
 	return status;
 }


Property changes on: trunk/sys/dev/ixgbe/ixgbe_phy.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
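
The ixgbe_phy.c hunks above stop ignoring the return value of hw->eeprom.ops.read() and route every failed read to a shared err_phy label that reports the failing offset once and returns IXGBE_ERR_PHY. A minimal, self-contained sketch of that shared-error-label shape follows; eeprom_read(), read_init_list() and the offset are stand-ins invented for the example, not the driver's own helpers.

    /* Sketch only: eeprom_read() stands in for hw->eeprom.ops.read()
     * and the fprintf() stands in for ERROR_REPORT2(). */
    #include <stdio.h>

    static int eeprom_read(unsigned offset, unsigned short *data)
    {
        *data = 0x1234;            /* pretend the read worked */
        return 0;                  /* non-zero would mean failure */
    }

    static int read_init_list(unsigned list_offset)
    {
        unsigned short sfp_id, data_offset;

        if (eeprom_read(list_offset, &sfp_id))
            goto err_eeprom;
        if (eeprom_read(list_offset + 1, &data_offset))
            goto err_eeprom;
        return 0;

    err_eeprom:
        fprintf(stderr, "eeprom read at offset %u failed\n", list_offset);
        return -1;
    }

    int main(void)
    {
        return read_init_list(0x2B) ? 1 : 0;
    }
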
Modified: trunk/sys/dev/ixgbe/ixgbe_phy.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_phy.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_phy.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,13 +31,15 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_phy.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 #ifndef _IXGBE_PHY_H_
 #define _IXGBE_PHY_H_
 
 #include "ixgbe_type.h"
-#define IXGBE_I2C_EEPROM_DEV_ADDR    0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR	0xA0
+#define IXGBE_I2C_EEPROM_DEV_ADDR2	0xA2
+#define IXGBE_I2C_EEPROM_BANK_LEN	0xFF
 
 /* EEPROM byte offsets */
 #define IXGBE_SFF_IDENTIFIER		0x0
@@ -48,6 +51,10 @@
 #define IXGBE_SFF_10GBE_COMP_CODES	0x3
 #define IXGBE_SFF_CABLE_TECHNOLOGY	0x8
 #define IXGBE_SFF_CABLE_SPEC_COMP	0x3C
+#define IXGBE_SFF_SFF_8472_SWAP		0x5C
+#define IXGBE_SFF_SFF_8472_COMP		0x5E
+#define IXGBE_SFF_SFF_8472_OSCB		0x6E
+#define IXGBE_SFF_SFF_8472_ESCB		0x76
 
 /* Bitmasks */
 #define IXGBE_SFF_DA_PASSIVE_CABLE	0x4
@@ -58,6 +65,9 @@
 #define IXGBE_SFF_1GBASET_CAPABLE	0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE	0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE	0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK	0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G	0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G	0x0
 #define IXGBE_I2C_EEPROM_READ_MASK	0x100
 #define IXGBE_I2C_EEPROM_STATUS_MASK	0x3
 #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION	0x0
@@ -95,6 +105,14 @@
 #define IXGBE_TN_LASI_STATUS_REG	0x9005
 #define IXGBE_TN_LASI_STATUS_TEMP_ALARM	0x0008
 
+/* SFP+ SFF-8472 Compliance */
+#define IXGBE_SFF_SFF_8472_UNSUP	0x00
+#define IXGBE_SFF_SFF_8472_REV_9_3	0x01
+#define IXGBE_SFF_SFF_8472_REV_9_5	0x02
+#define IXGBE_SFF_SFF_8472_REV_10_2	0x03
+#define IXGBE_SFF_SFF_8472_REV_10_4	0x04
+#define IXGBE_SFF_SFF_8472_REV_11_0	0x05
+
 s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
 bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
 enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@@ -101,6 +119,10 @@
 s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
 s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			   u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+			    u16 phy_data);
 s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
 			       u32 device_type, u16 *phy_data);
 s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
@@ -108,7 +130,6 @@
 s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
 s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 				       ixgbe_link_speed speed,
-				       bool autoneg,
 				       bool autoneg_wait_to_complete);
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 					       ixgbe_link_speed *speed,


Property changes on: trunk/sys/dev/ixgbe/ixgbe_phy.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
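
The new ixgbe_phy.h constants expose the SFP+ module's second I2C device address (IXGBE_I2C_EEPROM_DEV_ADDR2, 0xA2), where SFF-8472 digital diagnostics live, alongside the base ID page at 0xA0. A hedged sketch of how a caller might test the compliance byte at offset 0x5E is below; sfp_read_byte() is a hypothetical stub standing in for the new read_i2c_sff8472 op.

    /* Sketch only: sfp_read_byte() fakes a read from device address 0xA2. */
    #include <stdint.h>
    #include <stdio.h>

    #define SFF_8472_COMP    0x5E   /* compliance revision byte */
    #define SFF_8472_UNSUP   0x00

    static int sfp_read_byte(uint8_t offset, uint8_t *val)
    {
        (void)offset;
        *val = 0x03;                /* pretend: rev 10.2 */
        return 0;
    }

    int main(void)
    {
        uint8_t comp;

        if (sfp_read_byte(SFF_8472_COMP, &comp) == 0 &&
            comp != SFF_8472_UNSUP)
            printf("SFF-8472 diagnostics present, rev code 0x%02x\n", comp);
        return 0;
    }
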
Modified: trunk/sys/dev/ixgbe/ixgbe_type.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_type.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_type.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,11 +31,49 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_type.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 #ifndef _IXGBE_TYPE_H_
 #define _IXGBE_TYPE_H_
 
+/*
+ * The following is a brief description of the error categories used by the
+ * ERROR_REPORT* macros.
+ *
+ * - IXGBE_ERROR_INVALID_STATE
+ * This category is for errors which represent a serious failure state that is
+ * unexpected, and could be potentially harmful to device operation. It should
+ * not be used for errors relating to issues that can be worked around or
+ * ignored.
+ *
+ * - IXGBE_ERROR_POLLING
+ * This category is for errors related to polling/timeout issues and should be
+ * used in any case where a timeout occurred, a lock could not be obtained, or
+ * data was not received within the time limit.
+ *
+ * - IXGBE_ERROR_CAUTION
+ * This category should be used for reporting issues that may be the cause of
+ * other errors, such as temperature warnings. It should indicate an event which
+ * could be serious, but hasn't necessarily caused problems yet.
+ *
+ * - IXGBE_ERROR_SOFTWARE
+ * This category is intended for errors due to software state preventing
+ * something. The category is not intended for errors due to bad arguments, or
+ * due to unsupported features. It should be used when a state occurs which
+ * prevents action but is not a serious issue.
+ *
+ * - IXGBE_ERROR_ARGUMENT
+ * This category is for when a bad or invalid argument is passed. It should be
+ * used whenever a function is called and error checking has detected that the
+ * argument is invalid.
+ *
+ * - IXGBE_ERROR_UNSUPPORTED
+ * This category is for errors which are due to unsupported circumstances or
+ * configuration issues. It should not be used when the issue is due to an
+ * invalid argument, but for when something has occurred that is unsupported
+ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.)
+ */
+
 #include "ixgbe_osdep.h"
 
 
@@ -62,18 +101,27 @@
 #define IXGBE_DEV_ID_82599_CX4			0x10F9
 #define IXGBE_DEV_ID_82599_SFP			0x10FB
 #define IXGBE_SUBDEV_ID_82599_SFP		0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC		0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR		0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP		0x0470
+#define IXGBE_SUBDEV_ID_82599_SP_560FLR		0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SFP		0x8976
 #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE	0x152A
 #define IXGBE_DEV_ID_82599_SFP_FCOE		0x1529
 #define IXGBE_DEV_ID_82599_SFP_EM		0x1507
 #define IXGBE_DEV_ID_82599_SFP_SF2		0x154D
+#define IXGBE_DEV_ID_82599_SFP_SF_QP		0x154A
 #define IXGBE_DEV_ID_82599EN_SFP		0x1557
+#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1	0x0001
 #define IXGBE_DEV_ID_82599_XAUI_LOM		0x10FC
 #define IXGBE_DEV_ID_82599_T3_LOM		0x151C
 #define IXGBE_DEV_ID_82599_VF			0x10ED
+#define IXGBE_DEV_ID_82599_VF_HV		0x152E
+#define IXGBE_DEV_ID_82599_BYPASS		0x155D
+#define IXGBE_DEV_ID_X540T			0x1528
 #define IXGBE_DEV_ID_X540_VF			0x1515
-#define IXGBE_DEV_ID_X540T			0x1528
-#define IXGBE_DEV_ID_X540T1			0x1560
+#define IXGBE_DEV_ID_X540_VF_HV			0x1530
+#define IXGBE_DEV_ID_X540_BYPASS		0x155C
 
 /* General Registers */
 #define IXGBE_CTRL		0x00000
@@ -209,12 +257,12 @@
 				 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
 				 (0x0D00C + (((_i) - 64) * 0x40))))
 #define IXGBE_RDRXCTL		0x02F00
-#define IXGBE_RDRXCTL_RSC_PUSH	0x80
 /* 8 of these 0x03C00 - 0x03C1C */
 #define IXGBE_RXPBSIZE(_i)	(0x03C00 + ((_i) * 4))
 #define IXGBE_RXCTRL		0x03000
 #define IXGBE_DROPEN		0x03D04
 #define IXGBE_RXPBSIZE_SHIFT	10
+#define IXGBE_RXPBSIZE_MASK	0x000FFC00
 
 /* Receive Registers */
 #define IXGBE_RXCSUM		0x05000
@@ -280,6 +328,7 @@
 #define IXGBE_RETA(_i)		(0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
 #define IXGBE_RSSRK(_i)		(0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
 
+
 /* Flow Director registers */
 #define IXGBE_FDIRCTRL	0x0EE00
 #define IXGBE_FDIRHKEY	0x0EE68
@@ -360,11 +409,16 @@
 
 #define IXGBE_WUPL	0x05900
 #define IXGBE_WUPM	0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+
 #define IXGBE_FHFT(_n)	(0x09000 + (_n * 0x100)) /* Flex host filter table */
 /* Ext Flexible Host Filter Table */
 #define IXGBE_FHFT_EXT(_n)	(0x09800 + (_n * 0x100))
 
+/* Four Flexible Filters are supported */
 #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX		4
+
+/* Six Flexible Filters are supported */
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6	6
 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX	2
 
 /* Each Flexible Filter is at most 128 (0x80) bytes in length */
@@ -396,10 +450,11 @@
 #define IXGBE_WUFC_FLX3	0x00080000 /* Flexible Filter 3 Enable */
 #define IXGBE_WUFC_FLX4	0x00100000 /* Flexible Filter 4 Enable */
 #define IXGBE_WUFC_FLX5	0x00200000 /* Flexible Filter 5 Enable */
-#define IXGBE_WUFC_FLX_FILTERS	0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_FLX_FILTERS		0x000F0000 /* Mask for 4 flex filters */
 /* Mask for Ext. flex filters */
 #define IXGBE_WUFC_EXT_FLX_FILTERS	0x00300000
-#define IXGBE_WUFC_ALL_FILTERS	0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_ALL_FILTERS		0x000F00FF /* Mask all 4 flex filters */
+#define IXGBE_WUFC_ALL_FILTERS_6	0x003F00FF /* Mask all 6 flex filters */
 #define IXGBE_WUFC_FLX_OFFSET	16 /* Offset to the Flexible Filters bits */
 
 /* Wake Up Status */
@@ -420,7 +475,6 @@
 #define IXGBE_WUS_FLX5		IXGBE_WUFC_FLX5
 #define IXGBE_WUS_FLX_FILTERS	IXGBE_WUFC_FLX_FILTERS
 
-/* Wake Up Packet Length */
 #define IXGBE_WUPL_LENGTH_MASK	0xFFFF
 
 /* DCB registers */
@@ -437,6 +491,7 @@
 #define IXGBE_TDPT2TCSR(_i)	(0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
 
 
+
 /* Security Control Registers */
 #define IXGBE_SECTXCTRL		0x08800
 #define IXGBE_SECTXSTAT		0x08804
@@ -574,13 +629,12 @@
 #define IXGBE_RTTBCNRTT	0x05150
 #define IXGBE_RTTBCNRD	0x0498C
 
+
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL		0x02410 /* FC User Desc. PTR Low */
 #define IXGBE_FCPTRH		0x02414 /* FC USer Desc. PTR High */
 #define IXGBE_FCBUFF		0x02418 /* FC Buffer Control */
 #define IXGBE_FCDMARW		0x02420 /* FC Receive DMA RW */
-#define IXGBE_FCINVST0		0x03FC0 /* FC Invalid DMA Context Status Reg 0*/
-#define IXGBE_FCINVST(_i)	(IXGBE_FCINVST0 + ((_i) * 4))
 #define IXGBE_FCBUFF_VALID	(1 << 0)   /* DMA Context Valid */
 #define IXGBE_FCBUFF_BUFFSIZE	(3 << 3)   /* User Buffer Size */
 #define IXGBE_FCBUFF_WRCONTX	(1 << 7)   /* 0: Initiator, 1: Target */
@@ -754,11 +808,14 @@
 #define IXGBE_BMCIP_IPADDR_VALID	0x00000002
 
 /* Management Bit Fields and Masks */
+#define IXGBE_MANC_RCV_TCO_EN	0x00020000 /* Rcv TCO packet enable */
 #define IXGBE_MANC_EN_BMC2OS	0x10000000 /* Ena BMC2OS and OS2BMC traffic */
 #define IXGBE_MANC_EN_BMC2OS_SHIFT	28
 
 /* Firmware Semaphore Register */
 #define IXGBE_FWSM_MODE_MASK	0xE
+#define IXGBE_FWSM_TS_ENABLED	0x1
+#define IXGBE_FWSM_FW_MODE_PT	0x4
 
 /* ARC Subsystem registers */
 #define IXGBE_HICR		0x15F00
@@ -874,8 +931,6 @@
 #define IXGBE_RDPROBE		0x02F20
 #define IXGBE_RDMAM		0x02F30
 #define IXGBE_RDMAD		0x02F34
-#define IXGBE_TDSTATCTL		0x07C20
-#define IXGBE_TDSTAT(_i)	(0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
 #define IXGBE_TDHMPN		0x07F08
 #define IXGBE_TDHMPN2		0x082FC
 #define IXGBE_TXDESCIC		0x082CC
@@ -1014,6 +1069,7 @@
 #define IXGBE_RSCCTL_MAXDESC_4	0x04
 #define IXGBE_RSCCTL_MAXDESC_8	0x08
 #define IXGBE_RSCCTL_MAXDESC_16	0x0C
+#define IXGBE_RSCCTL_TS_DIS	0x02
 
 /* RSCDBU Bit Masks */
 #define IXGBE_RSCDBU_RSCSMALDIS_MASK	0x0000007F
@@ -1023,10 +1079,12 @@
 #define IXGBE_RDRXCTL_RDMTS_1_2		0x00000000 /* Rx Desc Min THLD Size */
 #define IXGBE_RDRXCTL_CRCSTRIP		0x00000002 /* CRC Strip */
 #define IXGBE_RDRXCTL_MVMEN		0x00000020
+#define IXGBE_RDRXCTL_RSC_PUSH_DIS	0x00000020
 #define IXGBE_RDRXCTL_DMAIDONE		0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_RSC_PUSH		0x00000080
 #define IXGBE_RDRXCTL_AGGDIS		0x00010000 /* Aggregation disable */
 #define IXGBE_RDRXCTL_RSCFRSTSIZE	0x003E0000 /* RSC First packet size */
-#define IXGBE_RDRXCTL_RSCLLIDIS		0x00800000 /* Disabl RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCLLIDIS		0x00800000 /* Disable RSC compl on LLI*/
 #define IXGBE_RDRXCTL_RSCACKC		0x02000000 /* must set 1 when RSC ena */
 #define IXGBE_RDRXCTL_FCOE_WRFIX	0x04000000 /* must set 1 when RSC ena */
 
@@ -1052,6 +1110,7 @@
 #define IXGBE_CTRL_RST_MASK	(IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
 
 /* FACTPS */
+#define IXGBE_FACTPS_MNGCG	0x20000000 /* Manageability Clock Gated */
 #define IXGBE_FACTPS_LFS	0x40000000 /* LAN Function Select */
 
 /* MHADD Bit Masks */
@@ -1590,6 +1649,7 @@
 #define IXGBE_ESDP_SDP7		0x00000080 /* SDP7 Data Value */
 #define IXGBE_ESDP_SDP0_DIR	0x00000100 /* SDP0 IO direction */
 #define IXGBE_ESDP_SDP1_DIR	0x00000200 /* SDP1 IO direction */
+#define IXGBE_ESDP_SDP2_DIR	0x00000400 /* SDP2 IO direction */
 #define IXGBE_ESDP_SDP3_DIR	0x00000800 /* SDP3 IO direction */
 #define IXGBE_ESDP_SDP4_DIR	0x00001000 /* SDP4 IO direction */
 #define IXGBE_ESDP_SDP5_DIR	0x00002000 /* SDP5 IO direction */
@@ -1668,6 +1728,8 @@
 #define IXGBE_AUTOC2_10G_KR	(0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC2_10G_XFI	(0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
 #define IXGBE_AUTOC2_10G_SFI	(0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK	0x50000000
+#define IXGBE_AUTOC2_LINK_DISABLE_MASK		0x70000000
 
 #define IXGBE_MACC_FLU		0x00000001
 #define IXGBE_MACC_FSV_10G	0x00030000
@@ -1838,8 +1900,10 @@
 #define IXGBE_ETH_LENGTH_OF_ADDRESS	6
 
 #define IXGBE_EEPROM_PAGE_SIZE_MAX	128
-#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT	512 /* words rd in burst */
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT	256 /* words rd in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT	256 /* words wr in burst */
+#define IXGBE_EEPROM_CTRL_2		1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT		2
 
 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
 #define IXGBE_EEPROM_GRANT_ATTEMPTS	1000 /* EEPROM attempts to gain grant */
@@ -1880,6 +1944,18 @@
 #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC	0x0 /* Alt SAN MAC exists */
 #define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN	0x1 /* Alt WWN base exists */
 
+/* FW header offset */
+#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR	0x4
+#define IXGBE_X540_FW_MODULE_MASK			0x7FFF
+/* 4KB multiplier */
+#define IXGBE_X540_FW_MODULE_LENGTH			0x1000
+/* version word 2 (month & day) */
+#define IXGBE_X540_FW_PATCH_VERSION_2		0x5
+/* version word 3 (silicon compatibility & year) */
+#define IXGBE_X540_FW_PATCH_VERSION_3		0x6
+/* version word 4 (major & minor numbers) */
+#define IXGBE_X540_FW_PATCH_VERSION_4		0x7
+
 #define IXGBE_DEVICE_CAPS_WOL_PORT0_1	0x4 /* WoL supported on ports 0 & 1 */
 #define IXGBE_DEVICE_CAPS_WOL_PORT0	0x8 /* WoL supported on port 0 */
 #define IXGBE_DEVICE_CAPS_WOL_MASK	0xC /* Mask for WoL capabilities */
@@ -1902,6 +1978,17 @@
 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC	0x80
 #define IXGBE_PCI_DEVICE_CONTROL2_16ms	0x0005
 
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def	0x0
+#define IXGBE_PCIDEVCTRL2_50_100us	0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms		0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms	0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms	0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms	0x9
+#define IXGBE_PCIDEVCTRL2_1_2s		0xa
+#define IXGBE_PCIDEVCTRL2_4_8s		0xd
+#define IXGBE_PCIDEVCTRL2_17_34s	0xe
+
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT	800
 
@@ -2331,6 +2418,14 @@
 #define IXGBE_FDIR_DROP_QUEUE			127
 
 #define IXGBE_STATUS_OVERHEATING_BIT		20 /* STATUS overtemp bit num */
+/* iTS sensor related defines */
+#define IXGBE_TEMP_STATUS_ADDR_X540		0xC830
+#define IXGBE_TEMP_VALUE_ADDR_X540		0xC820
+#define IXGBE_TEMP_PROV_2_ADDR_X540		0xC421
+#define IXGBE_TEMP_PROV_4_ADDR_X540		0xC423
+#define IXGBE_TEMP_STATUS_PAGE_X540		0x1E
+#define IXGBE_TEMP_HIGH_FAILURE_BIT_X540	0xE
+#define IXGBE_TEMP_HIGH_WARNING_BIT_X540	0xC
 
 /* Manageability Host Interface defines */
 #define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH	1792 /* Num of bytes in range */
@@ -2524,7 +2619,6 @@
 					 IXGBE_LINK_SPEED_1GB_FULL | \
 					 IXGBE_LINK_SPEED_10GB_FULL)
 
-
 /* Physical layer type */
 typedef u32 ixgbe_physical_layer;
 #define IXGBE_PHYSICAL_LAYER_UNKNOWN		0
@@ -2757,6 +2851,7 @@
 enum ixgbe_media_type {
 	ixgbe_media_type_unknown = 0,
 	ixgbe_media_type_fiber,
+	ixgbe_media_type_fiber_fixed,
 	ixgbe_media_type_copper,
 	ixgbe_media_type_backplane,
 	ixgbe_media_type_cx4,
@@ -2975,7 +3070,7 @@
 	void (*disable_tx_laser)(struct ixgbe_hw *);
 	void (*enable_tx_laser)(struct ixgbe_hw *);
 	void (*flap_tx_laser)(struct ixgbe_hw *);
-	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+	s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
 	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
 	s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
 				     bool *);
@@ -3016,6 +3111,10 @@
 
 	/* Manageability interface */
 	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+	s32 (*dmac_config)(struct ixgbe_hw *hw);
+	s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
+	s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+	void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
 };
 
 struct ixgbe_phy_operations {
@@ -3025,13 +3124,15 @@
 	s32 (*reset)(struct ixgbe_hw *);
 	s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
 	s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+	s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+	s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
 	s32 (*setup_link)(struct ixgbe_hw *);
-	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
-				bool);
+	s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
 	s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
 	s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
 	s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
 	s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+	s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
 	s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
 	s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
 	void (*i2c_bus_clear)(struct ixgbe_hw *);
@@ -3069,7 +3170,9 @@
 	u32 max_tx_queues;
 	u32 max_rx_queues;
 	u32 orig_autoc;
+	u32 cached_autoc;
 	u8  san_mac_rar_index;
+	bool get_link_status;
 	u32 orig_autoc2;
 	u16 max_msix_vectors;
 	bool arc_subsystem_valid;
@@ -3142,8 +3245,11 @@
 	u16 subsystem_vendor_id;
 	u8 revision_id;
 	bool adapter_stopped;
+	int api_version;
 	bool force_full_reset;
 	bool allow_unsupported_sfp;
+	bool mng_fw_enabled;
+	bool wol_enabled;
 };
 
 #define ixgbe_call_func(hw, func, params, error) \
@@ -3185,8 +3291,9 @@
 #define IXGBE_ERR_INVALID_ARGUMENT		-32
 #define IXGBE_ERR_HOST_INTERFACE_COMMAND	-33
 #define IXGBE_ERR_OUT_OF_MEM			-34
+#define IXGBE_ERR_FEATURE_NOT_SUPPORTED		-36
+#define IXGBE_ERR_EEPROM_PROTECTED_REGION	-37
 
 #define IXGBE_NOT_IMPLEMENTED			0x7FFFFFFF
 
-
 #endif /* _IXGBE_TYPE_H_ */


Property changes on: trunk/sys/dev/ixgbe/ixgbe_type.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
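
The new comment block at the top of ixgbe_type.h only documents the error categories; the ERROR_REPORT* macros themselves are defined elsewhere in the driver. As a rough illustration of the shape such category-tagged reporting takes (the macro name, enum and output format below are invented for the sketch, not the driver's):

    /* Illustrative stand-in only -- not the driver's ERROR_REPORT1/2.
     * Uses a GNU-style variadic macro (gcc/clang). */
    #include <stdio.h>

    enum err_category {
        ERR_INVALID_STATE,
        ERR_POLLING,
        ERR_CAUTION,
    };

    #define REPORT(cat, fmt, ...) \
        fprintf(stderr, "ixgbe error [%d]: " fmt "\n", (int)(cat), ##__VA_ARGS__)

    int main(void)
    {
        REPORT(ERR_POLLING, "Reset polling failed to complete.");
        REPORT(ERR_INVALID_STATE, "eeprom read at offset %d failed", 0x2B);
        return 0;
    }
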
Modified: trunk/sys/dev/ixgbe/ixgbe_vf.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_vf.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_vf.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_vf.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 
 #include "ixgbe_api.h"
@@ -142,6 +143,7 @@
 	/* Call adapter stop to disable tx/rx and clear interrupts */
 	hw->mac.ops.stop_adapter(hw);
 
+
 	DEBUGOUT("Issuing a function level reset to MAC\n");
 
 	ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
@@ -156,35 +158,34 @@
 		usec_delay(5);
 	}
 
-	if (timeout) {
-		/* mailbox timeout can now become active */
-		mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+	if (!timeout)
+		return IXGBE_ERR_RESET_FAILED;
 
-		msgbuf[0] = IXGBE_VF_RESET;
-		mbx->ops.write_posted(hw, msgbuf, 1, 0);
+	/* mailbox timeout can now become active */
+	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
 
-		msec_delay(10);
+	msgbuf[0] = IXGBE_VF_RESET;
+	mbx->ops.write_posted(hw, msgbuf, 1, 0);
 
-		/*
-		 * set our "perm_addr" based on info provided by PF
-		 * also set up the mc_filter_type which is piggy backed
-		 * on the mac address in word 3
-		 */
-		ret_val = mbx->ops.read_posted(hw, msgbuf,
-					       IXGBE_VF_PERMADDR_MSG_LEN, 0);
-		if (!ret_val) {
-			if (msgbuf[0] == (IXGBE_VF_RESET |
-					  IXGBE_VT_MSGTYPE_ACK)) {
-				memcpy(hw->mac.perm_addr, addr,
-				       IXGBE_ETH_LENGTH_OF_ADDRESS);
-				hw->mac.mc_filter_type =
-					msgbuf[IXGBE_VF_MC_TYPE_WORD];
-			} else {
-				ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
-			}
-		}
-	}
+	msec_delay(10);
 
+	/*
+	 * set our "perm_addr" based on info provided by PF
+	 * also set up the mc_filter_type which is piggy backed
+	 * on the mac address in word 3
+	 */
+	ret_val = mbx->ops.read_posted(hw, msgbuf,
+			IXGBE_VF_PERMADDR_MSG_LEN, 0);
+	if (ret_val)
+		return ret_val;
+
+	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
+	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+		return IXGBE_ERR_INVALID_MAC_ADDR;
+
+	memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
 	return ret_val;
 }
 
@@ -272,6 +273,17 @@
 	return vector;
 }
 
+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+					u32 *msg, u16 size)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+	s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+
+	if (!retval)
+		mbx->ops.read_posted(hw, retmsg, size, 0);
+}
+
 /**
  *  ixgbe_set_rar_vf - set device MAC address
  *  @hw: pointer to hardware structure
@@ -463,11 +475,10 @@
  *
  *  Set the link speed in the AUTOC register and restarts link.
  **/
-s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
-			    ixgbe_link_speed speed, bool autoneg,
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
 			    bool autoneg_wait_to_complete)
 {
-	UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
+	UNREFERENCED_3PARAMETER(hw, speed, autoneg_wait_to_complete);
 	return IXGBE_SUCCESS;
 }
 
@@ -483,23 +494,26 @@
 s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 			    bool *link_up, bool autoneg_wait_to_complete)
 {
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	struct ixgbe_mac_info *mac = &hw->mac;
+	s32 ret_val = IXGBE_SUCCESS;
 	u32 links_reg;
+	u32 in_msg = 0;
 	UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
 
-	if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
-		*link_up = FALSE;
-		*speed = 0;
-		return -1;
-	}
+	/* If we were hit with a reset drop the link */
+	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+		mac->get_link_status = TRUE;
 
-	links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
+	if (!mac->get_link_status)
+		goto out;
 
-	if (links_reg & IXGBE_LINKS_UP)
-		*link_up = TRUE;
-	else
-		*link_up = FALSE;
+	/* if link status is down no point in checking to see if pf is up */
+	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+	if (!(links_reg & IXGBE_LINKS_UP))
+		goto out;
 
-	switch (links_reg & IXGBE_LINKS_SPEED_10G_82599) {
+	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
 	case IXGBE_LINKS_SPEED_10G_82599:
 		*speed = IXGBE_LINK_SPEED_10GB_FULL;
 		break;
@@ -511,6 +525,87 @@
 		break;
 	}
 
+	/* if the read failed it could just be a mailbox collision, best wait
+	 * until we are called again and don't report an error
+	 */
+	if (mbx->ops.read(hw, &in_msg, 1, 0))
+		goto out;
+
+	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+	/* msg is not CTS and is a NACK, so we must have lost CTS status */
+		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+			ret_val = -1;
+		goto out;
+	}
+
+	/* the pf is talking, if we timed out in the past we reinit */
+	if (!mbx->timeout) {
+		ret_val = -1;
+		goto out;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link
+	 */
+	mac->get_link_status = FALSE;
+
+out:
+	*link_up = !mac->get_link_status;
+	return ret_val;
+}
+
+/**
+ *  ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ **/
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+	u32 msgbuf[2];
+
+	msgbuf[0] = IXGBE_VF_SET_LPE;
+	msgbuf[1] = max_size;
+	ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+}
+
+/**
+ *  ixgbevf_negotiate_api_version - Negotiate supported API version
+ *  @hw: pointer to the HW structure
+ *  @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+	int err;
+	u32 msg[3];
+
+	/* Negotiate the mailbox API version */
+	msg[0] = IXGBE_VF_API_NEGOTIATE;
+	msg[1] = api;
+	msg[2] = 0;
+	err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
+
+	if (!err)
+		err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
+
+	if (!err) {
+		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+		/* Store value and return 0 on success */
+		if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+			hw->api_version = api;
+			return 0;
+		}
+
+		err = IXGBE_ERR_INVALID_ARGUMENT;
+	}
+
+	return err;
+}
+
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc)
+{
+	UNREFERENCED_3PARAMETER(hw, num_tcs, default_tc);
 	return IXGBE_SUCCESS;
 }
 


Property changes on: trunk/sys/dev/ixgbe/ixgbe_vf.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
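
Several of the ixgbe_vf.c additions (ixgbevf_write_msg_read_ack(), ixgbevf_negotiate_api_version()) follow the same handshake: post a message to the PF, read the posted reply back into the same buffer, and check the ACK bit. A compressed sketch with stub mailbox helpers; mbx_write_posted(), mbx_read_posted() and MSG_ACK are stand-ins, not the driver's names.

    /* Sketch only: the mailbox stubs below fake a PF that ACKs. */
    #include <stdint.h>
    #include <stdio.h>

    #define MSG_ACK  0x80000000u

    static int mbx_write_posted(const uint32_t *msg, int len)
    {
        (void)msg; (void)len;
        return 0;                        /* pretend the PF picked it up */
    }

    static int mbx_read_posted(uint32_t *msg, int len)
    {
        (void)len;
        msg[0] |= MSG_ACK;               /* pretend the PF ACKed it */
        return 0;
    }

    static int send_and_ack(uint32_t *msg, int len)
    {
        if (mbx_write_posted(msg, len) || mbx_read_posted(msg, len))
            return -1;                   /* write or read timed out */
        return (msg[0] & MSG_ACK) ? 0 : -1;
    }

    int main(void)
    {
        uint32_t msg[3] = { 0x01 /* request id */, 0, 0 };

        printf("negotiation %s\n", send_and_ack(msg, 3) ? "failed" : "acked");
        return 0;
    }
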
Modified: trunk/sys/dev/ixgbe/ixgbe_vf.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_vf.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_vf.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_vf.h 248287 2013-03-14 21:39:39Z jfv $*/
 
 #ifndef __IXGBE_VF_H__
 #define __IXGBE_VF_H__
@@ -39,6 +40,9 @@
 #define IXGBE_VF_MAX_TX_QUEUES	8
 #define IXGBE_VF_MAX_RX_QUEUES	8
 
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS	8
+
 #define IXGBE_VFCTRL		0x00000
 #define IXGBE_VFSTATUS		0x00008
 #define IXGBE_VFLINKS		0x00010
@@ -117,7 +121,7 @@
 u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
 s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
 s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			    bool autoneg, bool autoneg_wait_to_complete);
+			    bool autoneg_wait_to_complete);
 s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 			    bool *link_up, bool autoneg_wait_to_complete);
 s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
@@ -127,5 +131,9 @@
 				 u32 mc_addr_count, ixgbe_mc_addr_itr,
 				 bool clear);
 s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+		       unsigned int *default_tc);
 
 #endif /* __IXGBE_VF_H__ */


Property changes on: trunk/sys/dev/ixgbe/ixgbe_vf.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixgbe_x540.c
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_x540.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_x540.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_x540.c 252898 2013-07-06 21:38:55Z jfv $*/
 
 #include "ixgbe_x540.h"
 #include "ixgbe_type.h"
@@ -38,7 +39,6 @@
 #include "ixgbe_common.h"
 #include "ixgbe_phy.h"
 
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
 static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
@@ -116,6 +116,7 @@
 	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
 	mac->ops.check_link = &ixgbe_check_mac_link_generic;
 
+
 	mac->mcft_size		= 128;
 	mac->vft_size		= 128;
 	mac->num_rar_entries	= 128;
@@ -141,6 +142,8 @@
 	/* Manageability interface */
 	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
 
+	mac->ops.get_rtrup2tc = &ixgbe_dcb_get_rtrup2tc_generic;
+
 	return ret_val;
 }
 
@@ -177,16 +180,14 @@
  *  ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
  *  @hw: pointer to hardware structure
  *  @speed: new link speed
- *  @autoneg: TRUE if autonegotiation enabled
  *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
  **/
 s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
-			      ixgbe_link_speed speed, bool autoneg,
+			      ixgbe_link_speed speed,
 			      bool autoneg_wait_to_complete)
 {
 	DEBUGFUNC("ixgbe_setup_mac_link_X540");
-	return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
-					    autoneg_wait_to_complete);
+	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete);
 }
 
 /**
@@ -227,7 +228,8 @@
 
 	if (ctrl & IXGBE_CTRL_RST_MASK) {
 		status = IXGBE_ERR_RESET_FAILED;
-		DEBUGOUT("Reset polling failed to complete.\n");
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Reset polling failed to complete.\n");
 	}
 	msec_delay(100);
 
@@ -373,12 +375,13 @@
 
 	DEBUGFUNC("ixgbe_read_eerd_X540");
 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS)
+	    IXGBE_SUCCESS) {
 		status = ixgbe_read_eerd_generic(hw, offset, data);
-	else
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
+	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
 }
 
@@ -398,13 +401,14 @@
 
 	DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS)
+	    IXGBE_SUCCESS) {
 		status = ixgbe_read_eerd_buffer_generic(hw, offset,
 							words, data);
-	else
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
+	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
 }
 
@@ -422,12 +426,13 @@
 
 	DEBUGFUNC("ixgbe_write_eewr_X540");
 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS)
+	    IXGBE_SUCCESS) {
 		status = ixgbe_write_eewr_generic(hw, offset, data);
-	else
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
+	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
 }
 
@@ -447,13 +452,14 @@
 
 	DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
 	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-	    IXGBE_SUCCESS)
+	    IXGBE_SUCCESS) {
 		status = ixgbe_write_eewr_buffer_generic(hw, offset,
 							 words, data);
-	else
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
+	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	return status;
 }
 
@@ -579,17 +585,20 @@
 		 * Verify read checksum from EEPROM is the same as
 		 * calculated checksum
 		 */
-		if (read_checksum != checksum)
+		if (read_checksum != checksum) {
 			status = IXGBE_ERR_EEPROM_CHECKSUM;
+			ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+				     "Invalid EEPROM checksum");
+		}
 
 		/* If the user cares, return the calculated checksum */
 		if (checksum_val)
 			*checksum_val = checksum;
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
 	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
 out:
 	return status;
 }
@@ -630,14 +639,13 @@
 		status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
 						  checksum);
 
-	if (status == IXGBE_SUCCESS)
-		status = ixgbe_update_flash_X540(hw);
-	else
+		if (status == IXGBE_SUCCESS)
+			status = ixgbe_update_flash_X540(hw);
+		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+	} else {
 		status = IXGBE_ERR_SWFW_SYNC;
 	}
 
-	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
 	return status;
 }
 
@@ -648,7 +656,7 @@
  *  Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
  *  EEPROM from shadow RAM to the flash device.
  **/
-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
 {
 	u32 flup;
 	s32 status = IXGBE_ERR_EEPROM;
@@ -670,7 +678,7 @@
 	else
 		DEBUGOUT("Flash update time out\n");
 
-	if (hw->revision_id == 0) {
+	if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) {
 		flup = IXGBE_READ_REG(hw, IXGBE_EEC);
 
 		if (flup & IXGBE_EEC_SEC1VAL) {
@@ -711,6 +719,11 @@
 		}
 		usec_delay(5);
 	}
+
+	if (i == IXGBE_FLUDONE_ATTEMPTS)
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Flash update status polling timed out");
+
 	return status;
 }
 
@@ -756,7 +769,6 @@
 			swfw_sync |= swmask;
 			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
 			ixgbe_release_swfw_sync_semaphore(hw);
-			msec_delay(5);
 			goto out;
 		} else {
 			/*
@@ -772,11 +784,13 @@
 	/* Failed to get SW only semaphore */
 	if (swmask == IXGBE_GSSR_SW_MNG_SM) {
 		ret_val = IXGBE_ERR_SWFW_SYNC;
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Failed to get SW only semaphore");
 		goto out;
 	}
 
 	/* If the resource is not released by the FW/HW the SW can assume that
-	 * the FW/HW malfunctions. In that case the SW should sets the SW bit(s)
+	 * the FW/HW malfunctions. In that case the SW should set the SW bit(s)
 	 * of the requested resource(s) while ignoring the corresponding FW/HW
 	 * bits in the SW_FW_SYNC register.
 	 */
@@ -792,6 +806,17 @@
 		ixgbe_release_swfw_sync_semaphore(hw);
 		msec_delay(5);
 	}
+	/* If the resource is not released by other SW the SW can assume that
+	 * the other SW malfunctions. In that case the SW should clear all SW
+	 * flags that it does not own and then repeat the whole process once
+	 * again.
+	 */
+	else if (swfw_sync & swmask) {
+		ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM |
+			IXGBE_GSSR_PHY0_SM | IXGBE_GSSR_PHY1_SM |
+			IXGBE_GSSR_MAC_CSR_SM);
+		ret_val = IXGBE_ERR_SWFW_SYNC;
+	}
 
 out:
 	return ret_val;
@@ -819,7 +844,6 @@
 	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
 
 	ixgbe_release_swfw_sync_semaphore(hw);
-	msec_delay(5);
 }
 
 /**
@@ -866,14 +890,15 @@
 		 * was not granted because we don't have access to the EEPROM
 		 */
 		if (i >= timeout) {
-			DEBUGOUT("REGSMP Software NVM semaphore not "
-				 "granted.\n");
+			ERROR_REPORT1(IXGBE_ERROR_POLLING,
+				"REGSMP Software NVM semaphore not granted.\n");
 			ixgbe_release_swfw_sync_semaphore(hw);
 			status = IXGBE_ERR_EEPROM;
 		}
 	} else {
-		DEBUGOUT("Software semaphore SMBI between device drivers "
-			 "not granted.\n");
+		ERROR_REPORT1(IXGBE_ERROR_POLLING,
+			     "Software semaphore SMBI between device drivers "
+			     "not granted.\n");
 	}
 
 	return status;
@@ -973,3 +998,4 @@
 	return IXGBE_SUCCESS;
 }
 
+


Property changes on: trunk/sys/dev/ixgbe/ixgbe_x540.c
___________________________________________________________________
Deleted: svn:executable
## -1 +0,0 ##
-*
\ No newline at end of property
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
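
The recurring change in the X540 EEPROM helpers is lock hygiene: release_swfw_sync() now runs only on the branch that actually acquired the semaphore, instead of unconditionally at the end. A minimal sketch of that shape, with acquire_sem(), release_sem() and do_read() as trivial stubs invented for the example:

    /* Sketch only: the three helpers below are stubs. */
    static int acquire_sem(void)  { return 0; }   /* 0 = got the lock */
    static void release_sem(void) { }
    static int do_read(unsigned off, unsigned short *data)
    {
        (void)off;
        *data = 0;
        return 0;
    }

    static int locked_read(unsigned offset, unsigned short *data)
    {
        int status;

        if (acquire_sem() != 0)
            return -1;          /* never held the lock, nothing to release */

        status = do_read(offset, data);
        release_sem();          /* released exactly once, by the owner */
        return status;
    }

    int main(void)
    {
        unsigned short word;

        return locked_read(0x3F, &word);
    }
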
Modified: trunk/sys/dev/ixgbe/ixgbe_x540.h
===================================================================
--- trunk/sys/dev/ixgbe/ixgbe_x540.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixgbe_x540.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixgbe_x540.h 252898 2013-07-06 21:38:55Z jfv $*/
 
 #ifndef _IXGBE_X540_H_
 #define _IXGBE_X540_H_
@@ -41,7 +42,7 @@
 				     ixgbe_link_speed *speed, bool *autoneg);
 enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
 s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
-			      bool autoneg, bool link_up_wait_to_complete);
+			      bool link_up_wait_to_complete);
 s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
 u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
@@ -56,6 +57,7 @@
 s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
 u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 
 s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
 void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
@@ -63,3 +65,4 @@
 s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
 #endif /* _IXGBE_X540_H_ */
+


Property changes on: trunk/sys/dev/ixgbe/ixgbe_x540.h
___________________________________________________________________
Deleted: svn:executable
## -1 +0,0 ##
-*
\ No newline at end of property
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
Modified: trunk/sys/dev/ixgbe/ixv.c
===================================================================
--- trunk/sys/dev/ixgbe/ixv.c	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixv.c	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,6 +1,7 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
-  Copyright (c) 2001-2012, Intel Corporation 
+  Copyright (c) 2001-2013, Intel Corporation 
   All rights reserved.
   
   Redistribution and use in source and binary forms, with or without 
@@ -30,13 +31,10 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixv.c 254573 2013-08-20 17:50:30Z jfv $*/
 
-#ifdef HAVE_KERNEL_OPTION_HEADERS
 #include "opt_inet.h"
 #include "opt_inet6.h"
-#endif
-
 #include "ixv.h"
 
 /*********************************************************************
@@ -169,7 +167,7 @@
 	DEVMETHOD(device_attach, ixv_attach),
 	DEVMETHOD(device_detach, ixv_detach),
 	DEVMETHOD(device_shutdown, ixv_shutdown),
-	{0, 0}
+	DEVMETHOD_END
 };
 
 static driver_t ixv_driver = {
@@ -299,11 +297,6 @@
 
 	INIT_DEBUGOUT("ixv_attach: begin");
 
-	if (resource_disabled("ixgbe", device_get_unit(dev))) {
-		device_printf(dev, "Disabled by device hint\n");
-		return (ENXIO);
-	}
-
 	/* Allocate, clear, and link in our adapter structure */
 	adapter = device_get_softc(dev);
 	adapter->dev = adapter->osdep.dev = dev;
@@ -624,24 +617,27 @@
 		ixv_txeof(txr);
 
 	enqueued = 0;
-	if (m == NULL) {
-		next = drbr_dequeue(ifp, txr->br);
-	} else if (drbr_needs_enqueue(ifp, txr->br)) {
-		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+	if (m != NULL) {
+		err = drbr_enqueue(ifp, txr->br, m);
+		if (err) {
 			return (err);
-		next = drbr_dequeue(ifp, txr->br);
-	} else
-		next = m;
-
+		}
+	}
 	/* Process the queue */
-	while (next != NULL) {
+	while ((next = drbr_peek(ifp, txr->br)) != NULL) {
 		if ((err = ixv_xmit(txr, &next)) != 0) {
-			if (next != NULL)
-				err = drbr_enqueue(ifp, txr->br, next);
+			if (next == NULL) {
+				drbr_advance(ifp, txr->br);
+			} else {
+				drbr_putback(ifp, txr->br, next);
+			}
 			break;
 		}
+		drbr_advance(ifp, txr->br);
 		enqueued++;
-		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+		ifp->if_obytes += next->m_pkthdr.len;
+		if (next->m_flags & M_MCAST)
+			ifp->if_omcasts++;
 		/* Send a copy of the frame to the BPF listener */
 		ETHER_BPF_MTAP(ifp, next);
 		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
@@ -650,7 +646,6 @@
 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 			break;
 		}
-		next = drbr_dequeue(ifp, txr->br);
 	}
 
 	if (enqueued > 0) {
@@ -1238,7 +1233,7 @@
 	if (error == EFBIG) {
 		struct mbuf *m;
 
-		m = m_defrag(*m_headp, M_DONTWAIT);
+		m = m_defrag(*m_headp, M_NOWAIT);
 		if (m == NULL) {
 			adapter->mbuf_defrag_failed++;
 			m_freem(*m_headp);
@@ -1567,14 +1562,8 @@
 	** Make sure BUSMASTER is set, on a VM under
 	** KVM it may not be and will break things.
 	*/
+	pci_enable_busmaster(dev);
 	pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
-	if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
-	    (pci_cmd_word & PCIM_CMD_MEMEN))) {
-		INIT_DEBUGOUT("Memory Access and/or Bus Master "
-		    "bits were not set!\n");
-		pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
-		pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
-	}
 
 	/* Save off the information about this board */
 	adapter->hw.vendor_id = pci_get_vendor(dev);
@@ -1692,7 +1681,7 @@
 ixv_setup_msix(struct adapter *adapter)
 {
 	device_t dev = adapter->dev;
-	int rid, vectors, want = 2;
+	int rid, want;
 
 
 	/* First try MSI/X */
@@ -1699,30 +1688,30 @@
 	rid = PCIR_BAR(3);
 	adapter->msix_mem = bus_alloc_resource_any(dev,
 	    SYS_RES_MEMORY, &rid, RF_ACTIVE);
-       	if (!adapter->msix_mem) {
+       	if (adapter->msix_mem == NULL) {
 		device_printf(adapter->dev,
 		    "Unable to map MSIX table \n");
 		goto out;
 	}
 
-	vectors = pci_msix_count(dev); 
-	if (vectors < 2) {
-		bus_release_resource(dev, SYS_RES_MEMORY,
-		    rid, adapter->msix_mem);
-		adapter->msix_mem = NULL;
-		goto out;
-	}
-
 	/*
 	** Want two vectors: one for a queue,
 	** plus an additional for mailbox.
 	*/
-	if (pci_alloc_msix(dev, &want) == 0) {
+	want = 2;
+	if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
                	device_printf(adapter->dev,
 		    "Using MSIX interrupts with %d vectors\n", want);
 		return (want);
 	}
+	/* Release in case alloc was insufficient */
+	pci_release_msi(dev);
 out:
+       	if (adapter->msix_mem != NULL) {
+		bus_release_resource(dev, SYS_RES_MEMORY,
+		    rid, adapter->msix_mem);
+		adapter->msix_mem = NULL;
+	}
 	device_printf(adapter->dev,"MSIX config error\n");
 	return (ENXIO);
 }
@@ -1893,7 +1882,6 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32	autoneg, err = 0;
-	bool	negotiate = TRUE;
 
 	if (hw->mac.ops.check_link)
 		err = hw->mac.ops.check_link(hw, &autoneg,
@@ -1902,8 +1890,8 @@
 		goto out;
 
 	if (hw->mac.ops.setup_link)
-               	err = hw->mac.ops.setup_link(hw, autoneg,
-		    negotiate, adapter->link_up);
+               	err = hw->mac.ops.setup_link(hw,
+		    autoneg, adapter->link_up);
 out:
 	return;
 }
@@ -2726,7 +2714,7 @@
 	while (j != limit) {
 		rxbuf = &rxr->rx_buffers[i];
 		if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
-			mh = m_gethdr(M_DONTWAIT, MT_DATA);
+			mh = m_gethdr(M_NOWAIT, MT_DATA);
 			if (mh == NULL)
 				goto update;
 			mh->m_pkthdr.len = mh->m_len = MHLEN;
@@ -2750,7 +2738,7 @@
 		}
 
 		if (rxbuf->m_pack == NULL) {
-			mp = m_getjcl(M_DONTWAIT, MT_DATA,
+			mp = m_getjcl(M_NOWAIT, MT_DATA,
 			    M_PKTHDR, adapter->rx_mbuf_sz);
 			if (mp == NULL)
 				goto update;


Property changes on: trunk/sys/dev/ixgbe/ixv.c
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property
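
The ixv_mq_start_locked() rewrite switches from drbr_dequeue() to the peek/advance/putback discipline: a packet is only consumed from the ring once the hardware accepts it, otherwise it is pushed back for the next attempt. The control flow looks roughly like the sketch below, with hypothetical ring_*() and hw_xmit() stubs standing in for the drbr_*() calls and ixv_xmit().

    /* Sketch only: the ring here is a one-slot stub, just enough to
     * show the peek/advance/putback ordering. */
    #include <stdio.h>

    struct pkt { int len; };

    static struct pkt slot = { 64 };
    static struct pkt *head = &slot;

    static struct pkt *ring_peek(void)        { return head; }
    static void ring_advance(void)            { head = NULL; }
    static void ring_putback(struct pkt *p)   { head = p; }
    static int hw_xmit(struct pkt **p)        { (void)p; return 0; }

    static int drain_ring(void)
    {
        struct pkt *next;
        int sent = 0;

        while ((next = ring_peek()) != NULL) {
            if (hw_xmit(&next) != 0) {
                if (next == NULL)
                    ring_advance();     /* descriptor consumed the mbuf */
                else
                    ring_putback(next); /* leave it at the head for later */
                break;
            }
            ring_advance();             /* success: now consume it */
            sent++;
        }
        return sent;
    }

    int main(void)
    {
        printf("sent %d packet(s)\n", drain_ring());
        return 0;
    }
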
Modified: trunk/sys/dev/ixgbe/ixv.h
===================================================================
--- trunk/sys/dev/ixgbe/ixv.h	2013-12-28 14:28:53 UTC (rev 6558)
+++ trunk/sys/dev/ixgbe/ixv.h	2013-12-28 14:30:04 UTC (rev 6559)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
 /******************************************************************************
 
   Copyright (c) 2001-2012, Intel Corporation 
@@ -30,7 +31,7 @@
   POSSIBILITY OF SUCH DAMAGE.
 
 ******************************************************************************/
-/*$MidnightBSD$*/
+/*$FreeBSD: release/9.2.0/sys/dev/ixgbe/ixv.h 235528 2012-05-17 00:06:54Z jfv $*/
 
 
 #ifndef _IXV_H_


Property changes on: trunk/sys/dev/ixgbe/ixv.h
___________________________________________________________________
Deleted: cvs2svn:cvs-rev
## -1 +0,0 ##
-1.2
\ No newline at end of property

