[Midnightbsd-cvs] src [12245] trunk/sys/dev/mlx5: update
laffer1 at midnightbsd.org
laffer1 at midnightbsd.org
Thu Aug 8 22:49:12 EDT 2019
Revision: 12245
http://svnweb.midnightbsd.org/src/?rev=12245
Author: laffer1
Date: 2019-08-08 22:49:11 -0400 (Thu, 08 Aug 2019)
Log Message:
-----------
update
Modified Paths:
--------------
trunk/sys/dev/mlx5/device.h
trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h
trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c
trunk/sys/dev/mlx5/mlx5_en/en.h
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
trunk/sys/dev/mlx5/qp.h
trunk/sys/dev/mlx5/vport.h
Modified: trunk/sys/dev/mlx5/device.h
===================================================================
--- trunk/sys/dev/mlx5/device.h 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/device.h 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/device.h 322151 2017-08-07 12:49:30Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/device.h 339713 2018-10-25 14:12:48Z slavash $
*/
#ifndef MLX5_DEVICE_H
@@ -476,6 +476,7 @@
MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
+ MLX5_MODULE_EVENT_ERROR_PCIE_SYSTEM_POWER_SLOT_EXCEEDED = 0xc,
};
struct mlx5_eqe_port_module_event {
@@ -1312,6 +1313,13 @@
MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
};
+enum mlx5_inline_modes {
+ MLX5_INLINE_MODE_NONE,
+ MLX5_INLINE_MODE_L2,
+ MLX5_INLINE_MODE_IP,
+ MLX5_INLINE_MODE_TCP_UDP,
+};
+
enum {
MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
};
Modified: trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_core.h 306244 2016-09-23 08:28:44Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_core.h 337748 2018-08-14 11:52:05Z hselasky $
*/
#ifndef __MLX5_CORE_H__
@@ -34,8 +34,8 @@
#include <linux/sched.h>
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "1.23.0 (03 Mar 2015)"
-#define DRIVER_RELDATE "03 Mar 2015"
+#define DRIVER_VERSION "3.2.1"
+#define DRIVER_RELDATE "August 2018"
extern int mlx5_core_debug_mask;
Modified: trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_eq.c 306244 2016-09-23 08:28:44Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_eq.c 339713 2018-10-25 14:12:48Z slavash $
*/
#include <linux/interrupt.h>
@@ -619,6 +619,12 @@
return "High Temperature";
case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
return "Cable is shorted";
+ case MLX5_MODULE_EVENT_ERROR_PCIE_SYSTEM_POWER_SLOT_EXCEEDED:
+ return "One or more network ports have been powered "
+ "down due to insufficient/unadvertised power on "
+ "the PCIe slot. Please refer to the card's user "
+ "manual for power specifications or contact "
+ "Mellanox support.";
default:
return "Unknown error type";
Modified: trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_health.c 322149 2017-08-07 12:45:26Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_health.c 337743 2018-08-14 11:24:14Z hselasky $
*/
#include <linux/kernel.h>
@@ -57,10 +57,13 @@
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
- /* nothing yet */
+
spin_lock_irq(&health_lock);
list_del_init(&health->list);
spin_unlock_irq(&health_lock);
+
+ /* enter error state */
+ mlx5_enter_error_state(dev);
}
}
Modified: trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_vport.c 306244 2016-09-23 08:28:44Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_vport.c 337742 2018-08-14 11:19:04Z hselasky $
*/
#include <linux/etherdevice.h>
@@ -229,6 +229,58 @@
return err;
}
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *min_inline)
+{
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+ int err;
+
+ err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+ if (!err)
+ *min_inline = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.min_wqe_inline_mode);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_L2:
+ *min_inline_mode = MLX5_INLINE_MODE_L2;
+ break;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
+ break;
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ *min_inline_mode = MLX5_INLINE_MODE_NONE;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
+
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_ctx;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.min_wqe_inline_mode, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ min_wqe_inline_mode, min_inline);
+
+ return mlx5_modify_nic_vport_context(mdev, in, inlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);
+
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
Modified: trunk/sys/dev/mlx5/mlx5_en/en.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/en.h 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_en/en.h 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/en.h 329300 2018-02-15 08:48:04Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/en.h 338553 2018-09-10 08:10:52Z hselasky $
*/
#ifndef _MLX5_EN_H_
@@ -84,8 +84,19 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xe
-/* freeBSD HW LRO is limited by 16KB - the size of max mbuf */
+#define MLX5E_MAX_RX_SEGS 7
+
+#ifndef MLX5E_MAX_RX_BYTES
+#define MLX5E_MAX_RX_BYTES MCLBYTES
+#endif
+
+#if (MLX5E_MAX_RX_SEGS == 1)
+/* FreeBSD HW LRO is limited by 16KB - the size of max mbuf */
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ MJUM16BYTES
+#else
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ \
+ MIN(65535, MLX5E_MAX_RX_SEGS * MLX5E_MAX_RX_BYTES)
+#endif
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
@@ -115,7 +126,8 @@
#define MLX5E_MAX_TX_MBUF_SIZE 65536 /* bytes */
#define MLX5E_MAX_TX_MBUF_FRAGS \
((MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) - \
- (MLX5E_MAX_TX_HEADER / MLX5_SEND_WQE_DS)) /* units */
+ (MLX5E_MAX_TX_HEADER / MLX5_SEND_WQE_DS) - \
+ 1 /* the maximum value of the DS counter is 0x3F and not 0x40 */) /* units */
#define MLX5E_MAX_TX_INLINE \
(MLX5E_MAX_TX_HEADER - sizeof(struct mlx5e_tx_wqe) + \
sizeof(((struct mlx5e_tx_wqe *)0)->eth.inline_hdr_start)) /* bytes */
@@ -394,6 +406,9 @@
u16 rx_hash_log_tbl_sz;
u32 tx_pauseframe_control;
u32 rx_pauseframe_control;
+ u16 tx_max_inline;
+ u8 tx_min_inline_mode;
+ u8 channels_rsss;
};
#define MLX5E_PARAMS(m) \
@@ -402,6 +417,7 @@
m(+1, u64 tx_queue_size, "tx_queue_size", "Default send queue size") \
m(+1, u64 rx_queue_size, "rx_queue_size", "Default receive queue size") \
m(+1, u64 channels, "channels", "Default number of channels") \
+ m(+1, u64 channels_rsss, "channels_rsss", "Default channels receive side scaling stride") \
m(+1, u64 coalesce_usecs_max, "coalesce_usecs_max", "Maximum usecs for joining packets") \
m(+1, u64 coalesce_pkts_max, "coalesce_pkts_max", "Maximum packets to join") \
m(+1, u64 rx_coalesce_usecs, "rx_coalesce_usecs", "Limit in usec for joining rx packets") \
@@ -416,7 +432,8 @@
m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro") \
m(+1, u64 cqe_zipping, "cqe_zipping", "0 : CQE zipping disabled") \
m(+1, u64 diag_pci_enable, "diag_pci_enable", "0: Disabled 1: Enabled") \
- m(+1, u64 diag_general_enable, "diag_general_enable", "0: Disabled 1: Enabled")
+ m(+1, u64 diag_general_enable, "diag_general_enable", "0: Disabled 1: Enabled") \
+ m(+1, u64 hw_mtu, "hw_mtu", "Current hardware MTU value")
#define MLX5E_PARAMS_NUM (0 MLX5E_PARAMS(MLX5E_STATS_COUNT))
@@ -477,6 +494,7 @@
struct mtx mtx;
bus_dma_tag_t dma_tag;
u32 wqe_sz;
+ u32 nsegs;
struct mlx5e_rq_mbuf *mbuf;
struct ifnet *ifp;
struct mlx5e_rq_stats stats;
@@ -549,6 +567,9 @@
u32 sqn;
u32 bf_buf_size;
u32 mkey_be;
+ u16 max_inline;
+ u8 min_inline_mode;
+ u8 vlan_inline_cap;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -710,9 +731,12 @@
struct mlx5e_rx_wqe {
struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data;
+ struct mlx5_wqe_data_seg data[];
};
+/* the size of the structure above must be power of two */
+CTASSERT(powerof2(sizeof(struct mlx5e_rx_wqe)));
+
struct mlx5e_eeprom {
int lock_bit;
int i2c_addr;
@@ -837,5 +861,6 @@
int mlx5e_modify_sq(struct mlx5e_sq *, int curr_state, int next_state);
void mlx5e_disable_sq(struct mlx5e_sq *);
void mlx5e_drain_sq(struct mlx5e_sq *);
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
#endif /* _MLX5_EN_H_ */
Modified: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c 322007 2017-08-03 14:14:13Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c 341984 2018-12-12 13:14:41Z hselasky $
*/
#include "en.h"
@@ -247,6 +247,24 @@
mlx5e_open_locked(priv->ifp);
break;
+ case MLX5_PARAM_OFFSET(channels_rsss):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import number of channels */
+ if (priv->params_ethtool.channels_rsss < 1)
+ priv->params_ethtool.channels_rsss = 1;
+ else if (priv->params_ethtool.channels_rsss > 128)
+ priv->params_ethtool.channels_rsss = 128;
+
+ priv->params.channels_rsss = priv->params_ethtool.channels_rsss;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
case MLX5_PARAM_OFFSET(channels):
/* network interface must be down */
if (was_opened)
@@ -313,21 +331,24 @@
mlx5e_close_locked(priv->ifp);
/* import HW LRO mode */
- if (priv->params_ethtool.hw_lro != 0) {
- if ((priv->ifp->if_capenable & IFCAP_LRO) &&
- MLX5_CAP_ETH(priv->mdev, lro_cap)) {
- priv->params.hw_lro_en = 1;
- priv->params_ethtool.hw_lro = 1;
+ if (priv->params_ethtool.hw_lro != 0 &&
+ MLX5_CAP_ETH(priv->mdev, lro_cap)) {
+ priv->params_ethtool.hw_lro = 1;
+ /* check if feature should actually be enabled */
+ if (priv->ifp->if_capenable & IFCAP_LRO) {
+ priv->params.hw_lro_en = true;
} else {
- priv->params.hw_lro_en = 0;
- priv->params_ethtool.hw_lro = 0;
- error = EINVAL;
+ priv->params.hw_lro_en = false;
- if_printf(priv->ifp, "Can't enable HW LRO: "
- "The HW or SW LRO feature is disabled\n");
+ if_printf(priv->ifp, "To enable HW LRO "
+ "please also enable LRO via ifconfig(8).\n");
}
} else {
- priv->params.hw_lro_en = 0;
+ /* return an error if HW does not support this feature */
+ if (priv->params_ethtool.hw_lro != 0)
+ error = EINVAL;
+ priv->params.hw_lro_en = false;
+ priv->params_ethtool.hw_lro = 0;
}
/* restart network interface, if any */
if (was_opened)
@@ -695,6 +716,7 @@
priv->params_ethtool.tx_queue_size = 1 << priv->params.log_sq_size;
priv->params_ethtool.rx_queue_size = 1 << priv->params.log_rq_size;
priv->params_ethtool.channels = priv->params.num_channels;
+ priv->params_ethtool.channels_rsss = priv->params.channels_rsss;
priv->params_ethtool.coalesce_pkts_max = MLX5E_FLD_MAX(cqc, cq_max_count);
priv->params_ethtool.coalesce_usecs_max = MLX5E_FLD_MAX(cqc, cq_period);
priv->params_ethtool.rx_coalesce_mode = priv->params.rx_cq_moderation_mode;
@@ -715,7 +737,8 @@
return;
for (x = 0; x != MLX5E_PARAMS_NUM; x++) {
/* check for read-only parameter */
- if (strstr(mlx5e_params_desc[2 * x], "_max") != NULL) {
+ if (strstr(mlx5e_params_desc[2 * x], "_max") != NULL ||
+ strstr(mlx5e_params_desc[2 * x], "_mtu") != NULL) {
SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RD |
CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
Modified: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 324523 2017-10-11 10:04:17Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 348832 2019-06-09 08:22:38Z hselasky $
*/
#include "en.h"
@@ -31,10 +31,12 @@
#include <sys/sockio.h>
#include <machine/atomic.h>
-#define ETH_DRIVER_VERSION "3.1.0-dev"
+#define ETH_DRIVER_VERSION "3.2.1"
char mlx5e_version[] = "Mellanox Ethernet driver"
" (" ETH_DRIVER_VERSION ")";
+static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
+
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
@@ -655,7 +657,12 @@
int wq_sz;
int err;
int i;
+ u32 nsegs, wqe_sz;
+ err = mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
+ if (err != 0)
+ goto done;
+
/* Create DMA descriptor TAG */
if ((err = -bus_dma_tag_create(
bus_get_dma_tag(mdev->pdev->dev.bsddev),
@@ -664,9 +671,9 @@
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MJUM16BYTES, /* maxsize */
- 1, /* nsegments */
- MJUM16BYTES, /* maxsegsize */
+ nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */
+ nsegs, /* nsegments */
+ nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&rq->dma_tag)))
@@ -679,29 +686,19 @@
rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
- if (priv->params.hw_lro_en) {
- rq->wqe_sz = priv->params.lro_wqe_sz;
- } else {
- rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
- }
- if (rq->wqe_sz > MJUM16BYTES) {
- err = -ENOMEM;
+ err = mlx5e_get_wqe_sz(priv, &rq->wqe_sz, &rq->nsegs);
+ if (err != 0)
goto err_rq_wq_destroy;
- } else if (rq->wqe_sz > MJUM9BYTES) {
- rq->wqe_sz = MJUM16BYTES;
- } else if (rq->wqe_sz > MJUMPAGESIZE) {
- rq->wqe_sz = MJUM9BYTES;
- } else if (rq->wqe_sz > MCLBYTES) {
- rq->wqe_sz = MJUMPAGESIZE;
- } else {
- rq->wqe_sz = MCLBYTES;
- }
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
for (i = 0; i != wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+#if (MLX5E_MAX_RX_SEGS == 1)
uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+#else
+ int j;
+#endif
err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
if (err != 0) {
@@ -709,8 +706,15 @@
bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
goto err_rq_mbuf_free;
}
- wqe->data.lkey = c->mkey_be;
- wqe->data.byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+
+ /* set value for constant fields */
+#if (MLX5E_MAX_RX_SEGS == 1)
+ wqe->data[0].lkey = c->mkey_be;
+ wqe->data[0].byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+#else
+ for (j = 0; j < rq->nsegs; j++)
+ wqe->data[j].lkey = c->mkey_be;
+#endif
}
rq->ifp = c->ifp;
@@ -769,6 +773,7 @@
}
free(rq->mbuf, M_MLX5EN);
mlx5_wq_destroy(&rq->wq_ctrl);
+ bus_dma_tag_destroy(rq->dma_tag);
}
static int
@@ -918,8 +923,11 @@
static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
+ struct mlx5_core_dev *mdev = rq->channel->priv->mdev;
+
/* wait till RQ is empty */
- while (!mlx5_wq_ll_is_empty(&rq->wq)) {
+ while (!mlx5_wq_ll_is_empty(&rq->wq) &&
+ (mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
msleep(4);
rq->cq.mcq.comp(&rq->cq.mcq);
}
@@ -1019,6 +1027,9 @@
sq->ifp = priv->ifp;
sq->priv = priv;
sq->tc = tc;
+ sq->max_inline = priv->params.tx_max_inline;
+ sq->min_inline_mode = priv->params.tx_min_inline_mode;
+ sq->vlan_inline_cap = MLX5_CAP_ETH(mdev, wqe_vlan_insert);
/* check if we should allocate a second packet buffer */
if (priv->params_ethtool.tx_bufring_disable == 0) {
@@ -1089,6 +1100,7 @@
}
if (sq->br != NULL)
buf_ring_free(sq->br, M_MLX5EN);
+ bus_dma_tag_destroy(sq->dma_tag);
}
int
@@ -1258,6 +1270,7 @@
mlx5e_drain_sq(struct mlx5e_sq *sq)
{
int error;
+ struct mlx5_core_dev *mdev = sq->priv->mdev;
/*
* Check if already stopped.
@@ -1290,7 +1303,8 @@
/* wait till SQ is empty or link is down */
mtx_lock(&sq->lock);
while (sq->cc != sq->pc &&
- (sq->priv->media_status_last & IFM_ACTIVE) != 0) {
+ (sq->priv->media_status_last & IFM_ACTIVE) != 0 &&
+ mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mtx_unlock(&sq->lock);
msleep(1);
sq->cq.mcq.comp(&sq->cq.mcq);
@@ -1307,7 +1321,8 @@
/* wait till SQ is empty */
mtx_lock(&sq->lock);
- while (sq->cc != sq->pc) {
+ while (sq->cc != sq->pc &&
+ mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
mtx_unlock(&sq->lock);
msleep(1);
sq->cq.mcq.comp(&sq->cq.mcq);
@@ -1656,6 +1671,38 @@
free(c, M_MLX5EN);
}
+static int
+mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
+{
+ u32 r, n;
+
+ r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
+ MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
+ if (r > MJUM16BYTES)
+ return (-ENOMEM);
+
+ if (r > MJUM9BYTES)
+ r = MJUM16BYTES;
+ else if (r > MJUMPAGESIZE)
+ r = MJUM9BYTES;
+ else if (r > MCLBYTES)
+ r = MJUMPAGESIZE;
+ else
+ r = MCLBYTES;
+
+ /*
+ * n + 1 must be a power of two, because stride size must be.
+ * Stride size is 16 * (n + 1), as the first segment is
+ * control.
+ */
+ for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
+ ;
+
+ *wqe_sz = r;
+ *nsegs = n;
+ return (0);
+}
+
static void
mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param)
@@ -1662,10 +1709,13 @@
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 wqe_sz, nsegs;
+ mlx5e_get_wqe_sz(priv, &wqe_sz, &nsegs);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
- MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe) +
+ nsegs * sizeof(struct mlx5_wqe_data_seg)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
@@ -2008,14 +2058,16 @@
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
for (i = 0; i < sz; i++) {
- int ix;
+ int ix = i;
#ifdef RSS
- ix = rss_get_indirection_to_bucket(i);
-#else
- ix = i;
+ ix = rss_get_indirection_to_bucket(ix);
#endif
/* ensure we don't overflow */
ix %= priv->params.num_channels;
+
+ /* apply receive side scaling stride, if any */
+ ix -= ix % (int)priv->params.channels_rsss;
+
MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
}
@@ -2300,16 +2352,33 @@
int hw_mtu;
int err;
- err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(sw_mtu));
+ hw_mtu = MLX5E_SW2HW_MTU(sw_mtu);
+
+ err = mlx5_set_port_mtu(mdev, hw_mtu);
if (err) {
if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
__func__, sw_mtu, err);
return (err);
}
- err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
+
+ /* Update vport context MTU */
+ err = mlx5_set_vport_mtu(mdev, hw_mtu);
if (err) {
+ if_printf(ifp, "%s: Failed updating vport context with MTU size, err=%d\n",
+ __func__, err);
+ }
+
+ ifp->if_mtu = sw_mtu;
+
+ err = mlx5_query_vport_mtu(mdev, &hw_mtu);
+ if (err || !hw_mtu) {
+ /* fallback to port oper mtu */
+ err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
+ }
+ if (err) {
if_printf(ifp, "Query port MTU, after setting new "
"MTU value, failed\n");
+ return (err);
} else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
err = -E2BIG,
if_printf(ifp, "Port MTU %d is smaller than "
@@ -2319,7 +2388,8 @@
if_printf(ifp, "Port MTU %d is bigger than "
"ifp mtu %d\n", hw_mtu, sw_mtu);
}
- ifp->if_mtu = sw_mtu;
+ priv->params_ethtool.hw_mtu = hw_mtu;
+
return (err);
}
@@ -2684,13 +2754,19 @@
bool need_restart = false;
ifp->if_capenable ^= IFCAP_LRO;
+
+ /* figure out if updating HW LRO is needed */
if (!(ifp->if_capenable & IFCAP_LRO)) {
if (priv->params.hw_lro_en) {
priv->params.hw_lro_en = false;
need_restart = true;
- /* Not sure this is the correct way */
- priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
}
+ } else {
+ if (priv->params.hw_lro_en == false &&
+ priv->params_ethtool.hw_lro != 0) {
+ priv->params.hw_lro_en = true;
+ need_restart = true;
+ }
}
if (was_opened && need_restart) {
mlx5e_close_locked(ifp);
@@ -2802,6 +2878,16 @@
return (0);
}
+static u16
+mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+ int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ return bf_buf_size -
+ sizeof(struct mlx5e_tx_wqe) +
+ 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
static void
mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv,
@@ -2837,6 +2923,8 @@
priv->params.num_tc = 1;
priv->params.default_vlan_prio = 0;
priv->counter_set_id = -1;
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
/*
* hw lro is currently defaulted to off. when it won't anymore we
@@ -2849,6 +2937,7 @@
priv->mdev = mdev;
priv->params.num_channels = num_comp_vectors;
+ priv->params.channels_rsss = 1;
priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
priv->queue_mapping_channel_mask =
roundup_pow_of_two(num_comp_vectors) - 1;
@@ -2935,6 +3024,20 @@
return (error);
}
+u8
+mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
+{
+ u8 min_inline_mode;
+
+ min_inline_mode = MLX5_INLINE_MODE_L2;
+ mlx5_query_min_inline(mdev, &min_inline_mode);
+ if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ min_inline_mode = MLX5_INLINE_MODE_L2;
+
+ return (min_inline_mode);
+}
+
static void
mlx5e_add_hw_stats(struct mlx5e_priv *priv)
{
Modified: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 324523 2017-10-11 10:04:17Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 337741 2018-08-14 11:15:05Z hselasky $
*/
#include "en.h"
@@ -33,21 +33,47 @@
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
struct mlx5e_rx_wqe *wqe, u16 ix)
{
- bus_dma_segment_t segs[1];
+ bus_dma_segment_t segs[rq->nsegs];
struct mbuf *mb;
int nsegs;
int err;
-
+#if (MLX5E_MAX_RX_SEGS != 1)
+ struct mbuf *mb_head;
+ int i;
+#endif
if (rq->mbuf[ix].mbuf != NULL)
return (0);
+#if (MLX5E_MAX_RX_SEGS == 1)
mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
if (unlikely(!mb))
return (-ENOMEM);
- /* set initial mbuf length */
mb->m_pkthdr.len = mb->m_len = rq->wqe_sz;
+#else
+ mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
+ MLX5E_MAX_RX_BYTES);
+ if (unlikely(mb == NULL))
+ return (-ENOMEM);
+ mb->m_len = MLX5E_MAX_RX_BYTES;
+ mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;
+
+ for (i = 1; i < rq->nsegs; i++) {
+ if (mb_head->m_pkthdr.len >= rq->wqe_sz)
+ break;
+ mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
+ MLX5E_MAX_RX_BYTES);
+ if (unlikely(mb == NULL)) {
+ m_freem(mb_head);
+ return (-ENOMEM);
+ }
+ mb->m_len = MLX5E_MAX_RX_BYTES;
+ mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
+ }
+ /* rewind to first mbuf in chain */
+ mb = mb_head;
+#endif
/* get IP header aligned */
m_adj(mb, MLX5E_NET_IP_ALIGN);
@@ -55,12 +81,26 @@
mb, segs, &nsegs, BUS_DMA_NOWAIT);
if (err != 0)
goto err_free_mbuf;
- if (unlikely(nsegs != 1)) {
+ if (unlikely(nsegs == 0)) {
bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
err = -ENOMEM;
goto err_free_mbuf;
}
- wqe->data.addr = cpu_to_be64(segs[0].ds_addr);
+#if (MLX5E_MAX_RX_SEGS == 1)
+ wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
+#else
+ wqe->data[0].addr = cpu_to_be64(segs[0].ds_addr);
+ wqe->data[0].byte_count = cpu_to_be32(segs[0].ds_len |
+ MLX5_HW_START_PADDING);
+ for (i = 1; i != nsegs; i++) {
+ wqe->data[i].addr = cpu_to_be64(segs[i].ds_addr);
+ wqe->data[i].byte_count = cpu_to_be32(segs[i].ds_len);
+ }
+ for (; i < rq->nsegs; i++) {
+ wqe->data[i].addr = 0;
+ wqe->data[i].byte_count = 0;
+ }
+#endif
rq->mbuf[ix].mbuf = mb;
rq->mbuf[ix].data = mb->m_data;
@@ -186,6 +226,9 @@
u32 cqe_bcnt)
{
struct ifnet *ifp = rq->ifp;
+#if (MLX5E_MAX_RX_SEGS != 1)
+ struct mbuf *mb_head;
+#endif
int lro_num_seg; /* HW LRO session aggregated packets counter */
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
@@ -195,7 +238,26 @@
rq->stats.lro_bytes += cqe_bcnt;
}
+#if (MLX5E_MAX_RX_SEGS == 1)
mb->m_pkthdr.len = mb->m_len = cqe_bcnt;
+#else
+ mb->m_pkthdr.len = cqe_bcnt;
+ for (mb_head = mb; mb != NULL; mb = mb->m_next) {
+ if (mb->m_len > cqe_bcnt)
+ mb->m_len = cqe_bcnt;
+ cqe_bcnt -= mb->m_len;
+ if (likely(cqe_bcnt == 0)) {
+ if (likely(mb->m_next != NULL)) {
+ /* trim off empty mbufs */
+ m_freem(mb->m_next);
+ mb->m_next = NULL;
+ }
+ break;
+ }
+ }
+ /* rewind to first mbuf in chain */
+ mb = mb_head;
+#endif
/* check if a Toeplitz hash was computed */
if (cqe->rss_hash_type != 0) {
mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
@@ -361,6 +423,10 @@
}
if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
(mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
+#if (MLX5E_MAX_RX_SEGS != 1)
+ /* set maximum mbuf length */
+ mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
+#endif
/* get IP header aligned */
mb->m_data += MLX5E_NET_IP_ALIGN;
Modified: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 324523 2017-10-11 10:04:17Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 338551 2018-09-10 08:06:34Z hselasky $
*/
#include "en.h"
@@ -138,7 +138,43 @@
static inline u16
mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
{
- return (MIN(MLX5E_MAX_TX_INLINE, mb->m_len));
+
+ switch(sq->min_inline_mode) {
+ case MLX5_INLINE_MODE_NONE:
+ /*
+ * When inline mode is NONE, we do not need to copy
+ * headers into WQEs, except when vlan tag framing is
+ * requested. Hardware might offload vlan tagging on
+ * transmit. This is a separate capability, which is
+ * known to be disabled on ConnectX-5 due to a hardware
+ * bug RM 931383. If vlan_inline_cap is not present and
+ * the packet has vlan tag, fall back to inlining.
+ */
+ if ((mb->m_flags & M_VLANTAG) != 0 &&
+ sq->vlan_inline_cap == 0)
+ break;
+ return (0);
+ case MLX5_INLINE_MODE_L2:
+ /*
+ * Due to hardware limitations, when trust mode is
+ * DSCP, the hardware may request MLX5_INLINE_MODE_L2
+ * while it really needs all L2 headers and the 4 first
+ * bytes of the IP header (which include the
+ * TOS/traffic-class).
+ *
+ * To avoid doing a firmware command for querying the
+ * trust state and parsing the mbuf for doing
+ * unnecessary checks (VLAN/eth_type) in the fast path,
+ * we are going for the worst case (22 bytes) if
+ * the mb->m_pkthdr.len allows it.
+ */
+ if (mb->m_pkthdr.len > ETHER_HDR_LEN +
+ ETHER_VLAN_ENCAP_LEN + 4)
+ return (MIN(sq->max_inline, ETHER_HDR_LEN +
+ ETHER_VLAN_ENCAP_LEN + 4));
+ break;
+ }
+ return (MIN(sq->max_inline, mb->m_pkthdr.len));
}
static int
@@ -276,37 +312,47 @@
sq->mbuf[pi].num_bytes = max_t (unsigned int,
mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
}
- if (mb->m_flags & M_VLANTAG) {
- struct ether_vlan_header *eh =
- (struct ether_vlan_header *)wqe->eth.inline_hdr_start;
+ if (ihs == 0) {
+ if ((mb->m_flags & M_VLANTAG) != 0) {
+ wqe->eth.vlan_cmd = htons(0x8000); /* bit 0 CVLAN */
+ wqe->eth.vlan_hdr = htons(mb->m_pkthdr.ether_vtag);
+ } else {
+ wqe->eth.inline_hdr_sz = 0;
+ }
+ } else {
+ if ((mb->m_flags & M_VLANTAG) != 0) {
+ struct ether_vlan_header *eh = (struct ether_vlan_header
+ *)wqe->eth.inline_hdr_start;
- /* Range checks */
- if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
- ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
- else if (ihs < ETHER_HDR_LEN) {
- err = EINVAL;
- goto tx_drop;
+ /* Range checks */
+ if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
+ ihs = (MLX5E_MAX_TX_INLINE -
+ ETHER_VLAN_ENCAP_LEN);
+ else if (ihs < ETHER_HDR_LEN) {
+ err = EINVAL;
+ goto tx_drop;
+ }
+ m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
+ m_adj(mb, ETHER_HDR_LEN);
+ /* Insert 4 bytes VLAN tag into data stream */
+ eh->evl_proto = eh->evl_encap_proto;
+ eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
+ eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
+ /* Copy rest of header data, if any */
+ m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh +
+ 1));
+ m_adj(mb, ihs - ETHER_HDR_LEN);
+ /* Extend header by 4 bytes */
+ ihs += ETHER_VLAN_ENCAP_LEN;
+ } else {
+ m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
+ m_adj(mb, ihs);
}
- m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
- m_adj(mb, ETHER_HDR_LEN);
- /* Insert 4 bytes VLAN tag into data stream */
- eh->evl_proto = eh->evl_encap_proto;
- eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
- eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
- /* Copy rest of header data, if any */
- m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
- m_adj(mb, ihs - ETHER_HDR_LEN);
- /* Extend header by 4 bytes */
- ihs += ETHER_VLAN_ENCAP_LEN;
- } else {
- m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
- m_adj(mb, ihs);
+ wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
}
- wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
-
ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (likely(ihs > sizeof(wqe->eth.inline_hdr_start))) {
+ if (ihs > sizeof(wqe->eth.inline_hdr_start)) {
ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
MLX5_SEND_WQE_DS);
}
@@ -470,13 +516,11 @@
/* Process the queue */
while ((next = drbr_peek(ifp, sq->br)) != NULL) {
if (mlx5e_sq_xmit(sq, &next) != 0) {
- if (next == NULL) {
- drbr_advance(ifp, sq->br);
- } else {
+ if (next != NULL) {
drbr_putback(ifp, sq->br, next);
atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
+ break;
}
- break;
}
drbr_advance(ifp, sq->br);
}
Modified: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 325611 2017-11-09 19:00:11Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 337748 2018-08-14 11:52:05Z hselasky $
*/
#include <linux/errno.h>
@@ -51,8 +51,8 @@
#include <sys/unistd.h>
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "3.2-rc1"
-#define DRIVER_RELDATE "May 2016"
+#define DRIVER_VERSION "3.2.1"
+#define DRIVER_RELDATE "August 2018"
#undef MODULE_VERSION
#include <sys/module.h>
Modified: trunk/sys/dev/mlx5/qp.h
===================================================================
--- trunk/sys/dev/mlx5/qp.h 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/qp.h 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/qp.h 308684 2016-11-15 08:58:51Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/qp.h 337742 2018-08-14 11:19:04Z hselasky $
*/
#ifndef MLX5_QP_H
@@ -237,8 +237,16 @@
u8 swp_flags;
__be16 mss;
__be32 rsvd2;
- __be16 inline_hdr_sz;
- u8 inline_hdr_start[2];
+ union {
+ struct {
+ __be16 inline_hdr_sz;
+ u8 inline_hdr_start[2];
+ };
+ struct {
+ __be16 vlan_cmd;
+ __be16 vlan_hdr;
+ };
+ };
};
struct mlx5_wqe_xrc_seg {
Modified: trunk/sys/dev/mlx5/vport.h
===================================================================
--- trunk/sys/dev/mlx5/vport.h 2019-08-09 02:41:44 UTC (rev 12244)
+++ trunk/sys/dev/mlx5/vport.h 2019-08-09 02:49:11 UTC (rev 12245)
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/10/sys/dev/mlx5/vport.h 306244 2016-09-23 08:28:44Z hselasky $
+ * $FreeBSD: stable/10/sys/dev/mlx5/vport.h 337742 2018-08-14 11:19:04Z hselasky $
*/
#ifndef __MLX5_VPORT_H__
@@ -30,6 +30,13 @@
#define __MLX5_VPORT_H__
#include <dev/mlx5/driver.h>
+
+enum {
+ MLX5_CAP_INLINE_MODE_L2,
+ MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int client_id,
u16 *counter_set_id);
int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev, int client_id,
@@ -70,6 +77,11 @@
u16 vport, u8 mac[ETH_ALEN]);
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
bool other_vport, u8 *addr);
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
u32 vport, u64 port_guid);
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
More information about the Midnightbsd-cvs
mailing list