[Midnightbsd-cvs] src [10097] trunk/sys/dev: sync mn, mlx5
laffer1 at midnightbsd.org
Sun May 27 19:36:48 EDT 2018
Revision: 10097
http://svnweb.midnightbsd.org/src/?rev=10097
Author: laffer1
Date: 2018-05-27 19:36:47 -0400 (Sun, 27 May 2018)
Log Message:
-----------
sync mn, mlx5
Modified Paths:
--------------
trunk/sys/dev/mn/if_mn.c
Added Paths:
-----------
trunk/sys/dev/mlx5/
trunk/sys/dev/mlx5/cq.h
trunk/sys/dev/mlx5/device.h
trunk/sys/dev/mlx5/diagnostics.h
trunk/sys/dev/mlx5/doorbell.h
trunk/sys/dev/mlx5/driver.h
trunk/sys/dev/mlx5/eswitch_vacl.h
trunk/sys/dev/mlx5/flow_table.h
trunk/sys/dev/mlx5/mlx5_core/
trunk/sys/dev/mlx5/mlx5_core/mlx5_alloc.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h
trunk/sys/dev/mlx5/mlx5_core/mlx5_cq.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_fw.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_mad.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_main.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_mcg.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_mr.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_pd.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_port.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_qp.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_srq.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_uar.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c
trunk/sys/dev/mlx5/mlx5_core/mlx5_wq.c
trunk/sys/dev/mlx5/mlx5_core/transobj.h
trunk/sys/dev/mlx5/mlx5_core/wq.h
trunk/sys/dev/mlx5/mlx5_en/
trunk/sys/dev/mlx5/mlx5_en/en.h
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
trunk/sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c
trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.c
trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.h
trunk/sys/dev/mlx5/mlx5_ib/
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_ah.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mad.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_roce.c
trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c
trunk/sys/dev/mlx5/mlx5_ib/user.h
trunk/sys/dev/mlx5/mlx5_ifc.h
trunk/sys/dev/mlx5/mlx5_rdma_if.h
trunk/sys/dev/mlx5/qp.h
trunk/sys/dev/mlx5/srq.h
trunk/sys/dev/mlx5/vport.h
Added: trunk/sys/dev/mlx5/cq.h
===================================================================
--- trunk/sys/dev/mlx5/cq.h (rev 0)
+++ trunk/sys/dev/mlx5/cq.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,178 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/cq.h 321996 2017-08-03 13:57:17Z hselasky $
+ */
+
+#ifndef MLX5_CORE_CQ_H
+#define MLX5_CORE_CQ_H
+
+#include <rdma/ib_verbs.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_ifc.h>
+
+
+struct mlx5_core_cq {
+ u32 cqn;
+ int cqe_sz;
+ __be32 *set_ci_db;
+ __be32 *arm_db;
+ atomic_t refcount;
+ struct completion free;
+ unsigned vector;
+ int irqn;
+ void (*comp) (struct mlx5_core_cq *);
+ void (*event) (struct mlx5_core_cq *, int);
+ struct mlx5_uar *uar;
+ u32 cons_index;
+ unsigned arm_sn;
+ struct mlx5_rsc_debug *dbg;
+ int pid;
+ int reset_notify_added;
+ struct list_head reset_notify;
+};
+
+
+enum {
+ MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR = 0x01,
+ MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR = 0x02,
+ MLX5_CQE_SYNDROME_LOCAL_PROT_ERR = 0x04,
+ MLX5_CQE_SYNDROME_WR_FLUSH_ERR = 0x05,
+ MLX5_CQE_SYNDROME_MW_BIND_ERR = 0x06,
+ MLX5_CQE_SYNDROME_BAD_RESP_ERR = 0x10,
+ MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR = 0x11,
+ MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12,
+ MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR = 0x13,
+ MLX5_CQE_SYNDROME_REMOTE_OP_ERR = 0x14,
+ MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR = 0x15,
+ MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR = 0x16,
+ MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR = 0x22,
+};
+
+enum {
+ MLX5_CQE_OWNER_MASK = 1,
+ MLX5_CQE_REQ = 0,
+ MLX5_CQE_RESP_WR_IMM = 1,
+ MLX5_CQE_RESP_SEND = 2,
+ MLX5_CQE_RESP_SEND_IMM = 3,
+ MLX5_CQE_RESP_SEND_INV = 4,
+ MLX5_CQE_RESIZE_CQ = 5,
+ MLX5_CQE_SIG_ERR = 12,
+ MLX5_CQE_REQ_ERR = 13,
+ MLX5_CQE_RESP_ERR = 14,
+ MLX5_CQE_INVALID = 15,
+};
+
+enum {
+ MLX5_CQ_MODIFY_PERIOD = 1 << 0,
+ MLX5_CQ_MODIFY_COUNT = 1 << 1,
+ MLX5_CQ_MODIFY_OVERRUN = 1 << 2,
+ MLX5_CQ_MODIFY_PERIOD_MODE = 1 << 4,
+};
+
+enum {
+ MLX5_CQ_OPMOD_RESIZE = 1,
+ MLX5_MODIFY_CQ_MASK_LOG_SIZE = 1 << 0,
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET = 1 << 1,
+ MLX5_MODIFY_CQ_MASK_PG_SIZE = 1 << 2,
+};
+
+struct mlx5_cq_modify_params {
+ int type;
+ union {
+ struct {
+ u32 page_offset;
+ u8 log_cq_size;
+ } resize;
+
+ struct {
+ } moder;
+
+ struct {
+ } mapping;
+ } params;
+};
+
+static inline int cqe_sz_to_mlx_sz(u8 size)
+{
+ return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+}
+
+static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
+{
+ *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
+}
+
+enum {
+ MLX5_CQ_DB_REQ_NOT_SOL = 1 << 24,
+ MLX5_CQ_DB_REQ_NOT = 0 << 24
+};
+
+static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
+ void __iomem *uar_page,
+ spinlock_t *doorbell_lock,
+ u32 cons_index)
+{
+ __be32 doorbell[2];
+ u32 sn;
+ u32 ci;
+
+ sn = cq->arm_sn & 3;
+ ci = cons_index & 0xffffff;
+
+ *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
+
+ /* Make sure that the doorbell record in host memory is
+ * written before ringing the doorbell via PCI MMIO.
+ */
+ wmb();
+
+ doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
+ doorbell[1] = cpu_to_be32(cq->cqn);
+
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
+}
+
+int mlx5_init_cq_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_create_cq_mbox_in *in, int inlen);
+int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_query_cq_mbox_out *out);
+int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_modify_cq_mbox_in *in, int in_sz);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq, u16 cq_period,
+ u16 cq_max_count);
+int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count,
+ u8 cq_mode);
+int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+
+#endif /* MLX5_CORE_CQ_H */
Property changes on: trunk/sys/dev/mlx5/cq.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
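For context on the CQ arm path added above: mlx5_cq_arm() packs the 2-bit arm sequence number, the request command and the 24-bit consumer index into the first doorbell word and the CQ number into the second, then writes both through mlx5_write64(). The following stand-alone C sketch only illustrates how those two big-endian words are composed; the values are hypothetical and htonl() stands in for cpu_to_be32().

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl() as a stand-in for cpu_to_be32() */

#define MLX5_CQ_DB_REQ_NOT_SOL (1U << 24)   /* request solicited completions only */
#define MLX5_CQ_DB_REQ_NOT     (0U << 24)   /* request any next completion */

int main(void)
{
	uint32_t arm_sn = 2;            /* hypothetical arm sequence number */
	uint32_t cons_index = 0x12345;  /* hypothetical consumer index */
	uint32_t cqn = 0x1a;            /* hypothetical CQ number */
	uint32_t sn = arm_sn & 3;
	uint32_t ci = cons_index & 0xffffff;
	uint32_t doorbell[2];

	/* Word 0: sequence number, command and consumer index;
	 * word 1: the CQ number.  Both are written big-endian. */
	doorbell[0] = htonl(sn << 28 | MLX5_CQ_DB_REQ_NOT | ci);
	doorbell[1] = htonl(cqn);

	printf("doorbell[0]=0x%08x doorbell[1]=0x%08x\n",
	    doorbell[0], doorbell[1]);
	return 0;
}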
Added: trunk/sys/dev/mlx5/device.h
===================================================================
--- trunk/sys/dev/mlx5/device.h (rev 0)
+++ trunk/sys/dev/mlx5/device.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1393 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/device.h 322151 2017-08-07 12:49:30Z hselasky $
+ */
+
+#ifndef MLX5_DEVICE_H
+#define MLX5_DEVICE_H
+
+#include <linux/types.h>
+#include <rdma/ib_verbs.h>
+#include <dev/mlx5/mlx5_ifc.h>
+
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+
+#if defined(__LITTLE_ENDIAN)
+#define MLX5_SET_HOST_ENDIANNESS 0
+#elif defined(__BIG_ENDIAN)
+#define MLX5_SET_HOST_ENDIANNESS 0x80
+#else
+#error Host endianness not defined
+#endif
+
+/* helper macros */
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
+
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value to a struct */
+#define MLX5_SET(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+ BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
+ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
+ *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+ cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+ (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+ << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
+__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+__mlx5_mask(typ, fld))
+
+#define MLX5_GET_PR(typ, p, fld) ({ \
+ u32 ___t = MLX5_GET(typ, p, fld); \
+ pr_debug(#fld " = 0x%x\n", ___t); \
+ ___t; \
+})
+
+#define MLX5_SET64(typ, p, fld, v) do { \
+ BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
+ BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+ *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
+} while (0)
+
+#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+
+enum {
+ MLX5_MAX_COMMANDS = 32,
+ MLX5_CMD_DATA_BLOCK_SIZE = 512,
+ MLX5_CMD_MBOX_SIZE = 1024,
+ MLX5_PCI_CMD_XPORT = 7,
+ MLX5_MKEY_BSF_OCTO_SIZE = 4,
+ MLX5_MAX_PSVS = 4,
+};
+
+enum {
+ MLX5_EXTENDED_UD_AV = 0x80000000,
+};
+
+enum {
+ MLX5_CQ_FLAGS_OI = 2,
+};
+
+enum {
+ MLX5_STAT_RATE_OFFSET = 5,
+};
+
+enum {
+ MLX5_INLINE_SEG = 0x80000000,
+};
+
+enum {
+ MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
+ MLX5_MIN_PKEY_TABLE_SIZE = 128,
+ MLX5_MAX_LOG_PKEY_TABLE = 5,
+};
+
+enum {
+ MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+ MLX5_PERM_LOCAL_READ = 1 << 2,
+ MLX5_PERM_LOCAL_WRITE = 1 << 3,
+ MLX5_PERM_REMOTE_READ = 1 << 4,
+ MLX5_PERM_REMOTE_WRITE = 1 << 5,
+ MLX5_PERM_ATOMIC = 1 << 6,
+ MLX5_PERM_UMR_EN = 1 << 7,
+};
+
+enum {
+ MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
+ MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
+ MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
+ MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
+ MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
+};
+
+enum {
+ MLX5_MKEY_REMOTE_INVAL = 1 << 24,
+ MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+ MLX5_MKEY_BSF_EN = 1 << 30,
+ MLX5_MKEY_LEN64 = 1 << 31,
+};
+
+enum {
+ MLX5_EN_RD = (u64)1,
+ MLX5_EN_WR = (u64)2
+};
+
+enum {
+ MLX5_BF_REGS_PER_PAGE = 4,
+ MLX5_MAX_UAR_PAGES = 1 << 8,
+ MLX5_NON_FP_BF_REGS_PER_PAGE = 2,
+ MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
+};
+
+enum {
+ MLX5_MKEY_MASK_LEN = 1ull << 0,
+ MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
+ MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
+ MLX5_MKEY_MASK_PD = 1ull << 7,
+ MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
+ MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
+ MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
+ MLX5_MKEY_MASK_KEY = 1ull << 13,
+ MLX5_MKEY_MASK_QPN = 1ull << 14,
+ MLX5_MKEY_MASK_LR = 1ull << 17,
+ MLX5_MKEY_MASK_LW = 1ull << 18,
+ MLX5_MKEY_MASK_RR = 1ull << 19,
+ MLX5_MKEY_MASK_RW = 1ull << 20,
+ MLX5_MKEY_MASK_A = 1ull << 21,
+ MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
+ MLX5_MKEY_MASK_FREE = 1ull << 29,
+};
+
+enum {
+ MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
+
+ MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
+ MLX5_UMR_CHECK_FREE = (2 << 5),
+
+ MLX5_UMR_INLINE = (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
+enum {
+ MLX5_EVENT_QUEUE_TYPE_QP = 0,
+ MLX5_EVENT_QUEUE_TYPE_RQ = 1,
+ MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+};
+
+enum {
+ MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
+ MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
+ MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
+ MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
+ MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
+ MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
+ MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
+};
+
+enum {
+ MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
+ MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
+ MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
+ MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
+ MLX5_MAX_INLINE_RECEIVE_SIZE = 64
+};
+
+enum {
+ MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
+ MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
+ MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
+ MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
+ MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD = 1LL << 21,
+ MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23,
+ MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
+ MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
+ MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 33,
+ MLX5_DEV_CAP_FLAG_ROCE = 1LL << 34,
+ MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
+ MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
+ MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
+ MLX5_DEV_CAP_FLAG_DRAIN_SIGERR = 1LL << 48,
+};
+
+enum {
+ MLX5_ROCE_VERSION_1 = 0,
+ MLX5_ROCE_VERSION_1_5 = 1,
+ MLX5_ROCE_VERSION_2 = 2,
+};
+
+enum {
+ MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
+ MLX5_ROCE_VERSION_1_5_CAP = 1 << MLX5_ROCE_VERSION_1_5,
+ MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
+};
+
+enum {
+ MLX5_ROCE_L3_TYPE_IPV4 = 0,
+ MLX5_ROCE_L3_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
+ MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
+};
+
+enum {
+ MLX5_OPCODE_NOP = 0x00,
+ MLX5_OPCODE_SEND_INVAL = 0x01,
+ MLX5_OPCODE_RDMA_WRITE = 0x08,
+ MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
+ MLX5_OPCODE_SEND = 0x0a,
+ MLX5_OPCODE_SEND_IMM = 0x0b,
+ MLX5_OPCODE_LSO = 0x0e,
+ MLX5_OPCODE_RDMA_READ = 0x10,
+ MLX5_OPCODE_ATOMIC_CS = 0x11,
+ MLX5_OPCODE_ATOMIC_FA = 0x12,
+ MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
+ MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
+ MLX5_OPCODE_BIND_MW = 0x18,
+ MLX5_OPCODE_CONFIG_CMD = 0x1f,
+
+ MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
+ MLX5_RECV_OPCODE_SEND = 0x01,
+ MLX5_RECV_OPCODE_SEND_IMM = 0x02,
+ MLX5_RECV_OPCODE_SEND_INVAL = 0x03,
+
+ MLX5_CQE_OPCODE_ERROR = 0x1e,
+ MLX5_CQE_OPCODE_RESIZE = 0x16,
+
+ MLX5_OPCODE_SET_PSV = 0x20,
+ MLX5_OPCODE_GET_PSV = 0x21,
+ MLX5_OPCODE_CHECK_PSV = 0x22,
+ MLX5_OPCODE_RGET_PSV = 0x26,
+ MLX5_OPCODE_RCHECK_PSV = 0x27,
+
+ MLX5_OPCODE_UMR = 0x25,
+
+ MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
+};
+
+enum {
+ MLX5_SET_PORT_RESET_QKEY = 0,
+ MLX5_SET_PORT_GUID0 = 16,
+ MLX5_SET_PORT_NODE_GUID = 17,
+ MLX5_SET_PORT_SYS_GUID = 18,
+ MLX5_SET_PORT_GID_TABLE = 19,
+ MLX5_SET_PORT_PKEY_TABLE = 20,
+};
+
+enum {
+ MLX5_MAX_PAGE_SHIFT = 31
+};
+
+enum {
+ MLX5_ADAPTER_PAGE_SHIFT = 12,
+ MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+ MLX5_CAP_OFF_CMDIF_CSUM = 46,
+};
+
+struct mlx5_inbox_hdr {
+ __be16 opcode;
+ u8 rsvd[4];
+ __be16 opmod;
+};
+
+struct mlx5_outbox_hdr {
+ u8 status;
+ u8 rsvd[3];
+ __be32 syndrome;
+};
+
+struct mlx5_cmd_set_dc_cnak_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 enable;
+ u8 reserved[47];
+ __be64 pa;
+};
+
+struct mlx5_cmd_set_dc_cnak_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cmd_layout {
+ u8 type;
+ u8 rsvd0[3];
+ __be32 inlen;
+ __be64 in_ptr;
+ __be32 in[4];
+ __be32 out[4];
+ __be64 out_ptr;
+ __be32 outlen;
+ u8 token;
+ u8 sig;
+ u8 rsvd1;
+ u8 status_own;
+};
+
+
+struct mlx5_health_buffer {
+ __be32 assert_var[5];
+ __be32 rsvd0[3];
+ __be32 assert_exit_ptr;
+ __be32 assert_callra;
+ __be32 rsvd1[2];
+ __be32 fw_ver;
+ __be32 hw_id;
+ __be32 rsvd2;
+ u8 irisc_index;
+ u8 synd;
+ __be16 ext_sync;
+};
+
+struct mlx5_init_seg {
+ __be32 fw_rev;
+ __be32 cmdif_rev_fw_sub;
+ __be32 rsvd0[2];
+ __be32 cmdq_addr_h;
+ __be32 cmdq_addr_l_sz;
+ __be32 cmd_dbell;
+ __be32 rsvd1[120];
+ __be32 initializing;
+ struct mlx5_health_buffer health;
+ __be32 rsvd2[880];
+ __be32 internal_timer_h;
+ __be32 internal_timer_l;
+ __be32 rsvd3[2];
+ __be32 health_counter;
+ __be32 rsvd4[1019];
+ __be64 ieee1588_clk;
+ __be32 ieee1588_clk_type;
+ __be32 clr_intx;
+};
+
+struct mlx5_eqe_comp {
+ __be32 reserved[6];
+ __be32 cqn;
+};
+
+struct mlx5_eqe_qp_srq {
+ __be32 reserved[6];
+ __be32 qp_srq_n;
+};
+
+struct mlx5_eqe_cq_err {
+ __be32 cqn;
+ u8 reserved1[7];
+ u8 syndrome;
+};
+
+struct mlx5_eqe_port_state {
+ u8 reserved0[8];
+ u8 port;
+};
+
+struct mlx5_eqe_gpio {
+ __be32 reserved0[2];
+ __be64 gpio_event;
+};
+
+struct mlx5_eqe_congestion {
+ u8 type;
+ u8 rsvd0;
+ u8 congestion_level;
+};
+
+struct mlx5_eqe_stall_vl {
+ u8 rsvd0[3];
+ u8 port_vl;
+};
+
+struct mlx5_eqe_cmd {
+ __be32 vector;
+ __be32 rsvd[6];
+};
+
+struct mlx5_eqe_page_req {
+ u8 rsvd0[2];
+ __be16 func_id;
+ __be32 num_pages;
+ __be32 rsvd1[5];
+};
+
+struct mlx5_eqe_vport_change {
+ u8 rsvd0[2];
+ __be16 vport_num;
+ __be32 rsvd1[6];
+};
+
+
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+
+enum {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+};
+
+enum {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE = 0x1,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
+ MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED = 0x7,
+};
+
+struct mlx5_eqe_port_module_event {
+ u8 rsvd0;
+ u8 module;
+ u8 rsvd1;
+ u8 module_status;
+ u8 rsvd2[2];
+ u8 error_type;
+};
+
+union ev_data {
+ __be32 raw[7];
+ struct mlx5_eqe_cmd cmd;
+ struct mlx5_eqe_comp comp;
+ struct mlx5_eqe_qp_srq qp_srq;
+ struct mlx5_eqe_cq_err cq_err;
+ struct mlx5_eqe_port_state port;
+ struct mlx5_eqe_gpio gpio;
+ struct mlx5_eqe_congestion cong;
+ struct mlx5_eqe_stall_vl stall_vl;
+ struct mlx5_eqe_page_req req_pages;
+ struct mlx5_eqe_port_module_event port_module_event;
+ struct mlx5_eqe_vport_change vport_change;
+} __packed;
+
+struct mlx5_eqe {
+ u8 rsvd0;
+ u8 type;
+ u8 rsvd1;
+ u8 sub_type;
+ __be32 rsvd2[7];
+ union ev_data data;
+ __be16 rsvd3;
+ u8 signature;
+ u8 owner;
+} __packed;
+
+struct mlx5_cmd_prot_block {
+ u8 data[MLX5_CMD_DATA_BLOCK_SIZE];
+ u8 rsvd0[48];
+ __be64 next;
+ __be32 block_num;
+ u8 rsvd1;
+ u8 token;
+ u8 ctrl_sig;
+ u8 sig;
+};
+
+#define MLX5_NUM_CMDS_IN_ADAPTER_PAGE \
+ (MLX5_ADAPTER_PAGE_SIZE / MLX5_CMD_MBOX_SIZE)
+CTASSERT(MLX5_CMD_MBOX_SIZE >= sizeof(struct mlx5_cmd_prot_block));
+CTASSERT(MLX5_CMD_MBOX_SIZE <= MLX5_ADAPTER_PAGE_SIZE);
+
+enum {
+ MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
+struct mlx5_err_cqe {
+ u8 rsvd0[32];
+ __be32 srqn;
+ u8 rsvd1[18];
+ u8 vendor_err_synd;
+ u8 syndrome;
+ __be32 s_wqe_opcode_qpn;
+ __be16 wqe_counter;
+ u8 signature;
+ u8 op_own;
+};
+
+struct mlx5_cqe64 {
+ u8 tunneled_etc;
+ u8 rsvd0[3];
+ u8 lro_tcppsh_abort_dupack;
+ u8 lro_min_ttl;
+ __be16 lro_tcp_win;
+ __be32 lro_ack_seq_num;
+ __be32 rss_hash_result;
+ u8 rss_hash_type;
+ u8 ml_path;
+ u8 rsvd20[2];
+ __be16 check_sum;
+ __be16 slid;
+ __be32 flags_rqpn;
+ u8 hds_ip_ext;
+ u8 l4_hdr_type_etc;
+ __be16 vlan_info;
+ __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
+ __be32 imm_inval_pkey;
+ u8 rsvd40[4];
+ __be32 byte_cnt;
+ __be64 timestamp;
+ __be32 sop_drop_qpn;
+ __be16 wqe_counter;
+ u8 signature;
+ u8 op_own;
+};
+
+static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
+}
+
+static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+ return (cqe->l4_hdr_type_etc >> 4) & 0x7;
+}
+
+static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->vlan_info) & 0xfff;
+}
+
+static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
+{
+	memcpy(smac, &cqe->rss_hash_type, 4);
+	memcpy(smac + 4, &cqe->slid, 2);
+}
+
+static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+ return cqe->l4_hdr_type_etc & 0x1;
+}
+
+static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+ return cqe->tunneled_etc & 0x1;
+}
+
+enum {
+ CQE_L4_HDR_TYPE_NONE = 0x0,
+ CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
+ CQE_L4_HDR_TYPE_UDP = 0x2,
+ CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
+ CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
+};
+
+enum {
+ /* source L3 hash types */
+ CQE_RSS_SRC_HTYPE_IP = 0x3 << 0,
+ CQE_RSS_SRC_HTYPE_IPV4 = 0x1 << 0,
+ CQE_RSS_SRC_HTYPE_IPV6 = 0x2 << 0,
+
+ /* destination L3 hash types */
+ CQE_RSS_DST_HTYPE_IP = 0x3 << 2,
+ CQE_RSS_DST_HTYPE_IPV4 = 0x1 << 2,
+ CQE_RSS_DST_HTYPE_IPV6 = 0x2 << 2,
+
+ /* source L4 hash types */
+ CQE_RSS_SRC_HTYPE_L4 = 0x3 << 4,
+ CQE_RSS_SRC_HTYPE_TCP = 0x1 << 4,
+ CQE_RSS_SRC_HTYPE_UDP = 0x2 << 4,
+ CQE_RSS_SRC_HTYPE_IPSEC = 0x3 << 4,
+
+ /* destination L4 hash types */
+ CQE_RSS_DST_HTYPE_L4 = 0x3 << 6,
+ CQE_RSS_DST_HTYPE_TCP = 0x1 << 6,
+ CQE_RSS_DST_HTYPE_UDP = 0x2 << 6,
+ CQE_RSS_DST_HTYPE_IPSEC = 0x3 << 6,
+};
+
+enum {
+ CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
+ CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
+ CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
+};
+
+enum {
+ CQE_L2_OK = 1 << 0,
+ CQE_L3_OK = 1 << 1,
+ CQE_L4_OK = 1 << 2,
+};
+
+struct mlx5_sig_err_cqe {
+ u8 rsvd0[16];
+ __be32 expected_trans_sig;
+ __be32 actual_trans_sig;
+ __be32 expected_reftag;
+ __be32 actual_reftag;
+ __be16 syndrome;
+ u8 rsvd22[2];
+ __be32 mkey;
+ __be64 err_offset;
+ u8 rsvd30[8];
+ __be32 qpn;
+ u8 rsvd38[2];
+ u8 signature;
+ u8 op_own;
+};
+
+struct mlx5_wqe_srq_next_seg {
+ u8 rsvd0[2];
+ __be16 next_wqe_index;
+ u8 signature;
+ u8 rsvd1[11];
+};
+
+union mlx5_ext_cqe {
+ struct ib_grh grh;
+ u8 inl[64];
+};
+
+struct mlx5_cqe128 {
+ union mlx5_ext_cqe inl_grh;
+ struct mlx5_cqe64 cqe64;
+};
+
+struct mlx5_srq_ctx {
+ u8 state_log_sz;
+ u8 rsvd0[3];
+ __be32 flags_xrcd;
+ __be32 pgoff_cqn;
+ u8 rsvd1[4];
+ u8 log_pg_sz;
+ u8 rsvd2[7];
+ __be32 pd;
+ __be16 lwm;
+ __be16 wqe_cnt;
+ u8 rsvd3[8];
+ __be64 db_record;
+};
+
+struct mlx5_create_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_srqn;
+ u8 rsvd0[4];
+ struct mlx5_srq_ctx ctx;
+ u8 rsvd1[208];
+ __be64 pas[0];
+};
+
+struct mlx5_create_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 srqn;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 srqn;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 srqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_query_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_srq_ctx ctx;
+ u8 rsvd1[32];
+ __be64 pas[0];
+};
+
+struct mlx5_arm_srq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 srqn;
+ __be16 rsvd;
+ __be16 lwm;
+};
+
+struct mlx5_arm_srq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_cq_context {
+ u8 status;
+ u8 cqe_sz_flags;
+ u8 st;
+ u8 rsvd3;
+ u8 rsvd4[6];
+ __be16 page_offset;
+ __be32 log_sz_usr_page;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ __be16 rsvd20;
+ __be16 c_eqn;
+ u8 log_pg_sz;
+ u8 rsvd25[7];
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_counter;
+ __be32 producer_counter;
+ u8 rsvd48[8];
+ __be64 db_record_addr;
+};
+
+struct mlx5_create_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_cqn;
+ u8 rsvdx[4];
+ struct mlx5_cq_context ctx;
+ u8 rsvd6[192];
+ __be64 pas[0];
+};
+
+struct mlx5_create_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_query_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 cqn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_query_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_cq_context ctx;
+ u8 rsvd6[16];
+ __be64 pas[0];
+};
+
+struct mlx5_modify_cq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 cqn;
+ __be32 field_select;
+ struct mlx5_cq_context ctx;
+ u8 rsvd[192];
+ __be64 pas[0];
+};
+
+struct mlx5_modify_cq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_eq_context {
+ u8 status;
+ u8 ec_oi;
+ u8 st;
+ u8 rsvd2[7];
+ __be16 page_pffset;
+ __be32 log_sz_usr_page;
+ u8 rsvd3[7];
+ u8 intr;
+ u8 log_page_size;
+ u8 rsvd4[15];
+ __be32 consumer_counter;
+ __be32 produser_counter;
+ u8 rsvd5[16];
+};
+
+struct mlx5_create_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 input_eqn;
+ u8 rsvd1[4];
+ struct mlx5_eq_context ctx;
+ u8 rsvd2[8];
+ __be64 events_mask;
+ u8 rsvd3[176];
+ __be64 pas[0];
+};
+
+struct mlx5_create_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eq_number;
+ u8 rsvd1[4];
+};
+
+struct mlx5_map_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be64 mask;
+ u8 mu;
+ u8 rsvd0[2];
+ u8 eqn;
+ u8 rsvd1[24];
+};
+
+struct mlx5_map_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_eq_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[3];
+ u8 eqn;
+ u8 rsvd1[4];
+};
+
+struct mlx5_query_eq_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ struct mlx5_eq_context ctx;
+};
+
+enum {
+ MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
+struct mlx5_mkey_seg {
+	/* This is a two-bit field occupying bits 31-30.
+	 * Bit 31 is always 0; bit 30 is zero for regular MRs and 1
+	 * (i.e. free) for UMRs that do not have translation.
+	 */
+ u8 status;
+ u8 pcie_control;
+ u8 flags;
+ u8 version;
+ __be32 qpn_mkey7_0;
+ u8 rsvd1[4];
+ __be32 flags_pd;
+ __be64 start_addr;
+ __be64 len;
+ __be32 bsfs_octo_size;
+ u8 rsvd2[16];
+ __be32 xlt_oct_size;
+ u8 rsvd3[3];
+ u8 log2_page_size;
+ u8 rsvd4[4];
+};
+
+struct mlx5_query_special_ctxs_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_query_special_ctxs_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 dump_fill_mkey;
+ __be32 reserved_lkey;
+};
+
+struct mlx5_create_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_mkey_index;
+ __be32 flags;
+ struct mlx5_mkey_seg seg;
+ u8 rsvd1[16];
+ __be32 xlat_oct_act_size;
+ __be32 rsvd2;
+ u8 rsvd3[168];
+ __be64 pas[0];
+};
+
+struct mlx5_create_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 mkey;
+ u8 rsvd[4];
+};
+
+struct mlx5_query_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 mkey;
+};
+
+struct mlx5_query_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be64 pas[0];
+};
+
+struct mlx5_modify_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 mkey;
+ __be64 pas[0];
+};
+
+struct mlx5_modify_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_dump_mkey_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+};
+
+struct mlx5_dump_mkey_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 mkey;
+};
+
+struct mlx5_mad_ifc_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be16 remote_lid;
+ u8 rsvd0;
+ u8 port;
+ u8 rsvd1[4];
+ u8 data[256];
+};
+
+struct mlx5_mad_ifc_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ u8 data[256];
+};
+
+struct mlx5_access_reg_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[2];
+ __be16 register_id;
+ __be32 arg;
+ __be32 data[0];
+};
+
+struct mlx5_access_reg_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ __be32 data[0];
+};
+
+#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
+
+enum {
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
+};
+
+struct mlx5_allocate_psv_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 npsv_pd;
+ __be32 rsvd_psv0;
+};
+
+struct mlx5_allocate_psv_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+ __be32 psv_idx[4];
+};
+
+struct mlx5_destroy_psv_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 psv_number;
+ u8 rsvd[4];
+};
+
+struct mlx5_destroy_psv_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+static inline int mlx5_host_is_le(void)
+{
+#if defined(__LITTLE_ENDIAN)
+ return 1;
+#elif defined(__BIG_ENDIAN)
+ return 0;
+#else
+#error Host endianness not defined
+#endif
+}
+
+#define MLX5_CMD_OP_MAX 0x939
+
+enum {
+ VPORT_STATE_DOWN = 0x0,
+ VPORT_STATE_UP = 0x1,
+};
+
+enum {
+ MLX5_L3_PROT_TYPE_IPV4 = 0,
+ MLX5_L3_PROT_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_L4_PROT_TYPE_TCP = 0,
+ MLX5_L4_PROT_TYPE_UDP = 1,
+};
+
+enum {
+ MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
+ MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
+ MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
+ MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
+};
+
+enum {
+ MLX5_MATCH_OUTER_HEADERS = 1 << 0,
+ MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
+ MLX5_MATCH_INNER_HEADERS = 1 << 2,
+
+};
+
+enum {
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
+ MLX5_FLOW_TABLE_TYPE_EGRESS_ACL = 2,
+ MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
+ MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
+ MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
+};
+
+enum {
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE = 0,
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE = 2
+};
+
+enum {
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP = 1 << 0,
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP = 1 << 1,
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
+};
+
+enum {
+ MLX5_UC_ADDR_CHANGE = (1 << 0),
+ MLX5_MC_ADDR_CHANGE = (1 << 1),
+ MLX5_VLAN_CHANGE = (1 << 2),
+ MLX5_PROMISC_CHANGE = (1 << 3),
+ MLX5_MTU_CHANGE = (1 << 4),
+};
+
+enum mlx5_list_type {
+ MLX5_NIC_VPORT_LIST_TYPE_UC = 0x0,
+ MLX5_NIC_VPORT_LIST_TYPE_MC = 0x1,
+ MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
+};
+
+enum {
+ MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+ HCA_CAP_OPMOD_GET_MAX = 0,
+ HCA_CAP_OPMOD_GET_CUR = 1,
+};
+
+enum mlx5_cap_type {
+ MLX5_CAP_GENERAL = 0,
+ MLX5_CAP_ETHERNET_OFFLOADS,
+ MLX5_CAP_ODP,
+ MLX5_CAP_ATOMIC,
+ MLX5_CAP_ROCE,
+ MLX5_CAP_IPOIB_OFFLOADS,
+ MLX5_CAP_EOIB_OFFLOADS,
+ MLX5_CAP_FLOW_TABLE,
+ MLX5_CAP_ESWITCH_FLOW_TABLE,
+ MLX5_CAP_ESWITCH,
+ MLX5_CAP_SNAPSHOT,
+ MLX5_CAP_VECTOR_CALC,
+ MLX5_CAP_QOS,
+ MLX5_CAP_DEBUG,
+ /* NUM OF CAP Types */
+ MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+ MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+ MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+ MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET(flow_table_eswitch_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+ MLX5_GET(flow_table_eswitch_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW(mdev, cap) \
+ MLX5_GET(e_switch_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ESW_MAX(mdev, cap) \
+ MLX5_GET(e_switch_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
+#define MLX5_CAP_ODP_MAX(mdev, cap)\
+ MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
+
+#define MLX5_CAP_SNAPSHOT(mdev, cap) \
+ MLX5_GET(snapshot_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)
+
+#define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
+ MLX5_GET(snapshot_cap, \
+ mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)
+
+#define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)
+
+#define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
+ MLX5_GET(per_protocol_networking_offload_caps,\
+ mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)
+
+#define MLX5_CAP_DEBUG(mdev, cap) \
+ MLX5_GET(debug_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)
+
+#define MLX5_CAP_DEBUG_MAX(mdev, cap) \
+ MLX5_GET(debug_cap, \
+ mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)
+
+#define MLX5_CAP_QOS(mdev, cap) \
+ MLX5_GET(qos_cap,\
+ mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_QOS_MAX(mdev, cap) \
+ MLX5_GET(qos_cap,\
+ mdev->hca_caps_max[MLX5_CAP_QOS], cap)
+
+enum {
+ MLX5_CMD_STAT_OK = 0x0,
+ MLX5_CMD_STAT_INT_ERR = 0x1,
+ MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
+ MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
+ MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
+ MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
+ MLX5_CMD_STAT_RES_BUSY = 0x6,
+ MLX5_CMD_STAT_LIM_ERR = 0x8,
+ MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
+ MLX5_CMD_STAT_IX_ERR = 0xa,
+ MLX5_CMD_STAT_NO_RES_ERR = 0xf,
+ MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
+ MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
+ MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
+ MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
+ MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
+};
+
+enum {
+ MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
+ MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
+ MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
+ MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
+ MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+ MLX5_ETHERNET_DISCARD_COUNTERS_GROUP = 0x6,
+ MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
+ MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+ MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
+ MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
+};
+
+enum {
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
+ MLX5_PCIE_LANE_COUNTERS_GROUP = 0x1,
+ MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
+};
+
+enum {
+ MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
+ MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
+};
+
+enum {
+ NUM_DRIVER_UARS = 4,
+ NUM_LOW_LAT_UUARS = 4,
+};
+
+enum {
+ MLX5_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CAP_PORT_TYPE_ETH = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2 = 0x0,
+ MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
+ MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+ if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+ return 0;
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
+
+struct mlx5_ifc_mcia_reg_bits {
+ u8 l[0x1];
+ u8 reserved_0[0x7];
+ u8 module[0x8];
+ u8 reserved_1[0x8];
+ u8 status[0x8];
+
+ u8 i2c_device_address[0x8];
+ u8 page_number[0x8];
+ u8 device_address[0x10];
+
+ u8 reserved_2[0x10];
+ u8 size[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 dword_0[0x20];
+ u8 dword_1[0x20];
+ u8 dword_2[0x20];
+ u8 dword_3[0x20];
+ u8 dword_4[0x20];
+ u8 dword_5[0x20];
+ u8 dword_6[0x20];
+ u8 dword_7[0x20];
+ u8 dword_8[0x20];
+ u8 dword_9[0x20];
+ u8 dword_10[0x20];
+ u8 dword_11[0x20];
+};
+
+#define MLX5_CMD_OP_QUERY_EEPROM 0x93c
+
+struct mlx5_mini_cqe8 {
+ union {
+ __be32 rx_hash_result;
+ __be16 checksum;
+ __be16 rsvd;
+ struct {
+ __be16 wqe_counter;
+ u8 s_wqe_opcode;
+ u8 reserved;
+ } s_wqe_info;
+ };
+ __be32 byte_cnt;
+};
+
+enum {
+ MLX5_NO_INLINE_DATA,
+ MLX5_INLINE_DATA32_SEG,
+ MLX5_INLINE_DATA64_SEG,
+ MLX5_COMPRESSED,
+};
+
+enum mlx5_exp_cqe_zip_recv_type {
+ MLX5_CQE_FORMAT_HASH,
+ MLX5_CQE_FORMAT_CSUM,
+};
+
+#define MLX5E_CQE_FORMAT_MASK 0xc
+static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
+{
+ return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
+}
+
+/* 8 regular priorities + 1 for multicast */
+#define MLX5_NUM_BYPASS_FTS 9
+
+#endif /* MLX5_DEVICE_H */
Property changes on: trunk/sys/dev/mlx5/device.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
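The MLX5_SET()/MLX5_GET() helpers in device.h above treat a firmware command buffer as an array of big-endian 32-bit words and compute the word index, shift and mask of each field from the mlx5_ifc layouts. A minimal user-space sketch of the same arithmetic, using a made-up field width and bit offset rather than a real mlx5_ifc structure, could look like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

/* Hypothetical field: 12 bits wide, starting at bit offset 52 of the structure. */
#define FLD_BIT_OFF  52
#define FLD_BIT_SZ   12

static void set_field(void *p, uint32_t v)
{
	uint32_t *dw = (uint32_t *)p + FLD_BIT_OFF / 32;           /* like __mlx5_dw_off */
	unsigned shift = 32 - FLD_BIT_SZ - (FLD_BIT_OFF & 0x1f);   /* like __mlx5_dw_bit_off */
	uint32_t mask = (1ull << FLD_BIT_SZ) - 1;                   /* like __mlx5_mask */
	uint32_t host = ntohl(*dw);

	host = (host & ~(mask << shift)) | ((v & mask) << shift);
	*dw = htonl(host);
}

static uint32_t get_field(const void *p)
{
	const uint32_t *dw = (const uint32_t *)p + FLD_BIT_OFF / 32;
	unsigned shift = 32 - FLD_BIT_SZ - (FLD_BIT_OFF & 0x1f);
	uint32_t mask = (1ull << FLD_BIT_SZ) - 1;

	return (ntohl(*dw) >> shift) & mask;
}

int main(void)
{
	uint32_t buf[4];

	memset(buf, 0, sizeof(buf));
	set_field(buf, 0xabc);
	printf("field = 0x%x\n", get_field(buf));   /* prints 0xabc */
	return 0;
}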
Added: trunk/sys/dev/mlx5/diagnostics.h
===================================================================
--- trunk/sys/dev/mlx5/diagnostics.h (rev 0)
+++ trunk/sys/dev/mlx5/diagnostics.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,139 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/diagnostics.h 322011 2017-08-03 14:20:19Z hselasky $
+ */
+
+#ifndef MLX5_CORE_DIAGNOSTICS_H
+#define MLX5_CORE_DIAGNOSTICS_H
+
+#define MLX5_CORE_DIAGNOSTICS_NUM(n, s, t) n
+#define MLX5_CORE_DIAGNOSTICS_STRUCT(n, s, t) s,
+#define MLX5_CORE_DIAGNOSTICS_ENTRY(n, s, t) { #s, (t) },
+
+struct mlx5_core_diagnostics_entry {
+ const char *desc;
+ u16 counter_id;
+};
+
+#define MLX5_CORE_PCI_DIAGNOSTICS(m) \
+m(+1, pxd_ready_bp, 0x0401) \
+m(+1, pci_write_bp, 0x0402) \
+m(+1, pci_read_bp, 0x0403) \
+m(+1, pci_read_stuck_no_completion_buffer, 0x0404) \
+m(+1, max_pci_bw, 0x0405) \
+m(+1, used_pci_bw, 0x0406) \
+m(+1, rx_pci_errors, 0) \
+m(+1, tx_pci_errors, 0) \
+m(+1, tx_pci_correctable_errors, 0) \
+m(+1, tx_pci_non_fatal_errors, 0) \
+m(+1, tx_pci_fatal_errors, 0)
+
+#define MLX5_CORE_PCI_DIAGNOSTICS_NUM \
+ (0 MLX5_CORE_PCI_DIAGNOSTICS(MLX5_CORE_DIAGNOSTICS_NUM))
+
+union mlx5_core_pci_diagnostics {
+ u64 array[MLX5_CORE_PCI_DIAGNOSTICS_NUM];
+ struct {
+ u64 MLX5_CORE_PCI_DIAGNOSTICS(
+ MLX5_CORE_DIAGNOSTICS_STRUCT) dummy[0];
+ } counter;
+};
+
+extern const struct mlx5_core_diagnostics_entry
+ mlx5_core_pci_diagnostics_table[MLX5_CORE_PCI_DIAGNOSTICS_NUM];
+
+#define MLX5_CORE_GENERAL_DIAGNOSTICS(m) \
+m(+1, l0_mtt_miss, 0x0801) \
+m(+1, l0_mtt_hit, 0x0802) \
+m(+1, l1_mtt_miss, 0x0803) \
+m(+1, l1_mtt_hit, 0x0804) \
+m(+1, l0_mpt_miss, 0x0805) \
+m(+1, l0_mpt_hit, 0x0806) \
+m(+1, l1_mpt_miss, 0x0807) \
+m(+1, l1_mpt_hit, 0x0808) \
+m(+1, rxb_no_slow_path_credits, 0x0c01) \
+m(+1, rxb_no_fast_path_credits, 0x0c02) \
+m(+1, rxb_rxt_no_slow_path_cred_perf_count, 0x0c03) \
+m(+1, rxb_rxt_no_fast_path_cred_perf_count, 0x0c04) \
+m(+1, rxt_ctrl_perf_slice_load_slow, 0x1001) \
+m(+1, rxt_ctrl_perf_slice_load_fast, 0x1002) \
+m(+1, rxt_steering_perf_count_steering0_rse_work_rate, 0x1003) \
+m(+1, rxt_steering_perf_count_steering1_rse_work_rate, 0x1004) \
+m(+1, perf_count_tpt_credit, 0x1401) \
+m(+1, perf_wb_miss, 0x1402) \
+m(+1, perf_wb_hit, 0x1403) \
+m(+1, rxw_perf_rx_l1_slow_miss_ldb, 0x1404) \
+m(+1, rxw_perf_rx_l1_slow_hit_ldb, 0x1405) \
+m(+1, rxw_perf_rx_l1_fast_miss_ldb, 0x1406) \
+m(+1, rxw_perf_rx_l1_fast_hit_ldb, 0x1407) \
+m(+1, rxw_perf_l2_cache_read_miss_ldb, 0x1408) \
+m(+1, rxw_perf_l2_cache_read_hit_ldb, 0x1409) \
+m(+1, rxw_perf_rx_l1_slow_miss_reqsl, 0x140a) \
+m(+1, rxw_perf_rx_l1_slow_hit_reqsl, 0x140b) \
+m(+1, rxw_perf_rx_l1_fast_miss_reqsl, 0x140c) \
+m(+1, rxw_perf_rx_l1_fast_hit_reqsl, 0x140d) \
+m(+1, rxw_perf_l2_cache_read_miss_reqsl, 0x140e) \
+m(+1, rxw_perf_l2_cache_read_hit_reqsl, 0x140f) \
+m(+1, rxs_no_pxt_credits, 0x1801) \
+m(+1, rxc_eq_all_slices_busy, 0x1c01) \
+m(+1, rxc_cq_all_slices_busy, 0x1c02) \
+m(+1, rxc_msix_all_slices_busy, 0x1c03) \
+m(+1, sxw_qp_done_due_to_vl_limited, 0x2001) \
+m(+1, sxw_qp_done_due_to_desched, 0x2002) \
+m(+1, sxw_qp_done_due_to_work_done, 0x2003) \
+m(+1, sxw_qp_done_due_to_limited, 0x2004) \
+m(+1, sxw_qp_done_due_to_e2e_credits, 0x2005) \
+m(+1, sxw_packet_send_sxw2sxp_go_vld, 0x2006) \
+m(+1, sxw_perf_count_steering_hit, 0x2007) \
+m(+1, sxw_perf_count_steering_miss, 0x2008) \
+m(+1, sxw_perf_count_steering_rse_0, 0x2009) \
+m(+1, sxd_no_sched_credits, 0x2401) \
+m(+1, sxd_no_slow_path_sched_credits, 0x2402) \
+m(+1, tpt_indirect_mem_key, 0x2801)
+
+#define MLX5_CORE_GENERAL_DIAGNOSTICS_NUM \
+ (0 MLX5_CORE_GENERAL_DIAGNOSTICS(MLX5_CORE_DIAGNOSTICS_NUM))
+
+union mlx5_core_general_diagnostics {
+ u64 array[MLX5_CORE_GENERAL_DIAGNOSTICS_NUM];
+ struct {
+ u64 MLX5_CORE_GENERAL_DIAGNOSTICS(
+ MLX5_CORE_DIAGNOSTICS_STRUCT) dummy[0];
+ } counter;
+};
+
+extern const struct mlx5_core_diagnostics_entry
+ mlx5_core_general_diagnostics_table[MLX5_CORE_GENERAL_DIAGNOSTICS_NUM];
+
+/* function prototypes */
+int mlx5_core_set_diagnostics_full(struct mlx5_core_dev *mdev,
+ u8 enable_pci, u8 enable_general);
+int mlx5_core_get_diagnostics_full(struct mlx5_core_dev *mdev,
+ union mlx5_core_pci_diagnostics *ppci,
+ union mlx5_core_general_diagnostics *pgen);
+int mlx5_core_supports_diagnostics(struct mlx5_core_dev *mdev, u16 counter_id);
+
+#endif /* MLX5_CORE_DIAGNOSTICS_H */
Property changes on: trunk/sys/dev/mlx5/diagnostics.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
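diagnostics.h relies on an x-macro pattern: the single MLX5_CORE_PCI_DIAGNOSTICS(m) list expands into an entry count, counter struct members, or a name/counter-id table depending on which helper macro is passed in as m. A small stand-alone sketch of the same pattern with invented counter names (and plain struct members instead of the dummy[0] trick) shows how the three expansions fit together:

#include <stdio.h>
#include <stdint.h>

/* One list, three expansions -- same idea as MLX5_CORE_PCI_DIAGNOSTICS(). */
#define DIAG_LIST(m)          \
	m(+1, foo_events, 0x0101) \
	m(+1, bar_events, 0x0102) \
	m(+1, baz_events, 0)

#define DIAG_NUM(n, s, t)    n                /* counts entries */
#define DIAG_STRUCT(n, s, t) uint64_t s;      /* declares struct members */
#define DIAG_ENTRY(n, s, t)  { #s, (t) },     /* builds a table row */

#define DIAG_COUNT (0 DIAG_LIST(DIAG_NUM))

struct diag_counters {
	DIAG_LIST(DIAG_STRUCT)
};

static const struct { const char *desc; uint16_t counter_id; }
diag_table[DIAG_COUNT] = {
	DIAG_LIST(DIAG_ENTRY)
};

int main(void)
{
	for (int i = 0; i < DIAG_COUNT; i++)
		printf("%-12s id=0x%04x\n", diag_table[i].desc,
		    (unsigned)diag_table[i].counter_id);
	return 0;
}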
Added: trunk/sys/dev/mlx5/doorbell.h
===================================================================
--- trunk/sys/dev/mlx5/doorbell.h (rev 0)
+++ trunk/sys/dev/mlx5/doorbell.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,75 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/doorbell.h 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#ifndef MLX5_DOORBELL_H
+#define MLX5_DOORBELL_H
+
+#define MLX5_BF_OFFSET 0x800
+#define MLX5_CQ_DOORBELL 0x20
+
+#if BITS_PER_LONG == 64
+/* Assume that we can just write a 64-bit doorbell atomically. s390
+ * actually doesn't have writeq() but S/390 systems don't even have
+ * PCI so we won't worry about it.
+ */
+
+#define MLX5_DECLARE_DOORBELL_LOCK(name)
+#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
+#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
+
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
+ spinlock_t *doorbell_lock)
+{
+ __raw_writeq(*(u64 *)val, dest);
+}
+
+#else
+
+/* Just fall back to a spinlock to protect the doorbell if
+ * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
+ * MMIO writes.
+ */
+
+#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
+#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
+#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
+
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
+ spinlock_t *doorbell_lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(doorbell_lock, flags);
+ __raw_writel((__force u32) val[0], dest);
+ __raw_writel((__force u32) val[1], dest + 4);
+ spin_unlock_irqrestore(doorbell_lock, flags);
+}
+
+#endif
+
+#endif /* MLX5_DOORBELL_H */
Property changes on: trunk/sys/dev/mlx5/doorbell.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
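doorbell.h above selects one of two mlx5_write64() variants at compile time: 64-bit targets issue a single atomic 64-bit MMIO store, while 32-bit targets fall back to two 32-bit stores serialized by a spinlock so concurrent doorbell writes cannot interleave. A rough user-space approximation of the 32-bit fallback, with a pthread mutex instead of a kernel spinlock and ordinary memory instead of MMIO, might look like:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>

/* Simulated doorbell "register": 8 bytes of ordinary memory here,
 * standing in for a device MMIO window. */
static uint8_t fake_doorbell[8];
static pthread_mutex_t doorbell_lock = PTHREAD_MUTEX_INITIALIZER;

/* 32-bit fallback: the two stores must not interleave with another
 * thread's doorbell write, hence the lock (the real code uses a
 * spinlock with interrupts disabled). */
static void write64_locked(const uint32_t val[2], void *dest)
{
	pthread_mutex_lock(&doorbell_lock);
	memcpy((uint8_t *)dest, &val[0], 4);
	memcpy((uint8_t *)dest + 4, &val[1], 4);
	pthread_mutex_unlock(&doorbell_lock);
}

int main(void)
{
	uint32_t db[2] = { 0x11223344, 0x55667788 };  /* hypothetical doorbell words */

	write64_locked(db, fake_doorbell);
	for (int i = 0; i < 8; i++)
		printf("%02x ", fake_doorbell[i]);
	printf("\n");
	return 0;
}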
Added: trunk/sys/dev/mlx5/driver.h
===================================================================
--- trunk/sys/dev/mlx5/driver.h (rev 0)
+++ trunk/sys/dev/mlx5/driver.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1047 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/driver.h 322151 2017-08-07 12:49:30Z hselasky $
+ */
+
+#ifndef MLX5_DRIVER_H
+#define MLX5_DRIVER_H
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/rbtree.h>
+#include <linux/if_ether.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/radix-tree.h>
+
+#include <dev/mlx5/device.h>
+#include <dev/mlx5/doorbell.h>
+
+#define MLX5_QCOUNTER_SETS_NETDEV 64
+#define MLX5_MAX_NUMBER_OF_VFS 128
+
+enum {
+ MLX5_BOARD_ID_LEN = 64,
+ MLX5_MAX_NAME_LEN = 16,
+};
+
+enum {
+ MLX5_CMD_TIMEOUT_MSEC = 8 * 60 * 1000,
+ MLX5_CMD_WQ_MAX_NAME = 32,
+};
+
+enum {
+ CMD_OWNER_SW = 0x0,
+ CMD_OWNER_HW = 0x1,
+ CMD_STATUS_SUCCESS = 0,
+};
+
+enum mlx5_sqp_t {
+ MLX5_SQP_SMI = 0,
+ MLX5_SQP_GSI = 1,
+ MLX5_SQP_IEEE_1588 = 2,
+ MLX5_SQP_SNIFFER = 3,
+ MLX5_SQP_SYNC_UMR = 4,
+};
+
+enum {
+ MLX5_MAX_PORTS = 2,
+};
+
+enum {
+ MLX5_EQ_VEC_PAGES = 0,
+ MLX5_EQ_VEC_CMD = 1,
+ MLX5_EQ_VEC_ASYNC = 2,
+ MLX5_EQ_VEC_COMP_BASE,
+};
+
+enum {
+ MLX5_MAX_IRQ_NAME = 32
+};
+
+enum {
+ MLX5_ATOMIC_MODE_OFF = 16,
+ MLX5_ATOMIC_MODE_NONE = 0 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_IB_COMP = 1 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_CX = 2 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_8B = 3 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_16B = 4 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_32B = 5 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_64B = 6 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_128B = 7 << MLX5_ATOMIC_MODE_OFF,
+ MLX5_ATOMIC_MODE_256B = 8 << MLX5_ATOMIC_MODE_OFF,
+};
+
+enum {
+ MLX5_ATOMIC_MODE_DCT_OFF = 20,
+ MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_8B = 3 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_16B = 4 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_32B = 5 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_64B = 6 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_128B = 7 << MLX5_ATOMIC_MODE_DCT_OFF,
+ MLX5_ATOMIC_MODE_DCT_256B = 8 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
+enum {
+ MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
+ MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_MASKED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_MASKED_FETCH_ADD = 1 << 3,
+};
+
+enum {
+ MLX5_REG_QETCR = 0x4005,
+ MLX5_REG_QPDP = 0x4007,
+ MLX5_REG_QTCT = 0x400A,
+ MLX5_REG_QHLL = 0x4016,
+ MLX5_REG_DCBX_PARAM = 0x4020,
+ MLX5_REG_DCBX_APP = 0x4021,
+ MLX5_REG_PCAP = 0x5001,
+ MLX5_REG_PMTU = 0x5003,
+ MLX5_REG_PTYS = 0x5004,
+ MLX5_REG_PAOS = 0x5006,
+ MLX5_REG_PFCC = 0x5007,
+ MLX5_REG_PPCNT = 0x5008,
+ MLX5_REG_PMAOS = 0x5012,
+ MLX5_REG_PUDE = 0x5009,
+ MLX5_REG_PPTB = 0x500B,
+ MLX5_REG_PBMC = 0x500C,
+ MLX5_REG_PMPE = 0x5010,
+ MLX5_REG_PELC = 0x500e,
+ MLX5_REG_PVLC = 0x500f,
+ MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_NODE_DESC = 0x6001,
+ MLX5_REG_HOST_ENDIANNESS = 0x7004,
+ MLX5_REG_MCIA = 0x9014,
+ MLX5_REG_MPCNT = 0x9051,
+};
+
+enum dbg_rsc_type {
+ MLX5_DBG_RSC_QP,
+ MLX5_DBG_RSC_EQ,
+ MLX5_DBG_RSC_CQ,
+};
+
+enum {
+ MLX5_INTERFACE_PROTOCOL_IB = 0,
+ MLX5_INTERFACE_PROTOCOL_ETH = 1,
+ MLX5_INTERFACE_NUMBER = 2,
+};
+
+struct mlx5_field_desc {
+ struct dentry *dent;
+ int i;
+};
+
+struct mlx5_rsc_debug {
+ struct mlx5_core_dev *dev;
+ void *object;
+ enum dbg_rsc_type type;
+ struct dentry *root;
+ struct mlx5_field_desc fields[0];
+};
+
+enum mlx5_dev_event {
+ MLX5_DEV_EVENT_SYS_ERROR,
+ MLX5_DEV_EVENT_PORT_UP,
+ MLX5_DEV_EVENT_PORT_DOWN,
+ MLX5_DEV_EVENT_PORT_INITIALIZED,
+ MLX5_DEV_EVENT_LID_CHANGE,
+ MLX5_DEV_EVENT_PKEY_CHANGE,
+ MLX5_DEV_EVENT_GUID_CHANGE,
+ MLX5_DEV_EVENT_CLIENT_REREG,
+ MLX5_DEV_EVENT_VPORT_CHANGE,
+ MLX5_DEV_EVENT_ERROR_STATE_DCBX,
+ MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE,
+ MLX5_DEV_EVENT_LOCAL_OPER_CHANGE,
+ MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE,
+};
+
+enum mlx5_port_status {
+ MLX5_PORT_UP = 1 << 0,
+ MLX5_PORT_DOWN = 1 << 1,
+};
+
+enum mlx5_link_mode {
+ MLX5_1000BASE_CX_SGMII = 0,
+ MLX5_1000BASE_KX = 1,
+ MLX5_10GBASE_CX4 = 2,
+ MLX5_10GBASE_KX4 = 3,
+ MLX5_10GBASE_KR = 4,
+ MLX5_20GBASE_KR2 = 5,
+ MLX5_40GBASE_CR4 = 6,
+ MLX5_40GBASE_KR4 = 7,
+ MLX5_56GBASE_R4 = 8,
+ MLX5_10GBASE_CR = 12,
+ MLX5_10GBASE_SR = 13,
+ MLX5_10GBASE_ER = 14,
+ MLX5_40GBASE_SR4 = 15,
+ MLX5_40GBASE_LR4 = 16,
+ MLX5_100GBASE_CR4 = 20,
+ MLX5_100GBASE_SR4 = 21,
+ MLX5_100GBASE_KR4 = 22,
+ MLX5_100GBASE_LR4 = 23,
+ MLX5_100BASE_TX = 24,
+ MLX5_1000BASE_T = 25,
+ MLX5_10GBASE_T = 26,
+ MLX5_25GBASE_CR = 27,
+ MLX5_25GBASE_KR = 28,
+ MLX5_25GBASE_SR = 29,
+ MLX5_50GBASE_CR2 = 30,
+ MLX5_50GBASE_KR2 = 31,
+ MLX5_LINK_MODES_NUMBER,
+};
+
+#define MLX5_PROT_MASK(link_mode) (1 << link_mode)
+
+struct mlx5_uuar_info {
+ struct mlx5_uar *uars;
+ int num_uars;
+ int num_low_latency_uuars;
+ unsigned long *bitmap;
+ unsigned int *count;
+ struct mlx5_bf *bfs;
+
+ /*
+ * protect uuar allocation data structs
+ */
+ struct mutex lock;
+ u32 ver;
+};
+
+struct mlx5_bf {
+ void __iomem *reg;
+ void __iomem *regreg;
+ int buf_size;
+ struct mlx5_uar *uar;
+ unsigned long offset;
+ int need_lock;
+ /* protect blue flame buffer selection when needed
+ */
+ spinlock_t lock;
+
+ /* serialize 64 bit writes when done as two 32 bit accesses
+ */
+ spinlock_t lock32;
+ int uuarn;
+};
+
+struct mlx5_cmd_first {
+ __be32 data[4];
+};
+
+struct cache_ent;
+struct mlx5_fw_page {
+ union {
+ struct rb_node rb_node;
+ struct list_head list;
+ };
+ struct mlx5_cmd_first first;
+ struct mlx5_core_dev *dev;
+ bus_dmamap_t dma_map;
+ bus_addr_t dma_addr;
+ void *virt_addr;
+ struct cache_ent *cache;
+ u32 numpages;
+ u16 load_done;
+#define MLX5_LOAD_ST_NONE 0
+#define MLX5_LOAD_ST_SUCCESS 1
+#define MLX5_LOAD_ST_FAILURE 2
+ u16 func_id;
+};
+#define mlx5_cmd_msg mlx5_fw_page
+
+struct mlx5_cmd_debug {
+ struct dentry *dbg_root;
+ struct dentry *dbg_in;
+ struct dentry *dbg_out;
+ struct dentry *dbg_outlen;
+ struct dentry *dbg_status;
+ struct dentry *dbg_run;
+ void *in_msg;
+ void *out_msg;
+ u8 status;
+ u16 inlen;
+ u16 outlen;
+};
+
+struct cache_ent {
+ /* protect block chain allocations
+ */
+ spinlock_t lock;
+ struct list_head head;
+};
+
+struct cmd_msg_cache {
+ struct cache_ent large;
+ struct cache_ent med;
+
+};
+
+struct mlx5_cmd_stats {
+ u64 sum;
+ u64 n;
+ struct dentry *root;
+ struct dentry *avg;
+ struct dentry *count;
+ /* protect command average calculations */
+ spinlock_t lock;
+};
+
+struct mlx5_cmd {
+ struct mlx5_fw_page *cmd_page;
+ bus_dma_tag_t dma_tag;
+ struct sx dma_sx;
+ struct mtx dma_mtx;
+#define MLX5_DMA_OWNED(dev) mtx_owned(&(dev)->cmd.dma_mtx)
+#define MLX5_DMA_LOCK(dev) mtx_lock(&(dev)->cmd.dma_mtx)
+#define MLX5_DMA_UNLOCK(dev) mtx_unlock(&(dev)->cmd.dma_mtx)
+ struct cv dma_cv;
+#define MLX5_DMA_DONE(dev) cv_broadcast(&(dev)->cmd.dma_cv)
+#define MLX5_DMA_WAIT(dev) cv_wait(&(dev)->cmd.dma_cv, &(dev)->cmd.dma_mtx)
+ void *cmd_buf;
+ dma_addr_t dma;
+ u16 cmdif_rev;
+ u8 log_sz;
+ u8 log_stride;
+ int max_reg_cmds;
+ int events;
+ u32 __iomem *vector;
+
+ /* protect command queue allocations
+ */
+ spinlock_t alloc_lock;
+
+ /* protect token allocations
+ */
+ spinlock_t token_lock;
+ u8 token;
+ unsigned long bitmask;
+ char wq_name[MLX5_CMD_WQ_MAX_NAME];
+ struct workqueue_struct *wq;
+ struct semaphore sem;
+ struct semaphore pages_sem;
+ int mode;
+ struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
+ struct mlx5_cmd_debug dbg;
+ struct cmd_msg_cache cache;
+ int checksum_disabled;
+ struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+ int moving_to_polling;
+};
+
+struct mlx5_port_caps {
+ int gid_table_len;
+ int pkey_table_len;
+ u8 ext_port_cap;
+};
+
+struct mlx5_buf {
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+ struct mlx5_core_dev *dev;
+ struct {
+ void *buf;
+ } direct;
+ u64 *page_list;
+ int npages;
+ int size;
+ u8 page_shift;
+ u8 load_done;
+};
+
+struct mlx5_eq {
+ struct mlx5_core_dev *dev;
+ __be32 __iomem *doorbell;
+ u32 cons_index;
+ struct mlx5_buf buf;
+ int size;
+ u8 irqn;
+ u8 eqn;
+ int nent;
+ u64 mask;
+ struct list_head list;
+ int index;
+ struct mlx5_rsc_debug *dbg;
+};
+
+struct mlx5_core_psv {
+ u32 psv_idx;
+ struct psv_layout {
+ u32 pd;
+ u16 syndrome;
+ u16 reserved;
+ u16 bg;
+ u16 app_tag;
+ u32 ref_tag;
+ } psv;
+};
+
+struct mlx5_core_sig_ctx {
+ struct mlx5_core_psv psv_memory;
+ struct mlx5_core_psv psv_wire;
+#if (__FreeBSD_version >= 1100000)
+ struct ib_sig_err err_item;
+#endif
+ bool sig_status_checked;
+ bool sig_err_exists;
+ u32 sigerr_count;
+};
+
+struct mlx5_core_mr {
+ u64 iova;
+ u64 size;
+ u32 key;
+ u32 pd;
+};
+
+enum mlx5_res_type {
+ MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
+ MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
+ MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
+ MLX5_RES_SRQ = 3,
+ MLX5_RES_XSRQ = 4,
+ MLX5_RES_DCT = 5,
+};
+
+struct mlx5_core_rsc_common {
+ enum mlx5_res_type res;
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
+ u32 srqn;
+ int max;
+ int max_gs;
+ int max_avail_gather;
+ int wqe_shift;
+ void (*event)(struct mlx5_core_srq *, int);
+ atomic_t refcount;
+ struct completion free;
+};
+
+struct mlx5_eq_table {
+ void __iomem *update_ci;
+ void __iomem *update_arm_ci;
+ struct list_head comp_eqs_list;
+ struct mlx5_eq pages_eq;
+ struct mlx5_eq async_eq;
+ struct mlx5_eq cmd_eq;
+ int num_comp_vectors;
+ /* protect EQs list
+ */
+ spinlock_t lock;
+};
+
+struct mlx5_uar {
+ u32 index;
+ void __iomem *bf_map;
+ void __iomem *map;
+};
+
+
+struct mlx5_core_health {
+ struct mlx5_health_buffer __iomem *health;
+ __be32 __iomem *health_counter;
+ struct timer_list timer;
+ struct list_head list;
+ u32 prev;
+ int miss_counter;
+};
+
+#define MLX5_CQ_LINEAR_ARRAY_SIZE 1024
+
+struct mlx5_cq_linear_array_entry {
+ spinlock_t lock;
+ struct mlx5_core_cq * volatile cq;
+};
+
+struct mlx5_cq_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+ struct mlx5_cq_linear_array_entry linear_array[MLX5_CQ_LINEAR_ARRAY_SIZE];
+};
+
+struct mlx5_qp_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_srq_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_mr_table {
+ /* protect radix tree
+ */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+struct mlx5_irq_info {
+ char name[MLX5_MAX_IRQ_NAME];
+};
+
+struct mlx5_priv {
+ char name[MLX5_MAX_NAME_LEN];
+ struct mlx5_eq_table eq_table;
+ struct msix_entry *msix_arr;
+ struct mlx5_irq_info *irq_info;
+ struct mlx5_uuar_info uuari;
+ MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
+
+ struct io_mapping *bf_mapping;
+
+ /* pages stuff */
+ struct workqueue_struct *pg_wq;
+ struct rb_root page_root;
+ s64 fw_pages;
+ atomic_t reg_pages;
+ s64 pages_per_func[MLX5_MAX_NUMBER_OF_VFS];
+ struct mlx5_core_health health;
+
+ struct mlx5_srq_table srq_table;
+
+ /* start: qp stuff */
+ struct mlx5_qp_table qp_table;
+ struct dentry *qp_debugfs;
+ struct dentry *eq_debugfs;
+ struct dentry *cq_debugfs;
+ struct dentry *cmdif_debugfs;
+ /* end: qp stuff */
+
+ /* start: cq stuff */
+ struct mlx5_cq_table cq_table;
+ /* end: cq stuff */
+
+ /* start: mr stuff */
+ struct mlx5_mr_table mr_table;
+ /* end: mr stuff */
+
+ /* start: alloc stuff */
+ int numa_node;
+
+ struct mutex pgdir_mutex;
+ struct list_head pgdir_list;
+ /* end: alloc stuff */
+ struct dentry *dbg_root;
+
+ /* protect mkey key part */
+ spinlock_t mkey_lock;
+ u8 mkey_key;
+
+ struct list_head dev_list;
+ struct list_head ctx_list;
+ spinlock_t ctx_lock;
+ unsigned long pci_dev_data;
+};
+
+enum mlx5_device_state {
+ MLX5_DEVICE_STATE_UP,
+ MLX5_DEVICE_STATE_INTERNAL_ERROR,
+};
+
+struct mlx5_special_contexts {
+ int resd_lkey;
+};
+
+struct mlx5_core_dev {
+ struct pci_dev *pdev;
+ char board_id[MLX5_BOARD_ID_LEN];
+ struct mlx5_cmd cmd;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct mlx5_init_seg __iomem *iseg;
+ enum mlx5_device_state state;
+ void (*event) (struct mlx5_core_dev *dev,
+ enum mlx5_dev_event event,
+ unsigned long param);
+ struct mlx5_priv priv;
+ struct mlx5_profile *profile;
+ atomic_t num_qps;
+ u32 issi;
+ struct mlx5_special_contexts special_contexts;
+ unsigned int module_status[MLX5_MAX_PORTS];
+ u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
+};
+
+enum {
+ MLX5_WOL_DISABLE = 0,
+ MLX5_WOL_SECURED_MAGIC = 1 << 1,
+ MLX5_WOL_MAGIC = 1 << 2,
+ MLX5_WOL_ARP = 1 << 3,
+ MLX5_WOL_BROADCAST = 1 << 4,
+ MLX5_WOL_MULTICAST = 1 << 5,
+ MLX5_WOL_UNICAST = 1 << 6,
+ MLX5_WOL_PHY_ACTIVITY = 1 << 7,
+};
+
+struct mlx5_db {
+ __be32 *db;
+ union {
+ struct mlx5_db_pgdir *pgdir;
+ struct mlx5_ib_user_db_page *user_page;
+ } u;
+ dma_addr_t dma;
+ int index;
+};
+
+struct mlx5_net_counters {
+ u64 packets;
+ u64 octets;
+};
+
+struct mlx5_ptys_reg {
+ u8 an_dis_admin;
+ u8 an_dis_ap;
+ u8 local_port;
+ u8 proto_mask;
+ u32 eth_proto_cap;
+ u16 ib_link_width_cap;
+ u16 ib_proto_cap;
+ u32 eth_proto_admin;
+ u16 ib_link_width_admin;
+ u16 ib_proto_admin;
+ u32 eth_proto_oper;
+ u16 ib_link_width_oper;
+ u16 ib_proto_oper;
+ u32 eth_proto_lp_advertise;
+};
+
+struct mlx5_pvlc_reg {
+ u8 local_port;
+ u8 vl_hw_cap;
+ u8 vl_admin;
+ u8 vl_operational;
+};
+
+struct mlx5_pmtu_reg {
+ u8 local_port;
+ u16 max_mtu;
+ u16 admin_mtu;
+ u16 oper_mtu;
+};
+
+struct mlx5_vport_counters {
+ struct mlx5_net_counters received_errors;
+ struct mlx5_net_counters transmit_errors;
+ struct mlx5_net_counters received_ib_unicast;
+ struct mlx5_net_counters transmitted_ib_unicast;
+ struct mlx5_net_counters received_ib_multicast;
+ struct mlx5_net_counters transmitted_ib_multicast;
+ struct mlx5_net_counters received_eth_broadcast;
+ struct mlx5_net_counters transmitted_eth_broadcast;
+ struct mlx5_net_counters received_eth_unicast;
+ struct mlx5_net_counters transmitted_eth_unicast;
+ struct mlx5_net_counters received_eth_multicast;
+ struct mlx5_net_counters transmitted_eth_multicast;
+};
+
+enum {
+ MLX5_DB_PER_PAGE = MLX5_ADAPTER_PAGE_SIZE / L1_CACHE_BYTES,
+};
+
+struct mlx5_core_dct {
+ struct mlx5_core_rsc_common common; /* must be first */
+ void (*event)(struct mlx5_core_dct *, int);
+ int dctn;
+ struct completion drained;
+ struct mlx5_rsc_debug *dbg;
+ int pid;
+};
+
+enum {
+ MLX5_COMP_EQ_SIZE = 1024,
+};
+
+enum {
+ MLX5_PTYS_IB = 1 << 0,
+ MLX5_PTYS_EN = 1 << 2,
+};
+
+struct mlx5_db_pgdir {
+ struct list_head list;
+ DECLARE_BITMAP(bitmap, MLX5_DB_PER_PAGE);
+ struct mlx5_fw_page *fw_page;
+ __be32 *db_page;
+ dma_addr_t db_dma;
+};
+
+typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
+
+struct mlx5_cmd_work_ent {
+ struct mlx5_cmd_msg *in;
+ struct mlx5_cmd_msg *out;
+ int uin_size;
+ void *uout;
+ int uout_size;
+ mlx5_cmd_cbk_t callback;
+ void *context;
+ int idx;
+ struct completion done;
+ struct mlx5_cmd *cmd;
+ struct work_struct work;
+ struct mlx5_cmd_layout *lay;
+ int ret;
+ int page_queue;
+ u8 status;
+ u8 token;
+ u64 ts1;
+ u64 ts2;
+ u16 op;
+ u8 busy;
+};
+
+struct mlx5_pas {
+ u64 pa;
+ u8 log_sz;
+};
+
+static inline void *
+mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+{
+ return ((char *)buf->direct.buf + offset);
+}
+
+
+extern struct workqueue_struct *mlx5_core_wq;
+
+#define STRUCT_FIELD(header, field) \
+ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
+ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
+
+static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+extern struct dentry *mlx5_debugfs_root;
+
+static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->fw_rev) & 0xffff;
+}
+
+static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->fw_rev) >> 16;
+}
+
+static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
+}
+
+static inline u16 cmdif_rev_get(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+}
+
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+ if (param > 4) {
+ printf("M4_CORE_DRV_NAME: WARN: ""gid table length is zero\n");
+ return 0;
+ }
+
+ return 8 * (1 << param);
+}
+
+static inline void *mlx5_vzalloc(unsigned long size)
+{
+ void *rtn;
+
+ rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ return rtn;
+}
+
+static inline void *mlx5_vmalloc(unsigned long size)
+{
+ void *rtn;
+
+ rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!rtn)
+ rtn = vmalloc(size);
+ return rtn;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+int mlx5_cmd_init(struct mlx5_core_dev *dev);
+void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
+int mlx5_cmd_status_to_err_v2(void *ptr);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode);
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context);
+int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
+int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
+void mlx5_health_cleanup(void);
+void __init mlx5_health_init(void);
+void mlx5_start_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+
+#define mlx5_buf_alloc_node(dev, size, direct, buf, node) \
+ mlx5_buf_alloc(dev, size, direct, buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
+ struct mlx5_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc);
+int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
+int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq);
+void mlx5_init_mr_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ struct mlx5_query_mkey_mbox_out *out, int outlen);
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ u32 *mkey);
+int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
+int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+ u16 opmod, u8 port);
+void mlx5_fwp_flush(struct mlx5_fw_page *fwp);
+void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp);
+struct mlx5_fw_page *mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num);
+void mlx5_fwp_free(struct mlx5_fw_page *fwp);
+u64 mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset);
+void *mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset);
+void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
+ s32 npages);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
+s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev);
+void mlx5_register_debugfs(void);
+void mlx5_unregister_debugfs(void);
+int mlx5_eq_init(struct mlx5_core_dev *dev);
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
+void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
+struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector);
+void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev);
+void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+ int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_start_eqs(struct mlx5_core_dev *dev);
+int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
+int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
+ u64 addr);
+
+int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+ int size_in, void *data_out, int size_out,
+ u16 reg_num, int arg, int write);
+
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask);
+int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_disable_cap, u8 *an_disable_status);
+int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
+ u32 eth_proto_admin, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask);
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask);
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status);
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status);
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
+ u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
+ u32 *rx_pause, u32 *tx_pause);
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu);
+
+unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num);
+int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num);
+int mlx5_query_eeprom(struct mlx5_core_dev *dev, int i2c_addr, int page_num,
+ int device_addr, int size, int module_num, u32 *data,
+ int *size_read);
+
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct mlx5_query_eq_mbox_out *out, int outlen);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+ int node);
+void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
+
+const char *mlx5_command_str(int command);
+int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+ int npsvs, u32 *sig_index);
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev);
+int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode);
+int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode);
+int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
+ struct mlx5_pvlc_reg *pvlc, int write);
+int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
+ struct mlx5_ptys_reg *ptys, int write);
+int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
+ struct mlx5_pmtu_reg *pmtu, int write);
+int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port);
+int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port);
+int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
+ int priority, int *is_enable);
+int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
+ int priority, int enable);
+int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
+ void *out, int out_size);
+int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
+ void *in, int in_size);
+int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
+ void *out, int out_size);
+int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
+ int in_size);
+int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
+ u8 num_of_samples, u16 sample_index,
+ void *out, int out_size);
+static inline u32 mlx5_mkey_to_idx(u32 mkey)
+{
+ return mkey >> 8;
+}
+
+static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
+{
+ return mkey_idx << 8;
+}
+
+static inline u8 mlx5_mkey_variant(u32 mkey)
+{
+ return mkey & 0xff;
+}
+
+enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+};
+
+enum {
+ MAX_MR_CACHE_ENTRIES = 15,
+};
+
+struct mlx5_interface {
+ void * (*add)(struct mlx5_core_dev *dev);
+ void (*remove)(struct mlx5_core_dev *dev, void *context);
+ void (*event)(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param);
+ void * (*get_dev)(void *context);
+ int protocol;
+ struct list_head list;
+};
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
+int mlx5_register_interface(struct mlx5_interface *intf);
+void mlx5_unregister_interface(struct mlx5_interface *intf);
+
+struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MR_CACHE_ENTRIES];
+};
+
+enum {
+ MLX5_PCI_DEV_IS_VF = 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+ return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
+
+#define MLX5_EEPROM_MAX_BYTES 32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff
+#define MLX5_EEPROM_REVISION_ID_BYTE_MASK 0x0000ff00
+#define MLX5_EEPROM_PAGE_3_VALID_BIT_MASK 0x00040000
+#endif /* MLX5_DRIVER_H */
Property changes on: trunk/sys/dev/mlx5/driver.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
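
For reference, a minimal sketch of how the mkey helper inlines declared in
driver.h fit together: a 32-bit memory key carries the 24-bit table index in
its upper bits and an 8-bit variant in the low byte, so index and variant can
be split out and recombined losslessly. The key value below is made up for
illustration; the helpers simply re-state mlx5_mkey_to_idx(),
mlx5_idx_to_mkey() and mlx5_mkey_variant() from the header above.

#include <stdint.h>
#include <stdio.h>

/* userland re-statement of the driver.h helpers, for illustration only */
static uint32_t mkey_to_idx(uint32_t mkey)  { return (mkey >> 8); }
static uint32_t idx_to_mkey(uint32_t idx)   { return (idx << 8); }
static uint8_t  mkey_variant(uint32_t mkey) { return (mkey & 0xff); }

int
main(void)
{
	uint32_t mkey = 0x00012345;		/* hypothetical key */
	uint32_t idx = mkey_to_idx(mkey);	/* 0x123 */
	uint8_t var = mkey_variant(mkey);	/* 0x45 */

	/* recombining index and variant yields the original key */
	printf("idx=0x%x variant=0x%x mkey=0x%x\n",
	    idx, var, idx_to_mkey(idx) | var);
	return (0);
}
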
Added: trunk/sys/dev/mlx5/eswitch_vacl.h
===================================================================
--- trunk/sys/dev/mlx5/eswitch_vacl.h (rev 0)
+++ trunk/sys/dev/mlx5/eswitch_vacl.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,47 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/eswitch_vacl.h 292196 2015-12-14 10:31:03Z hselasky $
+ */
+
+#ifndef MLX5_ESWITCH_VACL_TABLE_H
+#define MLX5_ESWITCH_VACL_TABLE_H
+
+#include <dev/mlx5/driver.h>
+
+void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
+ u16 vport, bool is_egress);
+void mlx5_vacl_table_cleanup(void *acl_t);
+int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan);
+void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan);
+int mlx5_vacl_table_enable_vlan_filter(void *acl_t);
+void mlx5_vacl_table_disable_vlan_filter(void *acl_t);
+int mlx5_vacl_table_drop_untagged(void *acl_t);
+int mlx5_vacl_table_allow_untagged(void *acl_t);
+int mlx5_vacl_table_drop_unknown_vlan(void *acl_t);
+int mlx5_vacl_table_allow_unknown_vlan(void *acl_t);
+int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac);
+
+#endif /* MLX5_ESWITCH_VACL_TABLE_H */
Property changes on: trunk/sys/dev/mlx5/eswitch_vacl.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
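
A hedged usage sketch of the e-switch VLAN ACL API declared in
eswitch_vacl.h, assuming a caller that already holds a struct mlx5_core_dev.
The vport number, the VLAN id and the choice to drop untagged traffic are
illustrative assumptions, not taken from the driver; on error the sketch
simply tears the table down again.

#include <dev/mlx5/eswitch_vacl.h>

static int
example_vacl_setup(struct mlx5_core_dev *dev)
{
	void *acl;
	int err;

	/* one ACL table per vport and direction; ingress here */
	acl = mlx5_vacl_table_create(dev, 1 /* vport */, false /* is_egress */);
	if (acl == NULL)
		return (-ENOMEM);

	/* allow only VLAN 100 and drop untagged frames */
	err = mlx5_vacl_table_add_vlan(acl, 100);
	if (err == 0)
		err = mlx5_vacl_table_enable_vlan_filter(acl);
	if (err == 0)
		err = mlx5_vacl_table_drop_untagged(acl);

	if (err != 0)
		mlx5_vacl_table_cleanup(acl);
	return (err);
}
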
Added: trunk/sys/dev/mlx5/flow_table.h
===================================================================
--- trunk/sys/dev/mlx5/flow_table.h (rev 0)
+++ trunk/sys/dev/mlx5/flow_table.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,57 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/flow_table.h 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#ifndef MLX5_FLOW_TABLE_H
+#define MLX5_FLOW_TABLE_H
+
+#include <dev/mlx5/driver.h>
+
+#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET 0x0
+#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_RESET 0x1
+
+struct mlx5_flow_table_group {
+ u8 log_sz;
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 vport,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group);
+void mlx5_destroy_flow_table(void *flow_table);
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index);
+int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+u32 mlx5_get_flow_table_id(void *flow_table);
+int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
+ u8 vport_num, u8 table_type, u32 table_id,
+ u32 underlay_qpn);
+
+#endif /* MLX5_FLOW_TABLE_H */
Property changes on: trunk/sys/dev/mlx5/flow_table.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
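
A hedged sketch of driving the flow table API declared in flow_table.h: fill
one struct mlx5_flow_table_group and create a table sized for its entries.
The level, table type and vport values below are placeholders, and real
callers populate match_criteria through the MLX5_SET family of macros before
defining match groups.

#include <dev/mlx5/flow_table.h>

static void *
example_create_flow_table(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_table_group group = {
		.log_sz = 4,			/* room for 16 entries */
		.match_criteria_enable = 0,	/* catch-all group */
	};

	/* level, table_type and vport are placeholder values */
	return (mlx5_create_flow_table(dev, 0 /* level */, 0 /* table_type */,
	    0 /* vport */, 1 /* num_groups */, &group));
}
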
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_alloc.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_alloc.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_alloc.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,262 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_alloc.c 322151 2017-08-07 12:49:30Z hselasky $
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <dev/mlx5/driver.h>
+
+#include "mlx5_core.h"
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+static void
+mlx5_buf_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct mlx5_buf *buf;
+ uint8_t owned;
+ int x;
+
+ buf = (struct mlx5_buf *)arg;
+ owned = MLX5_DMA_OWNED(buf->dev);
+
+ if (!owned)
+ MLX5_DMA_LOCK(buf->dev);
+
+ if (error == 0) {
+ for (x = 0; x != nseg; x++) {
+ buf->page_list[x] = segs[x].ds_addr;
+ KASSERT(segs[x].ds_len == PAGE_SIZE, ("Invalid segment size"));
+ }
+ buf->load_done = MLX5_LOAD_ST_SUCCESS;
+ } else {
+ buf->load_done = MLX5_LOAD_ST_FAILURE;
+ }
+ MLX5_DMA_DONE(buf->dev);
+
+ if (!owned)
+ MLX5_DMA_UNLOCK(buf->dev);
+}
+
+int
+mlx5_buf_alloc(struct mlx5_core_dev *dev, int size,
+ int max_direct, struct mlx5_buf *buf)
+{
+ int err;
+
+ buf->npages = howmany(size, PAGE_SIZE);
+ buf->page_shift = PAGE_SHIFT;
+ buf->load_done = MLX5_LOAD_ST_NONE;
+ buf->dev = dev;
+ buf->page_list = kcalloc(buf->npages, sizeof(*buf->page_list),
+ GFP_KERNEL);
+
+ err = -bus_dma_tag_create(
+ bus_get_dma_tag(dev->pdev->dev.bsddev),
+ PAGE_SIZE, /* alignment */
+ 0, /* no boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ PAGE_SIZE * buf->npages, /* maxsize */
+ buf->npages, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &buf->dma_tag);
+
+ if (err != 0)
+ goto err_dma_tag;
+
+ /* allocate memory */
+ err = -bus_dmamem_alloc(buf->dma_tag, &buf->direct.buf,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &buf->dma_map);
+ if (err != 0)
+ goto err_dma_alloc;
+
+ /* load memory into DMA */
+ MLX5_DMA_LOCK(dev);
+ err = bus_dmamap_load(
+ buf->dma_tag, buf->dma_map, buf->direct.buf,
+ PAGE_SIZE * buf->npages, &mlx5_buf_load_mem_cb,
+ buf, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
+
+ while (buf->load_done == MLX5_LOAD_ST_NONE)
+ MLX5_DMA_WAIT(dev);
+ MLX5_DMA_UNLOCK(dev);
+
+ /* check for error */
+ if (buf->load_done != MLX5_LOAD_ST_SUCCESS) {
+ err = -ENOMEM;
+ goto err_dma_load;
+ }
+
+ /* clean memory */
+ memset(buf->direct.buf, 0, PAGE_SIZE * buf->npages);
+
+ /* flush memory to RAM */
+ bus_dmamap_sync(buf->dev->cmd.dma_tag, buf->dma_map, BUS_DMASYNC_PREWRITE);
+ return (0);
+
+err_dma_load:
+ bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
+err_dma_alloc:
+ bus_dma_tag_destroy(buf->dma_tag);
+err_dma_tag:
+ kfree(buf->page_list);
+ return (err);
+}
+
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
+{
+
+ bus_dmamap_unload(buf->dma_tag, buf->dma_map);
+ bus_dmamem_free(buf->dma_tag, buf->direct.buf, buf->dma_map);
+ bus_dma_tag_destroy(buf->dma_tag);
+ kfree(buf->page_list);
+}
+EXPORT_SYMBOL_GPL(mlx5_buf_free);
+
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+ int node)
+{
+ struct mlx5_db_pgdir *pgdir;
+
+ pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+
+ bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
+
+ pgdir->fw_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
+ if (pgdir->fw_page != NULL) {
+ pgdir->db_page = pgdir->fw_page->virt_addr;
+ pgdir->db_dma = pgdir->fw_page->dma_addr;
+
+ /* clean allocated memory */
+ memset(pgdir->db_page, 0, MLX5_ADAPTER_PAGE_SIZE);
+
+ /* flush memory to RAM */
+ mlx5_fwp_flush(pgdir->fw_page);
+ }
+ if (!pgdir->db_page) {
+ kfree(pgdir);
+ return NULL;
+ }
+
+ return pgdir;
+}
+
+static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
+ struct mlx5_db *db)
+{
+ int offset;
+ int i;
+
+ i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
+ if (i >= MLX5_DB_PER_PAGE)
+ return -ENOMEM;
+
+ __clear_bit(i, pgdir->bitmap);
+
+ db->u.pgdir = pgdir;
+ db->index = i;
+ offset = db->index * L1_CACHE_BYTES;
+ db->db = pgdir->db_page + offset / sizeof(*pgdir->db_page);
+ db->dma = pgdir->db_dma + offset;
+
+ db->db[0] = 0;
+ db->db[1] = 0;
+
+ return 0;
+}
+
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
+{
+ struct mlx5_db_pgdir *pgdir;
+ int ret = 0;
+
+ mutex_lock(&dev->priv.pgdir_mutex);
+
+ list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
+ if (!mlx5_alloc_db_from_pgdir(pgdir, db))
+ goto out;
+
+ pgdir = mlx5_alloc_db_pgdir(dev, node);
+ if (!pgdir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ list_add(&pgdir->list, &dev->priv.pgdir_list);
+
+ /* This should never fail -- we just allocated an empty page: */
+ WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));
+
+out:
+ mutex_unlock(&dev->priv.pgdir_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc);
+
+void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+ mutex_lock(&dev->priv.pgdir_mutex);
+
+ __set_bit(db->index, db->u.pgdir->bitmap);
+
+ if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
+ mlx5_fwp_free(db->u.pgdir->fw_page);
+ list_del(&db->u.pgdir->list);
+ kfree(db->u.pgdir);
+ }
+
+ mutex_unlock(&dev->priv.pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx5_db_free);
+
+void
+mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
+{
+ int i;
+
+ for (i = 0; i != buf->npages; i++)
+ pas[i] = cpu_to_be64(buf->page_list[i]);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_alloc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
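
A hedged sketch of the queue buffer life cycle implemented by mlx5_alloc.c:
allocate the DMA-able pages, hand their bus addresses to firmware with
mlx5_fill_page_array(), and release the buffer again. The buffer size and the
local pas[] array are illustrative assumptions; real callers embed the page
array inside a CREATE_* command mailbox.

#include <dev/mlx5/driver.h>

static int
example_queue_buf(struct mlx5_core_dev *dev)
{
	struct mlx5_buf buf;
	__be64 pas[4];		/* one entry per page; buffer spans 4 pages */
	int err;

	err = mlx5_buf_alloc(dev, 4 * PAGE_SIZE, 2 * PAGE_SIZE, &buf);
	if (err != 0)
		return (err);

	/* translate the buffer's bus addresses for the firmware command */
	mlx5_fill_page_array(&buf, pas);

	/* ... pass pas[] in the command inbox here ... */

	mlx5_buf_free(dev, &buf);
	return (0);
}
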
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_cmd.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_cmd.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1816 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_cmd.c 322151 2017-08-07 12:49:30Z hselasky $
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/io-mapping.h>
+#include <linux/hardirq.h>
+#include <linux/ktime.h>
+#include <dev/mlx5/driver.h>
+
+#include "mlx5_core.h"
+
+static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_msg *msg);
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+
+enum {
+ CMD_IF_REV = 5,
+};
+
+enum {
+ CMD_MODE_POLLING,
+ CMD_MODE_EVENTS
+};
+
+enum {
+ NUM_LONG_LISTS = 2,
+ NUM_MED_LISTS = 64,
+ LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
+ MLX5_CMD_DATA_BLOCK_SIZE,
+ MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
+};
+
+enum {
+ MLX5_CMD_DELIVERY_STAT_OK = 0x0,
+ MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
+ MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
+ MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
+ MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
+ MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
+ MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
+ MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
+ MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
+ MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
+ MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
+};
+
+static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
+ struct mlx5_cmd_msg *in,
+ int uin_size,
+ struct mlx5_cmd_msg *out,
+ void *uout, int uout_size,
+ mlx5_cmd_cbk_t cbk,
+ void *context, int page_queue)
+{
+ gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
+ struct mlx5_cmd_work_ent *ent;
+
+ ent = kzalloc(sizeof(*ent), alloc_flags);
+ if (!ent)
+ return ERR_PTR(-ENOMEM);
+
+ ent->in = in;
+ ent->uin_size = uin_size;
+ ent->out = out;
+ ent->uout = uout;
+ ent->uout_size = uout_size;
+ ent->callback = cbk;
+ ent->context = context;
+ ent->cmd = cmd;
+ ent->page_queue = page_queue;
+
+ return ent;
+}
+
+static u8 alloc_token(struct mlx5_cmd *cmd)
+{
+ u8 token;
+
+ spin_lock(&cmd->token_lock);
+ cmd->token++;
+ if (cmd->token == 0)
+ cmd->token++;
+ token = cmd->token;
+ spin_unlock(&cmd->token_lock);
+
+ return token;
+}
+
+static int alloc_ent(struct mlx5_cmd_work_ent *ent)
+{
+ unsigned long flags;
+ struct mlx5_cmd *cmd = ent->cmd;
+ struct mlx5_core_dev *dev =
+ container_of(cmd, struct mlx5_core_dev, cmd);
+ int ret = cmd->max_reg_cmds;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ if (!ent->page_queue) {
+ ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
+ if (ret >= cmd->max_reg_cmds)
+ ret = -1;
+ }
+
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ ret = -1;
+
+ if (ret != -1) {
+ ent->busy = 1;
+ ent->idx = ret;
+ clear_bit(ent->idx, &cmd->bitmask);
+ cmd->ent_arr[ent->idx] = ent;
+ }
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+ return ret;
+}
+
+static void free_ent(struct mlx5_cmd *cmd, int idx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cmd->alloc_lock, flags);
+ set_bit(idx, &cmd->bitmask);
+ spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+}
+
+static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+{
+ return cmd->cmd_buf + (idx << cmd->log_stride);
+}
+
+static u8 xor8_buf(void *buf, int len)
+{
+ u8 *ptr = buf;
+ u8 sum = 0;
+ int i;
+
+ for (i = 0; i < len; i++)
+ sum ^= ptr[i];
+
+ return sum;
+}
+
+static int verify_block_sig(struct mlx5_cmd_prot_block *block)
+{
+ if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+ return -EINVAL;
+
+ if (xor8_buf(block, sizeof(*block)) != 0xff)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+ int csum)
+{
+ block->token = token;
+ if (csum) {
+ block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+ sizeof(block->data) - 2);
+ block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+ }
+}
+
+static void
+calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+{
+ size_t i;
+
+ for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
+ struct mlx5_cmd_prot_block *block;
+
+ block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
+
+ /* compute signature */
+ calc_block_sig(block, token, csum);
+
+ /* check for last block */
+ if (block->next == 0)
+ break;
+ }
+
+ /* make sure data gets written to RAM */
+ mlx5_fwp_flush(msg);
+}
+
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
+{
+ ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
+ calc_chain_sig(ent->in, ent->token, csum);
+ calc_chain_sig(ent->out, ent->token, csum);
+}
+
+static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+{
+ struct mlx5_core_dev *dev = container_of(ent->cmd,
+ struct mlx5_core_dev, cmd);
+ int poll_end = jiffies +
+ msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
+ u8 own;
+
+ do {
+ own = ent->lay->status_own;
+ if (!(own & CMD_OWNER_HW) ||
+ dev->state != MLX5_DEVICE_STATE_UP) {
+ ent->ret = 0;
+ return;
+ }
+ usleep_range(5000, 10000);
+ } while (time_before(jiffies, poll_end));
+
+ ent->ret = -ETIMEDOUT;
+}
+
+static void free_cmd(struct mlx5_cmd_work_ent *ent)
+{
+ kfree(ent);
+}
+
+static int
+verify_signature(struct mlx5_cmd_work_ent *ent)
+{
+ struct mlx5_cmd_msg *msg = ent->out;
+ size_t i;
+ int err;
+ u8 sig;
+
+ sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+ if (sig != 0xff)
+ return -EINVAL;
+
+ for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
+ struct mlx5_cmd_prot_block *block;
+
+ block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
+
+ /* compute signature */
+ err = verify_block_sig(block);
+ if (err != 0)
+ return (err);
+
+ /* check for last block */
+ if (block->next == 0)
+ break;
+ }
+ return (0);
+}
+
+static void dump_buf(void *buf, int size, int data_only, int offset)
+{
+ __be32 *p = buf;
+ int i;
+
+ for (i = 0; i < size; i += 16) {
+ pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
+ be32_to_cpu(p[1]), be32_to_cpu(p[2]),
+ be32_to_cpu(p[3]));
+ p += 4;
+ offset += 16;
+ }
+ if (!data_only)
+ pr_debug("\n");
+}
+
+const char *mlx5_command_str(int command)
+{
+ switch (command) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ return "QUERY_HCA_CAP";
+
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ return "SET_HCA_CAP";
+
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ return "QUERY_ADAPTER";
+
+ case MLX5_CMD_OP_INIT_HCA:
+ return "INIT_HCA";
+
+ case MLX5_CMD_OP_TEARDOWN_HCA:
+ return "TEARDOWN_HCA";
+
+ case MLX5_CMD_OP_ENABLE_HCA:
+ return "MLX5_CMD_OP_ENABLE_HCA";
+
+ case MLX5_CMD_OP_DISABLE_HCA:
+ return "MLX5_CMD_OP_DISABLE_HCA";
+
+ case MLX5_CMD_OP_QUERY_PAGES:
+ return "QUERY_PAGES";
+
+ case MLX5_CMD_OP_MANAGE_PAGES:
+ return "MANAGE_PAGES";
+
+ case MLX5_CMD_OP_QUERY_ISSI:
+ return "QUERY_ISSI";
+
+ case MLX5_CMD_OP_SET_ISSI:
+ return "SET_ISSI";
+
+ case MLX5_CMD_OP_CREATE_MKEY:
+ return "CREATE_MKEY";
+
+ case MLX5_CMD_OP_QUERY_MKEY:
+ return "QUERY_MKEY";
+
+ case MLX5_CMD_OP_DESTROY_MKEY:
+ return "DESTROY_MKEY";
+
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ return "QUERY_SPECIAL_CONTEXTS";
+
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+ return "PAGE_FAULT_RESUME";
+
+ case MLX5_CMD_OP_CREATE_EQ:
+ return "CREATE_EQ";
+
+ case MLX5_CMD_OP_DESTROY_EQ:
+ return "DESTROY_EQ";
+
+ case MLX5_CMD_OP_QUERY_EQ:
+ return "QUERY_EQ";
+
+ case MLX5_CMD_OP_GEN_EQE:
+ return "GEN_EQE";
+
+ case MLX5_CMD_OP_CREATE_CQ:
+ return "CREATE_CQ";
+
+ case MLX5_CMD_OP_DESTROY_CQ:
+ return "DESTROY_CQ";
+
+ case MLX5_CMD_OP_QUERY_CQ:
+ return "QUERY_CQ";
+
+ case MLX5_CMD_OP_MODIFY_CQ:
+ return "MODIFY_CQ";
+
+ case MLX5_CMD_OP_CREATE_QP:
+ return "CREATE_QP";
+
+ case MLX5_CMD_OP_DESTROY_QP:
+ return "DESTROY_QP";
+
+ case MLX5_CMD_OP_RST2INIT_QP:
+ return "RST2INIT_QP";
+
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ return "INIT2RTR_QP";
+
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ return "RTR2RTS_QP";
+
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ return "RTS2RTS_QP";
+
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ return "SQERR2RTS_QP";
+
+ case MLX5_CMD_OP_2ERR_QP:
+ return "2ERR_QP";
+
+ case MLX5_CMD_OP_2RST_QP:
+ return "2RST_QP";
+
+ case MLX5_CMD_OP_QUERY_QP:
+ return "QUERY_QP";
+
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ return "SQD_RTS_QP";
+
+ case MLX5_CMD_OP_MAD_IFC:
+ return "MAD_IFC";
+
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ return "INIT2INIT_QP";
+
+ case MLX5_CMD_OP_CREATE_PSV:
+ return "CREATE_PSV";
+
+ case MLX5_CMD_OP_DESTROY_PSV:
+ return "DESTROY_PSV";
+
+ case MLX5_CMD_OP_CREATE_SRQ:
+ return "CREATE_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_SRQ:
+ return "DESTROY_SRQ";
+
+ case MLX5_CMD_OP_QUERY_SRQ:
+ return "QUERY_SRQ";
+
+ case MLX5_CMD_OP_ARM_RQ:
+ return "ARM_RQ";
+
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return "CREATE_XRC_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ return "DESTROY_XRC_SRQ";
+
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ return "QUERY_XRC_SRQ";
+
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ return "ARM_XRC_SRQ";
+
+ case MLX5_CMD_OP_CREATE_DCT:
+ return "CREATE_DCT";
+
+ case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
+ return "SET_DC_CNAK_TRACE";
+
+ case MLX5_CMD_OP_DESTROY_DCT:
+ return "DESTROY_DCT";
+
+ case MLX5_CMD_OP_DRAIN_DCT:
+ return "DRAIN_DCT";
+
+ case MLX5_CMD_OP_QUERY_DCT:
+ return "QUERY_DCT";
+
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ return "ARM_DCT_FOR_KEY_VIOLATION";
+
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ return "QUERY_VPORT_STATE";
+
+ case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+ return "MODIFY_VPORT_STATE";
+
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ return "QUERY_ESW_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+ return "MODIFY_ESW_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ return "QUERY_NIC_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ return "MODIFY_NIC_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ return "QUERY_ROCE_ADDRESS";
+
+ case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+ return "SET_ROCE_ADDRESS";
+
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ return "QUERY_HCA_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ return "MODIFY_HCA_VPORT_CONTEXT";
+
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ return "QUERY_HCA_VPORT_GID";
+
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ return "QUERY_HCA_VPORT_PKEY";
+
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ return "QUERY_VPORT_COUNTER";
+
+ case MLX5_CMD_OP_SET_WOL_ROL:
+ return "SET_WOL_ROL";
+
+ case MLX5_CMD_OP_QUERY_WOL_ROL:
+ return "QUERY_WOL_ROL";
+
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ return "ALLOC_Q_COUNTER";
+
+ case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ return "DEALLOC_Q_COUNTER";
+
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ return "QUERY_Q_COUNTER";
+
+ case MLX5_CMD_OP_ALLOC_PD:
+ return "ALLOC_PD";
+
+ case MLX5_CMD_OP_DEALLOC_PD:
+ return "DEALLOC_PD";
+
+ case MLX5_CMD_OP_ALLOC_UAR:
+ return "ALLOC_UAR";
+
+ case MLX5_CMD_OP_DEALLOC_UAR:
+ return "DEALLOC_UAR";
+
+ case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+ return "CONFIG_INT_MODERATION";
+
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ return "ATTACH_TO_MCG";
+
+ case MLX5_CMD_OP_DETACH_FROM_MCG:
+ return "DETACH_FROM_MCG";
+
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ return "GET_DROPPED_PACKET_LOG";
+
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ return "QUERY_MAD_DEMUX";
+
+ case MLX5_CMD_OP_SET_MAD_DEMUX:
+ return "SET_MAD_DEMUX";
+
+ case MLX5_CMD_OP_NOP:
+ return "NOP";
+
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return "ALLOC_XRCD";
+
+ case MLX5_CMD_OP_DEALLOC_XRCD:
+ return "DEALLOC_XRCD";
+
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ return "ALLOC_TRANSPORT_DOMAIN";
+
+ case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+ return "DEALLOC_TRANSPORT_DOMAIN";
+
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ return "QUERY_CONG_STATUS";
+
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+ return "MODIFY_CONG_STATUS";
+
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ return "QUERY_CONG_PARAMS";
+
+ case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+ return "MODIFY_CONG_PARAMS";
+
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ return "QUERY_CONG_STATISTICS";
+
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ return "ADD_VXLAN_UDP_DPORT";
+
+ case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+ return "DELETE_VXLAN_UDP_DPORT";
+
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ return "SET_L2_TABLE_ENTRY";
+
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ return "QUERY_L2_TABLE_ENTRY";
+
+ case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ return "DELETE_L2_TABLE_ENTRY";
+
+ case MLX5_CMD_OP_CREATE_RMP:
+ return "CREATE_RMP";
+
+ case MLX5_CMD_OP_MODIFY_RMP:
+ return "MODIFY_RMP";
+
+ case MLX5_CMD_OP_DESTROY_RMP:
+ return "DESTROY_RMP";
+
+ case MLX5_CMD_OP_QUERY_RMP:
+ return "QUERY_RMP";
+
+ case MLX5_CMD_OP_CREATE_RQT:
+ return "CREATE_RQT";
+
+ case MLX5_CMD_OP_MODIFY_RQT:
+ return "MODIFY_RQT";
+
+ case MLX5_CMD_OP_DESTROY_RQT:
+ return "DESTROY_RQT";
+
+ case MLX5_CMD_OP_QUERY_RQT:
+ return "QUERY_RQT";
+
+ case MLX5_CMD_OP_ACCESS_REG:
+ return "MLX5_CMD_OP_ACCESS_REG";
+
+ case MLX5_CMD_OP_CREATE_SQ:
+ return "CREATE_SQ";
+
+ case MLX5_CMD_OP_MODIFY_SQ:
+ return "MODIFY_SQ";
+
+ case MLX5_CMD_OP_DESTROY_SQ:
+ return "DESTROY_SQ";
+
+ case MLX5_CMD_OP_QUERY_SQ:
+ return "QUERY_SQ";
+
+ case MLX5_CMD_OP_CREATE_RQ:
+ return "CREATE_RQ";
+
+ case MLX5_CMD_OP_MODIFY_RQ:
+ return "MODIFY_RQ";
+
+ case MLX5_CMD_OP_DESTROY_RQ:
+ return "DESTROY_RQ";
+
+ case MLX5_CMD_OP_QUERY_RQ:
+ return "QUERY_RQ";
+
+ case MLX5_CMD_OP_CREATE_TIR:
+ return "CREATE_TIR";
+
+ case MLX5_CMD_OP_MODIFY_TIR:
+ return "MODIFY_TIR";
+
+ case MLX5_CMD_OP_DESTROY_TIR:
+ return "DESTROY_TIR";
+
+ case MLX5_CMD_OP_QUERY_TIR:
+ return "QUERY_TIR";
+
+ case MLX5_CMD_OP_CREATE_TIS:
+ return "CREATE_TIS";
+
+ case MLX5_CMD_OP_MODIFY_TIS:
+ return "MODIFY_TIS";
+
+ case MLX5_CMD_OP_DESTROY_TIS:
+ return "DESTROY_TIS";
+
+ case MLX5_CMD_OP_QUERY_TIS:
+ return "QUERY_TIS";
+
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ return "CREATE_FLOW_TABLE";
+
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ return "DESTROY_FLOW_TABLE";
+
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ return "QUERY_FLOW_TABLE";
+
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ return "CREATE_FLOW_GROUP";
+
+ case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+ return "DESTROY_FLOW_GROUP";
+
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ return "QUERY_FLOW_GROUP";
+
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ return "SET_FLOW_TABLE_ENTRY";
+
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ return "QUERY_FLOW_TABLE_ENTRY";
+
+ case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ return "DELETE_FLOW_TABLE_ENTRY";
+
+ case MLX5_CMD_OP_SET_DIAGNOSTICS:
+ return "MLX5_CMD_OP_SET_DIAGNOSTICS";
+
+ case MLX5_CMD_OP_QUERY_DIAGNOSTICS:
+ return "MLX5_CMD_OP_QUERY_DIAGNOSTICS";
+
+ default: return "unknown command opcode";
+ }
+}
+
+static void dump_command(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_work_ent *ent, int input)
+{
+ u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
+ struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+ size_t i;
+ int data_only;
+ int offset = 0;
+ int msg_len = input ? ent->uin_size : ent->uout_size;
+ int dump_len;
+
+ data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
+
+ if (data_only)
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
+ "dump command data %s(0x%x) %s\n",
+ mlx5_command_str(op), op,
+ input ? "INPUT" : "OUTPUT");
+ else
+ mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
+ mlx5_command_str(op), op,
+ input ? "INPUT" : "OUTPUT");
+
+ if (data_only) {
+ if (input) {
+ dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
+ offset += sizeof(ent->lay->in);
+ } else {
+ dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
+ offset += sizeof(ent->lay->out);
+ }
+ } else {
+ dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
+ offset += sizeof(*ent->lay);
+ }
+
+ for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
+ struct mlx5_cmd_prot_block *block;
+
+ block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
+
+ if (data_only) {
+ if (offset >= msg_len)
+ break;
+ dump_len = min_t(int,
+ MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);
+
+ dump_buf(block->data, dump_len, 1, offset);
+ offset += MLX5_CMD_DATA_BLOCK_SIZE;
+ } else {
+ mlx5_core_dbg(dev, "command block:\n");
+ dump_buf(block, sizeof(*block), 0, offset);
+ offset += sizeof(*block);
+ }
+
+ /* check for last block */
+ if (block->next == 0)
+ break;
+ }
+
+ if (data_only)
+ pr_debug("\n");
+}
+
+static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
+ struct mlx5_outbox_hdr *hdr)
+{
+ hdr->status = 0;
+ hdr->syndrome = 0;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_TEARDOWN_HCA:
+ case MLX5_CMD_OP_DISABLE_HCA:
+ case MLX5_CMD_OP_MANAGE_PAGES:
+ case MLX5_CMD_OP_DESTROY_MKEY:
+ case MLX5_CMD_OP_DESTROY_EQ:
+ case MLX5_CMD_OP_DESTROY_CQ:
+ case MLX5_CMD_OP_DESTROY_QP:
+ case MLX5_CMD_OP_DESTROY_PSV:
+ case MLX5_CMD_OP_DESTROY_SRQ:
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ case MLX5_CMD_OP_DESTROY_DCT:
+ case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_DEALLOC_PD:
+ case MLX5_CMD_OP_DEALLOC_UAR:
+ case MLX5_CMD_OP_DETACH_FROM_MCG:
+ case MLX5_CMD_OP_DEALLOC_XRCD:
+ case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_DESTROY_LAG:
+ case MLX5_CMD_OP_DESTROY_VPORT_LAG:
+ case MLX5_CMD_OP_DESTROY_TIR:
+ case MLX5_CMD_OP_DESTROY_SQ:
+ case MLX5_CMD_OP_DESTROY_RQ:
+ case MLX5_CMD_OP_DESTROY_RMP:
+ case MLX5_CMD_OP_DESTROY_TIS:
+ case MLX5_CMD_OP_DESTROY_RQT:
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+ case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+ case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
+ case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_MODIFY_LAG:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
+ case MLX5_CMD_OP_ACCESS_REG:
+ case MLX5_CMD_OP_DRAIN_DCT:
+ return 0;
+
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_ALLOC_UAR:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_CREATE_EQ:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_LAG:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_CREATE_PSV:
+ case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_CREATE_VPORT_LAG:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_ENABLE_HCA:
+ case MLX5_CMD_OP_GEN_EQE:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT_HCA:
+ case MLX5_CMD_OP_MAD_IFC:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_EQ:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_PAGES:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
+ case MLX5_CMD_OP_SET_HCA_CAP:
+ case MLX5_CMD_OP_SET_ISSI:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_MAD_DEMUX:
+ case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ hdr->status = MLX5_CMD_STAT_INT_ERR;
+ hdr->syndrome = 0xFFFFFFFF;
+ return -ECANCELED;
+ default:
+ mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
+ return -EINVAL;
+ }
+}
+
+static void complete_command(struct mlx5_cmd_work_ent *ent)
+{
+ struct mlx5_cmd *cmd = ent->cmd;
+ struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
+ cmd);
+ mlx5_cmd_cbk_t callback;
+ void *context;
+
+ s64 ds;
+ struct mlx5_cmd_stats *stats;
+ unsigned long flags;
+ int err;
+ struct semaphore *sem;
+
+ if (ent->page_queue)
+ sem = &cmd->pages_sem;
+ else
+ sem = &cmd->sem;
+
+ if (dev->state != MLX5_DEVICE_STATE_UP) {
+ struct mlx5_outbox_hdr *out_hdr =
+ (struct mlx5_outbox_hdr *)ent->out;
+ struct mlx5_inbox_hdr *in_hdr =
+ (struct mlx5_inbox_hdr *)(ent->in->first.data);
+ u16 opcode = be16_to_cpu(in_hdr->opcode);
+
+ ent->ret = set_internal_err_outbox(dev,
+ opcode,
+ out_hdr);
+ }
+
+ if (ent->callback) {
+ ds = ent->ts2 - ent->ts1;
+ if (ent->op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[ent->op];
+ spin_lock_irqsave(&stats->lock, flags);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irqrestore(&stats->lock, flags);
+ }
+
+ callback = ent->callback;
+ context = ent->context;
+ err = ent->ret;
+ if (!err)
+ err = mlx5_copy_from_msg(ent->uout,
+ ent->out,
+ ent->uout_size);
+
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+
+ free_cmd(ent);
+ callback(err, context);
+ } else {
+ complete(&ent->done);
+ }
+ up(sem);
+}
+
+static void cmd_work_handler(struct work_struct *work)
+{
+ struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
+ struct mlx5_cmd *cmd = ent->cmd;
+ struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ struct mlx5_cmd_layout *lay;
+ struct semaphore *sem;
+
+ sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+ if (cmd->moving_to_polling) {
+ mlx5_core_warn(dev, "not expecting command execution, ignoring...\n");
+ return;
+ }
+
+ down(sem);
+
+ if (alloc_ent(ent) < 0) {
+ complete_command(ent);
+ return;
+ }
+
+ ent->token = alloc_token(cmd);
+ lay = get_inst(cmd, ent->idx);
+ ent->lay = lay;
+ memset(lay, 0, sizeof(*lay));
+ memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+ ent->op = be32_to_cpu(lay->in[0]) >> 16;
+ if (ent->in->numpages != 0)
+ lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
+ if (ent->out->numpages != 0)
+ lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
+ lay->inlen = cpu_to_be32(ent->uin_size);
+ lay->outlen = cpu_to_be32(ent->uout_size);
+ lay->type = MLX5_PCI_CMD_XPORT;
+ lay->token = ent->token;
+ lay->status_own = CMD_OWNER_HW;
+ set_signature(ent, !cmd->checksum_disabled);
+ dump_command(dev, ent, 1);
+ ent->ts1 = ktime_get_ns();
+ ent->busy = 0;
+ /* ring doorbell after the descriptor is valid */
+ mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
+ /* make sure data is written to RAM */
+ mlx5_fwp_flush(cmd->cmd_page);
+ iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
+ mmiowb();
+ /* if not in polling mode, don't use ent after this point */
+ if (cmd->mode == CMD_MODE_POLLING) {
+ poll_timeout(ent);
+ /* make sure we read the descriptor after ownership is SW */
+ mlx5_cmd_comp_handler(dev, 1U << ent->idx);
+ }
+}
+
+static const char *deliv_status_to_str(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_DELIVERY_STAT_OK:
+ return "no errors";
+ case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+ return "signature error";
+ case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+ return "token error";
+ case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+ return "bad block number";
+ case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+ return "output pointer not aligned to block size";
+ case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+ return "input pointer not aligned to block size";
+ case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+ return "firmware internal error";
+ case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+ return "command input length error";
+ case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+ return "command ouput length error";
+ case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+ return "reserved fields not cleared";
+ case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+ return "bad command descriptor type";
+ default:
+ return "unknown status code";
+ }
+}
+
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
+static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+{
+ int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int err;
+
+ if (cmd->mode == CMD_MODE_POLLING) {
+ wait_for_completion(&ent->done);
+ err = ent->ret;
+ } else {
+ if (!wait_for_completion_timeout(&ent->done, timeout))
+ err = -ETIMEDOUT;
+ else
+ err = 0;
+ }
+
+ if (err == -ETIMEDOUT) {
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ }
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
+
+ return err;
+}
+
+/* Notes:
+ * 1. Callback functions may not sleep
+ * 2. Page queue commands do not support asynchronous completion
+ */
+static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+ int uin_size,
+ struct mlx5_cmd_msg *out, void *uout, int uout_size,
+ mlx5_cmd_cbk_t callback,
+ void *context, int page_queue, u8 *status)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_work_ent *ent;
+ struct mlx5_cmd_stats *stats;
+ int err = 0;
+ s64 ds;
+ u16 op;
+
+ if (callback && page_queue)
+ return -EINVAL;
+
+ ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
+ context, page_queue);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ if (!callback)
+ init_completion(&ent->done);
+
+ INIT_WORK(&ent->work, cmd_work_handler);
+ if (page_queue) {
+ cmd_work_handler(&ent->work);
+ } else if (!queue_work(cmd->wq, &ent->work)) {
+ mlx5_core_warn(dev, "failed to queue work\n");
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (!callback) {
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), (long long)ds);
+ *status = ent->status;
+ free_cmd(ent);
+ }
+
+ return err;
+
+out_free:
+ free_cmd(ent);
+out:
+ return err;
+}
+
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
+{
+ size_t delta;
+ size_t i;
+
+ if (to == NULL || from == NULL)
+ return (-ENOMEM);
+
+ delta = min_t(size_t, size, sizeof(to->first.data));
+ memcpy(to->first.data, from, delta);
+ from = (char *)from + delta;
+ size -= delta;
+
+ for (i = 0; size != 0; i++) {
+ struct mlx5_cmd_prot_block *block;
+
+ block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);
+
+ delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
+ memcpy(block->data, from, delta);
+ from = (char *)from + delta;
+ size -= delta;
+ }
+ return (0);
+}
+
+static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
+{
+ size_t delta;
+ size_t i;
+
+ if (to == NULL || from == NULL)
+ return (-ENOMEM);
+
+ delta = min_t(size_t, size, sizeof(from->first.data));
+ memcpy(to, from->first.data, delta);
+ to = (char *)to + delta;
+ size -= delta;
+
+ for (i = 0; size != 0; i++) {
+ struct mlx5_cmd_prot_block *block;
+
+ block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);
+
+ delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
+ memcpy(to, block->data, delta);
+ to = (char *)to + delta;
+ size -= delta;
+ }
+ return (0);
+}
+
+static struct mlx5_cmd_msg *
+mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
+{
+ struct mlx5_cmd_msg *msg;
+ size_t blen;
+ size_t n;
+ size_t i;
+
+ blen = size - min_t(size_t, sizeof(msg->first.data), size);
+ n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);
+
+ msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
+ if (msg == NULL)
+ return (ERR_PTR(-ENOMEM));
+
+ for (i = 0; i != n; i++) {
+ struct mlx5_cmd_prot_block *block;
+
+ block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);
+
+ memset(block, 0, MLX5_CMD_MBOX_SIZE);
+
+ if (i != (n - 1)) {
+ u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
+ block->next = cpu_to_be64(dma);
+ }
+ block->block_num = cpu_to_be32(i);
+ }
+
+ /* make sure initial data is written to RAM */
+ mlx5_fwp_flush(msg);
+
+ return (msg);
+}
+
+static void
+mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
+{
+
+ mlx5_fwp_free(msg);
+}
+
+static void set_wqname(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+
+ snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
+ dev_name(&dev->pdev->dev));
+}
+
+static void clean_debug_files(struct mlx5_core_dev *dev)
+{
+}
+
+
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ int i;
+
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ down(&cmd->sem);
+
+ down(&cmd->pages_sem);
+
+ flush_workqueue(cmd->wq);
+
+ cmd->mode = CMD_MODE_EVENTS;
+
+ up(&cmd->pages_sem);
+ for (i = 0; i < cmd->max_reg_cmds; i++)
+ up(&cmd->sem);
+}
+
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+
+ synchronize_irq(dev->priv.eq_table.pages_eq.irqn);
+ flush_workqueue(dev->priv.pg_wq);
+ cmd->moving_to_polling = 1;
+ flush_workqueue(cmd->wq);
+ cmd->mode = CMD_MODE_POLLING;
+ cmd->moving_to_polling = 0;
+}
+
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
+{
+ unsigned long flags;
+
+ if (msg->cache) {
+ spin_lock_irqsave(&msg->cache->lock, flags);
+ list_add_tail(&msg->list, &msg->cache->head);
+ spin_unlock_irqrestore(&msg->cache->lock, flags);
+ } else {
+ mlx5_free_cmd_msg(dev, msg);
+ }
+}
+
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_work_ent *ent;
+ int i;
+
+ /* make sure data gets read from RAM */
+ mlx5_fwp_invalidate(cmd->cmd_page);
+
+ while (vector != 0) {
+ i = ffs(vector) - 1;
+ vector &= ~(1U << i);
+ ent = cmd->ent_arr[i];
+ ent->ts2 = ktime_get_ns();
+ memcpy(ent->out->first.data, ent->lay->out,
+ sizeof(ent->lay->out));
+ /* make sure data gets read from RAM */
+ mlx5_fwp_invalidate(ent->out);
+ dump_command(dev, ent, 0);
+ if (!ent->ret) {
+ if (!cmd->checksum_disabled)
+ ent->ret = verify_signature(ent);
+ else
+ ent->ret = 0;
+ ent->status = ent->lay->status_own >> 1;
+
+ mlx5_core_dbg(dev,
+ "FW command ret 0x%x, status %s(0x%x)\n",
+ ent->ret,
+ deliv_status_to_str(ent->status),
+ ent->status);
+ }
+ free_ent(cmd, ent->idx);
+ complete_command(ent);
+ }
+}
+EXPORT_SYMBOL(mlx5_cmd_comp_handler);
+
+void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+ unsigned long vector;
+ int i = 0;
+ unsigned long flags;
+ synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+ vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ if (!vector)
+ return;
+
+ for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
+ struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];
+
+ if (!test_bit(i, &vector))
+ continue;
+
+ while (ent->busy)
+ usleep_range(1000, 1100);
+ free_ent(&dev->cmd, i);
+ complete_command(ent);
+ }
+}
+EXPORT_SYMBOL(mlx5_trigger_cmd_completions);
+
+static int status_to_err(u8 status)
+{
+ return status ? -1 : 0; /* TBD more meaningful codes */
+}
+
+static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+ gfp_t gfp)
+{
+ struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct cache_ent *ent = NULL;
+
+ if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
+ ent = &cmd->cache.large;
+ else if (in_size > 16 && in_size <= MED_LIST_SIZE)
+ ent = &cmd->cache.med;
+
+ if (ent) {
+ spin_lock_irq(&ent->lock);
+ if (!list_empty(&ent->head)) {
+ msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
+ list);
+ list_del(&msg->list);
+ }
+ spin_unlock_irq(&ent->lock);
+ }
+
+ if (IS_ERR(msg))
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+
+ return msg;
+}
+
+static int is_manage_pages(struct mlx5_inbox_hdr *in)
+{
+ return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+}
+
+static int cmd_exec_helper(struct mlx5_core_dev *dev,
+ void *in, int in_size,
+ void *out, int out_size,
+ mlx5_cmd_cbk_t callback, void *context)
+{
+ struct mlx5_cmd_msg *inb;
+ struct mlx5_cmd_msg *outb;
+ int pages_queue;
+ const gfp_t gfp = GFP_KERNEL;
+ int err;
+ u8 status = 0;
+
+ pages_queue = is_manage_pages(in);
+
+ inb = alloc_msg(dev, in_size, gfp);
+ if (IS_ERR(inb)) {
+ err = PTR_ERR(inb);
+ return err;
+ }
+
+ err = mlx5_copy_to_msg(inb, in, in_size);
+ if (err) {
+ mlx5_core_warn(dev, "err %d\n", err);
+ goto out_in;
+ }
+
+ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+ if (IS_ERR(outb)) {
+ err = PTR_ERR(outb);
+ goto out_in;
+ }
+
+ err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
+ context, pages_queue, &status);
+ if (err) {
+ if (err == -ETIMEDOUT)
+ return err;
+ goto out_out;
+ }
+
+ mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
+ if (status) {
+ err = status_to_err(status);
+ goto out_out;
+ }
+
+ if (callback)
+ return err;
+
+ err = mlx5_copy_from_msg(out, outb, out_size);
+
+out_out:
+ mlx5_free_cmd_msg(dev, outb);
+
+out_in:
+ free_msg(dev, inb);
+ return err;
+}
+
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+ int out_size)
+{
+ return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec);
+
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+ void *out, int out_size, mlx5_cmd_cbk_t callback,
+ void *context)
+{
+ return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+
+static void destroy_msg_cache(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_msg *msg;
+ struct mlx5_cmd_msg *n;
+
+ list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
+ list_del(&msg->list);
+ mlx5_free_cmd_msg(dev, msg);
+ }
+
+ list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
+ list_del(&msg->list);
+ mlx5_free_cmd_msg(dev, msg);
+ }
+}
+
+static int create_msg_cache(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ struct mlx5_cmd_msg *msg;
+ int err;
+ int i;
+
+ spin_lock_init(&cmd->cache.large.lock);
+ INIT_LIST_HEAD(&cmd->cache.large.head);
+ spin_lock_init(&cmd->cache.med.lock);
+ INIT_LIST_HEAD(&cmd->cache.med.head);
+
+ for (i = 0; i < NUM_LONG_LISTS; i++) {
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto ex_err;
+ }
+ msg->cache = &cmd->cache.large;
+ list_add_tail(&msg->list, &cmd->cache.large.head);
+ }
+
+ for (i = 0; i < NUM_MED_LISTS; i++) {
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto ex_err;
+ }
+ msg->cache = &cmd->cache.med;
+ list_add_tail(&msg->list, &cmd->cache.med.head);
+ }
+
+ return 0;
+
+ex_err:
+ destroy_msg_cache(dev);
+ return err;
+}
+
+static int
+alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+ int err;
+
+ sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
+ mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
+ cv_init(&cmd->dma_cv, "MLX5-DMA-CV");
+
+ /*
+ * Create global DMA descriptor tag for allocating
+ * 4K firmware pages:
+ */
+ err = -bus_dma_tag_create(
+ bus_get_dma_tag(dev->pdev->dev.bsddev),
+ MLX5_ADAPTER_PAGE_SIZE, /* alignment */
+ 0, /* no boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MLX5_ADAPTER_PAGE_SIZE, /* maxsize */
+ 1, /* nsegments */
+ MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &cmd->dma_tag);
+ if (err != 0)
+ goto failure_destroy_sx;
+
+ cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
+ if (cmd->cmd_page == NULL) {
+ err = -ENOMEM;
+ goto failure_alloc_page;
+ }
+ cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
+ cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
+ return (0);
+
+failure_alloc_page:
+ bus_dma_tag_destroy(cmd->dma_tag);
+
+failure_destroy_sx:
+ cv_destroy(&cmd->dma_cv);
+ mtx_destroy(&cmd->dma_mtx);
+ sx_destroy(&cmd->dma_sx);
+ return (err);
+}
+
+static void
+free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+
+ mlx5_fwp_free(cmd->cmd_page);
+ bus_dma_tag_destroy(cmd->dma_tag);
+ cv_destroy(&cmd->dma_cv);
+ mtx_destroy(&cmd->dma_mtx);
+ sx_destroy(&cmd->dma_sx);
+}
+
+int mlx5_cmd_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+ u32 cmd_h, cmd_l;
+ u16 cmd_if_rev;
+ int err;
+ int i;
+
+ cmd_if_rev = cmdif_rev_get(dev);
+ if (cmd_if_rev != CMD_IF_REV) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
+ return -EINVAL;
+ }
+
+ err = alloc_cmd_page(dev, cmd);
+ if (err)
+ goto err_free_pool;
+
+ cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
+ cmd->log_sz = cmd_l >> 4 & 0xf;
+ cmd->log_stride = cmd_l & 0xf;
+ if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
+ err = -EINVAL;
+ goto err_free_page;
+ }
+
+ if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
+ err = -EINVAL;
+ goto err_free_page;
+ }
+
+ cmd->checksum_disabled = 1;
+ cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+ cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+
+ cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+ if (cmd->cmdif_rev > CMD_IF_REV) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
+ err = -ENOTSUPP;
+ goto err_free_page;
+ }
+
+ spin_lock_init(&cmd->alloc_lock);
+ spin_lock_init(&cmd->token_lock);
+ for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
+ spin_lock_init(&cmd->stats[i].lock);
+
+ sema_init(&cmd->sem, cmd->max_reg_cmds);
+ sema_init(&cmd->pages_sem, 1);
+
+ cmd_h = (u32)((u64)(cmd->dma) >> 32);
+ cmd_l = (u32)(cmd->dma);
+ if (cmd_l & 0xfff) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
+ err = -ENOMEM;
+ goto err_free_page;
+ }
+
+ iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
+ iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
+
+ /* Make sure firmware sees the complete address before we proceed */
+ wmb();
+
+ mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
+
+ cmd->mode = CMD_MODE_POLLING;
+
+ err = create_msg_cache(dev);
+ if (err) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
+ goto err_free_page;
+ }
+
+ set_wqname(dev);
+ cmd->wq = create_singlethread_workqueue(cmd->wq_name);
+ if (!cmd->wq) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");
+ err = -ENOMEM;
+ goto err_cache;
+ }
+
+ return 0;
+
+err_cache:
+ destroy_msg_cache(dev);
+
+err_free_page:
+ free_cmd_page(dev, cmd);
+
+err_free_pool:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_cmd_init);
+
+void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cmd *cmd = &dev->cmd;
+
+ clean_debug_files(dev);
+ destroy_workqueue(cmd->wq);
+ destroy_msg_cache(dev);
+ free_cmd_page(dev, cmd);
+}
+EXPORT_SYMBOL(mlx5_cmd_cleanup);
+
+static const char *cmd_status_str(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK:
+ return "OK";
+ case MLX5_CMD_STAT_INT_ERR:
+ return "internal error";
+ case MLX5_CMD_STAT_BAD_OP_ERR:
+ return "bad operation";
+ case MLX5_CMD_STAT_BAD_PARAM_ERR:
+ return "bad parameter";
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
+ return "bad system state";
+ case MLX5_CMD_STAT_BAD_RES_ERR:
+ return "bad resource";
+ case MLX5_CMD_STAT_RES_BUSY:
+ return "resource busy";
+ case MLX5_CMD_STAT_LIM_ERR:
+ return "limits exceeded";
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
+ return "bad resource state";
+ case MLX5_CMD_STAT_IX_ERR:
+ return "bad index";
+ case MLX5_CMD_STAT_NO_RES_ERR:
+ return "no resources";
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
+ return "bad input length";
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
+ return "bad output length";
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
+ return "bad QP state";
+ case MLX5_CMD_STAT_BAD_PKT_ERR:
+ return "bad packet (discarded)";
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
+ return "bad size too many outstanding CQEs";
+ default:
+ return "unknown status";
+ }
+}
+
+static int cmd_status_to_err_helper(u8 status)
+{
+ switch (status) {
+ case MLX5_CMD_STAT_OK: return 0;
+ case MLX5_CMD_STAT_INT_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
+ case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
+ case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
+ case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
+ case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
+ case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
+ default: return -EIO;
+ }
+}
+
+/* this will be available till all the commands use set/get macros */
+int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
+{
+ if (!hdr->status)
+ return 0;
+
+ printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));
+
+ return cmd_status_to_err_helper(hdr->status);
+}
+
+int mlx5_cmd_status_to_err_v2(void *ptr)
+{
+ u32 syndrome;
+ u8 status;
+
+ status = be32_to_cpu(*(__be32 *)ptr) >> 24;
+ if (!status)
+ return 0;
+
+ syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
+
+ printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);
+
+ return cmd_status_to_err_helper(status);
+}
+
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
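
Note on the command interface added in mlx5_cmd.c above: consumers normally call mlx5_cmd_exec() for synchronous execution or mlx5_cmd_exec_cb() for asynchronous execution (the callback may not sleep, and page-queue commands are synchronous only), then map the firmware status in the outbox header to an errno with mlx5_cmd_status_to_err(). The sketch below is illustrative only; the my_cmd_in/my_cmd_out mailbox layouts are placeholders, not structures from this commit (real callers use the mailboxes defined in device.h).

    struct my_cmd_in {                      /* hypothetical layout */
            struct mlx5_inbox_hdr   hdr;
            u8                      rsvd[8];
    } in;
    struct my_cmd_out {                     /* hypothetical layout */
            struct mlx5_outbox_hdr  hdr;
            u8                      rsvd[8];
    } out;
    int err;

    memset(&in, 0, sizeof(in));
    memset(&out, 0, sizeof(out));
    in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_ADAPTER);

    err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
    if (err)
            return (err);                   /* delivery/transport failure */

    return (mlx5_cmd_status_to_err(&out.hdr));  /* firmware status -> errno */
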
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,95 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_core.h 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#ifndef __MLX5_CORE_H__
+#define __MLX5_CORE_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "1.23.0 (03 Mar 2015)"
+#define DRIVER_RELDATE "03 Mar 2015"
+
+extern int mlx5_core_debug_mask;
+
+#define mlx5_core_dbg(dev, format, ...) \
+ pr_debug("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, curthread->td_proc->p_pid, \
+ ##__VA_ARGS__)
+
+#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
+} while (0)
+
+#define mlx5_core_err(dev, format, ...) \
+ printf("mlx5_core: ERR: ""%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, curthread->td_proc->p_pid, \
+ ##__VA_ARGS__)
+
+#define mlx5_core_warn(dev, format, ...) \
+ printf("mlx5_core: WARN: ""%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, curthread->td_proc->p_pid, \
+ ##__VA_ARGS__)
+
+enum {
+ MLX5_CMD_DATA, /* print command payload only */
+ MLX5_CMD_TIME, /* print command execution time */
+};
+
+struct mlx5_core_dev;
+
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+ int in_size, u32 *out,
+ int out_size)
+{
+ int err;
+ err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+
+ if (err) {
+ return err;
+ }
+
+ err = mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+ return err;
+}
+
+int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name);
+
+#endif /* __MLX5_CORE_H__ */
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_core.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
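
The header above also provides the inline mlx5_cmd_exec_check_status(), which combines command execution with outbox status translation, and the mlx5_core_debug_mask bits (MLX5_CMD_DATA, MLX5_CMD_TIME) that gate the payload dump and execution-time logging paths in mlx5_cmd.c. A minimal sketch of the usual caller pattern, essentially the same shape as mlx5_cmd_destroy_eq() later in this commit ("eqn" is assumed to be in scope):

    u32 in[MLX5_ST_SZ_DW(destroy_eq_in)];
    u32 out[MLX5_ST_SZ_DW(destroy_eq_out)];

    memset(in, 0, sizeof(in));
    memset(out, 0, sizeof(out));

    MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
    MLX5_SET(destroy_eq_in, in, eq_number, eqn);

    /* negative errno on delivery failure or non-zero firmware status */
    return (mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)));
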
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_cq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_cq.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_cq.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,309 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_cq.c 321996 2017-08-03 13:57:17Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include <dev/mlx5/driver.h>
+#include <rdma/ib_verbs.h>
+#include <dev/mlx5/cq.h>
+#include "mlx5_core.h"
+
+void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
+{
+ struct mlx5_core_cq *cq;
+ struct mlx5_cq_table *table = &dev->priv.cq_table;
+
+ if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
+ struct mlx5_cq_linear_array_entry *entry;
+
+ entry = &table->linear_array[cqn];
+ spin_lock(&entry->lock);
+ cq = entry->cq;
+ if (cq == NULL) {
+ mlx5_core_warn(dev,
+ "Completion event for bogus CQ 0x%x\n", cqn);
+ } else {
+ ++cq->arm_sn;
+ cq->comp(cq);
+ }
+ spin_unlock(&entry->lock);
+ return;
+ }
+
+ spin_lock(&table->lock);
+ cq = radix_tree_lookup(&table->tree, cqn);
+ if (likely(cq))
+ atomic_inc(&cq->refcount);
+ spin_unlock(&table->lock);
+
+ if (!cq) {
+ mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
+ return;
+ }
+
+ ++cq->arm_sn;
+
+ cq->comp(cq);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
+void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
+{
+ struct mlx5_cq_table *table = &dev->priv.cq_table;
+ struct mlx5_core_cq *cq;
+
+ spin_lock(&table->lock);
+
+ cq = radix_tree_lookup(&table->tree, cqn);
+ if (cq)
+ atomic_inc(&cq->refcount);
+
+ spin_unlock(&table->lock);
+
+ if (!cq) {
+ mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, event_type);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+}
+
+
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_create_cq_mbox_in *in, int inlen)
+{
+ int err;
+ struct mlx5_cq_table *table = &dev->priv.cq_table;
+ struct mlx5_create_cq_mbox_out out;
+ struct mlx5_destroy_cq_mbox_in din;
+ struct mlx5_destroy_cq_mbox_out dout;
+
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
+ memset(&out, 0, sizeof(out));
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
+ cq->cons_index = 0;
+ cq->arm_sn = 0;
+ atomic_set(&cq->refcount, 1);
+ init_completion(&cq->free);
+
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, cq->cqn, cq);
+ spin_unlock_irq(&table->lock);
+ if (err)
+ goto err_cmd;
+
+ if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
+ struct mlx5_cq_linear_array_entry *entry;
+
+ entry = &table->linear_array[cq->cqn];
+ spin_lock_irq(&entry->lock);
+ entry->cq = cq;
+ spin_unlock_irq(&entry->lock);
+ }
+
+ cq->pid = curthread->td_proc->p_pid;
+
+ return 0;
+
+err_cmd:
+ memset(&din, 0, sizeof(din));
+ memset(&dout, 0, sizeof(dout));
+ din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
+ din.cqn = cpu_to_be32(cq->cqn);
+ mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_cq);
+
+int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
+{
+ struct mlx5_cq_table *table = &dev->priv.cq_table;
+ struct mlx5_destroy_cq_mbox_in in;
+ struct mlx5_destroy_cq_mbox_out out;
+ struct mlx5_core_cq *tmp;
+ int err;
+
+ if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
+ struct mlx5_cq_linear_array_entry *entry;
+
+ entry = &table->linear_array[cq->cqn];
+ spin_lock_irq(&entry->lock);
+ entry->cq = NULL;
+ spin_unlock_irq(&entry->lock);
+ }
+
+ spin_lock_irq(&table->lock);
+ tmp = radix_tree_delete(&table->tree, cq->cqn);
+ spin_unlock_irq(&table->lock);
+ if (!tmp) {
+ mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
+ return -EINVAL;
+ }
+ if (tmp != cq) {
+ mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
+ return -EINVAL;
+ }
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
+ in.cqn = cpu_to_be32(cq->cqn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ goto out;
+
+ if (out.hdr.status) {
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ goto out;
+ }
+
+ synchronize_irq(cq->irqn);
+
+ if (atomic_dec_and_test(&cq->refcount))
+ complete(&cq->free);
+ wait_for_completion(&cq->free);
+
+out:
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_destroy_cq);
+
+int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_query_cq_mbox_out *out)
+{
+ struct mlx5_query_cq_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(out, 0, sizeof(*out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
+ in.cqn = cpu_to_be32(cq->cqn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ if (err)
+ return err;
+
+ if (out->hdr.status)
+ return mlx5_cmd_status_to_err(&out->hdr);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_query_cq);
+
+
+int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ struct mlx5_modify_cq_mbox_in *in, int in_sz)
+{
+ struct mlx5_modify_cq_mbox_out out;
+ int err;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
+ err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_core_modify_cq);
+
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
+int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count,
+ u8 cq_mode)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.ctx.cqe_sz_flags = (cq_mode & 2) >> 1;
+ in.ctx.st = (cq_mode & 1) << 7;
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT |
+ MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
+int mlx5_init_cq_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_cq_table *table = &dev->priv.cq_table;
+ int err;
+ int x;
+
+ spin_lock_init(&table->lock);
+ for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++)
+ spin_lock_init(&table->linear_array[x].lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+ err = 0;
+
+ return err;
+}
+
+void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
+{
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_cq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
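
One detail in mlx5_core_modify_cq_moderation_mode() above that is easy to misread: cq_mode packs two fields, with bit 0 selecting the period mode (written to ctx.st, shifted up to bit 7) and bit 1 selecting the CQE size flag (shifted down into ctx.cqe_sz_flags). A minimal caller sketch, assuming a hypothetical wrapper around an already-created CQ; the period and count values are illustrative:

    /*
     * Sketch only: cq_mode = 1 -> ctx.st = 0x80, ctx.cqe_sz_flags = 0
     *              cq_mode = 3 -> ctx.st = 0x80, ctx.cqe_sz_flags = 1
     */
    static int
    example_set_cq_moderation(struct mlx5_core_dev *dev,   /* hypothetical */
        struct mlx5_core_cq *cq)
    {
            const u16 cq_period = 32;       /* assumed moderation period */
            const u16 cq_max_count = 16;    /* assumed CQEs per event */
            const u8 cq_mode = 1;           /* period-mode bit only */

            return (mlx5_core_modify_cq_moderation_mode(dev, cq,
                cq_period, cq_max_count, cq_mode));
    }
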
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,287 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c 322007 2017-08-03 14:14:13Z hselasky $
+ */
+
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/diagnostics.h>
+
+const struct mlx5_core_diagnostics_entry
+ mlx5_core_pci_diagnostics_table[
+ MLX5_CORE_PCI_DIAGNOSTICS_NUM] = {
+ MLX5_CORE_PCI_DIAGNOSTICS(MLX5_CORE_DIAGNOSTICS_ENTRY)
+};
+
+const struct mlx5_core_diagnostics_entry
+ mlx5_core_general_diagnostics_table[
+ MLX5_CORE_GENERAL_DIAGNOSTICS_NUM] = {
+ MLX5_CORE_GENERAL_DIAGNOSTICS(MLX5_CORE_DIAGNOSTICS_ENTRY)
+};
+
+static int mlx5_core_get_index_of_diag_counter(
+ const struct mlx5_core_diagnostics_entry *entry,
+ int size, u16 counter_id)
+{
+ int x;
+
+ /* check for invalid counter ID */
+ if (counter_id == 0)
+ return -1;
+
+ /* lookup counter ID in table */
+ for (x = 0; x != size; x++) {
+ if (entry[x].counter_id == counter_id)
+ return x;
+ }
+ return -1;
+}
+
+static void mlx5_core_put_diag_counter(
+ const struct mlx5_core_diagnostics_entry *entry,
+ u64 *array, int size, u16 counter_id, u64 value)
+{
+ int x;
+
+ /* check for invalid counter ID */
+ if (counter_id == 0)
+ return;
+
+ /* lookup counter ID in table */
+ for (x = 0; x != size; x++) {
+ if (entry[x].counter_id == counter_id) {
+ array[x] = value;
+ break;
+ }
+ }
+}
+
+int mlx5_core_set_diagnostics_full(struct mlx5_core_dev *dev,
+ u8 enable_pci, u8 enable_general)
+{
+ void *diag_params_ctx;
+ void *in;
+ int numcounters;
+ int inlen;
+ int err;
+ int x;
+ int y;
+
+ if (MLX5_CAP_GEN(dev, debug) == 0)
+ return 0;
+
+ numcounters = MLX5_CAP_GEN(dev, num_of_diagnostic_counters);
+ if (numcounters == 0)
+ return 0;
+
+ inlen = MLX5_ST_SZ_BYTES(set_diagnostic_params_in) +
+ MLX5_ST_SZ_BYTES(diagnostic_counter) * numcounters;
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return -ENOMEM;
+
+ diag_params_ctx = MLX5_ADDR_OF(set_diagnostic_params_in, in,
+ diagnostic_params_ctx);
+
+ MLX5_SET(diagnostic_params_context, diag_params_ctx,
+ enable, enable_pci || enable_general);
+ MLX5_SET(diagnostic_params_context, diag_params_ctx,
+ single, 1);
+ MLX5_SET(diagnostic_params_context, diag_params_ctx,
+ on_demand, 1);
+
+ /* collect the counters we want to enable */
+ for (x = y = 0; x != numcounters; x++) {
+ u16 counter_id =
+ MLX5_CAP_DEBUG(dev, diagnostic_counter[x].counter_id);
+ int index = -1;
+
+ if (index < 0 && enable_pci != 0) {
+ /* check if counter ID exists in local table */
+ index = mlx5_core_get_index_of_diag_counter(
+ mlx5_core_pci_diagnostics_table,
+ MLX5_CORE_PCI_DIAGNOSTICS_NUM,
+ counter_id);
+ }
+ if (index < 0 && enable_general != 0) {
+ /* check if counter ID exists in local table */
+ index = mlx5_core_get_index_of_diag_counter(
+ mlx5_core_general_diagnostics_table,
+ MLX5_CORE_GENERAL_DIAGNOSTICS_NUM,
+ counter_id);
+ }
+ if (index < 0)
+ continue;
+
+ MLX5_SET(diagnostic_params_context,
+ diag_params_ctx,
+ counter_id[y].counter_id,
+ counter_id);
+ y++;
+ }
+
+ /* recompute input length */
+ inlen = MLX5_ST_SZ_BYTES(set_diagnostic_params_in) +
+ MLX5_ST_SZ_BYTES(diagnostic_counter) * y;
+
+ /* set number of counters */
+ MLX5_SET(diagnostic_params_context, diag_params_ctx,
+ num_of_counters, y);
+
+ /* execute firmware command */
+ err = mlx5_set_diagnostic_params(dev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_get_diagnostics_full(struct mlx5_core_dev *dev,
+ union mlx5_core_pci_diagnostics *pdiag,
+ union mlx5_core_general_diagnostics *pgen)
+{
+ void *out;
+ void *in;
+ int numcounters;
+ int outlen;
+ int inlen;
+ int err;
+ int x;
+
+ if (MLX5_CAP_GEN(dev, debug) == 0)
+ return 0;
+
+ numcounters = MLX5_CAP_GEN(dev, num_of_diagnostic_counters);
+ if (numcounters == 0)
+ return 0;
+
+ outlen = MLX5_ST_SZ_BYTES(query_diagnostic_counters_out) +
+ MLX5_ST_SZ_BYTES(diagnostic_counter) * numcounters;
+
+ out = mlx5_vzalloc(outlen);
+ if (out == NULL)
+ return -ENOMEM;
+
+ err = mlx5_query_diagnostic_counters(dev, 1, 0, out, outlen);
+ if (err == 0) {
+ for (x = 0; x != numcounters; x++) {
+ u16 counter_id = MLX5_GET(
+ query_diagnostic_counters_out,
+ out, diag_counter[x].counter_id);
+ u64 counter_value = MLX5_GET64(
+ query_diagnostic_counters_out,
+ out, diag_counter[x].counter_value_h);
+
+ if (pdiag != NULL) {
+ mlx5_core_put_diag_counter(
+ mlx5_core_pci_diagnostics_table,
+ pdiag->array,
+ MLX5_CORE_PCI_DIAGNOSTICS_NUM,
+ counter_id, counter_value);
+ }
+ if (pgen != NULL) {
+ mlx5_core_put_diag_counter(
+ mlx5_core_general_diagnostics_table,
+ pgen->array,
+ MLX5_CORE_GENERAL_DIAGNOSTICS_NUM,
+ counter_id, counter_value);
+ }
+ }
+ }
+ kvfree(out);
+
+ if (pdiag != NULL) {
+ inlen = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ outlen = MLX5_ST_SZ_BYTES(mpcnt_reg);
+
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return -ENOMEM;
+
+ out = mlx5_vzalloc(outlen);
+ if (out == NULL) {
+ kvfree(in);
+ return -ENOMEM;
+ }
+ MLX5_SET(mpcnt_reg, in, grp,
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+
+ err = mlx5_core_access_reg(dev, in, inlen, out, outlen,
+ MLX5_REG_MPCNT, 0, 0);
+ if (err == 0) {
+ void *pcounters = MLX5_ADDR_OF(mpcnt_reg, out,
+ counter_set.pcie_performance_counters_data_layout);
+
+ pdiag->counter.rx_pci_errors =
+ MLX5_GET(pcie_performance_counters_data_layout,
+ pcounters, rx_errors);
+ pdiag->counter.tx_pci_errors =
+ MLX5_GET(pcie_performance_counters_data_layout,
+ pcounters, tx_errors);
+ }
+ MLX5_SET(mpcnt_reg, in, grp,
+ MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
+
+ err = mlx5_core_access_reg(dev, in, inlen, out, outlen,
+ MLX5_REG_MPCNT, 0, 0);
+ if (err == 0) {
+ void *pcounters = MLX5_ADDR_OF(mpcnt_reg, out,
+ counter_set.pcie_timers_and_states_data_layout);
+
+ pdiag->counter.tx_pci_non_fatal_errors =
+ MLX5_GET(pcie_timers_and_states_data_layout,
+ pcounters, non_fatal_err_msg_sent);
+ pdiag->counter.tx_pci_fatal_errors =
+ MLX5_GET(pcie_timers_and_states_data_layout,
+ pcounters, fatal_err_msg_sent);
+ }
+ kvfree(in);
+ kvfree(out);
+ }
+ return 0;
+}
+
+int mlx5_core_supports_diagnostics(struct mlx5_core_dev *dev, u16 counter_id)
+{
+ int numcounters;
+ int x;
+
+ if (MLX5_CAP_GEN(dev, debug) == 0)
+ return 0;
+
+ /* check for any counter */
+ if (counter_id == 0)
+ return 1;
+
+ numcounters = MLX5_CAP_GEN(dev, num_of_diagnostic_counters);
+
+ /* check if counter ID exists in debug capability */
+ for (x = 0; x != numcounters; x++) {
+ if (MLX5_CAP_DEBUG(dev, diagnostic_counter[x].counter_id) ==
+ counter_id)
+ return 1;
+ }
+ return 0; /* not supported counter */
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
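
The diagnostics helpers above follow an enable-then-query pattern: mlx5_core_set_diagnostics_full() programs the selected counter groups in firmware, and mlx5_core_get_diagnostics_full() reads the values back into the PCI and general counter unions (either pointer may be NULL to skip a group). A hedged usage sketch; the local variables and the debug print are illustrative, not part of this commit:

    union mlx5_core_pci_diagnostics pdiag;
    union mlx5_core_general_diagnostics pgen;
    int err;

    memset(&pdiag, 0, sizeof(pdiag));
    memset(&pgen, 0, sizeof(pgen));

    /* enable both the PCI and the general counter groups */
    err = mlx5_core_set_diagnostics_full(dev, 1, 1);
    if (err)
            return (err);

    /* snapshot the counters into both unions */
    err = mlx5_core_get_diagnostics_full(dev, &pdiag, &pgen);
    if (err)
            return (err);

    mlx5_core_dbg(dev, "rx_pci_errors=%llu\n",
        (unsigned long long)pdiag.counter.rx_pci_errors);
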
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,672 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_eq.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_ifc.h>
+#include "mlx5_core.h"
+
+#if (__FreeBSD_version >= 1100000)
+#include "opt_rss.h"
+#endif
+
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
+enum {
+ MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
+ MLX5_EQE_OWNER_INIT_VAL = 0x1,
+};
+
+enum {
+ MLX5_NUM_SPARE_EQE = 0x80,
+ MLX5_NUM_ASYNC_EQE = 0x100,
+ MLX5_NUM_CMD_EQE = 32,
+};
+
+enum {
+ MLX5_EQ_DOORBEL_OFFSET = 0x40,
+};
+
+#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
+ (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
+ (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
+ (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
+ (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
+ (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
+ (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+ (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
+ (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
+ (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
+ (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
+ (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
+
+struct map_eq_in {
+ u64 mask;
+ u32 reserved;
+ u32 unmap_eqn;
+};
+
+struct cre_des_eq {
+ u8 reserved[15];
+ u8 eqn;
+};
+
+/* Function prototype */
+static void mlx5_port_module_event(struct mlx5_core_dev *dev,
+ struct mlx5_eqe *eqe);
+
+static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_eq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_eq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
+ MLX5_SET(destroy_eq_in, in, eq_number, eqn);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
+{
+ return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
+}
+
+static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
+{
+ struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+
+ return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
+}
+
+static const char *eqe_type_str(u8 type)
+{
+ switch (type) {
+ case MLX5_EVENT_TYPE_COMP:
+ return "MLX5_EVENT_TYPE_COMP";
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ return "MLX5_EVENT_TYPE_PATH_MIG";
+ case MLX5_EVENT_TYPE_COMM_EST:
+ return "MLX5_EVENT_TYPE_COMM_EST";
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ return "MLX5_EVENT_TYPE_SQ_DRAINED";
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ return "MLX5_EVENT_TYPE_CQ_ERROR";
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
+ case MLX5_EVENT_TYPE_INTERNAL_ERROR:
+ return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ return "MLX5_EVENT_TYPE_PORT_CHANGE";
+ case MLX5_EVENT_TYPE_GPIO_EVENT:
+ return "MLX5_EVENT_TYPE_GPIO_EVENT";
+ case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
+ return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
+ case MLX5_EVENT_TYPE_REMOTE_CONFIG:
+ return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
+ case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
+ return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
+ case MLX5_EVENT_TYPE_STALL_EVENT:
+ return "MLX5_EVENT_TYPE_STALL_EVENT";
+ case MLX5_EVENT_TYPE_CMD:
+ return "MLX5_EVENT_TYPE_CMD";
+ case MLX5_EVENT_TYPE_PAGE_REQUEST:
+ return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+ case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
+ return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
+ default:
+ return "Unrecognized event";
+ }
+}
+
+static enum mlx5_dev_event port_subtype_event(u8 subtype)
+{
+ switch (subtype) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ return MLX5_DEV_EVENT_PORT_DOWN;
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ return MLX5_DEV_EVENT_PORT_UP;
+ case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ return MLX5_DEV_EVENT_PORT_INITIALIZED;
+ case MLX5_PORT_CHANGE_SUBTYPE_LID:
+ return MLX5_DEV_EVENT_LID_CHANGE;
+ case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+ return MLX5_DEV_EVENT_PKEY_CHANGE;
+ case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+ return MLX5_DEV_EVENT_GUID_CHANGE;
+ case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ return MLX5_DEV_EVENT_CLIENT_REREG;
+ }
+ return -1;
+}
+
+static enum mlx5_dev_event dcbx_subevent(u8 subtype)
+{
+ switch (subtype) {
+ case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
+ return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
+ return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
+ case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
+ return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
+ return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
+ }
+ return -1;
+}
+
+static void eq_update_ci(struct mlx5_eq *eq, int arm)
+{
+ __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
+ u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+ __raw_writel((__force u32) cpu_to_be32(val), addr);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+
+static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ struct mlx5_eqe *eqe;
+ int eqes_found = 0;
+ int set_ci = 0;
+ u32 cqn;
+ u32 rsn;
+ u8 port;
+
+ while ((eqe = next_eqe_sw(eq))) {
+ /*
+ * Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ rmb();
+
+ mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+ eq->eqn, eqe_type_str(eqe->type));
+ switch (eqe->type) {
+ case MLX5_EVENT_TYPE_COMP:
+ cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
+ mlx5_cq_completion(dev, cqn);
+ break;
+
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
+ eqe_type_str(eqe->type), eqe->type, rsn);
+ mlx5_rsc_event(dev, rsn, eqe->type);
+ break;
+
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
+ eqe_type_str(eqe->type), eqe->type, rsn);
+ mlx5_srq_event(dev, rsn, eqe->type);
+ break;
+
+ case MLX5_EVENT_TYPE_CMD:
+ mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+ break;
+
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ port = (eqe->data.port.port >> 4) & 0xf;
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ case MLX5_PORT_CHANGE_SUBTYPE_LID:
+ case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+ case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+ case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ if (dev->event)
+ dev->event(dev, port_subtype_event(eqe->sub_type),
+ (unsigned long)port);
+ break;
+ default:
+ mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
+ port, eqe->sub_type);
+ }
+ break;
+
+ case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
+ port = (eqe->data.port.port >> 4) & 0xf;
+ switch (eqe->sub_type) {
+ case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
+ case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
+ case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
+ if (dev->event)
+ dev->event(dev,
+ dcbx_subevent(eqe->sub_type),
+ 0);
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
+ port, eqe->sub_type);
+ }
+ break;
+
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
+ cqn, eqe->data.cq_err.syndrome);
+ mlx5_cq_event(dev, cqn, eqe->type);
+ break;
+
+ case MLX5_EVENT_TYPE_PAGE_REQUEST:
+ {
+ u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+ s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
+ mlx5_core_req_pages_handler(dev, func_id, npages);
+ }
+ break;
+
+ case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
+ mlx5_port_module_event(dev, eqe);
+ break;
+
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ {
+ struct mlx5_eqe_vport_change *vc_eqe =
+ &eqe->data.vport_change;
+ u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
+
+ if (dev->event)
+ dev->event(dev,
+ MLX5_DEV_EVENT_VPORT_CHANGE,
+ (unsigned long)vport_num);
+ }
+ break;
+
+ default:
+ mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+ eqe->type, eq->eqn);
+ break;
+ }
+
+ ++eq->cons_index;
+ eqes_found = 1;
+ ++set_ci;
+
+ /* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MLX5_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
+ if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
+ eq_update_ci(eq, 0);
+ set_ci = 0;
+ }
+ }
+
+ eq_update_ci(eq, 1);
+
+ return eqes_found;
+}
+
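+/* MSI-X interrupt handler: drain the EQ associated with this vector. */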
+static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
+{
+ struct mlx5_eq *eq = eq_ptr;
+ struct mlx5_core_dev *dev = eq->dev;
+
+ mlx5_eq_int(dev, eq);
+
+ /* MSI-X vectors always belong to us */
+ return IRQ_HANDLED;
+}
+
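+/*
+ * Set the ownership bit of every EQE to its initial value so that no entry
+ * is treated as valid before hardware has written it.
+ */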
+static void init_eq_buf(struct mlx5_eq *eq)
+{
+ struct mlx5_eqe *eqe;
+ int i;
+
+ for (i = 0; i < eq->nent; i++) {
+ eqe = get_eqe(eq, i);
+ eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
+ }
+}
+
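+/*
+ * Allocate the EQ buffer, issue CREATE_EQ and hook up the MSI-X vector for
+ * this EQ.  The EQ is left armed on successful return.
+ */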
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+ int nent, u64 mask, const char *name, struct mlx5_uar *uar)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_create_eq_mbox_in *in;
+ struct mlx5_create_eq_mbox_out out;
+ int err;
+ int inlen;
+
+ eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+ err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
+ &eq->buf);
+ if (err)
+ return err;
+
+ init_eq_buf(eq);
+
+ inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_buf;
+ }
+ memset(&out, 0, sizeof(out));
+
+ mlx5_fill_page_array(&eq->buf, in->pas);
+
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
+ in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
+ in->ctx.intr = vecidx;
+ in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->events_mask = cpu_to_be64(mask);
+
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err)
+ goto err_in;
+
+ if (out.hdr.status) {
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ goto err_in;
+ }
+
+ eq->eqn = out.eq_number;
+ eq->irqn = vecidx;
+ eq->dev = dev;
+ eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
+	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
+ name, pci_name(dev->pdev));
+ err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ priv->irq_info[vecidx].name, eq);
+ if (err)
+ goto err_eq;
+#ifdef RSS
+ if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
+ u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;
+ err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
+ rss_getcpu(bucket % rss_getnumbuckets()));
+ if (err)
+ goto err_irq;
+ }
+#else
+ if (0)
+ goto err_irq;
+#endif
+
+	/* EQs are created in ARMED state */
+ eq_update_ci(eq, 1);
+
+ kvfree(in);
+ return 0;
+
+err_irq:
+ free_irq(priv->msix_arr[vecidx].vector, eq);
+
+err_eq:
+ mlx5_cmd_destroy_eq(dev, eq->eqn);
+
+err_in:
+ kvfree(in);
+
+err_buf:
+ mlx5_buf_free(dev, &eq->buf);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
+
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ int err;
+
+ free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
+ err = mlx5_cmd_destroy_eq(dev, eq->eqn);
+ if (err)
+ mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
+ eq->eqn);
+ mlx5_buf_free(dev, &eq->buf);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+
+int mlx5_eq_init(struct mlx5_core_dev *dev)
+{
+	spin_lock_init(&dev->priv.eq_table.lock);
+
+	return 0;
+}
+
+
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
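+/*
+ * Create the command, asynchronous and page-request EQs and switch the
+ * command interface from polling to event-driven completions.
+ */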
+int mlx5_start_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+ int err;
+
+ if (MLX5_CAP_GEN(dev, port_module_event))
+ async_event_mask |= (1ull <<
+ MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);
+
+ if (MLX5_CAP_GEN(dev, nic_vport_change_event))
+ async_event_mask |= (1ull <<
+ MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
+
+ if (MLX5_CAP_GEN(dev, dcbx))
+ async_event_mask |= (1ull <<
+ MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);
+
+ err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
+ MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
+ "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
+ return err;
+ }
+
+ mlx5_cmd_use_events(dev);
+
+ err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
+ MLX5_NUM_ASYNC_EQE, async_event_mask,
+ "mlx5_async_eq", &dev->priv.uuari.uars[0]);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
+ goto err1;
+ }
+
+ err = mlx5_create_map_eq(dev, &table->pages_eq,
+ MLX5_EQ_VEC_PAGES,
+ /* TODO: sriov max_vf + */ 1,
+ 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
+ &dev->priv.uuari.uars[0]);
+ if (err) {
+ mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
+ goto err2;
+ }
+
+ return err;
+
+err2:
+ mlx5_destroy_unmap_eq(dev, &table->async_eq);
+
+err1:
+ mlx5_cmd_use_polling(dev);
+ mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+ return err;
+}
+
+int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ int err;
+
+ err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+ if (err)
+ return err;
+
+ mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ mlx5_cmd_use_polling(dev);
+
+ err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+ if (err)
+ mlx5_cmd_use_events(dev);
+
+ return err;
+}
+
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ struct mlx5_query_eq_mbox_out *out, int outlen)
+{
+ struct mlx5_query_eq_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(out, 0, outlen);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
+ in.eqn = eq->eqn;
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ if (err)
+ return err;
+
+ if (out->hdr.status)
+ err = mlx5_cmd_status_to_err(&out->hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
+
+static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
+{
+ switch (error_type) {
+ case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
+ return "Power Budget Exceeded";
+ case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
+ return "Long Range for non MLNX cable/module";
+ case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
+		return "Bus stuck (I2C or data shorted)";
+ case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
+ return "No EEPROM/retry timeout";
+ case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
+ return "Enforce part number list";
+ case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER:
+ return "Unknown identifier";
+ case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
+ return "High Temperature";
+ case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
+ return "Cable is shorted";
+
+ default:
+ return "Unknown error type";
+ }
+}
+
+unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
+{
+ if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
+ return 0; /* undefined */
+ return dev->module_status[module_num];
+}
+
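+/*
+ * Log module (cable) plug, unplug and error events and cache the most
+ * recent status per module for later queries.
+ */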
+static void mlx5_port_module_event(struct mlx5_core_dev *dev,
+ struct mlx5_eqe *eqe)
+{
+ unsigned int module_num;
+ unsigned int module_status;
+ unsigned int error_type;
+ struct mlx5_eqe_port_module_event *module_event_eqe;
+ struct pci_dev *pdev = dev->pdev;
+
+ module_event_eqe = &eqe->data.port_module_event;
+
+ module_num = (unsigned int)module_event_eqe->module;
+ module_status = (unsigned int)module_event_eqe->module_status &
+ PORT_MODULE_EVENT_MODULE_STATUS_MASK;
+ error_type = (unsigned int)module_event_eqe->error_type &
+ PORT_MODULE_EVENT_ERROR_TYPE_MASK;
+
+ switch (module_status) {
+ case MLX5_MODULE_STATUS_PLUGGED:
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged\n", module_num);
+ break;
+
+ case MLX5_MODULE_STATUS_UNPLUGGED:
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged\n", module_num);
+ break;
+
+ case MLX5_MODULE_STATUS_ERROR:
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s\n", module_num, mlx5_port_module_event_error_type_to_string(error_type));
+ break;
+
+ default:
+ device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status\n", module_num);
+ }
+ /* store module status */
+ if (module_num < MLX5_MAX_PORTS)
+ dev->module_status[module_num] = module_status;
+}
+
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_eq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,805 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/flow_table.h>
+#include <dev/mlx5/eswitch_vacl.h>
+#include "mlx5_core.h"
+
+enum {
+ MLX5_ACL_LOOPBACK_GROUP_IDX = 0,
+ MLX5_ACL_UNTAGGED_GROUP_IDX = 1,
+ MLX5_ACL_VLAN_GROUP_IDX = 2,
+ MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX = 3,
+ MLX5_ACL_DEFAULT_GROUP_IDX = 4,
+ MLX5_ACL_GROUPS_NUM,
+};
+
+struct mlx_vacl_fr {
+ bool applied;
+ u32 fi;
+ u16 action;
+};
+
+struct mlx5_vacl_table {
+ struct mlx5_core_dev *dev;
+ u16 vport;
+ void *ft;
+ int max_ft_size;
+ int acl_type;
+
+ struct mlx_vacl_fr loopback_fr;
+ struct mlx_vacl_fr untagged_fr;
+ struct mlx_vacl_fr unknown_vlan_fr;
+ struct mlx_vacl_fr default_fr;
+
+ bool vlan_filter_enabled;
+ bool vlan_filter_applied;
+ unsigned long *vlan_allowed_bitmap;
+ u32 vlan_fi_table[4096];
+
+ bool spoofchk_enabled;
+ u8 smac[ETH_ALEN];
+};
+
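+/*
+ * Install an ALLOW rule for a single VLAN ID that is already marked in the
+ * allowed bitmap; when spoof checking is enabled the source MAC is matched
+ * as well.
+ */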
+static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ u8 *smac;
+ int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
+ int err = 0;
+
+ if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
+ return -EINVAL;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Apply vlan rule */
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
+ vlan);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
+ 0xfff);
+ if (acl_table->spoofchk_enabled) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac, acl_table->smac);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+ err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->vlan_fi_table[vlan]);
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ void *mv_misc = NULL;
+ void *mc_misc = NULL;
+ int err = 0;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->loopback_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->loopback_fr.fi);
+
+ /* Apply new loopback rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
+ misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+
+ err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->loopback_fr.fi);
+ if (err) {
+ acl_table->loopback_fr.applied = false;
+ } else {
+ acl_table->loopback_fr.applied = true;
+ acl_table->loopback_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 default_mc_enable = 0;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ int err = 0;
+
+ if (!acl_table->spoofchk_enabled)
+ return -EINVAL;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->default_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->default_fr.fi);
+
+ /* Apply new default rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->default_fr.fi);
+ if (err) {
+ acl_table->default_fr.applied = false;
+ } else {
+ acl_table->default_fr.applied = true;
+ acl_table->default_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
+ u8 *smac;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ int err = 0;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->untagged_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->untagged_fr.fi);
+
+ /* Apply new untagged rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 0);
+ MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
+ if (acl_table->spoofchk_enabled) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac, acl_table->smac);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+ err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->untagged_fr.fi);
+ if (err) {
+ acl_table->untagged_fr.applied = false;
+ } else {
+ acl_table->untagged_fr.applied = true;
+ acl_table->untagged_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
+ MLX5_MATCH_OUTER_HEADERS;
+ u32 *flow_context = NULL;
+ void *in_match_criteria = NULL;
+ void *in_match_value = NULL;
+ u8 *smac;
+ int err = 0;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
+ if (!flow_context) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!in_match_criteria) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (acl_table->unknown_vlan_fr.applied)
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->unknown_vlan_fr.fi);
+
+ /* Apply new unknown vlan rule */
+ MLX5_SET(flow_context, flow_context, action, new_action);
+ if (acl_table->spoofchk_enabled) {
+ in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
+ match_value);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac, acl_table->smac);
+ smac = MLX5_ADDR_OF(fte_match_param,
+ in_match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+ err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
+ in_match_criteria, flow_context,
+ &acl_table->unknown_vlan_fr.fi);
+ if (err) {
+ acl_table->unknown_vlan_fr.applied = false;
+ } else {
+ acl_table->unknown_vlan_fr.applied = true;
+ acl_table->unknown_vlan_fr.action = new_action;
+ }
+
+out:
+ if (flow_context)
+ vfree(flow_context);
+ if (in_match_criteria)
+ vfree(in_match_criteria);
+ return err;
+}
+
+static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int index = 0;
+ int err_index = 0;
+ int err = 0;
+
+ if (acl_table->vlan_filter_applied)
+ return 0;
+
+ for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
+ index < 4096;
+ index = find_next_bit(acl_table->vlan_allowed_bitmap,
+ 4096, ++index)) {
+ err = mlx5_vacl_table_allow_vlan(acl_t, index);
+ if (err)
+ goto err_disable_vlans;
+ }
+
+ acl_table->vlan_filter_applied = true;
+ return 0;
+
+err_disable_vlans:
+ for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
+ err_index < index;
+ err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
+ ++err_index)) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->vlan_fi_table[err_index]);
+ }
+ return err;
+}
+
+static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int index = 0;
+
+ if (!acl_table->vlan_filter_applied)
+ return;
+
+ for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
+ index < 4096;
+ index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
+ ++index)) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->vlan_fi_table[index]);
+ }
+
+ acl_table->vlan_filter_applied = false;
+}
+
+static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ if (acl_table->default_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->default_fr.fi);
+ acl_table->default_fr.applied = false;
+ }
+ if (acl_table->unknown_vlan_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->unknown_vlan_fr.fi);
+ acl_table->unknown_vlan_fr.applied = false;
+ }
+ if (acl_table->loopback_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->loopback_fr.fi);
+ acl_table->loopback_fr.applied = false;
+ }
+ if (acl_table->untagged_fr.applied) {
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->untagged_fr.fi);
+ acl_table->untagged_fr.applied = false;
+ }
+ if (acl_table->vlan_filter_applied) {
+ mlx5_vacl_table_disapply_vlan_filter(acl_t);
+ acl_table->vlan_filter_applied = false;
+ }
+}
+
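+/*
+ * (Re)apply every configured ACL rule: default (spoofchk only), unknown
+ * VLAN, loopback (egress only), untagged and the per-VLAN filter, rolling
+ * everything back on error.
+ */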
+static int mlx5_vacl_table_apply_all_filters(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int err = 0;
+
+ if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
+ err = mlx5_vacl_table_apply_default(acl_table,
+ acl_table->default_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->unknown_vlan_fr.applied) {
+ err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
+ acl_table->unknown_vlan_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->loopback_fr.applied &&
+ acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
+ err = mlx5_vacl_table_apply_loopback_filter(
+ acl_table,
+ acl_table->loopback_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->untagged_fr.applied) {
+ err = mlx5_vacl_table_apply_untagged(acl_table,
+ acl_table->untagged_fr.action);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
+ err = mlx5_vacl_table_apply_vlan_filter(acl_t);
+ if (err)
+ goto err_disapply_all;
+ }
+
+ goto out;
+
+err_disapply_all:
+ mlx5_vacl_table_disapply_all_filters(acl_t);
+
+out:
+ return err;
+}
+
+static void mlx5_vacl_table_destroy_ft(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ mlx5_vacl_table_disapply_all_filters(acl_t);
+ if (acl_table->ft)
+ mlx5_destroy_flow_table(acl_table->ft);
+ acl_table->ft = NULL;
+}
+
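+/*
+ * Build the ACL flow table groups (loopback, untagged, allowed-VLAN,
+ * unknown-VLAN and default), create the table in firmware and apply the
+ * currently configured rules.
+ */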
+static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int log_acl_ft_size;
+ int err = 0;
+ int groups_num = MLX5_ACL_GROUPS_NUM - 1;
+ int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
+ u8 *smac;
+ struct mlx5_flow_table_group *g;
+
+ if (acl_table->ft)
+ return -EINVAL;
+
+	g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
+	if (!g) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+ acl_table->spoofchk_enabled = spoofchk;
+
+ /*
+ * for vlan group
+ */
+ log_acl_ft_size = 4096;
+ /*
+ * for loopback filter rule
+ */
+ log_acl_ft_size += 1;
+ /*
+ * for untagged rule
+ */
+ log_acl_ft_size += 1;
+ /*
+ * for unknown vlan rule
+ */
+ log_acl_ft_size += 1;
+ /*
+ * for default rule
+ */
+ log_acl_ft_size += 1;
+
+ log_acl_ft_size = order_base_2(log_acl_ft_size);
+ log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
+
+ if (log_acl_ft_size < 2)
+ goto out;
+
+ if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
+ /* Loopback filter group */
+ g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
+ g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
+ MLX5_MATCH_MISC_PARAMETERS;
+ MLX5_SET_TO_ONES(fte_match_param,
+ g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
+ misc_parameters.source_port);
+ groups_num++;
+ shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
+ }
+ /* Untagged traffic group */
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
+ MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_param,
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
+ outer_headers.cvlan_tag, 1);
+ if (spoofchk) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
+ .match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+
+ /* Allowed vlans group */
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
+ MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET(fte_match_param,
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
+ outer_headers.cvlan_tag, 1);
+ MLX5_SET(fte_match_param,
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
+ outer_headers.first_vid, 0xfff);
+ if (spoofchk) {
+ smac = MLX5_ADDR_OF(fte_match_param,
+ g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
+ .match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+
+ /* Unknown vlan traffic group */
+ g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
+ g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
+ (spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
+ if (spoofchk) {
+ smac = MLX5_ADDR_OF(
+ fte_match_param,
+ g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
+ .match_criteria,
+ outer_headers.smac_47_16);
+ memset(smac, 0xff, ETH_ALEN);
+ }
+
+ /*
+ * Default group - for spoofchk only.
+ */
+ g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
+ g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
+
+ acl_table->ft = mlx5_create_flow_table(acl_table->dev,
+ 0,
+ acl_table->acl_type,
+ acl_table->vport,
+ groups_num,
+ g);
+ if (!acl_table->ft) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = mlx5_vacl_table_apply_all_filters(acl_t);
+ if (err)
+ goto err_destroy_ft;
+
+ goto out;
+
+err_destroy_ft:
+	mlx5_vacl_table_destroy_ft(acl_table);
+ acl_table->ft = NULL;
+
+out:
+ kfree(g);
+ return err;
+}
+
+void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
+ u16 vport, bool is_egress)
+{
+ struct mlx5_vacl_table *acl_table;
+ int err = 0;
+
+ if (is_egress && !MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+ return NULL;
+
+ if (!is_egress && !MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return NULL;
+
+ acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
+ if (!acl_table)
+ return NULL;
+
+ acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
+ MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
+ acl_table->max_ft_size = (is_egress ?
+ MLX5_CAP_ESW_EGRESS_ACL(dev,
+ log_max_ft_size) :
+ MLX5_CAP_ESW_INGRESS_ACL(dev,
+ log_max_ft_size));
+ acl_table->dev = dev;
+ acl_table->vport = vport;
+
+ /*
+ * default behavior : Allow and if spoofchk drop the default
+ */
+ acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ err = mlx5_vacl_table_create_ft(acl_table, false);
+ if (err)
+ goto err_free_acl_table;
+
+ acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
+ sizeof(uintptr_t),
+ GFP_KERNEL);
+ if (!acl_table->vlan_allowed_bitmap)
+ goto err_destroy_ft;
+
+ goto out;
+
+err_destroy_ft:
+	mlx5_vacl_table_destroy_ft(acl_table);
+ acl_table->ft = NULL;
+
+err_free_acl_table:
+ kfree(acl_table);
+ acl_table = NULL;
+
+out:
+ return (void *)acl_table;
+}
+EXPORT_SYMBOL(mlx5_vacl_table_create);
+
+void mlx5_vacl_table_cleanup(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ mlx5_vacl_table_destroy_ft(acl_t);
+ kfree(acl_table->vlan_allowed_bitmap);
+ kfree(acl_table);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
+
+int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int err = 0;
+
+ if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
+ return 0;
+ __set_bit(vlan, acl_table->vlan_allowed_bitmap);
+ if (!acl_table->vlan_filter_applied)
+ return 0;
+
+ err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
+ if (err)
+ goto err_clear_vbit;
+
+ goto out;
+
+err_clear_vbit:
+ __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
+
+void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
+ return;
+
+ __clear_bit(vlan, acl_table->vlan_allowed_bitmap);
+
+ if (!acl_table->vlan_filter_applied)
+ return;
+
+ mlx5_del_flow_table_entry(acl_table->ft,
+ acl_table->vlan_fi_table[vlan]);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
+
+int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ acl_table->vlan_filter_enabled = true;
+ return mlx5_vacl_table_apply_vlan_filter(acl_t);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
+
+void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+
+ acl_table->vlan_filter_enabled = false;
+ mlx5_vacl_table_disapply_vlan_filter(acl_t);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
+
+int mlx5_vacl_table_drop_untagged(void *acl_t)
+{
+ return mlx5_vacl_table_apply_untagged(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_DROP);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
+
+int mlx5_vacl_table_allow_untagged(void *acl_t)
+{
+ return mlx5_vacl_table_apply_untagged(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
+
+int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
+{
+ return mlx5_vacl_table_apply_unknown_vlan(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_DROP);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
+
+int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
+{
+ return mlx5_vacl_table_apply_unknown_vlan(acl_t,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+}
+EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
+
+int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
+{
+ struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
+ int err = 0;
+
+ if (spoofchk == acl_table->spoofchk_enabled) {
+ if (!spoofchk ||
+ (spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
+ return 0;
+ }
+
+ ether_addr_copy(acl_table->smac, vport_mac);
+ if (spoofchk != acl_table->spoofchk_enabled) {
+ mlx5_vacl_table_destroy_ft(acl_t);
+ err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
+ } else {
+ mlx5_vacl_table_disapply_all_filters(acl_t);
+ err = mlx5_vacl_table_apply_all_filters(acl_t);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);
+
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,480 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+ struct mlx5_flow_table_group g;
+ u32 id;
+ u32 start_ix;
+};
+
+struct mlx5_flow_table {
+ struct mlx5_core_dev *dev;
+ u8 level;
+ u8 type;
+ u32 id;
+ u16 vport;
+ struct mutex mutex; /* sync bitmap alloc */
+ u16 num_groups;
+ struct mlx5_ftg *group;
+ unsigned long *bitmap;
+ u32 size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+ u32 flow_index, void *flow_context)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 *in;
+ void *in_flow_context;
+ int fcdls =
+ MLX5_GET(flow_context, flow_context, destination_list_size) *
+ MLX5_ST_SZ_BYTES(dest_format_struct);
+ int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, !!ft->vport);
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, flow_index);
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ memcpy(in_flow_context, flow_context,
+ MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+ MLX5_SET(flow_context, in_flow_context, group_id, ft->group[group_ix].id);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+ MLX5_SET_DFTEI(in, vport_number, ft->vport);
+ MLX5_SET_DFTEI(in, other_vport, !!ft->vport);
+ MLX5_SET_DFTEI(in, table_type, ft->type);
+ MLX5_SET_DFTEI(in, table_id, ft->id);
+ MLX5_SET_DFTEI(in, flow_index, flow_index);
+ MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+ return mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+ MLX5_SET_DFGI(in, vport_number, ft->vport);
+ MLX5_SET_DFGI(in, other_vport, !!ft->vport);
+ MLX5_SET_DFGI(in, table_type, ft->type);
+ MLX5_SET_DFGI(in, table_id, ft->id);
+ MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ u32 *in;
+ void *in_match_criteria;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ u32 start_ix = ft->group[i].start_ix;
+ u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+ in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria);
+
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+ MLX5_SET_CFGI(in, vport_number, ft->vport);
+ MLX5_SET_CFGI(in, other_vport, !!ft->vport);
+ MLX5_SET_CFGI(in, table_type, ft->type);
+ MLX5_SET_CFGI(in, table_id, ft->id);
+ MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET_CFGI(in, start_flow_index, start_ix);
+ MLX5_SET_CFGI(in, end_flow_index, end_ix);
+ MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+ memcpy(in_match_criteria, g->match_criteria,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ if (!err)
+ ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++)
+ mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ err = mlx5_create_flow_group_cmd(ft, i);
+ if (err)
+ goto err_destroy_flow_table_groups;
+ }
+
+ return 0;
+
+err_destroy_flow_table_groups:
+ for (i--; i >= 0; i--)
+ mlx5_destroy_flow_group_cmd(ft, i);
+
+ return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_table_in, in, other_vport, !!ft->vport);
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, level, ft->level);
+ MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+ MLX5_SET_DFTI(in, vport_number, ft->vport);
+ MLX5_SET_DFTI(in, other_vport, !!ft->vport);
+ MLX5_SET_DFTI(in, table_type, ft->type);
+ MLX5_SET_DFTI(in, table_id, ft->id);
+ MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
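+/*
+ * Locate the flow group whose enabled match criteria (outer headers, misc
+ * parameters, inner headers) exactly match the requested criteria.
+ */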
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+ u32 *match_criteria, int *group_ix)
+{
+ void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+ void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters);
+ void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ inner_headers);
+ int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+ int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ outer_headers);
+ void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ misc_parameters);
+ void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ inner_headers);
+
+ if (g->match_criteria_enable != match_criteria_enable)
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+ if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+ if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+ if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+ continue;
+
+ *group_ix = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
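+/* Reserve the first free flow index within the group, guarded by ft->mutex. */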
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+ struct mlx5_ftg *g = &ft->group[group_ix];
+ int err = 0;
+
+ mutex_lock(&ft->mutex);
+
+ *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+ if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+ err = -ENOSPC;
+ else
+ __set_bit(*ix, ft->bitmap);
+
+ mutex_unlock(&ft->mutex);
+
+ return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+ __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int group_ix;
+ int err;
+
+ err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+ &group_ix);
+ if (err) {
+ mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+ return err;
+ }
+
+ err = alloc_flow_index(ft, group_ix, flow_index);
+ if (err) {
+ mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+ return err;
+ }
+
+ err = mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+ if (err)
+ mlx5_free_flow_index(ft, *flow_index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int ret;
+
+ ret = mlx5_del_flow_entry_cmd(ft, flow_index);
+ if (!ret)
+ mlx5_free_flow_index(ft, flow_index);
+ return ret;
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
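+/*
+ * Create a flow table sized to hold all requested groups, then create each
+ * group in firmware.  Returns an opaque table handle, or NULL on failure.
+ */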
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 vport,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group)
+{
+ struct mlx5_flow_table *ft;
+ u32 start_ix = 0;
+ u32 ft_size = 0;
+ void *gr;
+ void *bm;
+ int err;
+ int i;
+
+ for (i = 0; i < num_groups; i++)
+ ft_size += (1 << group[i].log_sz);
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+ bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+
+ ft->group = gr;
+ ft->bitmap = bm;
+ ft->num_groups = num_groups;
+ ft->level = level;
+ ft->vport = vport;
+ ft->type = table_type;
+ ft->size = ft_size;
+ ft->dev = dev;
+ mutex_init(&ft->mutex);
+
+ for (i = 0; i < ft->num_groups; i++) {
+ memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+ ft->group[i].start_ix = start_ix;
+ start_ix += 1 << group[i].log_sz;
+ }
+
+ err = mlx5_create_flow_table_cmd(ft);
+ if (err)
+ goto err_free_ft;
+
+ err = mlx5_create_flow_table_groups(ft);
+ if (err)
+ goto err_destroy_flow_table_cmd;
+
+ return ft;
+
+err_destroy_flow_table_cmd:
+ mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+ mlx5_core_warn(dev, "failed to alloc flow table\n");
+ kfree(bm);
+ kfree(gr);
+ kfree(ft);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_destroy_flow_table_groups(ft);
+ mlx5_destroy_flow_table_cmd(ft);
+ kfree(ft->bitmap);
+ kfree(ft->group);
+ kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
+
+int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
+ u8 vport_num, u8 table_type, u32 table_id,
+ u32 underlay_qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
+ u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
+ int err;
+ int is_group_manager;
+
+ is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(set_flow_table_root_in, in, op_mod, op_mod);
+ MLX5_SET(set_flow_table_root_in, in, table_type, table_type);
+ MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
+ if (op_mod == MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET)
+ MLX5_SET(set_flow_table_root_in, in, table_id, table_id);
+
+ MLX5_SET(set_flow_table_root_in, in, opcode,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+
+ if (vport_num) {
+ if (is_group_manager) {
+ MLX5_SET(set_flow_table_root_in, in, other_vport,
+ 1);
+ MLX5_SET(set_flow_table_root_in, in, vport_number,
+ vport_num);
+ } else {
+ return -EPERM;
+ }
+ }
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_set_flow_table_root);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_fw.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_fw.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_fw.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,314 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_fw.c 308683 2016-11-15 08:58:12Z hselasky $
+ */
+
+#include <dev/mlx5/driver.h>
+#include <linux/module.h>
+#include "mlx5_core.h"
+
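+/* Issue QUERY_ADAPTER and return the raw output mailbox to the caller. */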
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+ int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+ return err;
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+
+ err = mlx5_cmd_query_adapter(dev, out, outlen);
+ if (err)
+ goto out_out;
+
+ memcpy(dev->board_id,
+ MLX5_ADDR_OF(query_adapter_out, out,
+ query_adapter_struct.vsd_contd_psid),
+ MLX5_FLD_SZ_BYTES(query_adapter_out,
+ query_adapter_struct.vsd_contd_psid));
+
+out_out:
+ kfree(out);
+
+ return err;
+}
+
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+
+ err = mlx5_cmd_query_adapter(mdev, out, outlen);
+ if (err)
+ goto out_out;
+
+ *vendor_id = MLX5_GET(query_adapter_out, out,
+ query_adapter_struct.ieee_vendor_id);
+
+out_out:
+ kfree(out);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
+
+static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
+ out, resd_lkey);
+
+ return err;
+}
+
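+/*
+ * Query the current and maximum values of every capability group the
+ * device advertises, then read the special contexts (reserved lkey).
+ */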
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, atomic)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, roce)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if ((MLX5_CAP_GEN(dev, port_type) ==
+ MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET &&
+ MLX5_CAP_GEN(dev, nic_flow_table)) ||
+ (MLX5_CAP_GEN(dev, port_type) == MLX5_CMD_HCA_CAP_PORT_TYPE_IB &&
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, vport_group_manager)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, snapshot)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, debug)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, qos)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ err = mlx5_core_query_special_contexts(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(init_hca_in)];
+ u32 out[MLX5_ST_SZ_DW(init_hca_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(teardown_hca_in)];
+ u32 out[MLX5_ST_SZ_DW(teardown_hca_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
+ u64 addr)
+{
+ struct mlx5_cmd_set_dc_cnak_mbox_in *in;
+ struct mlx5_cmd_set_dc_cnak_mbox_out out;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_DC_CNAK_TRACE);
+ in->enable = !!enable << 7;
+ in->pa = cpu_to_be64(addr);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (err)
+ goto out;
+
+ if (out.hdr.status)
+ err = mlx5_cmd_status_to_err(&out.hdr);
+
+out:
+ kfree(in);
+
+ return err;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_fw.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,187 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_health.c 322149 2017-08-07 12:45:26Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_ifc.h>
+#include "mlx5_core.h"
+
+#define MLX5_HEALTH_POLL_INTERVAL (2 * HZ)
+#define MAX_MISSES 3
+
+static DEFINE_SPINLOCK(health_lock);
+static LIST_HEAD(health_list);
+static struct work_struct health_work;
+
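+/*
+ * Deferred handler for devices whose health counter stopped advancing.
+ * It drains the global health_list under health_lock and, for now,
+ * only logs a warning for each affected device.
+ */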
+static void health_care(struct work_struct *work)
+{
+ struct mlx5_core_health *health, *n;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ LIST_HEAD(tlist);
+
+ spin_lock_irq(&health_lock);
+ list_splice_init(&health_list, &tlist);
+
+ spin_unlock_irq(&health_lock);
+
+ list_for_each_entry_safe(health, n, &tlist, list) {
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+ mlx5_core_warn(dev, "handling bad device here\n");
+ /* nothing yet */
+ spin_lock_irq(&health_lock);
+ list_del_init(&health->list);
+ spin_unlock_irq(&health_lock);
+ }
+}
+
+static const char *hsynd_str(u8 synd)
+{
+ switch (synd) {
+ case MLX5_HEALTH_SYNDR_FW_ERR:
+ return "firmware internal error";
+ case MLX5_HEALTH_SYNDR_IRISC_ERR:
+ return "irisc not responding";
+ case MLX5_HEALTH_SYNDR_CRC_ERR:
+ return "firmware CRC error";
+ case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
+ return "ICM fetch PCI error";
+ case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
+ return "HW fatal error\n";
+ case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
+ return "async EQ buffer overrun";
+ case MLX5_HEALTH_SYNDR_EQ_ERR:
+ return "EQ error";
+ case MLX5_HEALTH_SYNDR_FFSER_ERR:
+ return "FFSER error";
+ default:
+ return "unrecognized error";
+ }
+}
+
+static u16 read_be16(__be16 __iomem *p)
+{
+ return swab16(readl((__force u16 __iomem *) p));
+}
+
+static u32 read_be32(__be32 __iomem *p)
+{
+ return swab32(readl((__force u32 __iomem *) p));
+}
+
+static void print_health_info(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+ struct mlx5_health_buffer __iomem *h = health->health;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
+ printf("mlx5_core: INFO: ""assert_var[%d] 0x%08x\n", i, read_be32(h->assert_var + i));
+
+ printf("mlx5_core: INFO: ""assert_exit_ptr 0x%08x\n", read_be32(&h->assert_exit_ptr));
+ printf("mlx5_core: INFO: ""assert_callra 0x%08x\n", read_be32(&h->assert_callra));
+ printf("mlx5_core: INFO: ""fw_ver 0x%08x\n", read_be32(&h->fw_ver));
+ printf("mlx5_core: INFO: ""hw_id 0x%08x\n", read_be32(&h->hw_id));
+ printf("mlx5_core: INFO: ""irisc_index %d\n", readb(&h->irisc_index));
+ printf("mlx5_core: INFO: ""synd 0x%x: %s\n", readb(&h->synd), hsynd_str(readb(&h->synd)));
+ printf("mlx5_core: INFO: ""ext_sync 0x%04x\n", read_be16(&h->ext_sync));
+}
+
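+/*
+ * Timer callback: sample the firmware health counter.  If it has not
+ * changed for MAX_MISSES consecutive polls the device is considered
+ * compromised; the health buffer is dumped and the device is queued on
+ * health_list for the health_care() work handler.  Otherwise the timer
+ * is re-armed with a small random jitter around the poll interval.
+ */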
+static void poll_health(unsigned long data)
+{
+ struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
+ struct mlx5_core_health *health = &dev->priv.health;
+ int next;
+ u32 count;
+
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ return;
+
+ count = ioread32be(health->health_counter);
+ if (count == health->prev)
+ ++health->miss_counter;
+ else
+ health->miss_counter = 0;
+
+ health->prev = count;
+ if (health->miss_counter == MAX_MISSES) {
+ mlx5_core_err(dev, "device's health compromised\n");
+ print_health_info(dev);
+ spin_lock_irq(&health_lock);
+ list_add_tail(&health->list, &health_list);
+ spin_unlock_irq(&health_lock);
+
+ if (!queue_work(mlx5_core_wq, &health_work))
+ mlx5_core_warn(dev, "failed to queue health work\n");
+ } else {
+ get_random_bytes(&next, sizeof(next));
+ next %= HZ;
+ next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+ mod_timer(&health->timer, next);
+ }
+}
+
+void mlx5_start_health_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ INIT_LIST_HEAD(&health->list);
+ init_timer(&health->timer);
+ health->health = &dev->iseg->health;
+ health->health_counter = &dev->iseg->health_counter;
+
+ setup_timer(&health->timer, poll_health, (unsigned long)dev);
+ mod_timer(&health->timer,
+ round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL));
+}
+
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ del_timer_sync(&health->timer);
+
+ spin_lock_irq(&health_lock);
+ if (!list_empty(&health->list))
+ list_del_init(&health->list);
+ spin_unlock_irq(&health_lock);
+}
+
+void mlx5_health_cleanup(void)
+{
+}
+
+void __init mlx5_health_init(void)
+{
+ INIT_WORK(&health_work, health_care);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_health.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_mad.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_mad.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_mad.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,67 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_mad.c 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include "mlx5_core.h"
+
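+/*
+ * Forward a MAD (management datagram) to firmware through the MAD_IFC
+ * command and copy the response back into "outb".
+ */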
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
+ u16 opmod, u8 port)
+{
+ struct mlx5_mad_ifc_mbox_in *in = NULL;
+ struct mlx5_mad_ifc_mbox_out *out = NULL;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+
+ out = kzalloc(sizeof(*out), GFP_KERNEL);
+
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MAD_IFC);
+ in->hdr.opmod = cpu_to_be16(opmod);
+ in->port = port;
+
+ memcpy(in->data, inb, sizeof(in->data));
+
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), out, sizeof(*out));
+ if (err)
+ goto out;
+
+ if (out->hdr.status) {
+ err = mlx5_cmd_status_to_err(&out->hdr);
+ goto out;
+ }
+
+ memcpy(outb, out->data, sizeof(out->data));
+
+out:
+ kfree(out);
+ kfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_mad.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_main.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_main.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_main.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1153 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_main.c 324685 2017-10-17 11:20:32Z hselasky $
+ */
+
+#define LINUXKPI_PARAM_PREFIX mlx5_
+
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/cq.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/srq.h>
+#include <linux/delay.h>
+#include <dev/mlx5/mlx5_ifc.h>
+#include "mlx5_core.h"
+
+MODULE_AUTHOR("Eli Cohen <eli at mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
+MODULE_LICENSE("Dual BSD/GPL");
+#if (__FreeBSD_version >= 1100000)
+MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
+#endif
+MODULE_VERSION(mlx5, 1);
+
+int mlx5_core_debug_mask;
+module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
+MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
+
+#define MLX5_DEFAULT_PROF 2
+static int prof_sel = MLX5_DEFAULT_PROF;
+module_param_named(prof_sel, prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+
+#define NUMA_NO_NODE -1
+
+struct workqueue_struct *mlx5_core_wq;
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
+struct mlx5_device_context {
+ struct list_head list;
+ struct mlx5_interface *intf;
+ void *context;
+};
+
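+/*
+ * Resource profiles selectable via the prof_sel module parameter.
+ * Profile 2 (the default) also pre-sizes the MR cache buckets.
+ */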
+static struct mlx5_profile profiles[] = {
+ [0] = {
+ .mask = 0,
+ },
+ [1] = {
+ .mask = MLX5_PROF_MASK_QP_SIZE,
+ .log_max_qp = 12,
+ },
+ [2] = {
+ .mask = MLX5_PROF_MASK_QP_SIZE |
+ MLX5_PROF_MASK_MR_CACHE,
+ .log_max_qp = 17,
+ .mr_cache[0] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[1] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[2] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[3] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[4] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[5] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[6] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[7] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[8] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[9] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[10] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[11] = {
+ .size = 500,
+ .limit = 250
+ },
+ .mr_cache[12] = {
+ .size = 64,
+ .limit = 32
+ },
+ .mr_cache[13] = {
+ .size = 32,
+ .limit = 16
+ },
+ .mr_cache[14] = {
+ .size = 16,
+ .limit = 8
+ },
+ },
+ [3] = {
+ .mask = MLX5_PROF_MASK_QP_SIZE,
+ .log_max_qp = 17,
+ },
+};
+
+static int set_dma_caps(struct pci_dev *pdev)
+{
+ int err;
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit PCI DMA mask\n");
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set PCI DMA mask, aborting\n");
+ return err;
+ }
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "WARN: ""Warning: couldn't set 64-bit consistent PCI DMA mask\n");
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Can't set consistent PCI DMA mask, aborting\n");
+ return err;
+ }
+ }
+
+ dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
+ return err;
+}
+
+static int request_bar(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Missing registers BAR, aborting\n");
+ return -ENODEV;
+ }
+
+ err = pci_request_regions(pdev, DRIVER_NAME);
+ if (err)
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Couldn't get PCI resources, aborting\n");
+
+ return err;
+}
+
+static void release_bar(struct pci_dev *pdev)
+{
+ pci_release_regions(pdev);
+}
+
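+/*
+ * Enable MSI-X: request one vector per online CPU per port plus the
+ * MLX5_EQ_VEC_COMP_BASE control vectors, clamped to the number of EQs
+ * the device supports.
+ */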
+static int mlx5_enable_msix(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int nvec;
+ int i;
+
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
+ nvec = min_t(int, nvec, num_eqs);
+ if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+ return -ENOMEM;
+
+ priv->msix_arr = kzalloc(nvec * sizeof(*priv->msix_arr), GFP_KERNEL);
+
+ priv->irq_info = kzalloc(nvec * sizeof(*priv->irq_info), GFP_KERNEL);
+
+ for (i = 0; i < nvec; i++)
+ priv->msix_arr[i].entry = i;
+
+ nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
+ MLX5_EQ_VEC_COMP_BASE + 1, nvec);
+ if (nvec < 0)
+ return nvec;
+
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
+
+ return 0;
+}
+
+static void mlx5_disable_msix(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+
+ pci_disable_msix(dev->pdev);
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
+}
+
+struct mlx5_reg_host_endianess {
+ u8 he;
+ u8 rsvd[15];
+};
+
+#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
+
+enum {
+ MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
+ MLX5_DEV_CAP_FLAG_DCT |
+ MLX5_DEV_CAP_FLAG_DRAIN_SIGERR,
+};
+
+static u16 to_fw_pkey_sz(u32 size)
+{
+ switch (size) {
+ case 128:
+ return 0;
+ case 256:
+ return 1;
+ case 512:
+ return 2;
+ case 1024:
+ return 3;
+ case 2048:
+ return 4;
+ case 4096:
+ return 5;
+ default:
+ printf("mlx5_core: WARN: ""invalid pkey table size %d\n", size);
+ return 0;
+ }
+}
+
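+/*
+ * Query a single HCA capability group via QUERY_HCA_CAP and cache the
+ * result in dev->hca_caps_max[] or dev->hca_caps_cur[], depending on
+ * whether the maximum or the currently configured values were requested.
+ */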
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
+{
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *out, *hca_caps;
+ u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
+ int err;
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto query_ex;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err) {
+ mlx5_core_warn(dev,
+ "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+ cap_type, cap_mode, err);
+ goto query_ex;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ switch (cap_mode) {
+ case HCA_CAP_OPMOD_GET_MAX:
+ memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ case HCA_CAP_OPMOD_GET_CUR:
+ memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+ cap_type, cap_mode);
+ err = -EINVAL;
+ break;
+ }
+query_ex:
+ kfree(out);
+ return err;
+}
+
+static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
+{
+ u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
+ int err;
+
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ if (err)
+ return err;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+
+ return err;
+}
+
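+/*
+ * Read the general HCA capabilities and write back an adjusted copy:
+ * the pkey table is capped at 128 entries, the command interface
+ * checksum is disabled, drain_sigerr is enabled and log_max_qp is
+ * taken from the selected profile when requested.
+ */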
+static int handle_hca_cap(struct mlx5_core_dev *dev)
+{
+ void *set_ctx = NULL;
+ struct mlx5_profile *prof = dev->profile;
+ int err = -ENOMEM;
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
+
+ set_ctx = kzalloc(set_sz, GFP_KERNEL);
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ goto query_ex;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ goto query_ex;
+
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+ mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+ mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ 128);
+ /* we limit the size of the pkey table to 128 entries for now */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+ to_fw_pkey_sz(128));
+
+ if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+ prof->log_max_qp);
+
+ /* disable cmdif checksum */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+
+ /* enable drain sigerr */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, drain_sigerr, 1);
+
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
+ err = set_caps(dev, set_ctx, set_sz);
+
+query_ex:
+ kfree(set_ctx);
+ return err;
+}
+
+static int set_hca_ctrl(struct mlx5_core_dev *dev)
+{
+ struct mlx5_reg_host_endianess he_in;
+ struct mlx5_reg_host_endianess he_out;
+ int err;
+
+ if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
+ !MLX5_CAP_GEN(dev, roce))
+ return 0;
+
+ memset(&he_in, 0, sizeof(he_in));
+ he_in.he = MLX5_SET_HOST_ENDIANNESS;
+ err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
+ &he_out, sizeof(he_out),
+ MLX5_REG_HOST_ENDIANNESS, 0, 1);
+ return err;
+}
+
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
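+/*
+ * Negotiate the ISSI (interface step sequence ID) with firmware: query
+ * the supported set and switch to ISSI 1 when available, falling back
+ * to ISSI 0 otherwise.
+ */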
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+ int err;
+ u32 sup_issi;
+
+ memset(query_in, 0, sizeof(query_in));
+ memset(query_out, 0, sizeof(query_out));
+
+ MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+ err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
+ if (err) {
+ if (((struct mlx5_outbox_hdr *)query_out)->status ==
+ MLX5_CMD_STAT_BAD_OP_ERR) {
+ pr_debug("Only ISSI 0 is supported\n");
+ return 0;
+ }
+
+ printf("mlx5_core: ERR: ""failed to query ISSI\n");
+ return err;
+ }
+
+ sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+ if (sup_issi & (1 << 1)) {
+ memset(set_in, 0, sizeof(set_in));
+ memset(set_out, 0, sizeof(set_out));
+
+ MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+ MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
+ if (err) {
+ printf("mlx5_core: ERR: ""failed to set ISSI=1\n");
+ return err;
+ }
+
+ dev->issi = 1;
+
+ return 0;
+ } else if (sup_issi & (1 << 0)) {
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq;
+ int err = -ENOENT;
+
+ spin_lock(&table->lock);
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ if (eq->index == vector) {
+ *eqn = eq->eqn;
+ *irqn = eq->irqn;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock(&table->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
+
+int mlx5_rename_eq(struct mlx5_core_dev *dev, int eq_ix, char *name)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ struct mlx5_eq *eq;
+ int err = -ENOENT;
+
+ spin_lock(&table->lock);
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ if (eq->index == eq_ix) {
+ int irq_ix = eq_ix + MLX5_EQ_VEC_COMP_BASE;
+
+ snprintf(priv->irq_info[irq_ix].name, MLX5_MAX_IRQ_NAME,
+ "%s-%d", name, eq_ix);
+
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock(&table->lock);
+
+ return err;
+}
+
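+/*
+ * Tear down all completion EQs.  The table lock is released around each
+ * mlx5_destroy_unmap_eq() call, which issues a firmware command.
+ */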
+static void free_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq, *n;
+
+ spin_lock(&table->lock);
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_del(&eq->list);
+ spin_unlock(&table->lock);
+ if (mlx5_destroy_unmap_eq(dev, eq))
+ mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
+ eq->eqn);
+ kfree(eq);
+ spin_lock(&table->lock);
+ }
+ spin_unlock(&table->lock);
+}
+
+static int alloc_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ char name[MLX5_MAX_IRQ_NAME];
+ struct mlx5_eq *eq;
+ int ncomp_vec;
+ int nent;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&table->comp_eqs_list);
+ ncomp_vec = table->num_comp_vectors;
+ nent = MLX5_COMP_EQ_SIZE;
+ for (i = 0; i < ncomp_vec; i++) {
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
+ err = mlx5_create_map_eq(dev, eq,
+ i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
+ name, &dev->priv.uuari.uars[0]);
+ if (err) {
+ kfree(eq);
+ goto clean;
+ }
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
+ eq->index = i;
+ spin_lock(&table->lock);
+ list_add_tail(&eq->list, &table->comp_eqs_list);
+ spin_unlock(&table->lock);
+ }
+
+ return 0;
+
+clean:
+ free_comp_eqs(dev);
+ return err;
+}
+
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+ resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+ resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+ dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+ return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+ if (dev->priv.bf_mapping)
+ io_mapping_free(dev->priv.bf_mapping);
+}
+
+static inline int fw_initializing(struct mlx5_core_dev *dev)
+{
+ return ioread32be(&dev->iseg->initializing) >> 31;
+}
+
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+ u64 end = jiffies + msecs_to_jiffies(max_wait_mili);
+ int err = 0;
+
+ while (fw_initializing(dev)) {
+ if (time_after(jiffies, end)) {
+ err = -EBUSY;
+ break;
+ }
+ msleep(FW_INIT_WAIT_MS);
+ }
+
+ return err;
+}
+
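+/*
+ * Bring up one PCI function: enable the device, map the initialization
+ * segment, start the command interface, negotiate capabilities and
+ * pages with firmware, set up MSI-X, EQs and UARs, and initialize the
+ * CQ/QP/SRQ/MR tables.  Errors unwind in reverse order via the labels
+ * at the end of the function.
+ */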
+static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev->pdev = pdev;
+ pci_set_drvdata(dev->pdev, dev);
+ strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
+ priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
+
+ mutex_init(&priv->pgdir_mutex);
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ spin_lock_init(&priv->mkey_lock);
+
+ priv->numa_node = NUMA_NO_NODE;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Cannot enable PCI device, aborting\n");
+ goto err_dbg;
+ }
+
+ err = request_bar(pdev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""error requesting BARs, aborting\n");
+ goto err_disable;
+ }
+
+ pci_set_master(pdev);
+
+ err = set_dma_caps(pdev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed setting DMA capabilities mask, aborting\n");
+ goto err_clr_master;
+ }
+
+ dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
+ sizeof(*dev->iseg));
+ if (!dev->iseg) {
+ err = -ENOMEM;
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
+ goto err_clr_master;
+ }
+ device_printf((&pdev->dev)->bsddev, "INFO: ""firmware version: %d.%d.%d\n", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
+
+ /*
+ * On load, clear any previous indication of internal error;
+ * the device is up.
+ */
+ dev->state = MLX5_DEVICE_STATE_UP;
+
+ err = mlx5_cmd_init(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed initializing command interface, aborting\n");
+ goto err_unmap;
+ }
+
+ err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+ if (err) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Firmware over %d MS in initializing state, aborting\n", FW_INIT_TIMEOUT_MILI);
+ goto err_cmd_cleanup;
+ }
+
+ mlx5_pagealloc_init(dev);
+
+ err = mlx5_core_enable_hca(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""enable hca failed\n");
+ goto err_pagealloc_cleanup;
+ }
+
+ err = mlx5_core_set_issi(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""failed to set issi\n");
+ goto err_disable_hca;
+ }
+
+ err = mlx5_pagealloc_start(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_pagealloc_start failed\n");
+ goto err_disable_hca;
+ }
+
+ err = mlx5_satisfy_startup_pages(dev, 1);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate boot pages\n");
+ goto err_pagealloc_stop;
+ }
+
+ err = handle_hca_cap(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
+ goto reclaim_boot_pages;
+ }
+
+ err = set_hca_ctrl(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
+ goto reclaim_boot_pages;
+ }
+
+ err = mlx5_satisfy_startup_pages(dev, 0);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""failed to allocate init pages\n");
+ goto reclaim_boot_pages;
+ }
+
+ err = mlx5_cmd_init_hca(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""init hca failed\n");
+ goto reclaim_boot_pages;
+ }
+
+ mlx5_start_health_poll(dev);
+
+ err = mlx5_query_hca_caps(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""query hca failed\n");
+ goto err_stop_poll;
+ }
+
+ err = mlx5_query_board_id(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""query board id failed\n");
+ goto err_stop_poll;
+ }
+
+ err = mlx5_enable_msix(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""enable msix failed\n");
+ goto err_stop_poll;
+ }
+
+ err = mlx5_eq_init(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""failed to initialize eq\n");
+ goto disable_msix;
+ }
+
+ err = mlx5_alloc_uuars(dev, &priv->uuari);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed allocating uar, aborting\n");
+ goto err_eq_cleanup;
+ }
+
+ err = mlx5_start_eqs(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to start pages and async EQs\n");
+ goto err_free_uar;
+ }
+
+ err = alloc_comp_eqs(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to alloc completion EQs\n");
+ goto err_stop_eqs;
+ }
+
+ if (map_bf_area(dev))
+ device_printf((&pdev->dev)->bsddev, "ERR: ""Failed to map blue flame area\n");
+
+ MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
+
+ mlx5_init_cq_table(dev);
+ mlx5_init_qp_table(dev);
+ mlx5_init_srq_table(dev);
+ mlx5_init_mr_table(dev);
+
+ return 0;
+
+err_stop_eqs:
+ mlx5_stop_eqs(dev);
+
+err_free_uar:
+ mlx5_free_uuars(dev, &priv->uuari);
+
+err_eq_cleanup:
+ mlx5_eq_cleanup(dev);
+
+disable_msix:
+ mlx5_disable_msix(dev);
+
+err_stop_poll:
+ mlx5_stop_health_poll(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
+ return err;
+ }
+
+reclaim_boot_pages:
+ mlx5_reclaim_startup_pages(dev);
+
+err_pagealloc_stop:
+ mlx5_pagealloc_stop(dev);
+
+err_disable_hca:
+ mlx5_core_disable_hca(dev);
+
+err_pagealloc_cleanup:
+ mlx5_pagealloc_cleanup(dev);
+err_cmd_cleanup:
+ mlx5_cmd_cleanup(dev);
+
+err_unmap:
+ iounmap(dev->iseg);
+
+err_clr_master:
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+
+err_disable:
+ pci_disable_device(dev->pdev);
+
+err_dbg:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ return err;
+}
+
+static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+
+ mlx5_cleanup_mr_table(dev);
+ mlx5_cleanup_srq_table(dev);
+ mlx5_cleanup_qp_table(dev);
+ mlx5_cleanup_cq_table(dev);
+ unmap_bf_area(dev);
+ mlx5_wait_for_reclaim_vfs_pages(dev);
+ free_comp_eqs(dev);
+ mlx5_stop_eqs(dev);
+ mlx5_free_uuars(dev, &priv->uuari);
+ mlx5_eq_cleanup(dev);
+ mlx5_disable_msix(dev);
+ mlx5_stop_health_poll(dev);
+ if (mlx5_cmd_teardown_hca(dev)) {
+ device_printf((&dev->pdev->dev)->bsddev, "ERR: ""tear_down_hca failed, skip cleanup\n");
+ return;
+ }
+ mlx5_pagealloc_stop(dev);
+ mlx5_reclaim_startup_pages(dev);
+ mlx5_core_disable_hca(dev);
+ mlx5_pagealloc_cleanup(dev);
+ mlx5_cmd_cleanup(dev);
+ iounmap(dev->iseg);
+ pci_clear_master(dev->pdev);
+ release_bar(dev->pdev);
+ pci_disable_device(dev->pdev);
+}
+
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+
+ dev_ctx->intf = intf;
+ dev_ctx->context = intf->add(dev);
+
+ if (dev_ctx->context) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_add_tail(&dev_ctx->list, &priv->ctx_list);
+ spin_unlock_irq(&priv->ctx_lock);
+ } else {
+ kfree(dev_ctx);
+ }
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+ struct mlx5_device_context *dev_ctx;
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf == intf) {
+ spin_lock_irq(&priv->ctx_lock);
+ list_del(&dev_ctx->list);
+ spin_unlock_irq(&priv->ctx_lock);
+
+ intf->remove(dev, dev_ctx->context);
+ kfree(dev_ctx);
+ return;
+ }
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&priv->dev_list, &dev_list);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_interface *intf;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ if (!intf->add || !intf->remove)
+ return -EINVAL;
+
+ mutex_lock(&intf_mutex);
+ list_add_tail(&intf->list, &intf_list);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_add_device(intf, priv);
+ mutex_unlock(&intf_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+ struct mlx5_priv *priv;
+
+ mutex_lock(&intf_mutex);
+ list_for_each_entry(priv, &dev_list, dev_list)
+ mlx5_remove_device(intf, priv);
+ list_del(&intf->list);
+ mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+ if ((dev_ctx->intf->protocol == protocol) &&
+ dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev_ctx->context);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+ unsigned long param)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_device_context *dev_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->event)
+ dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+struct mlx5_core_event_handler {
+ void (*event)(struct mlx5_core_dev *dev,
+ enum mlx5_dev_event event,
+ void *data);
+};
+
+static int init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ priv = &dev->priv;
+ if (id)
+ priv->pci_dev_data = id->driver_data;
+
+ if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profiles)) {
+ printf("mlx5_core: WARN: ""selected profile out of range, selecting default (%d)\n", MLX5_DEFAULT_PROF);
+ prof_sel = MLX5_DEFAULT_PROF;
+ }
+ dev->profile = &profiles[prof_sel];
+ dev->event = mlx5_core_event;
+
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
+ err = mlx5_dev_init(dev, pdev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_dev_init failed %d\n", err);
+ goto out;
+ }
+
+ err = mlx5_register_device(dev);
+ if (err) {
+ device_printf((&pdev->dev)->bsddev, "ERR: ""mlx5_register_device failed %d\n", err);
+ goto out_init;
+ }
+
+ return 0;
+
+out_init:
+ mlx5_dev_cleanup(dev);
+out:
+ kfree(dev);
+ return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+ mlx5_unregister_device(dev);
+ mlx5_dev_cleanup(dev);
+ kfree(dev);
+}
+
+static const struct pci_device_id mlx5_core_pci_table[] = {
+ { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
+ { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
+ { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */
+ { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */
+ { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */
+ { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */
+ { PCI_VDEVICE(MELLANOX, 4119) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 4120) }, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 4121) },
+ { PCI_VDEVICE(MELLANOX, 4122) },
+ { PCI_VDEVICE(MELLANOX, 4123) },
+ { PCI_VDEVICE(MELLANOX, 4124) },
+ { PCI_VDEVICE(MELLANOX, 4125) },
+ { PCI_VDEVICE(MELLANOX, 4126) },
+ { PCI_VDEVICE(MELLANOX, 4127) },
+ { PCI_VDEVICE(MELLANOX, 4128) },
+ { PCI_VDEVICE(MELLANOX, 4129) },
+ { PCI_VDEVICE(MELLANOX, 4130) },
+ { PCI_VDEVICE(MELLANOX, 4131) },
+ { PCI_VDEVICE(MELLANOX, 4132) },
+ { PCI_VDEVICE(MELLANOX, 4133) },
+ { PCI_VDEVICE(MELLANOX, 4134) },
+ { PCI_VDEVICE(MELLANOX, 4135) },
+ { PCI_VDEVICE(MELLANOX, 4136) },
+ { PCI_VDEVICE(MELLANOX, 4137) },
+ { PCI_VDEVICE(MELLANOX, 4138) },
+ { PCI_VDEVICE(MELLANOX, 4139) },
+ { PCI_VDEVICE(MELLANOX, 4140) },
+ { PCI_VDEVICE(MELLANOX, 4141) },
+ { PCI_VDEVICE(MELLANOX, 4142) },
+ { PCI_VDEVICE(MELLANOX, 4143) },
+ { PCI_VDEVICE(MELLANOX, 4144) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+
+static struct pci_driver mlx5_core_driver = {
+ .name = DRIVER_NAME,
+ .id_table = mlx5_core_pci_table,
+ .probe = init_one,
+ .remove = remove_one
+};
+
+static int __init init(void)
+{
+ int err;
+
+ mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
+ if (!mlx5_core_wq) {
+ err = -ENOMEM;
+ goto err_debug;
+ }
+ mlx5_health_init();
+
+ err = pci_register_driver(&mlx5_core_driver);
+ if (err)
+ goto err_health;
+
+ return 0;
+
+err_health:
+ mlx5_health_cleanup();
+ destroy_workqueue(mlx5_core_wq);
+err_debug:
+ return err;
+}
+
+static void __exit cleanup(void)
+{
+ pci_unregister_driver(&mlx5_core_driver);
+ mlx5_health_cleanup();
+ destroy_workqueue(mlx5_core_wq);
+}
+
+module_init(init);
+module_exit(cleanup);
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ return;
+
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mlx5_trigger_cmd_completions(dev);
+}
+EXPORT_SYMBOL(mlx5_enter_error_state);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_main.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_mcg.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_mcg.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_mcg.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,69 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_mcg.c 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include <rdma/ib_verbs.h>
+#include "mlx5_core.h"
+
+int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)];
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ memcpy(MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid), mgid,
+ sizeof(*mgid));
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_attach_mcg);
+
+int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
+{
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)];
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ memcpy(MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid), mgid,
+ sizeof(*mgid));
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_detach_mcg);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_mcg.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_mr.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_mr.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_mr.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,237 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_mr.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include "mlx5_core.h"
+
+void mlx5_init_mr_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_mr_table *table = &dev->priv.mr_table;
+
+ spin_lock_init(&table->lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+}
+
+void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
+{
+}
+
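+/*
+ * Create a memory key.  A per-device rolling 8-bit "key" is mixed into
+ * the returned mkey, and the new MR is inserted into the radix tree that
+ * maps mkey indices back to their mlx5_core_mr structures.  When a
+ * callback is supplied the command is issued asynchronously and the
+ * function returns immediately.
+ */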
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ struct mlx5_create_mkey_mbox_in *in, int inlen,
+ mlx5_cmd_cbk_t callback, void *context,
+ struct mlx5_create_mkey_mbox_out *out)
+{
+ struct mlx5_mr_table *table = &dev->priv.mr_table;
+ struct mlx5_create_mkey_mbox_out lout;
+ unsigned long flags;
+ int err;
+ u8 key;
+
+ memset(&lout, 0, sizeof(lout));
+ spin_lock_irq(&dev->priv.mkey_lock);
+ key = dev->priv.mkey_key++;
+ spin_unlock_irq(&dev->priv.mkey_lock);
+ in->seg.qpn_mkey7_0 |= cpu_to_be32(key);
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_MKEY);
+ if (callback) {
+ err = mlx5_cmd_exec_cb(dev, in, inlen, out, sizeof(*out),
+ callback, context);
+ return err;
+ } else {
+ err = mlx5_cmd_exec(dev, in, inlen, &lout, sizeof(lout));
+ }
+
+ if (err) {
+ mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
+ return err;
+ }
+
+ if (lout.hdr.status) {
+ mlx5_core_dbg(dev, "status %d\n", lout.hdr.status);
+ return mlx5_cmd_status_to_err(&lout.hdr);
+ }
+
+ mr->iova = be64_to_cpu(in->seg.start_addr);
+ mr->size = be64_to_cpu(in->seg.len);
+ mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+ mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+
+ mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
+ be32_to_cpu(lout.mkey), key, mr->key);
+
+ /* connect to MR tree */
+ spin_lock_irqsave(&table->lock, flags);
+ err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mr->key), mr);
+ spin_unlock_irqrestore(&table->lock, flags);
+ if (err) {
+ mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+ mr->key, err);
+ mlx5_core_destroy_mkey(dev, mr);
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_mkey);
+
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
+{
+ struct mlx5_mr_table *table = &dev->priv.mr_table;
+ u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)];
+ struct mlx5_core_mr *deleted_mr;
+ unsigned long flags;
+
+ memset(in, 0, sizeof(in));
+
+ spin_lock_irqsave(&table->lock, flags);
+ deleted_mr = radix_tree_delete(&table->tree, mlx5_mkey_to_idx(mr->key));
+ spin_unlock_irqrestore(&table->lock, flags);
+ if (!deleted_mr) {
+ mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n", mr->key);
+ return -ENOENT;
+ }
+
+ MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->key));
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_mkey);
+
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ struct mlx5_query_mkey_mbox_out *out, int outlen)
+{
+ struct mlx5_query_mkey_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(out, 0, outlen);
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
+ in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ if (err)
+ return err;
+
+ if (out->hdr.status)
+ return mlx5_cmd_status_to_err(&out->hdr);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_query_mkey);
+
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+ u32 *mkey)
+{
+ struct mlx5_query_special_ctxs_mbox_in in;
+ struct mlx5_query_special_ctxs_mbox_out out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ *mkey = be32_to_cpu(out.dump_fill_mkey);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
+
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+ int npsvs, u32 *sig_index)
+{
+ struct mlx5_allocate_psv_in in;
+ struct mlx5_allocate_psv_out out;
+ int i, err;
+
+ if (npsvs > MLX5_MAX_PSVS)
+ return -EINVAL;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_PSV);
+ in.npsv_pd = cpu_to_be32((npsvs << 28) | pdn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err) {
+ mlx5_core_err(dev, "cmd exec failed %d\n", err);
+ return err;
+ }
+
+ if (out.hdr.status) {
+ mlx5_core_err(dev, "create_psv bad status %d\n",
+ out.hdr.status);
+ return mlx5_cmd_status_to_err(&out.hdr);
+ }
+
+ for (i = 0; i < npsvs; i++)
+ sig_index[i] = be32_to_cpu(out.psv_idx[i]) & 0xffffff;
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_psv);
+
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
+{
+ struct mlx5_destroy_psv_in in;
+ struct mlx5_destroy_psv_out out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.psv_number = cpu_to_be32(psv_num);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_PSV);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err) {
+ mlx5_core_err(dev, "destroy_psv cmd exec failed %d\n", err);
+ goto out;
+ }
+
+ if (out.hdr.status) {
+ mlx5_core_err(dev, "destroy_psv bad status %d\n",
+ out.hdr.status);
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ goto out;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_destroy_psv);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_mr.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,620 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c 322151 2017-08-07 12:49:30Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <dev/mlx5/driver.h>
+#include "mlx5_core.h"
+
+CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE);
+
+struct mlx5_pages_req {
+ struct mlx5_core_dev *dev;
+ u16 func_id;
+ s32 npages;
+ struct work_struct work;
+};
+
+struct mlx5_manage_pages_inbox {
+ struct mlx5_inbox_hdr hdr;
+ __be16 rsvd;
+ __be16 func_id;
+ __be32 num_entries;
+ __be64 pas[0];
+};
+
+struct mlx5_manage_pages_outbox {
+ struct mlx5_outbox_hdr hdr;
+ __be32 num_entries;
+ u8 rsvd[4];
+ __be64 pas[0];
+};
+
+enum {
+ MAX_RECLAIM_TIME_MSECS = 5000,
+};
+
+static void
+mlx5_fwp_load_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+ struct mlx5_fw_page *fwp;
+ uint8_t owned;
+
+ fwp = (struct mlx5_fw_page *)arg;
+ owned = MLX5_DMA_OWNED(fwp->dev);
+
+ if (!owned)
+ MLX5_DMA_LOCK(fwp->dev);
+
+ if (error == 0) {
+ KASSERT(nseg == 1, ("Number of segments is different from 1"));
+ fwp->dma_addr = segs->ds_addr;
+ fwp->load_done = MLX5_LOAD_ST_SUCCESS;
+ } else {
+ fwp->load_done = MLX5_LOAD_ST_FAILURE;
+ }
+ MLX5_DMA_DONE(fwp->dev);
+
+ if (!owned)
+ MLX5_DMA_UNLOCK(fwp->dev);
+}
+
+void
+mlx5_fwp_flush(struct mlx5_fw_page *fwp)
+{
+ unsigned num = fwp->numpages;
+
+ while (num--)
+ bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREWRITE);
+}
+
+void
+mlx5_fwp_invalidate(struct mlx5_fw_page *fwp)
+{
+ unsigned num = fwp->numpages;
+
+ while (num--) {
+ bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(fwp[num].dev->cmd.dma_tag, fwp[num].dma_map, BUS_DMASYNC_PREREAD);
+ }
+}
+
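+/*
+ * Allocate "num" firmware pages backed by DMA-coherent memory.  Each
+ * page gets its own DMA map; loading is serialized by cmd.dma_sx and
+ * completion is signalled through mlx5_fwp_load_mem_cb().  On any
+ * failure the pages allocated so far are unwound and NULL is returned.
+ */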
+struct mlx5_fw_page *
+mlx5_fwp_alloc(struct mlx5_core_dev *dev, gfp_t flags, unsigned num)
+{
+ struct mlx5_fw_page *fwp;
+ unsigned x;
+ int err;
+
+ /* check for special case */
+ if (num == 0) {
+ fwp = kzalloc(sizeof(*fwp), flags);
+ if (fwp != NULL)
+ fwp->dev = dev;
+ return (fwp);
+ }
+
+ /* we need sleeping context for this function */
+ if (flags & M_NOWAIT)
+ return (NULL);
+
+ fwp = kzalloc(sizeof(*fwp) * num, flags);
+
+ /* serialize loading the DMA map(s) */
+ sx_xlock(&dev->cmd.dma_sx);
+
+ for (x = 0; x != num; x++) {
+ /* store pointer to MLX5 core device */
+ fwp[x].dev = dev;
+ /* store number of pages left from the array */
+ fwp[x].numpages = num - x;
+
+ /* allocate memory */
+ err = bus_dmamem_alloc(dev->cmd.dma_tag, &fwp[x].virt_addr,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &fwp[x].dma_map);
+ if (err != 0)
+ goto failure;
+
+ /* load memory into DMA */
+ MLX5_DMA_LOCK(dev);
+ err = bus_dmamap_load(
+ dev->cmd.dma_tag, fwp[x].dma_map, fwp[x].virt_addr,
+ MLX5_ADAPTER_PAGE_SIZE, &mlx5_fwp_load_mem_cb,
+ fwp + x, BUS_DMA_WAITOK | BUS_DMA_COHERENT);
+
+ while (fwp[x].load_done == MLX5_LOAD_ST_NONE)
+ MLX5_DMA_WAIT(dev);
+ MLX5_DMA_UNLOCK(dev);
+
+ /* check for error */
+ if (fwp[x].load_done != MLX5_LOAD_ST_SUCCESS) {
+ bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr,
+ fwp[x].dma_map);
+ goto failure;
+ }
+ }
+ sx_xunlock(&dev->cmd.dma_sx);
+ return (fwp);
+
+failure:
+ while (x--) {
+ bus_dmamap_unload(dev->cmd.dma_tag, fwp[x].dma_map);
+ bus_dmamem_free(dev->cmd.dma_tag, fwp[x].virt_addr, fwp[x].dma_map);
+ }
+ sx_xunlock(&dev->cmd.dma_sx);
+ return (NULL);
+}
+
+void
+mlx5_fwp_free(struct mlx5_fw_page *fwp)
+{
+ struct mlx5_core_dev *dev;
+ unsigned num;
+
+ /* be NULL safe */
+ if (fwp == NULL)
+ return;
+
+ /* check for special case */
+ if (fwp->numpages == 0) {
+ kfree(fwp);
+ return;
+ }
+
+ num = fwp->numpages;
+ dev = fwp->dev;
+
+ while (num--) {
+ bus_dmamap_unload(dev->cmd.dma_tag, fwp[num].dma_map);
+ bus_dmamem_free(dev->cmd.dma_tag, fwp[num].virt_addr, fwp[num].dma_map);
+ }
+
+ kfree(fwp);
+}
+
+u64
+mlx5_fwp_get_dma(struct mlx5_fw_page *fwp, size_t offset)
+{
+ size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
+ KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));
+
+ return ((fwp + index)->dma_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
+}
+
+void *
+mlx5_fwp_get_virt(struct mlx5_fw_page *fwp, size_t offset)
+{
+ size_t index = (offset / MLX5_ADAPTER_PAGE_SIZE);
+ KASSERT(index < fwp->numpages, ("Invalid offset: %lld", (long long)offset));
+
+ return ((char *)(fwp + index)->virt_addr + (offset % MLX5_ADAPTER_PAGE_SIZE));
+}
+
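+/*
+ * Red-black tree of firmware pages keyed by DMA address, used to look
+ * pages up again when firmware returns them.  Callers must hold the
+ * DMA lock.
+ */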
+static int
+mlx5_insert_fw_page_locked(struct mlx5_core_dev *dev, struct mlx5_fw_page *nfp)
+{
+ struct rb_root *root = &dev->priv.page_root;
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct mlx5_fw_page *tfp;
+
+ while (*new) {
+ parent = *new;
+ tfp = rb_entry(parent, struct mlx5_fw_page, rb_node);
+ if (tfp->dma_addr < nfp->dma_addr)
+ new = &parent->rb_left;
+ else if (tfp->dma_addr > nfp->dma_addr)
+ new = &parent->rb_right;
+ else
+ return (-EEXIST);
+ }
+
+ rb_link_node(&nfp->rb_node, parent, new);
+ rb_insert_color(&nfp->rb_node, root);
+ return (0);
+}
+
+static struct mlx5_fw_page *
+mlx5_remove_fw_page_locked(struct mlx5_core_dev *dev, bus_addr_t addr)
+{
+ struct rb_root *root = &dev->priv.page_root;
+ struct rb_node *tmp = root->rb_node;
+ struct mlx5_fw_page *result = NULL;
+ struct mlx5_fw_page *tfp;
+
+ while (tmp) {
+ tfp = rb_entry(tmp, struct mlx5_fw_page, rb_node);
+ if (tfp->dma_addr < addr) {
+ tmp = tmp->rb_left;
+ } else if (tfp->dma_addr > addr) {
+ tmp = tmp->rb_right;
+ } else {
+ rb_erase(&tfp->rb_node, &dev->priv.page_root);
+ result = tfp;
+ break;
+ }
+ }
+ return (result);
+}
+
+static int
+alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+{
+ struct mlx5_fw_page *fwp;
+ int err;
+
+ fwp = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
+ if (fwp == NULL)
+ return (-ENOMEM);
+
+ fwp->func_id = func_id;
+
+ MLX5_DMA_LOCK(dev);
+ err = mlx5_insert_fw_page_locked(dev, fwp);
+ MLX5_DMA_UNLOCK(dev);
+
+ if (err != 0) {
+ mlx5_fwp_free(fwp);
+ } else {
+ /* make sure cached data is cleaned */
+ mlx5_fwp_invalidate(fwp);
+
+ /* store DMA address */
+ *addr = fwp->dma_addr;
+ }
+ return (err);
+}
+
+static void
+free_4k(struct mlx5_core_dev *dev, u64 addr)
+{
+ struct mlx5_fw_page *fwp;
+
+ MLX5_DMA_LOCK(dev);
+ fwp = mlx5_remove_fw_page_locked(dev, addr);
+ MLX5_DMA_UNLOCK(dev);
+
+ if (fwp == NULL) {
+ mlx5_core_warn(dev, "Cannot free 4K page at 0x%llx\n", (long long)addr);
+ return;
+ }
+ mlx5_fwp_free(fwp);
+}
+
+static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
+ s32 *npages, int boot)
+{
+ u32 in[MLX5_ST_SZ_DW(query_pages_in)];
+ u32 out[MLX5_ST_SZ_DW(query_pages_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+ MLX5_SET(query_pages_in, in, op_mod,
+ boot ? MLX5_BOOT_PAGES : MLX5_INIT_PAGES);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *npages = MLX5_GET(query_pages_out, out, num_pages);
+ *func_id = MLX5_GET(query_pages_out, out, function_id);
+
+ return 0;
+}
+
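+/*
+ * Hand "npages" 4K pages to firmware on behalf of "func_id" using the
+ * MANAGE_PAGES command. If allocation or the command fails and
+ * "notify_fail" is set, firmware is informed via the CANT_GIVE opmod
+ * before the already allocated pages are released again.
+ */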
+static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+ int notify_fail)
+{
+ struct mlx5_manage_pages_inbox *in;
+ struct mlx5_manage_pages_outbox out;
+ struct mlx5_manage_pages_inbox *nin;
+ int inlen;
+ u64 addr;
+ int err;
+ int i = 0;
+
+ inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
+ err = -ENOMEM;
+ goto out_alloc;
+ }
+ memset(&out, 0, sizeof(out));
+
+ for (i = 0; i < npages; i++) {
+ err = alloc_4k(dev, &addr, func_id);
+ if (err)
+ goto out_alloc;
+ in->pas[i] = cpu_to_be64(addr);
+ }
+
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
+ in->func_id = cpu_to_be16(func_id);
+ in->num_entries = cpu_to_be32(npages);
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+ func_id, npages, err);
+ goto out_alloc;
+ }
+ dev->priv.fw_pages += npages;
+ dev->priv.pages_per_func[func_id] += npages;
+
+ if (out.hdr.status) {
+ err = mlx5_cmd_status_to_err(&out.hdr);
+ if (err) {
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
+ goto out_alloc;
+ }
+ }
+
+ mlx5_core_dbg(dev, "err %d\n", err);
+
+ goto out_free;
+
+out_alloc:
+ if (notify_fail) {
+ nin = kzalloc(sizeof(*nin), GFP_KERNEL);
+ if (!nin)
+ goto out_4k;
+
+ memset(&out, 0, sizeof(out));
+ nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ nin->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
+ nin->func_id = cpu_to_be16(func_id);
+ if (mlx5_cmd_exec(dev, nin, sizeof(*nin), &out, sizeof(out)))
+ mlx5_core_warn(dev, "page notify failed\n");
+ kfree(nin);
+ }
+
+out_4k:
+ for (i--; i >= 0; i--)
+ free_4k(dev, be64_to_cpu(in->pas[i]));
+out_free:
+ kvfree(in);
+ return err;
+}
+
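+/*
+ * Ask firmware to return up to "npages" pages for "func_id" using the
+ * MANAGE_PAGES command with the TAKE opmod. The number of pages the
+ * firmware actually returned is stored in "*nclaimed" when the pointer
+ * is non-NULL, and each returned page is freed with free_4k().
+ */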
+static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+ int *nclaimed)
+{
+ struct mlx5_manage_pages_inbox in;
+ struct mlx5_manage_pages_outbox *out;
+ int num_claimed;
+ int outlen;
+ u64 addr;
+ int err;
+ int i;
+
+ if (nclaimed)
+ *nclaimed = 0;
+
+ memset(&in, 0, sizeof(in));
+ outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
+ in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
+ in.func_id = cpu_to_be16(func_id);
+ in.num_entries = cpu_to_be32(npages);
+ mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ if (err) {
+ mlx5_core_err(dev, "failed reclaiming pages\n");
+ goto out_free;
+ }
+
+ if (out->hdr.status) {
+ err = mlx5_cmd_status_to_err(&out->hdr);
+ goto out_free;
+ }
+
+ num_claimed = be32_to_cpu(out->num_entries);
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
+ dev->priv.fw_pages -= num_claimed;
+ dev->priv.pages_per_func[func_id] -= num_claimed;
+ for (i = 0; i < num_claimed; i++) {
+ addr = be64_to_cpu(out->pas[i]);
+ free_4k(dev, addr);
+ }
+
+out_free:
+ kvfree(out);
+ return err;
+}
+
+static void pages_work_handler(struct work_struct *work)
+{
+ struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
+ struct mlx5_core_dev *dev = req->dev;
+ int err = 0;
+
+ if (req->npages < 0)
+ err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+ else if (req->npages > 0)
+ err = give_pages(dev, req->func_id, req->npages, 1);
+
+ if (err)
+ mlx5_core_warn(dev, "%s fail %d\n",
+ req->npages < 0 ? "reclaim" : "give", err);
+
+ kfree(req);
+}
+
+void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
+ s32 npages)
+{
+ struct mlx5_pages_req *req;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req) {
+ mlx5_core_warn(dev, "failed to allocate pages request\n");
+ return;
+ }
+
+ req->dev = dev;
+ req->func_id = func_id;
+ req->npages = npages;
+ INIT_WORK(&req->work, pages_work_handler);
+ if (!queue_work(dev->priv.pg_wq, &req->work))
+ mlx5_core_warn(dev, "failed to queue pages handler work\n");
+}
+
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
+{
+ u16 uninitialized_var(func_id);
+ s32 uninitialized_var(npages);
+ int err;
+
+ err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
+ if (err)
+ return err;
+
+ mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+ npages, boot ? "boot" : "init", func_id);
+
+ return give_pages(dev, func_id, npages, 0);
+}
+
+enum {
+ MLX5_BLKS_FOR_RECLAIM_PAGES = 12
+};
+
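+/*
+ * Wait for firmware to return all pages given out on behalf of the
+ * virtual functions (the own function's pages are excluded). The
+ * deadline is extended by 100ms whenever progress is observed. The
+ * number of pages still outstanding is returned as a negative value.
+ */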
+s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
+{
+ int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ s64 prevpages = 0;
+ s64 npages = 0;
+
+ while (!time_after(jiffies, end)) {
+ /* exclude own function, VFs only */
+ npages = dev->priv.fw_pages - dev->priv.pages_per_func[0];
+ if (!npages)
+ break;
+
+ if (npages != prevpages)
+ end = end + msecs_to_jiffies(100);
+
+ prevpages = npages;
+ msleep(1);
+ }
+
+ if (npages)
+ mlx5_core_warn(dev, "FW did not return all VFs pages, will cause to memory leak\n");
+
+ return -npages;
+}
+
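+/*
+ * Compute how many page addresses fit into a single MANAGE_PAGES
+ * output mailbox: the inline output area plus
+ * MLX5_BLKS_FOR_RECLAIM_PAGES command data blocks, minus the fixed
+ * output header, divided by the size of one page address entry.
+ */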
+static int optimal_reclaimed_pages(void)
+{
+ struct mlx5_cmd_prot_block *block;
+ struct mlx5_cmd_layout *lay;
+ int ret;
+
+ ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
+ sizeof(struct mlx5_manage_pages_outbox)) /
+ FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);
+
+ return ret;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+ int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ struct mlx5_fw_page *fwp;
+ struct rb_node *p;
+ int nclaimed = 0;
+ int err;
+
+ do {
+ p = rb_first(&dev->priv.page_root);
+ if (p) {
+ fwp = rb_entry(p, struct mlx5_fw_page, rb_node);
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ --dev->priv.fw_pages;
+ free_4k(dev, fwp->dma_addr);
+ nclaimed = 1;
+ } else {
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+ if (err) {
+ mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ if (nclaimed)
+ end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ }
+ if (time_after(jiffies, end)) {
+ mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
+ break;
+ }
+ } while (p);
+
+ return 0;
+}
+
+void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+{
+
+ dev->priv.page_root = RB_ROOT;
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+ /* nothing */
+}
+
+int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+ dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
+ if (!dev->priv.pg_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
+{
+ destroy_workqueue(dev->priv.pg_wq);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_pd.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_pd.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_pd.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,68 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_pd.c 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include "mlx5_core.h"
+
+int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *pdn = MLX5_GET(alloc_pd_out, out, pd);
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_core_alloc_pd);
+
+int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_dealloc_pd);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_pd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_port.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_port.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_port.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,863 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_port.c 308684 2016-11-15 08:58:51Z hselasky $
+ */
+
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include "mlx5_core.h"
+
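+/*
+ * Generic access register command: copy "data_in" into an ACCESS_REG
+ * mailbox, execute it with opmod 0 for a write or 1 for a read, and on
+ * success copy the register contents back into "data_out".
+ */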
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+ int size_in, void *data_out, int size_out,
+ u16 reg_num, int arg, int write)
+{
+ struct mlx5_access_reg_mbox_in *in = NULL;
+ struct mlx5_access_reg_mbox_out *out = NULL;
+ int err = -ENOMEM;
+
+ in = mlx5_vzalloc(sizeof(*in) + size_in);
+ if (!in)
+ return -ENOMEM;
+
+ out = mlx5_vzalloc(sizeof(*out) + size_out);
+ if (!out)
+ goto ex1;
+
+ memcpy(in->data, data_in, size_in);
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ACCESS_REG);
+ in->hdr.opmod = cpu_to_be16(!write);
+ in->arg = cpu_to_be32(arg);
+ in->register_id = cpu_to_be16(reg_num);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in) + size_in, out,
+ sizeof(*out) + size_out);
+ if (err)
+ goto ex2;
+
+ if (out->hdr.status)
+ err = mlx5_cmd_status_to_err(&out->hdr);
+
+ if (!err)
+ memcpy(data_out, out->data, size_out);
+
+ex2:
+ kvfree(out);
+ex1:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
+
+
+struct mlx5_reg_pcap {
+ u8 rsvd0;
+ u8 port_num;
+ u8 rsvd1[2];
+ __be32 caps_127_96;
+ __be32 caps_95_64;
+ __be32 caps_63_32;
+ __be32 caps_31_0;
+};
+
+/* This function should be used after setting a port register only */
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+{
+ enum mlx5_port_status ps;
+
+ mlx5_query_port_admin_status(dev, &ps);
+ mlx5_set_port_status(dev, MLX5_PORT_DOWN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_status(dev, MLX5_PORT_UP);
+}
+EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
+
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
+{
+ struct mlx5_reg_pcap in;
+ struct mlx5_reg_pcap out;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ in.caps_127_96 = cpu_to_be32(caps);
+ in.port_num = port_num;
+
+ err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
+ sizeof(out), MLX5_REG_PCAP, 0, 1);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ else
+ *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_disable_cap, u8 *an_disable_status)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ if (err)
+ return err;
+
+ *an_disable_status = MLX5_GET(ptys_reg, out, an_disable_admin);
+ *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
+int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
+ u32 eth_proto_admin, int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u8 an_disable_cap;
+ u8 an_disable_status;
+ int err;
+
+ err = mlx5_query_port_autoneg(dev, proto_mask, &an_disable_cap,
+ &an_disable_status);
+ if (err)
+ return err;
+ if (!an_disable_cap)
+ return -EPERM;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, an_disable_admin, disable);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, eth_proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_autoneg);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ else
+ *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, local_port, 1);
+
+ MLX5_SET(paos_reg, in, admin_status, status);
+ MLX5_SET(paos_reg, in, ase, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+ return err;
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(paos_reg, out, oper_status);
+ return err;
+}
+
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ MLX5_SET(paos_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+ *status = MLX5_GET(paos_reg, out, admin_status);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+
+static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
+ int *admin_mtu, int *max_mtu, int *oper_mtu)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
+ if (err)
+ return err;
+
+ if (max_mtu)
+ *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
+ if (oper_mtu)
+ *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+ if (admin_mtu)
+ *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+
+ return err;
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+ MLX5_SET(pmtu_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu)
+{
+ return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
+ u32 rx_pause, u32 tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(pfcc_reg, in, local_port, port);
+ MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+ MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
+ u32 *rx_pause, u32 *tx_pause)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(pfcc_reg, in, local_port, port);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ *rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+ *tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+ return 0;
+}
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
+ MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
+ MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx);
+ MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
+
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+{
+ u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+ u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+ int err;
+
+ MLX5_SET(pfcc_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 0);
+ if (err)
+ return err;
+
+ if (pfc_en_tx)
+ *pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx);
+
+ if (pfc_en_rx)
+ *pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
+{
+ return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev)
+{
+ u8 wol_supported = 0;
+
+ if (MLX5_CAP_GEN(dev, wol_s))
+ wol_supported |= MLX5_WOL_SECURED_MAGIC;
+ if (MLX5_CAP_GEN(dev, wol_g))
+ wol_supported |= MLX5_WOL_MAGIC;
+ if (MLX5_CAP_GEN(dev, wol_a))
+ wol_supported |= MLX5_WOL_ARP;
+ if (MLX5_CAP_GEN(dev, wol_b))
+ wol_supported |= MLX5_WOL_BROADCAST;
+ if (MLX5_CAP_GEN(dev, wol_m))
+ wol_supported |= MLX5_WOL_MULTICAST;
+ if (MLX5_CAP_GEN(dev, wol_u))
+ wol_supported |= MLX5_WOL_UNICAST;
+ if (MLX5_CAP_GEN(dev, wol_p))
+ wol_supported |= MLX5_WOL_PHY_ACTIVITY;
+
+ return wol_supported;
+}
+EXPORT_SYMBOL_GPL(mlx5_is_wol_supported);
+
+int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)];
+ u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
+ MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
+ MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_wol);
+
+int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
+ struct mlx5_pvlc_reg *pvlc, int write)
+{
+ int sz = MLX5_ST_SZ_BYTES(pvlc_reg);
+ u8 in[MLX5_ST_SZ_BYTES(pvlc_reg)];
+ u8 out[MLX5_ST_SZ_BYTES(pvlc_reg)];
+ int err;
+
+ memset(out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pvlc_reg, in, local_port, pvlc->local_port);
+ if (write)
+ MLX5_SET(pvlc_reg, in, vl_admin, pvlc->vl_admin);
+
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PVLC, 0,
+ !!write);
+ if (err)
+ return err;
+
+ if (!write) {
+ pvlc->local_port = MLX5_GET(pvlc_reg, out, local_port);
+ pvlc->vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+ pvlc->vl_admin = MLX5_GET(pvlc_reg, out, vl_admin);
+ pvlc->vl_operational = MLX5_GET(pvlc_reg, out, vl_operational);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_access_pvlc);
+
+int mlx5_core_access_ptys(struct mlx5_core_dev *dev,
+ struct mlx5_ptys_reg *ptys, int write)
+{
+ int sz = MLX5_ST_SZ_BYTES(ptys_reg);
+ void *out = NULL;
+ void *in = NULL;
+ int err;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
+ return -ENOMEM;
+
+ out = mlx5_vzalloc(sz);
+ if (!out) {
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ MLX5_SET(ptys_reg, in, local_port, ptys->local_port);
+ MLX5_SET(ptys_reg, in, proto_mask, ptys->proto_mask);
+ if (write) {
+ MLX5_SET(ptys_reg, in, eth_proto_capability,
+ ptys->eth_proto_cap);
+ MLX5_SET(ptys_reg, in, ib_link_width_capability,
+ ptys->ib_link_width_cap);
+ MLX5_SET(ptys_reg, in, ib_proto_capability,
+ ptys->ib_proto_cap);
+ MLX5_SET(ptys_reg, in, eth_proto_admin, ptys->eth_proto_admin);
+ MLX5_SET(ptys_reg, in, ib_link_width_admin,
+ ptys->ib_link_width_admin);
+ MLX5_SET(ptys_reg, in, ib_proto_admin, ptys->ib_proto_admin);
+ MLX5_SET(ptys_reg, in, eth_proto_oper, ptys->eth_proto_oper);
+ MLX5_SET(ptys_reg, in, ib_link_width_oper,
+ ptys->ib_link_width_oper);
+ MLX5_SET(ptys_reg, in, ib_proto_oper, ptys->ib_proto_oper);
+ MLX5_SET(ptys_reg, in, eth_proto_lp_advertise,
+ ptys->eth_proto_lp_advertise);
+ }
+
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PTYS, 0,
+ !!write);
+ if (err)
+ goto out;
+
+ if (!write) {
+ ptys->local_port = MLX5_GET(ptys_reg, out, local_port);
+ ptys->proto_mask = MLX5_GET(ptys_reg, out, proto_mask);
+ ptys->eth_proto_cap = MLX5_GET(ptys_reg, out,
+ eth_proto_capability);
+ ptys->ib_link_width_cap = MLX5_GET(ptys_reg, out,
+ ib_link_width_capability);
+ ptys->ib_proto_cap = MLX5_GET(ptys_reg, out,
+ ib_proto_capability);
+ ptys->eth_proto_admin = MLX5_GET(ptys_reg, out,
+ eth_proto_admin);
+ ptys->ib_link_width_admin = MLX5_GET(ptys_reg, out,
+ ib_link_width_admin);
+ ptys->ib_proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+ ptys->eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ ptys->ib_link_width_oper = MLX5_GET(ptys_reg, out,
+ ib_link_width_oper);
+ ptys->ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+ ptys->eth_proto_lp_advertise = MLX5_GET(ptys_reg, out,
+ eth_proto_lp_advertise);
+ }
+
+out:
+ kvfree(in);
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_access_ptys);
+
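+/* Map a byte MTU to the corresponding IB MTU enum value (256 -> 1 ... 4096 -> 5). */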
+static int mtu_to_ib_mtu(int mtu)
+{
+ switch (mtu) {
+ case 256: return 1;
+ case 512: return 2;
+ case 1024: return 3;
+ case 2048: return 4;
+ case 4096: return 5;
+ default:
+ printf("mlx5_core: WARN: ""invalid mtu\n");
+ return -1;
+ }
+}
+
+int mlx5_core_access_pmtu(struct mlx5_core_dev *dev,
+ struct mlx5_pmtu_reg *pmtu, int write)
+{
+ int sz = MLX5_ST_SZ_BYTES(pmtu_reg);
+ void *out = NULL;
+ void *in = NULL;
+ int err;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
+ return -ENOMEM;
+
+ out = mlx5_vzalloc(sz);
+ if (!out) {
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ MLX5_SET(pmtu_reg, in, local_port, pmtu->local_port);
+ if (write)
+ MLX5_SET(pmtu_reg, in, admin_mtu, pmtu->admin_mtu);
+
+ err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PMTU, 0,
+ !!write);
+ if (err)
+ goto out;
+
+ if (!write) {
+ pmtu->local_port = MLX5_GET(pmtu_reg, out, local_port);
+ pmtu->max_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
+ max_mtu));
+ pmtu->admin_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
+ admin_mtu));
+ pmtu->oper_mtu = mtu_to_ib_mtu(MLX5_GET(pmtu_reg, out,
+ oper_mtu));
+ }
+
+out:
+ kvfree(in);
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_access_pmtu);
+
+int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+ int lane = 0;
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmlp_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMLP, 0, 0);
+ if (err)
+ return err;
+
+ lane = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+ *module_num = lane & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_num);
+
+int mlx5_query_eeprom(struct mlx5_core_dev *dev,
+ int i2c_addr, int page_num, int device_addr,
+ int size, int module_num, u32 *data, int *size_read)
+{
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+ u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+ u32 *ptr = (u32 *)MLX5_ADDR_OF(mcia_reg, out, dword_0);
+ int status;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, module, module_num);
+ MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+ MLX5_SET(mcia_reg, in, page_number, page_num);
+ MLX5_SET(mcia_reg, in, device_address, device_addr);
+ MLX5_SET(mcia_reg, in, size, size);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
+ if (err)
+ return err;
+
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status)
+ return status;
+
+ memcpy(data, ptr, size);
+ *size_read = size;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_eeprom);
+
+int mlx5_vxlan_udp_port_add(struct mlx5_core_dev *dev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)];
+ u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+ MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(dev, "Failed %s, port %u, err - %d",
+ mlx5_command_str(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT),
+ port, err);
+ }
+
+ return err;
+}
+
+int mlx5_vxlan_udp_port_delete(struct mlx5_core_dev *dev, u16 port)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(dev, "Failed %s, port %u, err - %d",
+ mlx5_command_str(MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT),
+ port, err);
+ }
+
+ return err;
+}
+
+int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)];
+ u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+
+ if (!err)
+ *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_wol);
+
+int mlx5_query_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
+ int priority, int *is_enable)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_status_in)];
+ u32 out[MLX5_ST_SZ_DW(query_cong_status_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ *is_enable = 0;
+
+ MLX5_SET(query_cong_status_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATUS);
+ MLX5_SET(query_cong_status_in, in, cong_protocol, protocol);
+ MLX5_SET(query_cong_status_in, in, priority, priority);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+ if (!err)
+ *is_enable = MLX5_GET(query_cong_status_out, out, enable);
+ return err;
+}
+
+int mlx5_modify_port_cong_status(struct mlx5_core_dev *mdev, int protocol,
+ int priority, int enable)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cong_status_in)];
+ u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(modify_cong_status_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_CONG_STATUS);
+ MLX5_SET(modify_cong_status_in, in, cong_protocol, protocol);
+ MLX5_SET(modify_cong_status_in, in, priority, priority);
+ MLX5_SET(modify_cong_status_in, in, enable, enable);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+int mlx5_query_port_cong_params(struct mlx5_core_dev *mdev, int protocol,
+ void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_cong_params_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS);
+ MLX5_SET(query_cong_params_in, in, cong_protocol, protocol);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, out_size);
+}
+
+int mlx5_modify_port_cong_params(struct mlx5_core_dev *mdev,
+ void *in, int in_size)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)];
+
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(modify_cong_params_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS);
+
+ return mlx5_cmd_exec_check_status(mdev, in, in_size, out, sizeof(out));
+}
+
+int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
+ void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_cong_statistics_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS);
+ MLX5_SET(query_cong_statistics_in, in, clear, clear);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, out_size);
+}
+
+int mlx5_set_diagnostic_params(struct mlx5_core_dev *mdev, void *in,
+ int in_size)
+{
+ u32 out[MLX5_ST_SZ_DW(set_diagnostic_params_out)];
+
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_diagnostic_params_in, in, opcode,
+ MLX5_CMD_OP_SET_DIAGNOSTICS);
+
+ return mlx5_cmd_exec_check_status(mdev, in, in_size, out, sizeof(out));
+}
+
+int mlx5_query_diagnostic_counters(struct mlx5_core_dev *mdev,
+ u8 num_of_samples, u16 sample_index,
+ void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_diagnostic_counters_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_diagnostic_counters_in, in, opcode,
+ MLX5_CMD_OP_QUERY_DIAGNOSTICS);
+ MLX5_SET(query_diagnostic_counters_in, in, num_of_samples,
+ num_of_samples);
+ MLX5_SET(query_diagnostic_counters_in, in, sample_index, sample_index);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, out_size);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_port.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_qp.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_qp.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_qp.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,486 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_qp.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+
+#include <linux/gfp.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/driver.h>
+
+#include "mlx5_core.h"
+
+#include "transobj.h"
+
+static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
+ u32 rsn)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_core_rsc_common *common;
+
+ spin_lock(&table->lock);
+
+ common = radix_tree_lookup(&table->tree, rsn);
+ if (common)
+ atomic_inc(&common->refcount);
+
+ spin_unlock(&table->lock);
+
+ if (!common) {
+ mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
+ rsn);
+ return NULL;
+ }
+ return common;
+}
+
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
+{
+ if (atomic_dec_and_test(&common->refcount))
+ complete(&common->free);
+}
+
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
+{
+ struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_qp *qp;
+
+ if (!common)
+ return;
+
+ switch (common->res) {
+ case MLX5_RES_QP:
+ qp = (struct mlx5_core_qp *)common;
+ qp->event(qp, event_type);
+ break;
+
+ default:
+ mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+ }
+
+ mlx5_core_put_rsc(common);
+}
+
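+/*
+ * Common bookkeeping for QP, RQ and SQ objects: insert the object into
+ * the radix tree keyed by "qpn | (rsc_type << 24)", take the initial
+ * reference and record the owning process ID.
+ */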
+static int create_qprqsq_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ int err;
+
+ qp->common.res = rsc_type;
+
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
+ spin_unlock_irq(&table->lock);
+ if (err)
+ return err;
+
+ atomic_set(&qp->common.refcount, 1);
+ init_completion(&qp->common.free);
+ qp->pid = curthread->td_proc->p_pid;
+
+ return 0;
+}
+
+static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
+ wait_for_completion(&qp->common.free);
+}
+
+int mlx5_core_create_qp(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ struct mlx5_create_qp_mbox_in *in,
+ int inlen)
+{
+ struct mlx5_create_qp_mbox_out out;
+ struct mlx5_destroy_qp_mbox_in din;
+ struct mlx5_destroy_qp_mbox_out dout;
+ int err;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);
+
+ err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "ret %d\n", err);
+ return err;
+ }
+
+ if (out.hdr.status) {
+ mlx5_core_warn(dev, "current num of QPs 0x%x\n",
+ atomic_read(&dev->num_qps));
+ return mlx5_cmd_status_to_err(&out.hdr);
+ }
+
+ qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
+ mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
+
+ err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+ if (err)
+ goto err_cmd;
+
+ atomic_inc(&dev->num_qps);
+
+ return 0;
+
+err_cmd:
+ memset(&din, 0, sizeof(din));
+ memset(&dout, 0, sizeof(dout));
+ din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
+ din.qpn = cpu_to_be32(qp->qpn);
+ mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
+
+int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp)
+{
+ struct mlx5_destroy_qp_mbox_in in;
+ struct mlx5_destroy_qp_mbox_out out;
+ int err;
+
+
+ destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
+ in.qpn = cpu_to_be32(qp->qpn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ atomic_dec(&dev->num_qps);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
+
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
+ struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+ struct mlx5_core_qp *qp)
+{
+ struct mlx5_modify_qp_mbox_out out;
+ int err = 0;
+
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(operation);
+ in->qpn = cpu_to_be32(qp->qpn);
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ return mlx5_cmd_status_to_err(&out.hdr);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
+
+void mlx5_init_qp_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+
+ spin_lock_init(&table->lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+}
+
+void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
+{
+}
+
+int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+ struct mlx5_query_qp_mbox_out *out, int outlen)
+{
+ struct mlx5_query_qp_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(out, 0, outlen);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
+ in.qpn = cpu_to_be32(qp->qpn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ if (err)
+ return err;
+
+ if (out->hdr.status)
+ return mlx5_cmd_status_to_err(&out->hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
+
+int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
+
+int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ struct mlx5_create_dct_mbox_in *in)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_create_dct_mbox_out out;
+ struct mlx5_destroy_dct_mbox_in din;
+ struct mlx5_destroy_dct_mbox_out dout;
+ int err;
+
+ init_completion(&dct->drained);
+ memset(&out, 0, sizeof(out));
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
+ if (err) {
+ mlx5_core_warn(dev, "create DCT failed, ret %d", err);
+ return err;
+ }
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ dct->dctn = be32_to_cpu(out.dctn) & 0xffffff;
+
+ dct->common.res = MLX5_RES_DCT;
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, dct->dctn, dct);
+ spin_unlock_irq(&table->lock);
+ if (err) {
+ mlx5_core_warn(dev, "err %d", err);
+ goto err_cmd;
+ }
+
+ dct->pid = curthread->td_proc->p_pid;
+ atomic_set(&dct->common.refcount, 1);
+ init_completion(&dct->common.free);
+
+ return 0;
+
+err_cmd:
+ memset(&din, 0, sizeof(din));
+ memset(&dout, 0, sizeof(dout));
+ din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
+ din.dctn = cpu_to_be32(dct->dctn);
+ mlx5_cmd_exec(dev, &din, sizeof(din), &out, sizeof(dout));
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
+
+static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ struct mlx5_drain_dct_mbox_out out;
+ struct mlx5_drain_dct_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DRAIN_DCT);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_destroy_dct_mbox_out out;
+ struct mlx5_destroy_dct_mbox_in in;
+ unsigned long flags;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
+ return err;
+ }
+
+ wait_for_completion(&dct->drained);
+
+ spin_lock_irqsave(&table->lock, flags);
+ if (radix_tree_delete(&table->tree, dct->dctn) != dct)
+ mlx5_core_warn(dev, "dct delete differs\n");
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ if (atomic_dec_and_test(&dct->common.refcount))
+ complete(&dct->common.free);
+ wait_for_completion(&dct->common.free);
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_DCT);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
+
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ struct mlx5_query_dct_mbox_out *out)
+{
+ struct mlx5_query_dct_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(out, 0, sizeof(*out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_DCT);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+ if (err)
+ return err;
+
+ if (out->hdr.status)
+ return mlx5_cmd_status_to_err(&out->hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
+
+int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
+{
+ struct mlx5_arm_dct_mbox_out out;
+ struct mlx5_arm_dct_mbox_in in;
+ int err;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
+ in.dctn = cpu_to_be32(dct->dctn);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);
+
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq)
+{
+ int err;
+
+ err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
+ if (err)
+ return err;
+
+ err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ if (err)
+ mlx5_core_destroy_rq(dev, rq->qpn);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
+
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *rq)
+{
+ destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
+ mlx5_core_destroy_rq(dev, rq->qpn);
+}
+EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
+
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq)
+{
+ int err;
+
+ err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
+ if (err)
+ return err;
+
+ err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ if (err)
+ mlx5_core_destroy_sq(dev, sq->qpn);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
+
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *sq)
+{
+ destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
+ mlx5_core_destroy_sq(dev, sq->qpn);
+}
+EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_qp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_srq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_srq.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_srq.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,456 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_srq.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/srq.h>
+#include <rdma/ib_verbs.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
+{
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+ struct mlx5_core_srq *srq;
+
+ spin_lock(&table->lock);
+
+ srq = radix_tree_lookup(&table->tree, srqn);
+ if (srq)
+ atomic_inc(&srq->refcount);
+
+ spin_unlock(&table->lock);
+
+ if (!srq) {
+ mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
+ return;
+ }
+
+ srq->event(srq, event_type);
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+}
+
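+/*
+ * Translate between the SRQ context layout and the RMP context layout.
+ * When "srqc_to_rmpc" is true the SRQ context is packed into an RMP
+ * context (the RMP path is used when the device reports a non-zero
+ * ISSI), otherwise the RMP context is unpacked back into an SRQ
+ * context for query.
+ */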
+static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+{
+ void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ if (srqc_to_rmpc) {
+ switch (MLX5_GET(srqc, srqc, state)) {
+ case MLX5_SRQC_STATE_GOOD:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ break;
+ case MLX5_SRQC_STATE_ERROR:
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
+ break;
+ default:
+ printf("mlx5_core: WARN: ""%s: %d: Unknown srq state = 0x%x\n", __func__, __LINE__, MLX5_GET(srqc, srqc, state));
+ }
+
+ MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
+ MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
+ MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
+ MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
+ MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
+ MLX5_SET64(wq, wq, dbr_addr,
+ ((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
+ ((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
+ } else {
+ switch (MLX5_GET(rmpc, rmpc, state)) {
+ case MLX5_RMPC_STATE_RDY:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
+ break;
+ case MLX5_RMPC_STATE_ERR:
+ MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
+ break;
+ default:
+ printf("mlx5_core: WARN: ""%s: %d: Unknown rmp state = 0x%x\n", __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
+ }
+
+ MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
+ MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
+ MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
+ MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
+ MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
+ MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
+ MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
+ MLX5_SET(srqc, srqc, db_record_addr_h, MLX5_GET64(wq, wq, dbr_addr) >> 32);
+ MLX5_SET(srqc, srqc, db_record_addr_l, (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
+ }
+}
+
+struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
+{
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+ struct mlx5_core_srq *srq;
+
+ spin_lock(&table->lock);
+
+ srq = radix_tree_lookup(&table->tree, srqn);
+ if (srq)
+ atomic_inc(&srq->refcount);
+
+ spin_unlock(&table->lock);
+
+ return srq;
+}
+EXPORT_SYMBOL(mlx5_core_get_srq);
+
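+/*
+ * Compute the size in bytes of the physical address list needed to
+ * describe the receive queue buffer, based on the log page size, SRQ
+ * size, stride and page offset found in the SRQ context.
+ */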
+static int get_pas_size(void *srqc)
+{
+ u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
+ u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
+ u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
+ u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
+
+ return rq_num_pas * sizeof(u64);
+
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+{
+ void *create_in;
+ void *rmpc;
+ void *srqc;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+
+ memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+ rmpc_srqc_reformat(srqc, rmpc, true);
+
+ err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *rmp_out;
+ void *rmpc;
+ void *srqc;
+ int err;
+
+ rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+ if (!rmp_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ rmpc_srqc_reformat(srqc, rmpc, false);
+
+out:
+ kvfree(rmp_out);
+ return err;
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
+{
+ return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in,
+ int srq_inlen)
+{
+ void *create_in;
+ void *srqc;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
+ pas_size = get_pas_size(srqc);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ memcpy(pas, in->pas, pas_size);
+
+ err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
+ if (err)
+ goto out;
+
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ return mlx5_core_destroy_xsrq(dev, srq->srqn);
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ u32 *xrcsrq_out;
+ int err;
+
+ xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!xrcsrq_out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
+ if (err)
+ goto out;
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq, u16 lwm)
+{
+ return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
+}
+
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen)
+{
+ struct mlx5_create_srq_mbox_out out;
+ int err;
+
+ memset(&out, 0, sizeof(out));
+
+ in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
+
+ err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), sizeof(out));
+
+ srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ struct mlx5_destroy_srq_mbox_in in;
+ struct mlx5_destroy_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ struct mlx5_query_srq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
+ in.srqn = cpu_to_be32(srq->srqn);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)out, sizeof(*out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ struct mlx5_arm_srq_mbox_in in;
+ struct mlx5_arm_srq_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
+ in.hdr.opmod = cpu_to_be16(!!is_srq);
+ in.srqn = cpu_to_be32(srq->srqn);
+ in.lwm = cpu_to_be16(lwm);
+
+ return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
+}
+
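+/*
+ * The create/destroy/query/arm entry points below dispatch on the
+ * device's ISSI: ISSI 0 devices use the legacy SRQ commands, newer
+ * devices use XRC SRQ or RMP commands depending on the resource type.
+ */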
+static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ if (!dev->issi)
+ return create_srq_cmd(dev, srq, in, inlen);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return create_xrc_srq_cmd(dev, srq, in, inlen);
+ else
+ return create_rmp_cmd(dev, srq, in, inlen);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
+{
+ if (!dev->issi)
+ return destroy_srq_cmd(dev, srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return destroy_xrc_srq_cmd(dev, srq);
+ else
+ return destroy_rmp_cmd(dev, srq);
+}
+
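+/*
+ * Create an SRQ, initialize its reference count and free completion,
+ * and insert it into the SRQ radix tree.  The SRQ is destroyed again
+ * if the tree insertion fails.
+ */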
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_create_srq_mbox_in *in, int inlen,
+ int is_xrc)
+{
+ int err;
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+ srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+
+ err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ if (err)
+ return err;
+
+ atomic_set(&srq->refcount, 1);
+ init_completion(&srq->free);
+
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, srq->srqn, srq);
+ spin_unlock_irq(&table->lock);
+ if (err) {
+ mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
+ goto err_destroy_srq_split;
+ }
+
+ return 0;
+
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_srq);
+
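+/*
+ * Remove the SRQ from the radix tree, destroy it in firmware and wait
+ * for the last reference to be dropped before returning.
+ */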
+int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
+{
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+ struct mlx5_core_srq *tmp;
+ int err;
+
+ spin_lock_irq(&table->lock);
+ tmp = radix_tree_delete(&table->tree, srq->srqn);
+ spin_unlock_irq(&table->lock);
+ if (!tmp) {
+ mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
+ return -EINVAL;
+ }
+ if (tmp != srq) {
+ mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
+ return -EINVAL;
+ }
+
+ err = destroy_srq_split(dev, srq);
+ if (err)
+ return err;
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+ wait_for_completion(&srq->free);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_core_destroy_srq);
+
+int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_query_srq_mbox_out *out)
+{
+ if (!dev->issi)
+ return query_srq_cmd(dev, srq, out);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return query_xrc_srq_cmd(dev, srq, out);
+ else
+ return query_rmp_cmd(dev, srq, out);
+}
+EXPORT_SYMBOL(mlx5_core_query_srq);
+
+int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ if (!dev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ else if (srq->common.res == MLX5_RES_XSRQ)
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ else
+ return arm_rmp_cmd(dev, srq, lwm);
+}
+EXPORT_SYMBOL(mlx5_core_arm_srq);
+
+void mlx5_init_srq_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+ spin_lock_init(&table->lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+}
+
+void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
+{
+ /* nothing */
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_srq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_transobj.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_transobj.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,418 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_transobj.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <dev/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "transobj.h"
+
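+/*
+ * Thin wrappers around the transport-object firmware commands
+ * (transport domains, RQs, SQs, TIRs, TISes, RMPs, XRC SRQs and RQTs).
+ * Each builds the command mailbox, executes it and, for create
+ * commands, returns the object number reported by firmware.
+ */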
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ int err;
+
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+ return err;
+}
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rq_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+ MLX5_SET(query_rq_in, in, rqn, rqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+ return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_sq_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
+ MLX5_SET(query_sq_in, in, sqn, sqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rmp_out)];
+ int err;
+
+ MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+ return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rmp_out)];
+
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_rmp_in)];
+ int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, in, rmpn, rmpn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, outlen);
+}
+
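+/*
+ * Arming an RMP is done with MODIFY_RMP: only the limit watermark
+ * (lwm) bit is set in the modify bitmask and the RMP is kept in the
+ * ready state.
+ */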
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+ void *in;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int err;
+
+ in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+ if (!in)
+ return -ENOMEM;
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+ err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *xsrqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ int err;
+
+ MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+ return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
+ void *srqc;
+ void *xrc_srqc;
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out,
+ MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ if (!err) {
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+ xrc_srq_context_entry);
+ srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+ memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ }
+
+ return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
+ u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+
+}
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ int err;
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ return err;
+}
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rqt_out)];
+
+ MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_uar.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_uar.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_uar.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,210 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_uar.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include <dev/mlx5/driver.h>
+#include "mlx5_core.h"
+
+int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
+
+int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_free_uar);
+
+static int need_uuar_lock(int uuarn)
+{
+ int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
+
+ if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
+ return 0;
+
+ return 1;
+}
+
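+/*
+ * Allocate the driver's UAR pages and carve each one into blue-flame
+ * (BF) registers: every UAR page is mapped with ioremap() and split
+ * into MLX5_BF_REGS_PER_PAGE registers of half the reported BF
+ * register size.  Already-allocated UARs are released on failure.
+ */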
+int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+{
+ int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
+ struct mlx5_bf *bf;
+ phys_addr_t addr;
+ int err;
+ int i;
+
+ uuari->num_uars = NUM_DRIVER_UARS;
+ uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
+
+ mutex_init(&uuari->lock);
+ uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
+
+ uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
+
+ uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
+ GFP_KERNEL);
+
+ uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
+
+ for (i = 0; i < uuari->num_uars; i++) {
+ err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
+ if (err)
+ goto out_count;
+
+ addr = pci_resource_start(dev->pdev, 0) +
+ ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
+ uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
+ if (!uuari->uars[i].map) {
+ mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ err = -ENOMEM;
+ goto out_count;
+ }
+ mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
+ uuari->uars[i].index, uuari->uars[i].map);
+ }
+
+ for (i = 0; i < tot_uuars; i++) {
+ bf = &uuari->bfs[i];
+
+ bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
+ bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
+ bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
+ bf->reg = NULL; /* Add WC support */
+ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+ (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+ MLX5_BF_OFFSET;
+ bf->need_lock = need_uuar_lock(i);
+ spin_lock_init(&bf->lock);
+ spin_lock_init(&bf->lock32);
+ bf->uuarn = i;
+ }
+
+ return 0;
+
+out_count:
+ for (i--; i >= 0; i--) {
+ iounmap(uuari->uars[i].map);
+ mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ }
+ kfree(uuari->count);
+
+ kfree(uuari->bitmap);
+
+ kfree(uuari->bfs);
+
+ kfree(uuari->uars);
+ return err;
+}
+
+int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+{
+ int i = uuari->num_uars;
+
+ for (i--; i >= 0; i--) {
+ iounmap(uuari->uars[i].map);
+ mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ }
+
+ kfree(uuari->count);
+ kfree(uuari->bitmap);
+ kfree(uuari->bfs);
+ kfree(uuari->uars);
+
+ return 0;
+}
+
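+/*
+ * Allocate a single UAR and map its page into the kernel.  If a
+ * write-combining BF mapping is available for the device, it is
+ * mapped as well.
+ */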
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ phys_addr_t pfn;
+ phys_addr_t uar_bar_start;
+ int err;
+
+ err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ return err;
+ }
+
+ uar_bar_start = pci_resource_start(mdev->pdev, 0);
+ pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ err = -ENOMEM;
+ mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+ goto err_free_uar;
+ }
+
+ if (mdev->priv.bf_mapping)
+ uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+ uar->index << PAGE_SHIFT);
+
+ return 0;
+
+err_free_uar:
+ mlx5_cmd_free_uar(mdev, uar->index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ io_mapping_unmap(uar->bf_map);
+ iounmap(uar->map);
+ mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_uar.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1657 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_vport.c 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#include <linux/etherdevice.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/vport.h>
+#include "mlx5_core.h"
+
+static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
+ int inlen);
+
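+/*
+ * Issue QUERY_VPORT_STATE for the given vport.  A non-zero vport
+ * number means another vport is being queried, so the other_vport
+ * bit is set in that case.
+ */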
+static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u32 *out, int outlen)
+{
+ int err;
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_vport_state_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+ return err;
+}
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+
+ _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
+
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+
+ _mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+
+ return MLX5_GET(query_vport_state_out, out, admin_state);
+}
+EXPORT_SYMBOL(mlx5_query_vport_admin_state);
+
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 state)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(modify_vport_state_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE);
+ MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+
+ MLX5_SET(modify_vport_state_in, in, admin_state, state);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
+
+static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+}
+
+static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
+ int client_id)
+{
+ switch (client_id) {
+ case MLX5_INTERFACE_PROTOCOL_IB:
+ return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
+ MLX5_QCOUNTER_SETS_NETDEV);
+ case MLX5_INTERFACE_PROTOCOL_ETH:
+ return MLX5_QCOUNTER_SETS_NETDEV;
+ default:
+ mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
+ return 0;
+ }
+}
+
+int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
+ int client_id, u16 *counter_set_id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ int err;
+
+ if (mdev->num_q_counter_allocated[client_id] >
+ mlx5_vport_max_q_counter_allocator(mdev, client_id))
+ return -EINVAL;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+
+ if (!err)
+ *counter_set_id = MLX5_GET(alloc_q_counter_out, out,
+ counter_set_id);
+
+ mdev->num_q_counter_allocated[client_id]++;
+
+ return err;
+}
+
+int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
+ int client_id, u16 counter_set_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+ int err;
+
+ if (mdev->num_q_counter_allocated[client_id] <= 0)
+ return -EINVAL;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ counter_set_id);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, sizeof(out));
+
+ mdev->num_q_counter_allocated[client_id]--;
+
+ return err;
+}
+
+int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
+ u16 counter_set_id,
+ int reset,
+ void *out,
+ int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, clear, reset);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
+ out, out_size);
+}
+
+int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
+ u16 counter_set_id,
+ u32 *out_of_rx_buffer)
+{
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
+ int err;
+
+ memset(out, 0, sizeof(out));
+
+ err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
+ sizeof(out));
+
+ if (err)
+ return err;
+
+ *out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
+ out_of_buffer);
+ return err;
+}
+
+int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *addr)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u8 *out_addr;
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context.permanent_address);
+
+ err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+ if (err)
+ goto out;
+
+ ether_addr_copy(addr, &out_addr[2]);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *addr)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+ void *nic_vport_ctx;
+ u8 *perm_mac;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.permanent_address, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
+ permanent_address);
+
+ ether_addr_copy(&perm_mac[2], addr);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
+
+int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ u64 *system_image_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
+ nic_vport_context.system_image_guid);
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *node_guid = MLX5_GET64(query_nic_vport_context_out, out,
+ nic_vport_context.node_guid);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+
+static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
+ u64 *port_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *port_guid = MLX5_GET64(query_nic_vport_context_out, out,
+ nic_vport_context.port_guid);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+ u16 *qkey_viol_cntr)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.qkey_violation_counter);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
+
+static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
+ int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+}
+
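+/*
+ * Enable or disable RoCE on the NIC vport by toggling roce_en through
+ * MODIFY_NIC_VPORT_CONTEXT; the exported enable/disable helpers below
+ * wrap this.
+ */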
+static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
+ int enable_disable)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
+ enable_disable);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
+ bool other_vport, u8 *addr)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ + MLX5_ST_SZ_BYTES(mac_address_layout);
+ u8 *mac_layout;
+ u8 *mac_ptr;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, other_vport);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.allowed_list_type,
+ MLX5_NIC_VPORT_LIST_TYPE_UC);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.allowed_list_size, 1);
+
+ mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+ nic_vport_context.current_uc_mac_address);
+ mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
+ mac_addr_47_32);
+ ether_addr_copy(mac_ptr, addr);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
+
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+ void *nic_vport_context;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EPERM;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
+
+int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 port_guid)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+ void *nic_vport_context;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EPERM;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.port_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
+
+int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
+ u16 *vlan_list, int list_len)
+{
+ void *in, *ctx;
+ int i, err;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ + MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
+
+ int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
+
+ if (list_len > max_list_size) {
+ mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
+ list_len, max_list_size);
+ return -ENOSPC;
+ }
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+
+ ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
+
+ MLX5_SET(nic_vport_context, ctx, allowed_list_type,
+ MLX5_NIC_VPORT_LIST_TYPE_VLAN);
+ MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
+
+ for (i = 0; i < list_len; i++) {
+ u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
+ current_uc_mac_address[i]);
+ MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
+ }
+
+ err = mlx5_modify_nic_vport_context(dev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
+
+int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
+ u64 *addr_list, size_t addr_list_len)
+{
+ void *in, *ctx;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
+ + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
+ int err;
+ size_t i;
+ int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
+
+ if ((int)addr_list_len > max_list_sz) {
+ mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
+ (int)addr_list_len, max_list_sz);
+ return -ENOSPC;
+ }
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+
+ ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
+
+ MLX5_SET(nic_vport_context, ctx, allowed_list_type,
+ MLX5_NIC_VPORT_LIST_TYPE_MC);
+ MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
+
+ for (i = 0; i < addr_list_len; i++) {
+ u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
+ current_uc_mac_address[i]);
+ u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
+ mac_addr_47_32);
+ ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
+ }
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
+
+int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
+ bool promisc_mc, bool promisc_uc,
+ bool promisc_all)
+{
+ u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
+ u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+ nic_vport_context);
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
+
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_nic_vport_context_in, in,
+ other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
+ if (promisc_mc)
+ MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
+ if (promisc_uc)
+ MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
+ if (promisc_all)
+ MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
+
+ return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
+
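+/*
+ * Read the unicast or multicast MAC allowed list from the NIC vport
+ * context.  The caller passes the capacity of addr_list in *list_size;
+ * the request is clamped to the device maximum and *list_size is
+ * updated to the number of entries actually returned.
+ */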
+int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ u16 vport,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int *list_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ void *nic_vport_ctx;
+ int max_list_size;
+ int req_list_size;
+ int out_sz;
+ void *out;
+ int err;
+ int i;
+
+ req_list_size = *list_size;
+
+ max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
+ 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
+
+ if (req_list_size > max_list_size) {
+ mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
+ req_list_size, max_list_size);
+ req_list_size = max_list_size;
+ }
+
+ out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context);
+ req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
+ allowed_list_size);
+
+ *list_size = req_list_size;
+ for (i = 0; i < req_list_size; i++) {
+ u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
+ nic_vport_ctx,
+ current_uc_mac_address[i]) + 2;
+ ether_addr_copy(addr_list[i], mac_addr);
+ }
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
+
+int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int list_size)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ void *nic_vport_ctx;
+ int max_list_size;
+ int in_sz;
+ void *in;
+ int err;
+ int i;
+
+ max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
+ 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
+
+ if (list_size > max_list_size)
+ return -ENOSPC;
+
+ in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+
+ memset(out, 0, sizeof(out));
+ in = kzalloc(in_sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+ nic_vport_context);
+
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ allowed_list_type, list_type);
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ allowed_list_size, list_size);
+
+ for (i = 0; i < list_size; i++) {
+ u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
+ nic_vport_ctx,
+ current_uc_mac_address[i]) + 2;
+ ether_addr_copy(curr_mac, addr_list[i]);
+ }
+
+ err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ kfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
+
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vport,
+ u16 vlans[],
+ int *size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ void *nic_vport_ctx;
+ int req_list_size;
+ int max_list_size;
+ int out_sz;
+ void *out;
+ int err;
+ int i;
+
+ req_list_size = *size;
+ max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
+ if (req_list_size > max_list_size) {
+ mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
+ req_list_size, max_list_size);
+ req_list_size = max_list_size;
+ }
+
+ out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
+
+ memset(in, 0, sizeof(in));
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
+ MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+
+ if (vport)
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context);
+ req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
+ allowed_list_size);
+
+ *size = req_list_size;
+ for (i = 0; i < req_list_size; i++) {
+ void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
+ nic_vport_ctx,
+ current_uc_mac_address[i]);
+ vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
+ }
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
+
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vlans[],
+ int list_size)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+ void *nic_vport_ctx;
+ int max_list_size;
+ int in_sz;
+ void *in;
+ int err;
+ int i;
+
+ max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
+
+ if (list_size > max_list_size)
+ return -ENOSPC;
+
+ in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+ list_size * MLX5_ST_SZ_BYTES(vlan_layout);
+
+ memset(out, 0, sizeof(out));
+ in = kzalloc(in_sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.addresses_list, 1);
+
+ nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+ nic_vport_context);
+
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ allowed_list_size, list_size);
+
+ for (i = 0; i < list_size; i++) {
+ void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
+ nic_vport_ctx,
+ current_uc_mac_address[i]);
+ MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
+ }
+
+ err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
+ kfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
+
+int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *enable = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.roce_en);
+
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
+
+int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
+ u8 *addr)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ u8 *mac_ptr;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.permanent_address, 1);
+ mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+ nic_vport_context.permanent_address.mac_addr_47_32);
+ ether_addr_copy(mac_ptr, addr);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
+
+int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
+{
+ return mlx5_nic_vport_enable_disable_roce(mdev, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
+
+int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
+{
+ return mlx5_nic_vport_enable_disable_roce(mdev, 0);
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
+ u8 port_num, u8 vport_num, u32 *out,
+ int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
+ int is_group_manager;
+
+ is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_hca_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+ if (vport_num) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_context_in, in, other_vport,
+ 1);
+ MLX5_SET(query_hca_vport_context_in, in, vport_number,
+ vport_num);
+ } else {
+ return -EPERM;
+ }
+ }
+
+ if (MLX5_CAP_GEN(mdev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+ return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ u64 *system_image_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
+ hca_vport_context.system_image_guid);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *node_guid = MLX5_GET64(query_hca_vport_context_out, out,
+ hca_vport_context.node_guid);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
+
+static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
+ u64 *port_guid)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *port_guid = MLX5_GET64(query_hca_vport_context_out, out,
+ hca_vport_context.port_guid);
+
+out:
+ kvfree(out);
+ return err;
+}
+
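+/*
+ * Query one GID table entry (or, with gid_index 0xffff, the whole
+ * table) for an HCA vport.  Querying another vport requires the
+ * vport group manager capability.
+ */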
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
+ u16 vport_num, u16 gid_index, union ib_gid *gid)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ union ib_gid *tmp;
+ int tbsz;
+ int nout;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+ tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+
+ if (gid_index > tbsz && gid_index != 0xffff)
+ return -EINVAL;
+
+ if (gid_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * sizeof(*gid);
+
+ in = mlx5_vzalloc(in_sz);
+ out = mlx5_vzalloc(out_sz);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+ if (vport_num) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_gid_in, in, vport_number,
+ vport_num);
+ MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+
+ MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
+ gid->global.subnet_prefix = tmp->global.subnet_prefix;
+ gid->global.interface_id = tmp->global.interface_id;
+
+out:
+ kvfree(in);
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+ int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+ int is_group_manager;
+ void *out = NULL;
+ void *in = NULL;
+ void *pkarr;
+ int nout;
+ int tbsz;
+ int err;
+ int i;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+ if (pkey_index > tbsz && pkey_index != 0xffff)
+ return -EINVAL;
+
+ if (pkey_index == 0xffff)
+ nout = tbsz;
+ else
+ nout = 1;
+
+ out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!in || !out) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+ MLX5_SET(query_hca_vport_pkey_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+ if (other_vport) {
+ if (is_group_manager) {
+ MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
+ vf_num);
+ MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+ } else {
+ err = -EPERM;
+ goto out;
+ }
+ }
+ MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+ if (err)
+ goto out;
+
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto out;
+
+ pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+ for (i = 0; i < nout; i++, pkey++,
+ pkarr += MLX5_ST_SZ_BYTES(pkey))
+ *pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
+static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
+ int *min_header)
+{
+ u32 *out;
+ u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *min_header = MLX5_GET(query_hca_vport_context_out, out,
+ hca_vport_context.min_wqe_inline_mode);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
+ u16 vport, void *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
+ int err;
+
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+
+ MLX5_SET(modify_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen,
+ out, sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
+
+ return err;
+}
+
+int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
+ u8 insert_mode, u8 strip_mode,
+ u16 vlan, u8 cfi, u8 pcp)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
+
+ memset(in, 0, sizeof(in));
+
+ if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.cvlan_cfi, cfi);
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.cvlan_pcp, pcp);
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.cvlan_id, vlan);
+ }
+
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.vport_cvlan_insert, insert_mode);
+
+ MLX5_SET(modify_esw_vport_context_in, in,
+ esw_vport_context.vport_cvlan_strip, strip_mode);
+
+ MLX5_SET(modify_esw_vport_context_in, in, field_select,
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
+ MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
+
+ return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
+
+int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
+{
+ u32 *out;
+ u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *mtu = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.mtu);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
+
+int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
+{
+ u32 *in;
+ u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
+
+static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
+ int *min_header)
+{
+ u32 *out;
+ u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *min_header = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.min_wqe_inline_mode);
+
+out:
+ kvfree(out);
+ return err;
+}
+
+int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
+ u8 vport, int min_header)
+{
+ u32 *in;
+ u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.min_wqe_inline_mode, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.min_wqe_inline_mode, min_header);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
+
+int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_min_wqe_header(dev, min_header);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_vport_min_wqe_header(dev, min_header);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
+
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ u16 vport,
+ int *promisc_uc,
+ int *promisc_mc,
+ int *promisc_all)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+ if (err)
+ goto out;
+
+ *promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.promisc_uc);
+ *promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.promisc_mc);
+ *promisc_all = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.promisc_all);
+
+out:
+ kfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
+
+int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ int promisc_uc,
+ int promisc_mc,
+ int promisc_all)
+{
+ void *in;
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_err(mdev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.promisc_uc, promisc_uc);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.promisc_mc, promisc_mc);
+ MLX5_SET(modify_nic_vport_context_in, in,
+ nic_vport_context.promisc_all, promisc_all);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
+
+int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
+ u8 port_num, u16 vport_num,
+ void *out, int out_size)
+{
+ int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+ int is_group_manager;
+ void *in;
+ int err;
+
+ is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+ in = mlx5_vzalloc(in_sz);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ if (vport_num) {
+ if (is_group_manager) {
+ MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+ MLX5_SET(query_vport_counter_in, in, vport_number,
+ vport_num);
+ } else {
+ err = -EPERM;
+ goto ex;
+ }
+ }
+ if (MLX5_CAP_GEN(dev, num_ports) == 2)
+ MLX5_SET(query_vport_counter_in, in, port_num, port_num);
+
+ err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
+ if (err)
+ goto ex;
+ err = mlx5_cmd_status_to_err_v2(out);
+ if (err)
+ goto ex;
+
+ex:
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
+
+int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
+ struct mlx5_vport_counters *vc)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ void *out;
+ int err;
+
+ out = mlx5_vzalloc(out_sz);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
+ if (err)
+ goto ex;
+
+ vc->received_errors.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_errors.packets);
+ vc->received_errors.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_errors.octets);
+ vc->transmit_errors.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmit_errors.packets);
+ vc->transmit_errors.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmit_errors.octets);
+ vc->received_ib_unicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_ib_unicast.packets);
+ vc->received_ib_unicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_ib_unicast.octets);
+ vc->transmitted_ib_unicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_ib_unicast.packets);
+ vc->transmitted_ib_unicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_ib_unicast.octets);
+ vc->received_ib_multicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_ib_multicast.packets);
+ vc->received_ib_multicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_ib_multicast.octets);
+ vc->transmitted_ib_multicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_ib_multicast.packets);
+ vc->transmitted_ib_multicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_ib_multicast.octets);
+ vc->received_eth_broadcast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_eth_broadcast.packets);
+ vc->received_eth_broadcast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_eth_broadcast.octets);
+ vc->transmitted_eth_broadcast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_eth_broadcast.packets);
+ vc->transmitted_eth_broadcast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_eth_broadcast.octets);
+ vc->received_eth_unicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_eth_unicast.octets);
+ vc->received_eth_unicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_eth_unicast.packets);
+ vc->transmitted_eth_unicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_eth_unicast.octets);
+ vc->transmitted_eth_unicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_eth_unicast.packets);
+ vc->received_eth_multicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_eth_multicast.octets);
+ vc->received_eth_multicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, received_eth_multicast.packets);
+ vc->transmitted_eth_multicast.octets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_eth_multicast.octets);
+ vc->transmitted_eth_multicast.packets =
+ MLX5_GET64(query_vport_counter_out,
+ out, transmitted_eth_multicast.packets);
+
+ex:
+ kvfree(out);
+ return err;
+}
+
+int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_vport_system_image_guid(dev,
+ sys_image_guid);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_nic_vport_system_image_guid(dev,
+ sys_image_guid);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
+
+int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_vport_node_guid(dev, node_guid);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_nic_vport_node_guid(dev, node_guid);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
+
+int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
+{
+ switch (MLX5_CAP_GEN(dev, port_type)) {
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
+ return mlx5_query_hca_vport_port_guid(dev, port_guid);
+
+ case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
+ return mlx5_query_nic_vport_port_guid(dev, port_guid);
+
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
+
+int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
+{
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
+ if (err)
+ goto out;
+
+ *vport_state = MLX5_GET(query_hca_vport_context_out, out,
+ hca_vport_context.vport_state);
+
+out:
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_vport.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
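
Note: the newly imported mlx5_vport.c above exports a set of vport query/modify
helpers.  A minimal caller sketch follows to show the intended usage pattern
only; the example_vport_setup() wrapper, the 9000-byte target MTU, and the
assumption that the prototypes are visible through the new vport.h header are
illustrative and not part of this commit.

/* Illustrative only -- not part of the imported sources. */
#include <dev/mlx5/driver.h>
#include <dev/mlx5/vport.h>

static int
example_vport_setup(struct mlx5_core_dev *mdev)
{
	u64 node_guid;
	int mtu;
	int err;

	/* Read the current MTU from the NIC vport context. */
	err = mlx5_query_vport_mtu(mdev, &mtu);
	if (err)
		return err;

	/* Assumed jumbo-frame target of 9000 bytes; raise only if needed. */
	if (mtu < 9000) {
		err = mlx5_set_vport_mtu(mdev, 9000);
		if (err)
			return err;
	}

	/* Dispatches to the HCA (IB) or NIC (Ethernet) query by port type. */
	return mlx5_query_vport_node_guid(mdev, &node_guid);
}
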
Added: trunk/sys/dev/mlx5/mlx5_core/mlx5_wq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/mlx5_wq.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/mlx5_wq.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,188 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_wq.c 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#include <dev/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+ return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+ return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int max_direct = param->linear ? INT_MAX : 0;
+ int err;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+ max_direct, &wq_ctrl->buf,
+ param->buf_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int max_direct = param->linear ? INT_MAX : 0;
+ int err;
+
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+ max_direct, &wq_ctrl->buf,
+ param->buf_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int max_direct = param->linear ? INT_MAX : 0;
+ int err;
+ int i;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ max_direct, &wq_ctrl->buf,
+ param->buf_numa_node);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ for (i = 0; i < wq->sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+ mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_core/mlx5_wq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
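
Note: mlx5_wq.c above provides the create/destroy helpers for the three work
queue flavors (cyclic, CQ and linked-list).  A lifecycle sketch for the cyclic
variant follows; the NUMA nodes, stride and size values are assumed for
illustration and the sketch is not part of the imported sources.

/* Illustrative only -- not part of the imported sources. */
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/wq.h>

static int
example_cyclic_wq(struct mlx5_core_dev *mdev, void *wqc,
    struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl)
{
	struct mlx5_wq_param param = {
		.linear = 1,		/* request one contiguous buffer */
		.buf_numa_node = 0,	/* assumed NUMA placement */
		.db_numa_node = 0,
	};
	int err;

	/* mlx5_wq_cyc_create() reads the stride and size back out of wqc. */
	MLX5_SET(wq, wqc, log_wq_stride, 6);	/* 64-byte WQEBBs (assumed) */
	MLX5_SET(wq, wqc, log_wq_sz, 10);	/* 1024 entries (assumed) */

	err = mlx5_wq_cyc_create(mdev, &param, wqc, wq, wq_ctrl);
	if (err)
		return err;

	/* ... post work requests via mlx5_wq_cyc_get_wqe(wq, ix) ... */

	mlx5_wq_destroy(wq_ctrl);
	return 0;
}
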
Added: trunk/sys/dev/mlx5/mlx5_core/transobj.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/transobj.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/transobj.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,66 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/transobj.h 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
+#endif /* __TRANSOBJ_H__ */
Property changes on: trunk/sys/dev/mlx5/mlx5_core/transobj.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
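
Note: transobj.h above declares the transport-object plumbing (transport
domains plus RQ/SQ/TIR/TIS/RMP/RQT create, modify and destroy calls) that the
Ethernet driver builds on.  A minimal sketch of the transport-domain bracket
follows; the wrapper name and the elided object creation are assumptions for
illustration only.

/* Illustrative only -- not part of the imported sources. */
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/transobj.h>

static int
example_transport_domain(struct mlx5_core_dev *dev)
{
	u32 tdn;
	int err;

	/* A transport domain groups the TIS/TIR/RQ/SQ objects created later. */
	err = mlx5_alloc_transport_domain(dev, &tdn);
	if (err)
		return err;

	/* ... create TISes, TIRs, RQs and SQs that reference tdn ... */

	mlx5_dealloc_transport_domain(dev, tdn);
	return 0;
}
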
Added: trunk/sys/dev/mlx5/mlx5_core/wq.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_core/wq.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_core/wq.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,167 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/wq.h 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <dev/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+ int linear;
+ int buf_numa_node;
+ int db_numa_node;
+};
+
+struct mlx5_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+};
+
+struct mlx5_wq_cyc {
+ void *buf;
+ __be32 *db;
+ u16 sz_m1;
+ u8 log_stride;
+};
+
+struct mlx5_cqwq {
+ void *buf;
+ __be32 *db;
+ u32 sz_m1;
+ u32 cc; /* consumer counter */
+ u8 log_sz;
+ u8 log_stride;
+};
+
+struct mlx5_wq_ll {
+ void *buf;
+ __be32 *db;
+ __be16 *tail_next;
+ u16 sz_m1;
+ u16 head;
+ u16 wqe_ctr;
+ u16 cur_sz;
+ u8 log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+ int equal = (cc1 == cc2);
+ int smaller = 0x8000 & (cc1 - cc2);
+
+ return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+ return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+ return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+ wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+ *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+ return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+ wq->head = head_next;
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+ __be16 *next_tail_next)
+{
+ *wq->tail_next = ix;
+ wq->tail_next = next_tail_next;
+ wq->cur_sz--;
+}
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+#endif /* __MLX5_WQ_H__ */
Property changes on: trunk/sys/dev/mlx5/mlx5_core/wq.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
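
Note: the inline helpers in wq.h above implement ring indexing with
free-running counters: a counter maps to a slot by masking with size - 1, and
the CQ wrap count is the counter shifted right by log_sz.  A small worked
example with assumed ring sizes:

/* Illustrative arithmetic only -- not part of the imported sources. */
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/wq.h>

static void
example_wq_indexing(void)
{
	struct mlx5_wq_cyc wq = { .sz_m1 = 1023 };	/* log_wq_sz == 10 */
	struct mlx5_cqwq cq = { .sz_m1 = 1023, .log_sz = 10, .cc = 2049 };

	/* Cyclic WQ: the counter wraps by masking with size - 1. */
	(void)mlx5_wq_cyc_ctr2ix(&wq, 1030);	/* 1030 & 1023 == 6 */

	/* CQ: current index and number of times the ring has wrapped. */
	(void)mlx5_cqwq_get_ci(&cq);		/* 2049 & 1023 == 1 */
	(void)mlx5_cqwq_get_wrap_cnt(&cq);	/* 2049 >> 10 == 2 */
}
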
Added: trunk/sys/dev/mlx5/mlx5_en/en.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/en.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/en.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,841 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/en.h 329300 2018-02-15 08:48:04Z hselasky $
+ */
+
+#ifndef _MLX5_EN_H_
+#define _MLX5_EN_H_
+
+#include <linux/kmod.h>
+#include <linux/page.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/vmalloc.h>
+#include <linux/moduleparam.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+#include <net/ethernet.h>
+#include <sys/buf_ring.h>
+
+#if (__FreeBSD_version >= 1100000)
+#include "opt_rss.h"
+#endif
+
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
+#endif
+
+#include <machine/bus.h>
+
+#ifdef HAVE_TURBO_LRO
+#include "tcp_tlro.h"
+#endif
+
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/cq.h>
+#include <dev/mlx5/vport.h>
+#include <dev/mlx5/diagnostics.h>
+
+#include <dev/mlx5/mlx5_core/wq.h>
+#include <dev/mlx5/mlx5_core/transobj.h>
+#include <dev/mlx5/mlx5_core/mlx5_core.h>
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xe
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xe
+
+/* FreeBSD HW LRO is limited to 16KB, the size of the largest mbuf */
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ MJUM16BYTES
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_CACHELINE_SIZE CACHE_LINE_SIZE
+#define MLX5E_HW2SW_MTU(hwmtu) \
+ ((hwmtu) - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) \
+ ((swmtu) + (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
+#define MLX5E_SW2MB_MTU(swmtu) \
+ (MLX5E_SW2HW_MTU(swmtu) + MLX5E_NET_IP_ALIGN)
+#define MLX5E_MTU_MIN 72 /* Min MTU allowed by the kernel */
+#define MLX5E_MTU_MAX MIN(ETHERMTU_JUMBO, MJUM16BYTES) /* Max MTU of Ethernet
+ * jumbo frames */
+
+#define MLX5E_BUDGET_MAX 8192 /* RX and TX */
+#define MLX5E_RX_BUDGET_MAX 256
+#define MLX5E_SQ_BF_BUDGET 16
+#define MLX5E_SQ_TX_QUEUE_SIZE 4096 /* SQ drbr queue size */
+
+#define MLX5E_MAX_TX_NUM_TC 8 /* units */
+#define MLX5E_MAX_TX_HEADER 128 /* bytes */
+#define MLX5E_MAX_TX_PAYLOAD_SIZE 65536 /* bytes */
+#define MLX5E_MAX_TX_MBUF_SIZE 65536 /* bytes */
+#define MLX5E_MAX_TX_MBUF_FRAGS \
+ ((MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) - \
+ (MLX5E_MAX_TX_HEADER / MLX5_SEND_WQE_DS)) /* units */
+#define MLX5E_MAX_TX_INLINE \
+ (MLX5E_MAX_TX_HEADER - sizeof(struct mlx5e_tx_wqe) + \
+ sizeof(((struct mlx5e_tx_wqe *)0)->eth.inline_hdr_start)) /* bytes */
+
+MALLOC_DECLARE(M_MLX5EN);
+
+struct mlx5_core_dev;
+struct mlx5e_cq;
+
+typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *);
+
+#define MLX5E_STATS_COUNT(a,b,c,d) a
+#define MLX5E_STATS_VAR(a,b,c,d) b;
+#define MLX5E_STATS_DESC(a,b,c,d) c, d,
+
+#define MLX5E_VPORT_STATS(m) \
+ /* HW counters */ \
+ m(+1, u64 rx_packets, "rx_packets", "Received packets") \
+ m(+1, u64 rx_bytes, "rx_bytes", "Received bytes") \
+ m(+1, u64 tx_packets, "tx_packets", "Transmitted packets") \
+ m(+1, u64 tx_bytes, "tx_bytes", "Transmitted bytes") \
+ m(+1, u64 rx_error_packets, "rx_error_packets", "Received error packets") \
+ m(+1, u64 rx_error_bytes, "rx_error_bytes", "Received error bytes") \
+ m(+1, u64 tx_error_packets, "tx_error_packets", "Transmitted error packets") \
+ m(+1, u64 tx_error_bytes, "tx_error_bytes", "Transmitted error bytes") \
+ m(+1, u64 rx_unicast_packets, "rx_unicast_packets", "Received unicast packets") \
+ m(+1, u64 rx_unicast_bytes, "rx_unicast_bytes", "Received unicast bytes") \
+ m(+1, u64 tx_unicast_packets, "tx_unicast_packets", "Transmitted unicast packets") \
+ m(+1, u64 tx_unicast_bytes, "tx_unicast_bytes", "Transmitted unicast bytes") \
+ m(+1, u64 rx_multicast_packets, "rx_multicast_packets", "Received multicast packets") \
+ m(+1, u64 rx_multicast_bytes, "rx_multicast_bytes", "Received multicast bytes") \
+ m(+1, u64 tx_multicast_packets, "tx_multicast_packets", "Transmitted multicast packets") \
+ m(+1, u64 tx_multicast_bytes, "tx_multicast_bytes", "Transmitted multicast bytes") \
+ m(+1, u64 rx_broadcast_packets, "rx_broadcast_packets", "Received broadcast packets") \
+ m(+1, u64 rx_broadcast_bytes, "rx_broadcast_bytes", "Received broadcast bytes") \
+ m(+1, u64 tx_broadcast_packets, "tx_broadcast_packets", "Transmitted broadcast packets") \
+ m(+1, u64 tx_broadcast_bytes, "tx_broadcast_bytes", "Transmitted broadcast bytes") \
+ m(+1, u64 rx_out_of_buffer, "rx_out_of_buffer", "Receive out of buffer, no recv wqes events") \
+ /* SW counters */ \
+ m(+1, u64 tso_packets, "tso_packets", "Transmitted TSO packets") \
+ m(+1, u64 tso_bytes, "tso_bytes", "Transmitted TSO bytes") \
+ m(+1, u64 lro_packets, "lro_packets", "Received LRO packets") \
+ m(+1, u64 lro_bytes, "lro_bytes", "Received LRO bytes") \
+ m(+1, u64 sw_lro_queued, "sw_lro_queued", "Packets queued for SW LRO") \
+ m(+1, u64 sw_lro_flushed, "sw_lro_flushed", "Packets flushed from SW LRO") \
+ m(+1, u64 rx_csum_good, "rx_csum_good", "Received checksum valid packets") \
+ m(+1, u64 rx_csum_none, "rx_csum_none", "Received no checksum packets") \
+ m(+1, u64 tx_csum_offload, "tx_csum_offload", "Transmit checksum offload packets") \
+ m(+1, u64 tx_queue_dropped, "tx_queue_dropped", "Transmit queue dropped") \
+ m(+1, u64 tx_defragged, "tx_defragged", "Transmit queue defragged") \
+ m(+1, u64 rx_wqe_err, "rx_wqe_err", "Receive WQE errors")
+
+#define MLX5E_VPORT_STATS_NUM (0 MLX5E_VPORT_STATS(MLX5E_STATS_COUNT))
+
+struct mlx5e_vport_stats {
+ struct sysctl_ctx_list ctx;
+ u64 arg [0];
+ MLX5E_VPORT_STATS(MLX5E_STATS_VAR)
+ u32 rx_out_of_buffer_prev;
+};
+
+#define MLX5E_PPORT_IEEE802_3_STATS(m) \
+ m(+1, u64 frames_tx, "frames_tx", "Frames transmitted") \
+ m(+1, u64 frames_rx, "frames_rx", "Frames received") \
+ m(+1, u64 check_seq_err, "check_seq_err", "Sequence errors") \
+ m(+1, u64 alignment_err, "alignment_err", "Alignment errors") \
+ m(+1, u64 octets_tx, "octets_tx", "Bytes transmitted") \
+ m(+1, u64 octets_received, "octets_received", "Bytes received") \
+ m(+1, u64 multicast_xmitted, "multicast_xmitted", "Multicast transmitted") \
+ m(+1, u64 broadcast_xmitted, "broadcast_xmitted", "Broadcast transmitted") \
+ m(+1, u64 multicast_rx, "multicast_rx", "Multicast received") \
+ m(+1, u64 broadcast_rx, "broadcast_rx", "Broadcast received") \
+ m(+1, u64 in_range_len_errors, "in_range_len_errors", "In range length errors") \
+ m(+1, u64 out_of_range_len, "out_of_range_len", "Out of range length errors") \
+ m(+1, u64 too_long_errors, "too_long_errors", "Too long errors") \
+ m(+1, u64 symbol_err, "symbol_err", "Symbol errors") \
+ m(+1, u64 mac_control_tx, "mac_control_tx", "MAC control transmitted") \
+ m(+1, u64 mac_control_rx, "mac_control_rx", "MAC control received") \
+ m(+1, u64 unsupported_op_rx, "unsupported_op_rx", "Unsupported operation received") \
+ m(+1, u64 pause_ctrl_rx, "pause_ctrl_rx", "Pause control received") \
+ m(+1, u64 pause_ctrl_tx, "pause_ctrl_tx", "Pause control transmitted")
+
+#define MLX5E_PPORT_RFC2819_STATS(m) \
+ m(+1, u64 drop_events, "drop_events", "Dropped events") \
+ m(+1, u64 octets, "octets", "Octets") \
+ m(+1, u64 pkts, "pkts", "Packets") \
+ m(+1, u64 broadcast_pkts, "broadcast_pkts", "Broadcast packets") \
+ m(+1, u64 multicast_pkts, "multicast_pkts", "Multicast packets") \
+ m(+1, u64 crc_align_errors, "crc_align_errors", "CRC alignment errors") \
+ m(+1, u64 undersize_pkts, "undersize_pkts", "Undersized packets") \
+ m(+1, u64 oversize_pkts, "oversize_pkts", "Oversized packets") \
+ m(+1, u64 fragments, "fragments", "Fragments") \
+ m(+1, u64 jabbers, "jabbers", "Jabbers") \
+ m(+1, u64 collisions, "collisions", "Collisions")
+
+#define MLX5E_PPORT_RFC2819_STATS_DEBUG(m) \
+ m(+1, u64 p64octets, "p64octets", "Bytes") \
+ m(+1, u64 p65to127octets, "p65to127octets", "Bytes") \
+ m(+1, u64 p128to255octets, "p128to255octets", "Bytes") \
+ m(+1, u64 p256to511octets, "p256to511octets", "Bytes") \
+ m(+1, u64 p512to1023octets, "p512to1023octets", "Bytes") \
+ m(+1, u64 p1024to1518octets, "p1024to1518octets", "Bytes") \
+ m(+1, u64 p1519to2047octets, "p1519to2047octets", "Bytes") \
+ m(+1, u64 p2048to4095octets, "p2048to4095octets", "Bytes") \
+ m(+1, u64 p4096to8191octets, "p4096to8191octets", "Bytes") \
+ m(+1, u64 p8192to10239octets, "p8192to10239octets", "Bytes")
+
+#define MLX5E_PPORT_RFC2863_STATS_DEBUG(m) \
+ m(+1, u64 in_octets, "in_octets", "In octets") \
+ m(+1, u64 in_ucast_pkts, "in_ucast_pkts", "In unicast packets") \
+ m(+1, u64 in_discards, "in_discards", "In discards") \
+ m(+1, u64 in_errors, "in_errors", "In errors") \
+ m(+1, u64 in_unknown_protos, "in_unknown_protos", "In unknown protocols") \
+ m(+1, u64 out_octets, "out_octets", "Out octets") \
+ m(+1, u64 out_ucast_pkts, "out_ucast_pkts", "Out unicast packets") \
+ m(+1, u64 out_discards, "out_discards", "Out discards") \
+ m(+1, u64 out_errors, "out_errors", "Out errors") \
+ m(+1, u64 in_multicast_pkts, "in_multicast_pkts", "In multicast packets") \
+ m(+1, u64 in_broadcast_pkts, "in_broadcast_pkts", "In broadcast packets") \
+ m(+1, u64 out_multicast_pkts, "out_multicast_pkts", "Out multicast packets") \
+ m(+1, u64 out_broadcast_pkts, "out_broadcast_pkts", "Out broadcast packets")
+
+#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m) \
+ m(+1, u64 time_since_last_clear, "time_since_last_clear", \
+ "Time since the last counters clear event (msec)") \
+ m(+1, u64 symbol_errors, "symbol_errors", "Symbol errors") \
+ m(+1, u64 sync_headers_errors, "sync_headers_errors", "Sync header error counter") \
+ m(+1, u64 bip_errors_lane0, "edpl_bip_errors_lane0", \
+ "Indicates the number of PRBS errors on lane 0") \
+ m(+1, u64 bip_errors_lane1, "edpl_bip_errors_lane1", \
+ "Indicates the number of PRBS errors on lane 1") \
+ m(+1, u64 bip_errors_lane2, "edpl_bip_errors_lane2", \
+ "Indicates the number of PRBS errors on lane 2") \
+ m(+1, u64 bip_errors_lane3, "edpl_bip_errors_lane3", \
+ "Indicates the number of PRBS errors on lane 3") \
+ m(+1, u64 fc_corrected_blocks_lane0, "fc_corrected_blocks_lane0", \
+ "FEC correctable block counter lane 0") \
+ m(+1, u64 fc_corrected_blocks_lane1, "fc_corrected_blocks_lane1", \
+ "FEC correctable block counter lane 1") \
+ m(+1, u64 fc_corrected_blocks_lane2, "fc_corrected_blocks_lane2", \
+ "FEC correctable block counter lane 2") \
+ m(+1, u64 fc_corrected_blocks_lane3, "fc_corrected_blocks_lane3", \
+ "FEC correctable block counter lane 3") \
+ m(+1, u64 rs_corrected_blocks, "rs_corrected_blocks", \
+	    "FEC correctable block counter") \
+ m(+1, u64 rs_uncorrectable_blocks, "rs_uncorrectable_blocks", \
+	    "FEC uncorrectable block counter") \
+ m(+1, u64 rs_no_errors_blocks, "rs_no_errors_blocks", \
+ "The number of RS-FEC blocks received that had no errors") \
+ m(+1, u64 rs_single_error_blocks, "rs_single_error_blocks", \
+	    "The number of corrected RS-FEC blocks received that had " \
+ "exactly 1 error symbol") \
+ m(+1, u64 rs_corrected_symbols_total, "rs_corrected_symbols_total", \
+ "Port FEC corrected symbol counter") \
+ m(+1, u64 rs_corrected_symbols_lane0, "rs_corrected_symbols_lane0", \
+ "FEC corrected symbol counter lane 0") \
+ m(+1, u64 rs_corrected_symbols_lane1, "rs_corrected_symbols_lane1", \
+ "FEC corrected symbol counter lane 1") \
+ m(+1, u64 rs_corrected_symbols_lane2, "rs_corrected_symbols_lane2", \
+ "FEC corrected symbol counter lane 2") \
+ m(+1, u64 rs_corrected_symbols_lane3, "rs_corrected_symbols_lane3", \
+ "FEC corrected symbol counter lane 3") \
+
+/*
+ * Make sure to update mlx5e_update_pport_counters()
+ * when adding a new MLX5E_PPORT_STATS block
+ */
+#define MLX5E_PPORT_STATS(m) \
+ MLX5E_PPORT_IEEE802_3_STATS(m) \
+ MLX5E_PPORT_RFC2819_STATS(m)
+
+#define MLX5E_PORT_STATS_DEBUG(m) \
+ MLX5E_PPORT_RFC2819_STATS_DEBUG(m) \
+ MLX5E_PPORT_RFC2863_STATS_DEBUG(m) \
+ MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m)
+
+#define MLX5E_PPORT_IEEE802_3_STATS_NUM \
+ (0 MLX5E_PPORT_IEEE802_3_STATS(MLX5E_STATS_COUNT))
+#define MLX5E_PPORT_RFC2819_STATS_NUM \
+ (0 MLX5E_PPORT_RFC2819_STATS(MLX5E_STATS_COUNT))
+#define MLX5E_PPORT_STATS_NUM \
+ (0 MLX5E_PPORT_STATS(MLX5E_STATS_COUNT))
+
+#define MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM \
+ (0 MLX5E_PPORT_RFC2819_STATS_DEBUG(MLX5E_STATS_COUNT))
+#define MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM \
+ (0 MLX5E_PPORT_RFC2863_STATS_DEBUG(MLX5E_STATS_COUNT))
+#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM \
+ (0 MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(MLX5E_STATS_COUNT))
+#define MLX5E_PORT_STATS_DEBUG_NUM \
+ (0 MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_COUNT))
+
+struct mlx5e_pport_stats {
+ struct sysctl_ctx_list ctx;
+ u64 arg [0];
+ MLX5E_PPORT_STATS(MLX5E_STATS_VAR)
+};
+
+struct mlx5e_port_stats_debug {
+ struct sysctl_ctx_list ctx;
+ u64 arg [0];
+ MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_VAR)
+};
+
+#define MLX5E_RQ_STATS(m) \
+ m(+1, u64 packets, "packets", "Received packets") \
+	m(+1, u64 csum_none, "csum_none", "Received packets with no checksum") \
+	m(+1, u64 lro_packets, "lro_packets", "Received LRO packets") \
+	m(+1, u64 lro_bytes, "lro_bytes", "Received LRO bytes") \
+	m(+1, u64 sw_lro_queued, "sw_lro_queued", "Packets queued for SW LRO") \
+	m(+1, u64 sw_lro_flushed, "sw_lro_flushed", "Packets flushed from SW LRO") \
+	m(+1, u64 wqe_err, "wqe_err", "Received WQE errors")
+
+#define MLX5E_RQ_STATS_NUM (0 MLX5E_RQ_STATS(MLX5E_STATS_COUNT))
+
+struct mlx5e_rq_stats {
+ struct sysctl_ctx_list ctx;
+ u64 arg [0];
+ MLX5E_RQ_STATS(MLX5E_STATS_VAR)
+};
+
+#define MLX5E_SQ_STATS(m) \
+ m(+1, u64 packets, "packets", "Transmitted packets") \
+	m(+1, u64 tso_packets, "tso_packets", "Transmitted TSO packets") \
+	m(+1, u64 tso_bytes, "tso_bytes", "Transmitted TSO bytes") \
+	m(+1, u64 csum_offload_none, "csum_offload_none", "Transmitted packets with no checksum offload") \
+	m(+1, u64 defragged, "defragged", "Transmitted packets requiring defragmentation") \
+	m(+1, u64 dropped, "dropped", "Transmit packets dropped") \
+	m(+1, u64 nop, "nop", "Transmitted NOP packets")
+
+#define MLX5E_SQ_STATS_NUM (0 MLX5E_SQ_STATS(MLX5E_STATS_COUNT))
+
+struct mlx5e_sq_stats {
+ struct sysctl_ctx_list ctx;
+ u64 arg [0];
+ MLX5E_SQ_STATS(MLX5E_STATS_VAR)
+};
+
+struct mlx5e_stats {
+ struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
+ struct mlx5e_port_stats_debug port_stats_debug;
+};
+
+struct mlx5e_rq_param {
+ u32 rqc [MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc [MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc [MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_params {
+ u8 log_sq_size;
+ u8 log_rq_size;
+ u16 num_channels;
+ u8 default_vlan_prio;
+ u8 num_tc;
+ u8 rx_cq_moderation_mode;
+ u8 tx_cq_moderation_mode;
+ u16 rx_cq_moderation_usec;
+ u16 rx_cq_moderation_pkts;
+ u16 tx_cq_moderation_usec;
+ u16 tx_cq_moderation_pkts;
+ u16 min_rx_wqes;
+ bool hw_lro_en;
+ bool cqe_zipping_en;
+ u32 lro_wqe_sz;
+ u16 rx_hash_log_tbl_sz;
+ u32 tx_pauseframe_control;
+ u32 rx_pauseframe_control;
+};
+
+#define MLX5E_PARAMS(m) \
+ m(+1, u64 tx_queue_size_max, "tx_queue_size_max", "Max send queue size") \
+ m(+1, u64 rx_queue_size_max, "rx_queue_size_max", "Max receive queue size") \
+ m(+1, u64 tx_queue_size, "tx_queue_size", "Default send queue size") \
+ m(+1, u64 rx_queue_size, "rx_queue_size", "Default receive queue size") \
+ m(+1, u64 channels, "channels", "Default number of channels") \
+ m(+1, u64 coalesce_usecs_max, "coalesce_usecs_max", "Maximum usecs for joining packets") \
+ m(+1, u64 coalesce_pkts_max, "coalesce_pkts_max", "Maximum packets to join") \
+ m(+1, u64 rx_coalesce_usecs, "rx_coalesce_usecs", "Limit in usec for joining rx packets") \
+ m(+1, u64 rx_coalesce_pkts, "rx_coalesce_pkts", "Maximum number of rx packets to join") \
+ m(+1, u64 rx_coalesce_mode, "rx_coalesce_mode", "0: EQE mode 1: CQE mode") \
+ m(+1, u64 tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
+ m(+1, u64 tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
+ m(+1, u64 tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
+ m(+1, u64 tx_bufring_disable, "tx_bufring_disable", "0: Enable bufring 1: Disable bufring") \
+ m(+1, u64 tx_completion_fact, "tx_completion_fact", "1..MAX: Completion event ratio") \
+ m(+1, u64 tx_completion_fact_max, "tx_completion_fact_max", "Maximum completion event ratio") \
+ m(+1, u64 hw_lro, "hw_lro", "set to enable hw_lro") \
+ m(+1, u64 cqe_zipping, "cqe_zipping", "0 : CQE zipping disabled") \
+ m(+1, u64 diag_pci_enable, "diag_pci_enable", "0: Disabled 1: Enabled") \
+ m(+1, u64 diag_general_enable, "diag_general_enable", "0: Disabled 1: Enabled")
+
+#define MLX5E_PARAMS_NUM (0 MLX5E_PARAMS(MLX5E_STATS_COUNT))
+
+struct mlx5e_params_ethtool {
+ u64 arg [0];
+ MLX5E_PARAMS(MLX5E_STATS_VAR)
+};
+
+/* EEPROM Standards for plug in modules */
+#ifndef MLX5E_ETH_MODULE_SFF_8472
+#define MLX5E_ETH_MODULE_SFF_8472 0x1
+#define MLX5E_ETH_MODULE_SFF_8472_LEN 128
+#endif
+
+#ifndef MLX5E_ETH_MODULE_SFF_8636
+#define MLX5E_ETH_MODULE_SFF_8636 0x2
+#define MLX5E_ETH_MODULE_SFF_8636_LEN 256
+#endif
+
+#ifndef MLX5E_ETH_MODULE_SFF_8436
+#define MLX5E_ETH_MODULE_SFF_8436 0x3
+#define MLX5E_ETH_MODULE_SFF_8436_LEN 256
+#endif
+
+/* EEPROM I2C Addresses */
+#define MLX5E_I2C_ADDR_LOW 0x50
+#define MLX5E_I2C_ADDR_HIGH 0x51
+
+#define MLX5E_EEPROM_LOW_PAGE 0x0
+#define MLX5E_EEPROM_HIGH_PAGE 0x3
+
+#define MLX5E_EEPROM_HIGH_PAGE_OFFSET 128
+#define MLX5E_EEPROM_PAGE_LENGTH 256
+
+#define MLX5E_EEPROM_INFO_BYTES 0x3
+
+struct mlx5e_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per HW polling */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ struct mlx5_wq_ctrl wq_ctrl;
+} __aligned(MLX5E_CACHELINE_SIZE);
+
+struct mlx5e_rq_mbuf {
+ bus_dmamap_t dma_map;
+ caddr_t data;
+ struct mbuf *mbuf;
+};
+
+struct mlx5e_rq {
+ /* data path */
+ struct mlx5_wq_ll wq;
+ struct mtx mtx;
+ bus_dma_tag_t dma_tag;
+ u32 wqe_sz;
+ struct mlx5e_rq_mbuf *mbuf;
+ struct ifnet *ifp;
+ struct mlx5e_rq_stats stats;
+ struct mlx5e_cq cq;
+#ifdef HAVE_TURBO_LRO
+ struct tlro_ctrl lro;
+#else
+ struct lro_ctrl lro;
+#endif
+ volatile int enabled;
+ int ix;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+ u32 rqn;
+ struct mlx5e_channel *channel;
+ struct callout watchdog;
+} __aligned(MLX5E_CACHELINE_SIZE);
+
+struct mlx5e_sq_mbuf {
+ bus_dmamap_t dma_map;
+ struct mbuf *mbuf;
+ u32 num_bytes;
+ u32 num_wqebbs;
+};
+
+enum {
+ MLX5E_SQ_READY,
+ MLX5E_SQ_FULL
+};
+
+struct mlx5e_sq {
+ /* data path */
+ struct mtx lock;
+ bus_dma_tag_t dma_tag;
+ struct mtx comp_lock;
+
+ /* dirtied @completion */
+ u16 cc;
+
+ /* dirtied @xmit */
+ u16 pc __aligned(MLX5E_CACHELINE_SIZE);
+ u16 bf_offset;
+ u16 cev_counter; /* completion event counter */
+ u16 cev_factor; /* completion event factor */
+ u16 cev_next_state; /* next completion event state */
+#define MLX5E_CEV_STATE_INITIAL 0 /* timer not started */
+#define MLX5E_CEV_STATE_SEND_NOPS 1 /* send NOPs */
+#define MLX5E_CEV_STATE_HOLD_NOPS 2 /* don't send NOPs yet */
+ u16 stopped; /* set if SQ is stopped */
+ struct callout cev_callout;
+ union {
+ u32 d32[2];
+ u64 d64;
+ } doorbell;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+ struct task sq_task;
+ struct taskqueue *sq_tq;
+
+ /* pointers to per packet info: write at xmit, read at completion */
+ struct mlx5e_sq_mbuf *mbuf;
+ struct buf_ring *br;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ struct mlx5_uar uar;
+ struct ifnet *ifp;
+ u32 sqn;
+ u32 bf_buf_size;
+ u32 mkey_be;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5e_priv *priv;
+ int tc;
+ unsigned int queue_state;
+} __aligned(MLX5E_CACHELINE_SIZE);
+
+static inline bool
+mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+ u16 cc = sq->cc;
+ u16 pc = sq->pc;
+
+ return ((sq->wq.sz_m1 & (cc - pc)) >= n || cc == pc);
+}
+
+struct mlx5e_channel {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_sq sq[MLX5E_MAX_TX_NUM_TC];
+ struct ifnet *ifp;
+ u32 mkey_be;
+ u8 num_tc;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ int ix;
+ int cpu;
+} __aligned(MLX5E_CACHELINE_SIZE);
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP,
+ MLX5E_TT_IPV6_TCP,
+ MLX5E_TT_IPV4_UDP,
+ MLX5E_TT_IPV6_UDP,
+ MLX5E_TT_IPV4_IPSEC_AH,
+ MLX5E_TT_IPV6_IPSEC_AH,
+ MLX5E_TT_IPV4_IPSEC_ESP,
+ MLX5E_TT_IPV6_IPSEC_ESP,
+ MLX5E_TT_IPV4,
+ MLX5E_TT_IPV6,
+ MLX5E_TT_ANY,
+ MLX5E_NUM_TT,
+};
+
+enum {
+ MLX5E_RQT_SPREADING = 0,
+ MLX5E_RQT_DEFAULT_RQ = 1,
+ MLX5E_NUM_RQT = 2,
+};
+
+struct mlx5e_eth_addr_info {
+ u8 addr [ETH_ALEN + 2];
+ u32 tt_vec;
+ u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_hash_node;
+
+struct mlx5e_eth_addr_hash_head {
+ struct mlx5e_eth_addr_hash_node *lh_first;
+};
+
+struct mlx5e_eth_addr_db {
+ struct mlx5e_eth_addr_hash_head if_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_hash_head if_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_info broadcast;
+ struct mlx5e_eth_addr_info allmulti;
+ struct mlx5e_eth_addr_info promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 active_vlans_ft_ix[VLAN_N_VID];
+ u32 untagged_rule_ft_ix;
+ u32 any_vlan_rule_ft_ix;
+ bool filter_disabled;
+};
+
+struct mlx5e_flow_table {
+ void *vlan;
+ void *main;
+};
+
+struct mlx5e_priv {
+ struct mlx5_core_dev *mdev; /* must be first */
+
+ /* priv data path fields - start */
+ int order_base_2_num_channels;
+ int queue_mapping_channel_mask;
+ int num_tc;
+ int default_vlan_prio;
+ /* priv data path fields - end */
+
+ unsigned long state;
+ int gone;
+#define PRIV_LOCK(priv) sx_xlock(&(priv)->state_lock)
+#define PRIV_UNLOCK(priv) sx_xunlock(&(priv)->state_lock)
+#define PRIV_LOCKED(priv) sx_xlocked(&(priv)->state_lock)
+ struct sx state_lock; /* Protects Interface state */
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ u32 tdn;
+ struct mlx5_core_mr mr;
+
+ struct mlx5e_channel *volatile *channel;
+ u32 tisn[MLX5E_MAX_TX_NUM_TC];
+ u32 rqtn;
+ u32 tirn[MLX5E_NUM_TT];
+
+ struct mlx5e_flow_table ft;
+ struct mlx5e_eth_addr_db eth_addr;
+ struct mlx5e_vlan_db vlan;
+
+ struct mlx5e_params params;
+ struct mlx5e_params_ethtool params_ethtool;
+ union mlx5_core_pci_diagnostics params_pci;
+ union mlx5_core_general_diagnostics params_general;
+ struct mtx async_events_mtx; /* sync hw events */
+ struct work_struct update_stats_work;
+ struct work_struct update_carrier_work;
+ struct work_struct set_rx_mode_work;
+ MLX5_DECLARE_DOORBELL_LOCK(doorbell_lock)
+
+ struct ifnet *ifp;
+ struct sysctl_ctx_list sysctl_ctx;
+ struct sysctl_oid *sysctl_ifnet;
+ struct sysctl_oid *sysctl_hw;
+ int sysctl_debug;
+ struct mlx5e_stats stats;
+ int counter_set_id;
+
+ eventhandler_tag vlan_detach;
+ eventhandler_tag vlan_attach;
+ struct ifmedia media;
+ int media_status_last;
+ int media_active_last;
+
+ struct callout watchdog;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+struct mlx5e_eeprom {
+ int lock_bit;
+ int i2c_addr;
+ int page_num;
+ int device_addr;
+ int module_num;
+ int len;
+ int type;
+ int page_valid;
+ u32 *data;
+};
+
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_LR = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+	MLX5E_1000BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << (link_mode))
+#define MLX5E_FLD_MAX(typ, fld) ((1ULL << __mlx5_bit_sz(typ, fld)) - 1ULL)
+
+int mlx5e_xmit(struct ifnet *, struct mbuf *);
+
+int mlx5e_open_locked(struct ifnet *);
+int mlx5e_close_locked(struct ifnet *);
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
+void mlx5e_rx_cq_comp(struct mlx5_core_cq *);
+void mlx5e_tx_cq_comp(struct mlx5_core_cq *);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+void mlx5e_tx_que(void *context, int pending);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+void mlx5e_vlan_rx_add_vid(void *, struct ifnet *, u16);
+void mlx5e_vlan_rx_kill_vid(void *, struct ifnet *, u16);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+static inline void
+mlx5e_tx_notify_hw(struct mlx5e_sq *sq, u32 *wqe, int bf_sz)
+{
+ u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
+ /* ensure wqe is visible to device before updating doorbell record */
+ wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->pc);
+
+ /*
+ * Ensure the doorbell record is visible to device before ringing
+ * the doorbell:
+ */
+ wmb();
+
+ if (bf_sz) {
+ __iowrite64_copy(sq->uar.bf_map + ofst, wqe, bf_sz);
+
+ /* flush the write-combining mapped buffer */
+ wmb();
+
+ } else {
+ mlx5_write64(wqe, sq->uar.map + ofst,
+ MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
+ }
+
+ sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void
+mlx5e_cq_arm(struct mlx5e_cq *cq, spinlock_t *dblock)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, dblock, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
+void mlx5e_create_ethtool(struct mlx5e_priv *);
+void mlx5e_create_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *, const char *,
+ const char **, unsigned, u64 *);
+void mlx5e_send_nop(struct mlx5e_sq *, u32);
+void mlx5e_sq_cev_timeout(void *);
+int mlx5e_refresh_channel_params(struct mlx5e_priv *);
+int mlx5e_open_cq(struct mlx5e_priv *, struct mlx5e_cq_param *,
+ struct mlx5e_cq *, mlx5e_cq_comp_t *, int eq_ix);
+void mlx5e_close_cq(struct mlx5e_cq *);
+void mlx5e_free_sq_db(struct mlx5e_sq *);
+int mlx5e_alloc_sq_db(struct mlx5e_sq *);
+int mlx5e_enable_sq(struct mlx5e_sq *, struct mlx5e_sq_param *, int tis_num);
+int mlx5e_modify_sq(struct mlx5e_sq *, int curr_state, int next_state);
+void mlx5e_disable_sq(struct mlx5e_sq *);
+void mlx5e_drain_sq(struct mlx5e_sq *);
+
+#endif /* _MLX5_EN_H_ */
Property changes on: trunk/sys/dev/mlx5/mlx5_en/en.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
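For context on the MLX5E_PROT_MASK() macro declared in en.h above: every enum mlx5e_link_mode value is a bit position inside the PTYS eth_proto_* bitmasks, so building or testing a protocol mask is plain bit arithmetic. The stand-alone sketch below is illustrative only and not part of the commit; the two enum values are copied from the header, while main() and the variable names (proto_cap, want) are invented for the example.

    #include <stdio.h>

    /* Mirrors the driver's definition: one bit per enum mlx5e_link_mode value. */
    #define MLX5E_PROT_MASK(link_mode) (1U << (link_mode))

    /* Small subset of enum mlx5e_link_mode, values as in en.h. */
    enum { EX_40GBASE_CR4 = 6, EX_100GBASE_CR4 = 20 };

    int
    main(void)
    {
            /* Pretend the device reported these two modes in eth_proto_capability. */
            unsigned proto_cap = MLX5E_PROT_MASK(EX_40GBASE_CR4) |
                MLX5E_PROT_MASK(EX_100GBASE_CR4);
            unsigned want = EX_100GBASE_CR4;

            if (proto_cap & MLX5E_PROT_MASK(want))
                    printf("link mode %u is supported\n", want);
            return (0);
    }
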
Added: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,769 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c 322007 2017-08-03 14:14:13Z hselasky $
+ */
+
+#include "en.h"
+#include <net/sff8472.h>
+
+void
+mlx5e_create_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *parent, const char *buffer,
+ const char **desc, unsigned num, u64 * arg)
+{
+ struct sysctl_oid *node;
+ unsigned x;
+
+ sysctl_ctx_init(ctx);
+
+ node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO,
+ buffer, CTLFLAG_RD, NULL, "Statistics");
+ if (node == NULL)
+ return;
+ for (x = 0; x != num; x++) {
+ SYSCTL_ADD_UQUAD(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ desc[2 * x], CTLFLAG_RD, arg + x, desc[2 * x + 1]);
+ }
+}
+
+static void
+mlx5e_ethtool_sync_tx_completion_fact(struct mlx5e_priv *priv)
+{
+ /*
+ * Limit the maximum distance between completion events to
+ * half of the currently set TX queue size.
+ *
+ * The maximum number of queue entries a single IP packet can
+ * consume is given by MLX5_SEND_WQE_MAX_WQEBBS.
+ *
+ * The worst case max value is then given as below:
+ */
+ uint64_t max = priv->params_ethtool.tx_queue_size /
+ (2 * MLX5_SEND_WQE_MAX_WQEBBS);
+
+ /*
+ * Update the maximum completion factor value in case the
+ * tx_queue_size field changed. Ensure we don't overflow
+ * 16-bits.
+ */
+ if (max < 1)
+ max = 1;
+ else if (max > 65535)
+ max = 65535;
+ priv->params_ethtool.tx_completion_fact_max = max;
+
+ /*
+ * Verify that the current TX completion factor is within the
+ * given limits:
+ */
+ if (priv->params_ethtool.tx_completion_fact < 1)
+ priv->params_ethtool.tx_completion_fact = 1;
+ else if (priv->params_ethtool.tx_completion_fact > max)
+ priv->params_ethtool.tx_completion_fact = max;
+}
+
+#define MLX5_PARAM_OFFSET(n) \
+ __offsetof(struct mlx5e_priv, params_ethtool.n)
+
+static int
+mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct mlx5e_priv *priv = arg1;
+ uint64_t value;
+ int mode_modify;
+ int was_opened;
+ int error;
+
+ PRIV_LOCK(priv);
+ value = priv->params_ethtool.arg[arg2];
+ if (req != NULL) {
+ error = sysctl_handle_64(oidp, &value, 0, req);
+ if (error || req->newptr == NULL ||
+ value == priv->params_ethtool.arg[arg2])
+ goto done;
+
+ /* assign new value */
+ priv->params_ethtool.arg[arg2] = value;
+ } else {
+ error = 0;
+ }
+ /* check if device is gone */
+ if (priv->gone) {
+ error = ENXIO;
+ goto done;
+ }
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ mode_modify = MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify);
+
+ switch (MLX5_PARAM_OFFSET(arg[arg2])) {
+ case MLX5_PARAM_OFFSET(rx_coalesce_usecs):
+ /* import RX coal time */
+ if (priv->params_ethtool.rx_coalesce_usecs < 1)
+ priv->params_ethtool.rx_coalesce_usecs = 0;
+ else if (priv->params_ethtool.rx_coalesce_usecs >
+ MLX5E_FLD_MAX(cqc, cq_period)) {
+ priv->params_ethtool.rx_coalesce_usecs =
+ MLX5E_FLD_MAX(cqc, cq_period);
+ }
+ priv->params.rx_cq_moderation_usec =
+ priv->params_ethtool.rx_coalesce_usecs;
+
+		/* avoid bringing the network interface down and up */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_coalesce_pkts):
+ /* import RX coal pkts */
+ if (priv->params_ethtool.rx_coalesce_pkts < 1)
+ priv->params_ethtool.rx_coalesce_pkts = 0;
+ else if (priv->params_ethtool.rx_coalesce_pkts >
+ MLX5E_FLD_MAX(cqc, cq_max_count)) {
+ priv->params_ethtool.rx_coalesce_pkts =
+ MLX5E_FLD_MAX(cqc, cq_max_count);
+ }
+ priv->params.rx_cq_moderation_pkts =
+ priv->params_ethtool.rx_coalesce_pkts;
+
+		/* avoid bringing the network interface down and up */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_usecs):
+ /* import TX coal time */
+ if (priv->params_ethtool.tx_coalesce_usecs < 1)
+ priv->params_ethtool.tx_coalesce_usecs = 0;
+ else if (priv->params_ethtool.tx_coalesce_usecs >
+ MLX5E_FLD_MAX(cqc, cq_period)) {
+ priv->params_ethtool.tx_coalesce_usecs =
+ MLX5E_FLD_MAX(cqc, cq_period);
+ }
+ priv->params.tx_cq_moderation_usec =
+ priv->params_ethtool.tx_coalesce_usecs;
+
+		/* avoid bringing the network interface down and up */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_pkts):
+ /* import TX coal pkts */
+ if (priv->params_ethtool.tx_coalesce_pkts < 1)
+ priv->params_ethtool.tx_coalesce_pkts = 0;
+ else if (priv->params_ethtool.tx_coalesce_pkts >
+ MLX5E_FLD_MAX(cqc, cq_max_count)) {
+ priv->params_ethtool.tx_coalesce_pkts =
+ MLX5E_FLD_MAX(cqc, cq_max_count);
+ }
+ priv->params.tx_cq_moderation_pkts =
+ priv->params_ethtool.tx_coalesce_pkts;
+
+		/* avoid bringing the network interface down and up */
+ if (was_opened)
+ error = mlx5e_refresh_channel_params(priv);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_queue_size):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import TX queue size */
+ if (priv->params_ethtool.tx_queue_size <
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ priv->params_ethtool.tx_queue_size =
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ } else if (priv->params_ethtool.tx_queue_size >
+ priv->params_ethtool.tx_queue_size_max) {
+ priv->params_ethtool.tx_queue_size =
+ priv->params_ethtool.tx_queue_size_max;
+ }
+ /* store actual TX queue size */
+ priv->params.log_sq_size =
+ order_base_2(priv->params_ethtool.tx_queue_size);
+ priv->params_ethtool.tx_queue_size =
+ 1 << priv->params.log_sq_size;
+
+ /* verify TX completion factor */
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_queue_size):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import RX queue size */
+ if (priv->params_ethtool.rx_queue_size <
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ priv->params_ethtool.rx_queue_size =
+ (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ } else if (priv->params_ethtool.rx_queue_size >
+ priv->params_ethtool.rx_queue_size_max) {
+ priv->params_ethtool.rx_queue_size =
+ priv->params_ethtool.rx_queue_size_max;
+ }
+ /* store actual RX queue size */
+ priv->params.log_rq_size =
+ order_base_2(priv->params_ethtool.rx_queue_size);
+ priv->params_ethtool.rx_queue_size =
+ 1 << priv->params.log_rq_size;
+
+ /* update least number of RX WQEs */
+ priv->params.min_rx_wqes = min(
+ priv->params_ethtool.rx_queue_size - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(channels):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import number of channels */
+ if (priv->params_ethtool.channels < 1)
+ priv->params_ethtool.channels = 1;
+ else if (priv->params_ethtool.channels >
+ (u64) priv->mdev->priv.eq_table.num_comp_vectors) {
+ priv->params_ethtool.channels =
+ (u64) priv->mdev->priv.eq_table.num_comp_vectors;
+ }
+ priv->params.num_channels = priv->params_ethtool.channels;
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(rx_coalesce_mode):
+ /* network interface must be down */
+ if (was_opened != 0 && mode_modify == 0)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import RX coalesce mode */
+ if (priv->params_ethtool.rx_coalesce_mode != 0)
+ priv->params_ethtool.rx_coalesce_mode = 1;
+ priv->params.rx_cq_moderation_mode =
+ priv->params_ethtool.rx_coalesce_mode;
+
+ /* restart network interface, if any */
+ if (was_opened != 0) {
+ if (mode_modify == 0)
+ mlx5e_open_locked(priv->ifp);
+ else
+ error = mlx5e_refresh_channel_params(priv);
+ }
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_coalesce_mode):
+ /* network interface must be down */
+ if (was_opened != 0 && mode_modify == 0)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import TX coalesce mode */
+ if (priv->params_ethtool.tx_coalesce_mode != 0)
+ priv->params_ethtool.tx_coalesce_mode = 1;
+ priv->params.tx_cq_moderation_mode =
+ priv->params_ethtool.tx_coalesce_mode;
+
+ /* restart network interface, if any */
+ if (was_opened != 0) {
+ if (mode_modify == 0)
+ mlx5e_open_locked(priv->ifp);
+ else
+ error = mlx5e_refresh_channel_params(priv);
+ }
+ break;
+
+ case MLX5_PARAM_OFFSET(hw_lro):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import HW LRO mode */
+ if (priv->params_ethtool.hw_lro != 0) {
+ if ((priv->ifp->if_capenable & IFCAP_LRO) &&
+ MLX5_CAP_ETH(priv->mdev, lro_cap)) {
+ priv->params.hw_lro_en = 1;
+ priv->params_ethtool.hw_lro = 1;
+ } else {
+ priv->params.hw_lro_en = 0;
+ priv->params_ethtool.hw_lro = 0;
+ error = EINVAL;
+
+ if_printf(priv->ifp, "Can't enable HW LRO: "
+ "The HW or SW LRO feature is disabled\n");
+ }
+ } else {
+ priv->params.hw_lro_en = 0;
+ }
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(cqe_zipping):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* import CQE zipping mode */
+ if (priv->params_ethtool.cqe_zipping &&
+ MLX5_CAP_GEN(priv->mdev, cqe_compression)) {
+ priv->params.cqe_zipping_en = true;
+ priv->params_ethtool.cqe_zipping = 1;
+ } else {
+ priv->params.cqe_zipping_en = false;
+ priv->params_ethtool.cqe_zipping = 0;
+ }
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_bufring_disable):
+ /* rangecheck input value */
+ priv->params_ethtool.tx_bufring_disable =
+ priv->params_ethtool.tx_bufring_disable ? 1 : 0;
+
+ /* reconfigure the sendqueues, if any */
+ if (was_opened) {
+ mlx5e_close_locked(priv->ifp);
+ mlx5e_open_locked(priv->ifp);
+ }
+ break;
+
+ case MLX5_PARAM_OFFSET(tx_completion_fact):
+ /* network interface must be down */
+ if (was_opened)
+ mlx5e_close_locked(priv->ifp);
+
+ /* verify parameter */
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* restart network interface, if any */
+ if (was_opened)
+ mlx5e_open_locked(priv->ifp);
+ break;
+
+ case MLX5_PARAM_OFFSET(diag_pci_enable):
+ priv->params_ethtool.diag_pci_enable =
+ priv->params_ethtool.diag_pci_enable ? 1 : 0;
+
+ error = -mlx5_core_set_diagnostics_full(priv->mdev,
+ priv->params_ethtool.diag_pci_enable,
+ priv->params_ethtool.diag_general_enable);
+ break;
+
+ case MLX5_PARAM_OFFSET(diag_general_enable):
+ priv->params_ethtool.diag_general_enable =
+ priv->params_ethtool.diag_general_enable ? 1 : 0;
+
+ error = -mlx5_core_set_diagnostics_full(priv->mdev,
+ priv->params_ethtool.diag_pci_enable,
+ priv->params_ethtool.diag_general_enable);
+ break;
+
+ default:
+ break;
+ }
+done:
+ PRIV_UNLOCK(priv);
+ return (error);
+}
+
+/*
+ * Read the first three bytes of the eeprom to get the info needed for
+ * reading the whole module:
+ * Byte 0 - Identifier byte
+ * Byte 1 - Revision byte
+ * Byte 2 - Status byte
+ */
+static int
+mlx5e_get_eeprom_info(struct mlx5e_priv *priv, struct mlx5e_eeprom *eeprom)
+{
+ struct mlx5_core_dev *dev = priv->mdev;
+ u32 data = 0;
+ int size_read = 0;
+ int ret;
+
+ ret = mlx5_query_module_num(dev, &eeprom->module_num);
+ if (ret) {
+ if_printf(priv->ifp, "%s:%d: Failed query module error=%d\n",
+ __func__, __LINE__, ret);
+ return (ret);
+ }
+
+ /* Read the first three bytes to get Identifier, Revision and Status */
+ ret = mlx5_query_eeprom(dev, eeprom->i2c_addr, eeprom->page_num,
+ eeprom->device_addr, MLX5E_EEPROM_INFO_BYTES, eeprom->module_num, &data,
+ &size_read);
+ if (ret) {
+ if_printf(priv->ifp, "%s:%d: Failed query eeprom module error=0x%x\n",
+ __func__, __LINE__, ret);
+ return (ret);
+ }
+
+ switch (data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) {
+ case SFF_8024_ID_QSFP:
+ eeprom->type = MLX5E_ETH_MODULE_SFF_8436;
+ eeprom->len = MLX5E_ETH_MODULE_SFF_8436_LEN;
+ break;
+ case SFF_8024_ID_QSFPPLUS:
+ case SFF_8024_ID_QSFP28:
+ if ((data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) == SFF_8024_ID_QSFP28 ||
+ ((data & MLX5_EEPROM_REVISION_ID_BYTE_MASK) >> 8) >= 0x3) {
+ eeprom->type = MLX5E_ETH_MODULE_SFF_8636;
+ eeprom->len = MLX5E_ETH_MODULE_SFF_8636_LEN;
+ } else {
+ eeprom->type = MLX5E_ETH_MODULE_SFF_8436;
+ eeprom->len = MLX5E_ETH_MODULE_SFF_8436_LEN;
+ }
+ if ((data & MLX5_EEPROM_PAGE_3_VALID_BIT_MASK) == 0)
+ eeprom->page_valid = 1;
+ break;
+ case SFF_8024_ID_SFP:
+ eeprom->type = MLX5E_ETH_MODULE_SFF_8472;
+ eeprom->len = MLX5E_ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ if_printf(priv->ifp, "%s:%d: Not recognized cable type = 0x%x(%s)\n",
+ __func__, __LINE__, data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK,
+ sff_8024_id[data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK]);
+ return (EINVAL);
+ }
+ return (0);
+}
+
+/* Read both low and high pages of the eeprom */
+static int
+mlx5e_get_eeprom(struct mlx5e_priv *priv, struct mlx5e_eeprom *ee)
+{
+ struct mlx5_core_dev *dev = priv->mdev;
+ int size_read = 0;
+ int ret;
+
+ if (ee->len == 0)
+ return (EINVAL);
+
+ /* Read low page of the eeprom */
+ while (ee->device_addr < ee->len) {
+ ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num, ee->device_addr,
+ ee->len - ee->device_addr, ee->module_num,
+ ee->data + (ee->device_addr / 4), &size_read);
+ if (ret) {
+ if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
+ "error = 0x%02x\n", __func__, __LINE__, ret);
+ return (ret);
+ }
+ ee->device_addr += size_read;
+ }
+
+ /* Read high page of the eeprom */
+ if (ee->page_valid) {
+ ee->device_addr = MLX5E_EEPROM_HIGH_PAGE_OFFSET;
+ ee->page_num = MLX5E_EEPROM_HIGH_PAGE;
+ size_read = 0;
+ while (ee->device_addr < MLX5E_EEPROM_PAGE_LENGTH) {
+ ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num,
+ ee->device_addr, MLX5E_EEPROM_PAGE_LENGTH - ee->device_addr,
+ ee->module_num, ee->data + (ee->len / 4) +
+ ((ee->device_addr - MLX5E_EEPROM_HIGH_PAGE_OFFSET) / 4),
+ &size_read);
+ if (ret) {
+ if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
+ "error = 0x%02x\n", __func__, __LINE__, ret);
+ return (ret);
+ }
+ ee->device_addr += size_read;
+ }
+ }
+ return (0);
+}
+
+static void
+mlx5e_print_eeprom(struct mlx5e_eeprom *eeprom)
+{
+ int row;
+ int index_in_row;
+ int byte_to_write = 0;
+ int line_length = 16;
+
+ printf("\nOffset\t\tValues\n");
+ printf("------\t\t------");
+ while (byte_to_write < eeprom->len) {
+ printf("\n0x%04X\t\t", byte_to_write);
+ for (index_in_row = 0; index_in_row < line_length; index_in_row++) {
+ printf("%02X ", ((u8 *)eeprom->data)[byte_to_write]);
+ byte_to_write++;
+ }
+ }
+
+ if (eeprom->page_valid) {
+ row = MLX5E_EEPROM_HIGH_PAGE_OFFSET;
+ printf("\n\nUpper Page 0x03\n");
+ printf("\nOffset\t\tValues\n");
+ printf("------\t\t------");
+ while (row < MLX5E_EEPROM_PAGE_LENGTH) {
+ printf("\n0x%04X\t\t", row);
+ for (index_in_row = 0; index_in_row < line_length; index_in_row++) {
+ printf("%02X ", ((u8 *)eeprom->data)[byte_to_write]);
+ byte_to_write++;
+ row++;
+ }
+ }
+ }
+}
+
+/*
+ * Read cable EEPROM module information by first inspecting the first
+ * three bytes to get the information needed for reading the whole module.
+ * The information is printed to dmesg.
+ */
+static int
+mlx5e_read_eeprom(SYSCTL_HANDLER_ARGS)
+{
+ struct mlx5e_priv *priv = arg1;
+ struct mlx5e_eeprom eeprom;
+ int error;
+ int result = 0;
+
+ PRIV_LOCK(priv);
+ error = sysctl_handle_int(oidp, &result, 0, req);
+ if (error || !req->newptr)
+ goto done;
+
+ /* Check if device is gone */
+ if (priv->gone) {
+ error = ENXIO;
+ goto done;
+ }
+
+ if (result == 1) {
+ eeprom.i2c_addr = MLX5E_I2C_ADDR_LOW;
+ eeprom.device_addr = 0;
+ eeprom.page_num = MLX5E_EEPROM_LOW_PAGE;
+ eeprom.page_valid = 0;
+
+		/* Read the first three bytes to get the identification info */
+ error = mlx5e_get_eeprom_info(priv, &eeprom);
+ if (error) {
+ if_printf(priv->ifp, "%s:%d: Failed reading eeprom's "
+ "initial information\n", __func__, __LINE__);
+ error = 0;
+ goto done;
+ }
+		/*
+		 * Allocate a buffer of the required length plus extra
+		 * space for page 0x03
+		 */
+ eeprom.data = malloc(eeprom.len + MLX5E_EEPROM_PAGE_LENGTH,
+ M_MLX5EN, M_WAITOK | M_ZERO);
+
+ /* Read the whole eeprom information */
+ error = mlx5e_get_eeprom(priv, &eeprom);
+ if (error) {
+ if_printf(priv->ifp, "%s:%d: Failed reading eeprom\n",
+ __func__, __LINE__);
+ error = 0;
+ /*
+ * Continue printing partial information in case of
+ * an error
+ */
+ }
+ mlx5e_print_eeprom(&eeprom);
+ free(eeprom.data, M_MLX5EN);
+ }
+done:
+ PRIV_UNLOCK(priv);
+ return (error);
+}
+
+static const char *mlx5e_params_desc[] = {
+ MLX5E_PARAMS(MLX5E_STATS_DESC)
+};
+
+static const char *mlx5e_port_stats_debug_desc[] = {
+ MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_DESC)
+};
+
+static int
+mlx5e_ethtool_debug_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct mlx5e_priv *priv = arg1;
+ int error;
+ int sys_debug;
+
+ sys_debug = priv->sysctl_debug;
+ error = sysctl_handle_int(oidp, &priv->sysctl_debug, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ priv->sysctl_debug = !!priv->sysctl_debug;
+ if (sys_debug == priv->sysctl_debug)
+ return (error);
+ if (priv->sysctl_debug)
+ mlx5e_create_stats(&priv->stats.port_stats_debug.ctx,
+ SYSCTL_CHILDREN(priv->sysctl_ifnet), "debug_stats",
+ mlx5e_port_stats_debug_desc, MLX5E_PORT_STATS_DEBUG_NUM,
+ priv->stats.port_stats_debug.arg);
+ else
+ sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
+ return (error);
+}
+
+static void
+mlx5e_create_diagnostics(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_diagnostics_entry entry;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *node;
+ int x;
+
+ /* sysctl context we are using */
+ ctx = &priv->sysctl_ctx;
+
+ /* create root node */
+ node = SYSCTL_ADD_NODE(ctx,
+ SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
+ "diagnostics", CTLFLAG_RD, NULL, "Diagnostics");
+ if (node == NULL)
+ return;
+
+ /* create PCI diagnostics */
+ for (x = 0; x != MLX5_CORE_PCI_DIAGNOSTICS_NUM; x++) {
+ entry = mlx5_core_pci_diagnostics_table[x];
+ if (mlx5_core_supports_diagnostics(priv->mdev, entry.counter_id) == 0)
+ continue;
+ SYSCTL_ADD_UQUAD(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ entry.desc, CTLFLAG_RD, priv->params_pci.array + x,
+ "PCI diagnostics counter");
+ }
+
+ /* create general diagnostics */
+ for (x = 0; x != MLX5_CORE_GENERAL_DIAGNOSTICS_NUM; x++) {
+ entry = mlx5_core_general_diagnostics_table[x];
+ if (mlx5_core_supports_diagnostics(priv->mdev, entry.counter_id) == 0)
+ continue;
+ SYSCTL_ADD_UQUAD(ctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ entry.desc, CTLFLAG_RD, priv->params_general.array + x,
+ "General diagnostics counter");
+ }
+}
+
+void
+mlx5e_create_ethtool(struct mlx5e_priv *priv)
+{
+ struct sysctl_oid *node;
+ const char *pnameunit;
+ unsigned x;
+
+ /* set some defaults */
+ priv->params_ethtool.tx_queue_size_max = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+ priv->params_ethtool.rx_queue_size_max = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ priv->params_ethtool.tx_queue_size = 1 << priv->params.log_sq_size;
+ priv->params_ethtool.rx_queue_size = 1 << priv->params.log_rq_size;
+ priv->params_ethtool.channels = priv->params.num_channels;
+ priv->params_ethtool.coalesce_pkts_max = MLX5E_FLD_MAX(cqc, cq_max_count);
+ priv->params_ethtool.coalesce_usecs_max = MLX5E_FLD_MAX(cqc, cq_period);
+ priv->params_ethtool.rx_coalesce_mode = priv->params.rx_cq_moderation_mode;
+ priv->params_ethtool.rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ priv->params_ethtool.rx_coalesce_pkts = priv->params.rx_cq_moderation_pkts;
+ priv->params_ethtool.tx_coalesce_mode = priv->params.tx_cq_moderation_mode;
+ priv->params_ethtool.tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+ priv->params_ethtool.tx_coalesce_pkts = priv->params.tx_cq_moderation_pkts;
+ priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
+ priv->params_ethtool.cqe_zipping = priv->params.cqe_zipping_en;
+ mlx5e_ethtool_sync_tx_completion_fact(priv);
+
+ /* create root node */
+ node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
+ SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
+ "conf", CTLFLAG_RW, NULL, "Configuration");
+ if (node == NULL)
+ return;
+ for (x = 0; x != MLX5E_PARAMS_NUM; x++) {
+ /* check for read-only parameter */
+ if (strstr(mlx5e_params_desc[2 * x], "_max") != NULL) {
+ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RD |
+ CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
+ mlx5e_params_desc[2 * x + 1]);
+ } else {
+#if (__FreeBSD_version < 1100000)
+ char path[64];
+#endif
+			/*
+			 * NOTE: In FreeBSD 11 and newer the
+			 * CTLFLAG_RWTUN flag takes care of loading
+			 * the default sysctl value from the kernel
+			 * environment, if any:
+			 */
+ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RWTUN |
+ CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
+ mlx5e_params_desc[2 * x + 1]);
+
+#if (__FreeBSD_version < 1100000)
+ /* compute path for sysctl */
+ snprintf(path, sizeof(path), "dev.mce.%d.conf.%s",
+ device_get_unit(priv->mdev->pdev->dev.bsddev),
+ mlx5e_params_desc[2 * x]);
+
+ /* try to fetch tunable, if any */
+ if (TUNABLE_QUAD_FETCH(path, &priv->params_ethtool.arg[x]))
+ mlx5e_ethtool_handler(NULL, priv, x, NULL);
+#endif
+ }
+ }
+
+ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "debug_stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv,
+ 0, &mlx5e_ethtool_debug_stats, "I", "Extended debug statistics");
+
+ pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);
+
+ SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(node),
+ OID_AUTO, "device_name", CTLFLAG_RD,
+ __DECONST(void *, pnameunit), 0,
+ "PCI device name");
+
+ /* EEPROM support */
+ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO, "eeprom_info",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ mlx5e_read_eeprom, "I", "EEPROM information");
+
+ /* Diagnostics support */
+ mlx5e_create_diagnostics(priv);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
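A quick numeric illustration of mlx5e_ethtool_sync_tx_completion_fact() in the file above: assuming MLX5_SEND_WQE_MAX_WQEBBS is 16 (its usual value in the mlx5 headers, stated here as an assumption), a TX queue of 1024 entries gives a completion-factor ceiling of 1024 / (2 * 16) = 32. The stand-alone sketch below reproduces only that clamping arithmetic and is not part of the commit; the EX_ names are made up for the example.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value for the example; the real constant lives in the mlx5 headers. */
    #define EX_SEND_WQE_MAX_WQEBBS 16

    /*
     * Same math as the driver: at most one completion event per half a
     * queue worth of maximum-sized packets, clamped to a 16-bit range.
     */
    static uint64_t
    ex_tx_completion_fact_max(uint64_t tx_queue_size)
    {
            uint64_t max = tx_queue_size / (2 * EX_SEND_WQE_MAX_WQEBBS);

            if (max < 1)
                    max = 1;
            else if (max > 65535)
                    max = 65535;
            return (max);
    }

    int
    main(void)
    {
            printf("queue=1024 -> fact_max=%llu\n",
                (unsigned long long)ex_tx_completion_fact_max(1024));
            return (0);
    }
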
Added: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,996 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c 310244 2016-12-19 09:38:34Z hselasky $
+ */
+
+#include "en.h"
+
+#include <linux/list.h>
+#include <dev/mlx5/flow_table.h>
+
+enum {
+ MLX5E_FULLMATCH = 0,
+ MLX5E_ALLMULTI = 1,
+ MLX5E_PROMISC = 2,
+};
+
+enum {
+ MLX5E_UC = 0,
+ MLX5E_MC_IPV4 = 1,
+ MLX5E_MC_IPV6 = 2,
+ MLX5E_MC_OTHER = 3,
+};
+
+enum {
+ MLX5E_ACTION_NONE = 0,
+ MLX5E_ACTION_ADD = 1,
+ MLX5E_ACTION_DEL = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+ LIST_ENTRY(mlx5e_eth_addr_hash_node) hlist;
+ u8 action;
+ struct mlx5e_eth_addr_info ai;
+};
+
+static inline int
+mlx5e_hash_eth_addr(const u8 * addr)
+{
+ return (addr[5]);
+}
+
+static void
+mlx5e_add_eth_addr_to_hash(struct mlx5e_eth_addr_hash_head *hash,
+ const u8 * addr)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ int ix = mlx5e_hash_eth_addr(addr);
+
+ LIST_FOREACH(hn, &hash[ix], hlist) {
+ if (bcmp(hn->ai.addr, addr, ETHER_ADDR_LEN) == 0) {
+ if (hn->action == MLX5E_ACTION_DEL)
+ hn->action = MLX5E_ACTION_NONE;
+ return;
+ }
+ }
+
+ hn = malloc(sizeof(*hn), M_MLX5EN, M_NOWAIT | M_ZERO);
+ if (hn == NULL)
+ return;
+
+ ether_addr_copy(hn->ai.addr, addr);
+ hn->action = MLX5E_ACTION_ADD;
+
+ LIST_INSERT_HEAD(&hash[ix], hn, hlist);
+}
+
+static void
+mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+ LIST_REMOVE(hn, hlist);
+ free(hn, M_MLX5EN);
+}
+
+static void
+mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai)
+{
+ void *ft = priv->ft.main;
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int
+mlx5e_get_eth_addr_type(const u8 * addr)
+{
+ if (ETHER_IS_MULTICAST(addr) == 0)
+ return (MLX5E_UC);
+
+ if ((addr[0] == 0x01) &&
+ (addr[1] == 0x00) &&
+ (addr[2] == 0x5e) &&
+ !(addr[3] & 0x80))
+ return (MLX5E_MC_IPV4);
+
+ if ((addr[0] == 0x33) &&
+ (addr[1] == 0x33))
+ return (MLX5E_MC_IPV6);
+
+ return (MLX5E_MC_OTHER);
+}
+
+static u32
+mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+ int eth_addr_type;
+ u32 ret;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+ switch (eth_addr_type) {
+ case MLX5E_UC:
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV4:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV6:
+ ret =
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV6) |
+ 0;
+ break;
+
+ default:
+ ret =
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+ break;
+
+ case MLX5E_ALLMULTI:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ default: /* MLX5E_PROMISC */
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ return (ret);
+}
+
+static int
+mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type,
+ void *flow_context, void *match_criteria)
+{
+ u8 match_criteria_enable = 0;
+ void *match_value;
+ void *dest;
+ u8 *dmac;
+ u8 *match_criteria_dmac;
+ void *ft = priv->ft.main;
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err;
+
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+ match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ memset(match_criteria_dmac, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ match_criteria_dmac[0] = 0x01;
+ dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ default:
+ break;
+ }
+
+ tt_vec = mlx5e_get_tt_vec(ai, type);
+
+ if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_ANY]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_ANY]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ }
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETHERTYPE_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ }
+ if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETHERTYPE_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ }
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_UDP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETHERTYPE_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ }
+ if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETHERTYPE_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ }
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_TCP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETHERTYPE_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ }
+ if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETHERTYPE_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context, &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return (err);
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ }
+ return (0);
+}
+
+static int
+mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type)
+{
+ u32 *flow_context;
+ u32 *match_criteria;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ if_printf(priv->ifp, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_eth_addr_rule_out;
+ }
+ err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
+ match_criteria);
+ if (err)
+ if_printf(priv->ifp, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return (err);
+}
+
+static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+{
+ struct ifnet *ifp = priv->ifp;
+ int max_list_size;
+ int list_size;
+ u16 *vlans;
+ int vlan;
+ int err;
+ int i;
+
+ list_size = 0;
+ for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+ list_size++;
+
+ max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+
+ if (list_size > max_list_size) {
+ if_printf(ifp,
+ "ifnet vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+ list_size, max_list_size);
+ list_size = max_list_size;
+ }
+
+ vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
+ if (!vlans)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+ if (i >= list_size)
+ break;
+ vlans[i++] = vlan;
+ }
+
+ err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+ if (err)
+ if_printf(ifp, "Failed to modify vport vlans list err(%d)\n",
+ err);
+
+ kfree(vlans);
+ return err;
+}
+
+enum mlx5e_vlan_rule_type {
+ MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+ MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int
+mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u8 match_criteria_enable = 0;
+ u32 *flow_context;
+ void *match_value;
+ void *dest;
+ u32 *match_criteria;
+ u32 *ft_ix;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ if_printf(priv->ifp, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ mlx5_get_flow_table_id(priv->ft.main));
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.cvlan_tag);
+
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+ MLX5_SET(fte_match_param, match_value, outer_headers.cvlan_tag,
+ 1);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+ ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+ MLX5_SET(fte_match_param, match_value, outer_headers.cvlan_tag,
+ 1);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+ vid);
+ mlx5e_vport_context_update_vlans(priv);
+ break;
+ }
+
+ err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+ match_criteria, flow_context, ft_ix);
+ if (err)
+ if_printf(priv->ifp, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return (err);
+}
+
+static void
+mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.untagged_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.any_vlan_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.active_vlans_ft_ix[vid]);
+ mlx5e_vport_context_update_vlans(priv);
+ break;
+ }
+}
+
+void
+mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+ if (priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = false;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void
+mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+ if (!priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = true;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void
+mlx5e_vlan_rx_add_vid(void *arg, struct ifnet *ifp, u16 vid)
+{
+ struct mlx5e_priv *priv = arg;
+
+ if (ifp != priv->ifp)
+ return;
+
+ PRIV_LOCK(priv);
+ set_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+ PRIV_UNLOCK(priv);
+}
+
+void
+mlx5e_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, u16 vid)
+{
+ struct mlx5e_priv *priv = arg;
+
+ if (ifp != priv->ifp)
+ return;
+
+ PRIV_LOCK(priv);
+ clear_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+ PRIV_UNLOCK(priv);
+}
+
+int
+mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+ int err;
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+ if (err)
+ return (err);
+ }
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ return (err);
+
+ if (priv->vlan.filter_disabled) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ if (err)
+ return (err);
+ }
+ return (0);
+}
+
+void
+mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+
+ if (priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ LIST_FOREACH_SAFE(hn, &(hash)[i], hlist, tmp)
+
+static void
+mlx5e_execute_action(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_hash_node *hn)
+{
+ switch (hn->action) {
+ case MLX5E_ACTION_ADD:
+ mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ hn->action = MLX5E_ACTION_NONE;
+ break;
+
+ case MLX5E_ACTION_DEL:
+ mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+ mlx5e_del_eth_addr_from_hash(hn);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+mlx5e_sync_ifp_addr(struct mlx5e_priv *priv)
+{
+ struct ifnet *ifp = priv->ifp;
+ struct ifaddr *ifa;
+ struct ifmultiaddr *ifma;
+
+ /* XXX adding this entry might not be needed */
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
+ LLADDR((struct sockaddr_dl *)(ifp->if_addr->ifa_addr)));
+
+ if_addr_rlock(ifp);
+ TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_LINK)
+ continue;
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_uc,
+ LLADDR((struct sockaddr_dl *)ifa->ifa_addr));
+ }
+ if_addr_runlock(ifp);
+
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.if_mc,
+ LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
+ }
+ if_maddr_runlock(ifp);
+}
+
+static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+ u8 addr_array[][ETH_ALEN], int size)
+{
+ bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
+ struct ifnet *ifp = priv->ifp;
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_eth_addr_hash_head *addr_list;
+ struct mlx5e_eth_addr_hash_node *tmp;
+ int i = 0;
+ int hi;
+
+ addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
+
+ if (is_uc) /* Make sure our own address is pushed first */
+ ether_addr_copy(addr_array[i++], IF_LLADDR(ifp));
+ else if (priv->eth_addr.broadcast_enabled)
+ ether_addr_copy(addr_array[i++], ifp->if_broadcastaddr);
+
+ mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
+ if (ether_addr_equal(IF_LLADDR(ifp), hn->ai.addr))
+ continue;
+ if (i >= size)
+ break;
+ ether_addr_copy(addr_array[i++], hn->ai.addr);
+ }
+}
+
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+ int list_type)
+{
+ bool is_uc = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC);
+ struct mlx5e_eth_addr_hash_node *hn;
+ u8 (*addr_array)[ETH_ALEN] = NULL;
+ struct mlx5e_eth_addr_hash_head *addr_list;
+ struct mlx5e_eth_addr_hash_node *tmp;
+ int max_size;
+ int size;
+ int err;
+ int hi;
+
+ size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+ max_size = is_uc ?
+ 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
+ 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+
+ addr_list = is_uc ? priv->eth_addr.if_uc : priv->eth_addr.if_mc;
+ mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
+ size++;
+
+ if (size > max_size) {
+ if_printf(priv->ifp,
+ "ifp %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+ is_uc ? "UC" : "MC", size, max_size);
+ size = max_size;
+ }
+
+ if (size) {
+ addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
+ if (!addr_array) {
+ err = -ENOMEM;
+ goto out;
+ }
+ mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+ }
+
+ err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+out:
+ if (err)
+ if_printf(priv->ifp,
+ "Failed to modify vport %s list err(%d)\n",
+ is_uc ? "UC" : "MC", err);
+ kfree(addr_array);
+}
+
+static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+
+ mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_UC);
+ mlx5e_vport_context_update_addr_list(priv, MLX5_NIC_VPORT_LIST_TYPE_MC);
+ mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+ ea->allmulti_enabled,
+ ea->promisc_enabled);
+}
+
+static void
+mlx5e_apply_ifp_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_eth_addr_hash_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
+ mlx5e_execute_action(priv, hn);
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
+ mlx5e_execute_action(priv, hn);
+}
+
+static void
+mlx5e_handle_ifp_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_eth_addr_hash_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_uc, i)
+ hn->action = MLX5E_ACTION_DEL;
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.if_mc, i)
+ hn->action = MLX5E_ACTION_DEL;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_sync_ifp_addr(priv);
+
+ mlx5e_apply_ifp_addr(priv);
+}
+
+void
+mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct ifnet *ndev = priv->ifp;
+
+ bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool promisc_enabled = rx_mode_enable && (ndev->if_flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (ndev->if_flags & IFF_ALLMULTI);
+ bool broadcast_enabled = rx_mode_enable;
+
+ bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
+ bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
+ bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
+ bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
+ bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
+ bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+
+ /* update broadcast address */
+ ether_addr_copy(priv->eth_addr.broadcast.addr,
+ priv->ifp->if_broadcastaddr);
+
+ if (enable_promisc)
+ mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (enable_allmulti)
+ mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ if (enable_broadcast)
+ mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+ mlx5e_handle_ifp_addr(priv);
+
+ if (disable_broadcast)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ if (disable_allmulti)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ if (disable_promisc)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+ ea->promisc_enabled = promisc_enabled;
+ ea->allmulti_enabled = allmulti_enabled;
+ ea->broadcast_enabled = broadcast_enabled;
+
+ mlx5e_vport_context_update(priv);
+}
+
+void
+mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv =
+ container_of(work, struct mlx5e_priv, set_rx_mode_work);
+
+ PRIV_LOCK(priv);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_set_rx_mode_core(priv);
+ PRIV_UNLOCK(priv);
+}
+
+static int
+mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+ u8 *dmac;
+
+ g = malloc(9 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
+
+ g[0].log_sz = 2;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.ethertype);
+
+ g[2].log_sz = 0;
+
+ g[3].log_sz = 14;
+ g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[4].log_sz = 13;
+ g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+ outer_headers.ethertype);
+
+ g[5].log_sz = 11;
+ g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+
+ g[6].log_sz = 2;
+ g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[7].log_sz = 1;
+ g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+ outer_headers.ethertype);
+
+ g[8].log_sz = 0;
+ g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 0, 9, g);
+ free(g, M_MLX5EN);
+
+ return (priv->ft.main ? 0 : -ENOMEM);
+}
+
+static void
+mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.main);
+ priv->ft.main = NULL;
+}
+
+static int
+mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+
+ g = malloc(2 * sizeof(*g), M_MLX5EN, M_WAITOK | M_ZERO);
+
+ g[0].log_sz = 12;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.first_vid);
+
+ /* untagged + any vlan id */
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.cvlan_tag);
+
+ priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 0, 2, g);
+ free(g, M_MLX5EN);
+
+ return (priv->ft.vlan ? 0 : -ENOMEM);
+}
+
+static void
+mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.vlan);
+ priv->ft.vlan = NULL;
+}
+
+int
+mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_main_flow_table(priv);
+ if (err)
+ return (err);
+
+ err = mlx5e_create_vlan_flow_table(priv);
+ if (err)
+ goto err_destroy_main_flow_table;
+
+ return (0);
+
+err_destroy_main_flow_table:
+ mlx5e_destroy_main_flow_table(priv);
+
+ return (err);
+}
+
+void
+mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_main_flow_table(priv);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
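To make the rule fan-out in mlx5e_add_eth_addr_rule_sub() above easier to follow: the destination MAC is first classified (unicast, IPv4 multicast 01:00:5e:xx, IPv6 multicast 33:33:xx, or other), and the traffic-type bit vector returned by mlx5e_get_tt_vec() then decides which per-TIR flow-table entries are installed. The stand-alone sketch below mirrors only the classification step; it is illustrative, not part of the commit, and the EX_ names are invented for the example.

    #include <stdio.h>

    enum { EX_UC, EX_MC_IPV4, EX_MC_IPV6, EX_MC_OTHER };

    /* Same tests as mlx5e_get_eth_addr_type(), written against a plain byte array. */
    static int
    ex_classify_dmac(const unsigned char a[6])
    {
            if ((a[0] & 0x01) == 0)         /* I/G bit clear: unicast */
                    return (EX_UC);
            if (a[0] == 0x01 && a[1] == 0x00 && a[2] == 0x5e && !(a[3] & 0x80))
                    return (EX_MC_IPV4);    /* IPv4 multicast MAC range */
            if (a[0] == 0x33 && a[1] == 0x33)
                    return (EX_MC_IPV6);    /* IPv6 multicast MAC range */
            return (EX_MC_OTHER);
    }

    int
    main(void)
    {
            const unsigned char v4mc[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };

            printf("type=%d\n", ex_classify_dmac(v4mc)); /* prints 1 (EX_MC_IPV4) */
            return (0);
    }
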
Added: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,3307 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_main.c 324523 2017-10-11 10:04:17Z hselasky $
+ */
+
+#include "en.h"
+
+#include <sys/sockio.h>
+#include <machine/atomic.h>
+
+#define ETH_DRIVER_VERSION "3.1.0-dev"
+char mlx5e_version[] = "Mellanox Ethernet driver"
+ " (" ETH_DRIVER_VERSION ")";
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+};
+
+static const struct {
+ u32 subtype;
+ u64 baudrate;
+} mlx5e_mode_table[MLX5E_LINK_MODES_NUMBER] = {
+
+ [MLX5E_1000BASE_CX_SGMII] = {
+ .subtype = IFM_1000_CX_SGMII,
+ .baudrate = IF_Mbps(1000ULL),
+ },
+ [MLX5E_1000BASE_KX] = {
+ .subtype = IFM_1000_KX,
+ .baudrate = IF_Mbps(1000ULL),
+ },
+ [MLX5E_10GBASE_CX4] = {
+ .subtype = IFM_10G_CX4,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_10GBASE_KX4] = {
+ .subtype = IFM_10G_KX4,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_10GBASE_KR] = {
+ .subtype = IFM_10G_KR,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_20GBASE_KR2] = {
+ .subtype = IFM_20G_KR2,
+ .baudrate = IF_Gbps(20ULL),
+ },
+ [MLX5E_40GBASE_CR4] = {
+ .subtype = IFM_40G_CR4,
+ .baudrate = IF_Gbps(40ULL),
+ },
+ [MLX5E_40GBASE_KR4] = {
+ .subtype = IFM_40G_KR4,
+ .baudrate = IF_Gbps(40ULL),
+ },
+ [MLX5E_56GBASE_R4] = {
+ .subtype = IFM_56G_R4,
+ .baudrate = IF_Gbps(56ULL),
+ },
+ [MLX5E_10GBASE_CR] = {
+ .subtype = IFM_10G_CR1,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_10GBASE_SR] = {
+ .subtype = IFM_10G_SR,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_10GBASE_LR] = {
+ .subtype = IFM_10G_LR,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_40GBASE_SR4] = {
+ .subtype = IFM_40G_SR4,
+ .baudrate = IF_Gbps(40ULL),
+ },
+ [MLX5E_40GBASE_LR4] = {
+ .subtype = IFM_40G_LR4,
+ .baudrate = IF_Gbps(40ULL),
+ },
+ [MLX5E_100GBASE_CR4] = {
+ .subtype = IFM_100G_CR4,
+ .baudrate = IF_Gbps(100ULL),
+ },
+ [MLX5E_100GBASE_SR4] = {
+ .subtype = IFM_100G_SR4,
+ .baudrate = IF_Gbps(100ULL),
+ },
+ [MLX5E_100GBASE_KR4] = {
+ .subtype = IFM_100G_KR4,
+ .baudrate = IF_Gbps(100ULL),
+ },
+ [MLX5E_100GBASE_LR4] = {
+ .subtype = IFM_100G_LR4,
+ .baudrate = IF_Gbps(100ULL),
+ },
+ [MLX5E_100BASE_TX] = {
+ .subtype = IFM_100_TX,
+ .baudrate = IF_Mbps(100ULL),
+ },
+ [MLX5E_100BASE_T] = {
+ .subtype = IFM_100_T,
+ .baudrate = IF_Mbps(100ULL),
+ },
+ [MLX5E_10GBASE_T] = {
+ .subtype = IFM_10G_T,
+ .baudrate = IF_Gbps(10ULL),
+ },
+ [MLX5E_25GBASE_CR] = {
+ .subtype = IFM_25G_CR,
+ .baudrate = IF_Gbps(25ULL),
+ },
+ [MLX5E_25GBASE_KR] = {
+ .subtype = IFM_25G_KR,
+ .baudrate = IF_Gbps(25ULL),
+ },
+ [MLX5E_25GBASE_SR] = {
+ .subtype = IFM_25G_SR,
+ .baudrate = IF_Gbps(25ULL),
+ },
+ [MLX5E_50GBASE_CR2] = {
+ .subtype = IFM_50G_CR2,
+ .baudrate = IF_Gbps(50ULL),
+ },
+ [MLX5E_50GBASE_KR2] = {
+ .subtype = IFM_50G_KR2,
+ .baudrate = IF_Gbps(50ULL),
+ },
+};
+
+MALLOC_DEFINE(M_MLX5EN, "MLX5EN", "MLX5 Ethernet");
+
+static void
+mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_oper;
+ int error;
+ u8 port_state;
+ u8 i;
+
+ port_state = mlx5_query_vport_state(mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+
+ if (port_state == VPORT_STATE_UP) {
+ priv->media_status_last |= IFM_ACTIVE;
+ } else {
+ priv->media_status_last &= ~IFM_ACTIVE;
+ priv->media_active_last = IFM_ETHER;
+ if_link_state_change(priv->ifp, LINK_STATE_DOWN);
+ return;
+ }
+
+ error = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
+ if (error) {
+ priv->media_active_last = IFM_ETHER;
+ priv->ifp->if_baudrate = 1;
+ if_printf(priv->ifp, "%s: query port ptys failed: 0x%x\n",
+ __func__, error);
+ return;
+ }
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+
+ for (i = 0; i != MLX5E_LINK_MODES_NUMBER; i++) {
+ if (mlx5e_mode_table[i].baudrate == 0)
+ continue;
+ if (MLX5E_PROT_MASK(i) & eth_proto_oper) {
+ priv->ifp->if_baudrate =
+ mlx5e_mode_table[i].baudrate;
+ priv->media_active_last =
+ mlx5e_mode_table[i].subtype | IFM_ETHER | IFM_FDX;
+ }
+ }
+ if_link_state_change(priv->ifp, LINK_STATE_UP);
+}
+
+static void
+mlx5e_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
+{
+ struct mlx5e_priv *priv = dev->if_softc;
+
+ ifmr->ifm_status = priv->media_status_last;
+ ifmr->ifm_active = priv->media_active_last |
+ (priv->params.rx_pauseframe_control ? IFM_ETH_RXPAUSE : 0) |
+ (priv->params.tx_pauseframe_control ? IFM_ETH_TXPAUSE : 0);
+
+}
+
+static u32
+mlx5e_find_link_mode(u32 subtype)
+{
+ u32 i;
+ u32 link_mode = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (mlx5e_mode_table[i].baudrate == 0)
+ continue;
+ if (mlx5e_mode_table[i].subtype == subtype)
+ link_mode |= MLX5E_PROT_MASK(i);
+ }
+
+ return (link_mode);
+}
+
+static int
+mlx5e_media_change(struct ifnet *dev)
+{
+ struct mlx5e_priv *priv = dev->if_softc;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 eth_proto_cap;
+ u32 link_mode;
+ int was_opened;
+ int locked;
+ int error;
+
+ locked = PRIV_LOCKED(priv);
+ if (!locked)
+ PRIV_LOCK(priv);
+
+ if (IFM_TYPE(priv->media.ifm_media) != IFM_ETHER) {
+ error = EINVAL;
+ goto done;
+ }
+ link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));
+
+ /* query supported capabilities */
+ error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (error != 0) {
+ if_printf(dev, "Query port media capability failed\n");
+ goto done;
+ }
+ /* check for autoselect */
+ if (IFM_SUBTYPE(priv->media.ifm_media) == IFM_AUTO) {
+ link_mode = eth_proto_cap;
+ if (link_mode == 0) {
+ if_printf(dev, "Port media capability is zero\n");
+ error = EINVAL;
+ goto done;
+ }
+ } else {
+ link_mode = link_mode & eth_proto_cap;
+ if (link_mode == 0) {
+ if_printf(dev, "Not supported link mode requested\n");
+ error = EINVAL;
+ goto done;
+ }
+ }
+ /* update pauseframe control bits */
+ priv->params.rx_pauseframe_control =
+ (priv->media.ifm_media & IFM_ETH_RXPAUSE) ? 1 : 0;
+ priv->params.tx_pauseframe_control =
+ (priv->media.ifm_media & IFM_ETH_TXPAUSE) ? 1 : 0;
+
+ /* check if device is opened */
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ /* reconfigure the hardware */
+ mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+ mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
+ mlx5_set_port_pause(mdev, 1,
+ priv->params.rx_pauseframe_control,
+ priv->params.tx_pauseframe_control);
+ if (was_opened)
+ mlx5_set_port_status(mdev, MLX5_PORT_UP);
+
+done:
+ if (!locked)
+ PRIV_UNLOCK(priv);
+ return (error);
+}
+
+static void
+mlx5e_update_carrier_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_carrier_work);
+
+ PRIV_LOCK(priv);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_carrier(priv);
+ PRIV_UNLOCK(priv);
+}
+
+/*
+ * This function reads the physical port counters from the firmware
+ * using a pre-defined layout defined by various MLX5E_PPORT_XXX()
+ * macros. The output is converted from big-endian 64-bit values into
+ * host endian ones and stored in the "priv->stats.pport" structure.
+ */
+static void
+mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *s = &priv->stats.pport;
+ struct mlx5e_port_stats_debug *s_debug = &priv->stats.port_stats_debug;
+ u32 *in;
+ u32 *out;
+ const u64 *ptr;
+ unsigned sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ unsigned x;
+ unsigned y;
+
+ /* allocate firmware request structures */
+ in = mlx5_vzalloc(sz);
+ out = mlx5_vzalloc(sz);
+ if (in == NULL || out == NULL)
+ goto free_out;
+
+ /*
+ * Get pointer to the 64-bit counter set which is located at a
+ * fixed offset in the output firmware request structure:
+ */
+ ptr = (const uint64_t *)MLX5_ADDR_OF(ppcnt_reg, out, counter_set);
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ /* read IEEE802_3 counter group using predefined counter layout */
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ for (x = y = 0; x != MLX5E_PPORT_IEEE802_3_STATS_NUM; x++, y++)
+ s->arg[y] = be64toh(ptr[x]);
+
+ /* read RFC2819 counter group using predefined counter layout */
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
+ s->arg[y] = be64toh(ptr[x]);
+ for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
+ MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
+ s_debug->arg[y] = be64toh(ptr[x]);
+
+ /* read RFC2863 counter group using predefined counter layout */
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ for (x = 0; x != MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM; x++, y++)
+ s_debug->arg[y] = be64toh(ptr[x]);
+
+ /* read physical layer stats counter group using predefined counter layout */
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
+ s_debug->arg[y] = be64toh(ptr[x]);
+free_out:
+ /* free firmware request structures */
+ kvfree(in);
+ kvfree(out);
+}
+
+/*
+ * This function is called regularly to collect all statistics
+ * counters from the firmware. The values can be viewed through the
+ * sysctl interface. Execution is serialized using the priv's global
+ * configuration lock.
+ */
+static void
+mlx5e_update_stats_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_stats_work);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ struct buf_ring *sq_br;
+#if (__FreeBSD_version < 1100000)
+ struct ifnet *ifp = priv->ifp;
+#endif
+
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u64 tso_packets = 0;
+ u64 tso_bytes = 0;
+ u64 tx_queue_dropped = 0;
+ u64 tx_defragged = 0;
+ u64 tx_offload_none = 0;
+ u64 lro_packets = 0;
+ u64 lro_bytes = 0;
+ u64 sw_lro_queued = 0;
+ u64 sw_lro_flushed = 0;
+ u64 rx_csum_none = 0;
+ u64 rx_wqe_err = 0;
+ u32 rx_out_of_buffer = 0;
+ int i;
+ int j;
+
+ PRIV_LOCK(priv);
+ out = mlx5_vzalloc(outlen);
+ if (out == NULL)
+ goto free_out;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
+ goto free_out;
+
+ /* Collect the SW counters first and then the HW counters, for consistency */
+ for (i = 0; i < priv->params.num_channels; i++) {
+ struct mlx5e_rq *rq = &priv->channel[i]->rq;
+
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ /* collect stats from LRO */
+ rq_stats->sw_lro_queued = rq->lro.lro_queued;
+ rq_stats->sw_lro_flushed = rq->lro.lro_flushed;
+ sw_lro_queued += rq_stats->sw_lro_queued;
+ sw_lro_flushed += rq_stats->sw_lro_flushed;
+ lro_packets += rq_stats->lro_packets;
+ lro_bytes += rq_stats->lro_bytes;
+ rx_csum_none += rq_stats->csum_none;
+ rx_wqe_err += rq_stats->wqe_err;
+
+ for (j = 0; j < priv->num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+ sq_br = priv->channel[i]->sq[j].br;
+
+ tso_packets += sq_stats->tso_packets;
+ tso_bytes += sq_stats->tso_bytes;
+ tx_queue_dropped += sq_stats->dropped;
+ if (sq_br != NULL)
+ tx_queue_dropped += sq_br->br_drops;
+ tx_defragged += sq_stats->defragged;
+ tx_offload_none += sq_stats->csum_offload_none;
+ }
+ }
+
+ /* update counters */
+ s->tso_packets = tso_packets;
+ s->tso_bytes = tso_bytes;
+ s->tx_queue_dropped = tx_queue_dropped;
+ s->tx_defragged = tx_defragged;
+ s->lro_packets = lro_packets;
+ s->lro_bytes = lro_bytes;
+ s->sw_lro_queued = sw_lro_queued;
+ s->sw_lro_flushed = sw_lro_flushed;
+ s->rx_csum_none = rx_csum_none;
+ s->rx_wqe_err = rx_wqe_err;
+
+ /* HW counters */
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+ memset(out, 0, outlen);
+
+ /* get number of out-of-buffer drops first */
+ if (mlx5_vport_query_out_of_rx_buffer(mdev, priv->counter_set_id,
+ &rx_out_of_buffer))
+ goto free_out;
+
+ /* accumulate difference into a 64-bit counter */
+ s->rx_out_of_buffer += (u64)(u32)(rx_out_of_buffer - s->rx_out_of_buffer_prev);
+ s->rx_out_of_buffer_prev = rx_out_of_buffer;
+
+ /* get port statistics */
+ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ goto free_out;
+
+#define MLX5_GET_CTR(out, x) \
+ MLX5_GET64(query_vport_counter_out, out, x)
+
+ s->rx_error_packets =
+ MLX5_GET_CTR(out, received_errors.packets);
+ s->rx_error_bytes =
+ MLX5_GET_CTR(out, received_errors.octets);
+ s->tx_error_packets =
+ MLX5_GET_CTR(out, transmit_errors.packets);
+ s->tx_error_bytes =
+ MLX5_GET_CTR(out, transmit_errors.octets);
+
+ s->rx_unicast_packets =
+ MLX5_GET_CTR(out, received_eth_unicast.packets);
+ s->rx_unicast_bytes =
+ MLX5_GET_CTR(out, received_eth_unicast.octets);
+ s->tx_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+ s->tx_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+ s->rx_multicast_packets =
+ MLX5_GET_CTR(out, received_eth_multicast.packets);
+ s->rx_multicast_bytes =
+ MLX5_GET_CTR(out, received_eth_multicast.octets);
+ s->tx_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+ s->tx_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+ s->rx_broadcast_packets =
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ s->rx_broadcast_bytes =
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ s->tx_broadcast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ s->tx_broadcast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ s->rx_packets =
+ s->rx_unicast_packets +
+ s->rx_multicast_packets +
+ s->rx_broadcast_packets -
+ s->rx_out_of_buffer;
+ s->rx_bytes =
+ s->rx_unicast_bytes +
+ s->rx_multicast_bytes +
+ s->rx_broadcast_bytes;
+ s->tx_packets =
+ s->tx_unicast_packets +
+ s->tx_multicast_packets +
+ s->tx_broadcast_packets;
+ s->tx_bytes =
+ s->tx_unicast_bytes +
+ s->tx_multicast_bytes +
+ s->tx_broadcast_bytes;
+
+ /* Update calculated offload counters */
+ s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+
+ /* Get physical port counters */
+ mlx5e_update_pport_counters(priv);
+
+#if (__FreeBSD_version < 1100000)
+ /* no get_counters interface in fbsd 10 */
+ ifp->if_ipackets = s->rx_packets;
+ ifp->if_ierrors = s->rx_error_packets +
+ priv->stats.pport.alignment_err +
+ priv->stats.pport.check_seq_err +
+ priv->stats.pport.crc_align_errors +
+ priv->stats.pport.in_range_len_errors +
+ priv->stats.pport.jabbers +
+ priv->stats.pport.out_of_range_len +
+ priv->stats.pport.oversize_pkts +
+ priv->stats.pport.symbol_err +
+ priv->stats.pport.too_long_errors +
+ priv->stats.pport.undersize_pkts +
+ priv->stats.pport.unsupported_op_rx;
+ ifp->if_iqdrops = s->rx_out_of_buffer +
+ priv->stats.pport.drop_events;
+ ifp->if_opackets = s->tx_packets;
+ ifp->if_oerrors = s->tx_error_packets;
+ ifp->if_snd.ifq_drops = s->tx_queue_dropped;
+ ifp->if_ibytes = s->rx_bytes;
+ ifp->if_obytes = s->tx_bytes;
+ ifp->if_collisions =
+ priv->stats.pport.collisions;
+#endif
+
+free_out:
+ kvfree(out);
+
+ /* Update diagnostics, if any */
+ if (priv->params_ethtool.diag_pci_enable ||
+ priv->params_ethtool.diag_general_enable) {
+ int error = mlx5_core_get_diagnostics_full(mdev,
+ priv->params_ethtool.diag_pci_enable ? &priv->params_pci : NULL,
+ priv->params_ethtool.diag_general_enable ? &priv->params_general : NULL);
+ if (error != 0)
+ if_printf(priv->ifp, "Failed reading diagnostics: %d\n", error);
+ }
+ PRIV_UNLOCK(priv);
+}
+
+static void
+mlx5e_update_stats(void *arg)
+{
+ struct mlx5e_priv *priv = arg;
+
+ schedule_work(&priv->update_stats_work);
+
+ callout_reset(&priv->watchdog, hz, &mlx5e_update_stats, priv);
+}
+
+static void
+mlx5e_async_event_sub(struct mlx5e_priv *priv,
+ enum mlx5_dev_event event)
+{
+ switch (event) {
+ case MLX5_DEV_EVENT_PORT_UP:
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ schedule_work(&priv->update_carrier_work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ mtx_lock(&priv->async_events_mtx);
+ if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ mlx5e_async_event_sub(priv, event);
+ mtx_unlock(&priv->async_events_mtx);
+}
+
+static void
+mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void
+mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+ mtx_lock(&priv->async_events_mtx);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ mtx_unlock(&priv->async_events_mtx);
+}
+
+static const char *mlx5e_rq_stats_desc[] = {
+ MLX5E_RQ_STATS(MLX5E_STATS_DESC)
+};
+
+static int
+mlx5e_create_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ char buffer[16];
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int wq_sz;
+ int err;
+ int i;
+
+ /* Create DMA descriptor TAG */
+ if ((err = -bus_dma_tag_create(
+ bus_get_dma_tag(mdev->pdev->dev.bsddev),
+ 1, /* any alignment */
+ 0, /* no boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MJUM16BYTES, /* maxsize */
+ 1, /* nsegments */
+ MJUM16BYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &rq->dma_tag)))
+ goto done;
+
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ goto err_free_dma_tag;
+
+ rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+ if (priv->params.hw_lro_en) {
+ rq->wqe_sz = priv->params.lro_wqe_sz;
+ } else {
+ rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
+ }
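+ /* Round the receive WQE size up to the next supported mbuf cluster size. */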
+ if (rq->wqe_sz > MJUM16BYTES) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ } else if (rq->wqe_sz > MJUM9BYTES) {
+ rq->wqe_sz = MJUM16BYTES;
+ } else if (rq->wqe_sz > MJUMPAGESIZE) {
+ rq->wqe_sz = MJUM9BYTES;
+ } else if (rq->wqe_sz > MCLBYTES) {
+ rq->wqe_sz = MJUMPAGESIZE;
+ } else {
+ rq->wqe_sz = MCLBYTES;
+ }
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
+ for (i = 0; i != wq_sz; i++) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+ uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+
+ err = -bus_dmamap_create(rq->dma_tag, 0, &rq->mbuf[i].dma_map);
+ if (err != 0) {
+ while (i--)
+ bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
+ goto err_rq_mbuf_free;
+ }
+ wqe->data.lkey = c->mkey_be;
+ wqe->data.byte_count = cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ }
+
+ rq->ifp = c->ifp;
+ rq->channel = c;
+ rq->ix = c->ix;
+
+ snprintf(buffer, sizeof(buffer), "rxstat%d", c->ix);
+ mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
+ rq->stats.arg);
+
+#ifdef HAVE_TURBO_LRO
+ if (tcp_tlro_init(&rq->lro, c->ifp, MLX5E_BUDGET_MAX) != 0)
+ rq->lro.mbuf = NULL;
+#else
+ if (tcp_lro_init(&rq->lro))
+ rq->lro.lro_cnt = 0;
+ else
+ rq->lro.ifp = c->ifp;
+#endif
+ return (0);
+
+err_rq_mbuf_free:
+ free(rq->mbuf, M_MLX5EN);
+err_rq_wq_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+err_free_dma_tag:
+ bus_dma_tag_destroy(rq->dma_tag);
+done:
+ return (err);
+}
+
+static void
+mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+ int wq_sz;
+ int i;
+
+ /* destroy all sysctl nodes */
+ sysctl_ctx_free(&rq->stats.ctx);
+
+ /* free leftover LRO packets, if any */
+#ifdef HAVE_TURBO_LRO
+ tcp_tlro_free(&rq->lro);
+#else
+ tcp_lro_free(&rq->lro);
+#endif
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ for (i = 0; i != wq_sz; i++) {
+ if (rq->mbuf[i].mbuf != NULL) {
+ bus_dmamap_unload(rq->dma_tag,
+ rq->mbuf[i].dma_map);
+ m_freem(rq->mbuf[i].mbuf);
+ }
+ bus_dmamap_destroy(rq->dma_tag, rq->mbuf[i].dma_map);
+ }
+ free(rq->mbuf, M_MLX5EN);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int
+mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * rq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+ MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ if (priv->counter_set_id >= 0)
+ MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&rq->wq_ctrl.buf,
+ (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+ kvfree(in);
+
+ return (err);
+}
+
+static int
+mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rqn, rq->rqn);
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ err = mlx5_core_modify_rq(mdev, in, inlen);
+
+ kvfree(in);
+
+ return (err);
+}
+
+static void
+mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_rq(mdev, rq->rqn);
+}
+
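+/*
+ * Poll until the receive queue has been refilled with at least the
+ * configured minimum number of WQEs, giving up after 1000 attempts.
+ */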
+static int
+mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_wq_ll *wq = &rq->wq;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (wq->cur_sz >= priv->params.min_rx_wqes)
+ return (0);
+
+ msleep(4);
+ }
+ return (-ETIMEDOUT);
+}
+
+static int
+mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ int err;
+
+ err = mlx5e_create_rq(c, param, rq);
+ if (err)
+ return (err);
+
+ err = mlx5e_enable_rq(rq, param);
+ if (err)
+ goto err_destroy_rq;
+
+ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_disable_rq;
+
+ c->rq.enabled = 1;
+
+ return (0);
+
+err_disable_rq:
+ mlx5e_disable_rq(rq);
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+
+ return (err);
+}
+
+static void
+mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ mtx_lock(&rq->mtx);
+ rq->enabled = 0;
+ callout_stop(&rq->watchdog);
+ mtx_unlock(&rq->mtx);
+
+ callout_drain(&rq->watchdog);
+
+ mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+}
+
+static void
+mlx5e_close_rq_wait(struct mlx5e_rq *rq)
+{
+ /* wait till RQ is empty */
+ while (!mlx5_wq_ll_is_empty(&rq->wq)) {
+ msleep(4);
+ rq->cq.mcq.comp(&rq->cq.mcq);
+ }
+
+ mlx5e_disable_rq(rq);
+ mlx5e_destroy_rq(rq);
+}
+
+void
+mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int x;
+
+ for (x = 0; x != wq_sz; x++)
+ bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
+ free(sq->mbuf, M_MLX5EN);
+}
+
+int
+mlx5e_alloc_sq_db(struct mlx5e_sq *sq)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int err;
+ int x;
+
+ sq->mbuf = malloc(wq_sz * sizeof(sq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
+
+ /* Create DMA descriptor MAPs */
+ for (x = 0; x != wq_sz; x++) {
+ err = -bus_dmamap_create(sq->dma_tag, 0, &sq->mbuf[x].dma_map);
+ if (err != 0) {
+ while (x--)
+ bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
+ free(sq->mbuf, M_MLX5EN);
+ return (err);
+ }
+ }
+ return (0);
+}
+
+static const char *mlx5e_sq_stats_desc[] = {
+ MLX5E_SQ_STATS(MLX5E_STATS_DESC)
+};
+
+static int
+mlx5e_create_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ char buffer[16];
+
+ void *sqc = param->sqc;
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+#ifdef RSS
+ cpuset_t cpu_mask;
+ int cpu_id;
+#endif
+ int err;
+
+ /* Create DMA descriptor TAG */
+ if ((err = -bus_dma_tag_create(
+ bus_get_dma_tag(mdev->pdev->dev.bsddev),
+ 1, /* any alignment */
+ 0, /* no boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ MLX5E_MAX_TX_PAYLOAD_SIZE, /* maxsize */
+ MLX5E_MAX_TX_MBUF_FRAGS, /* nsegments */
+ MLX5E_MAX_TX_MBUF_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockfuncarg */
+ &sq->dma_tag)))
+ goto done;
+
+ err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ if (err)
+ goto err_free_dma_tag;
+
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+ &sq->wq_ctrl);
+ if (err)
+ goto err_unmap_free_uar;
+
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ err = mlx5e_alloc_sq_db(sq);
+ if (err)
+ goto err_sq_wq_destroy;
+
+ sq->mkey_be = c->mkey_be;
+ sq->ifp = priv->ifp;
+ sq->priv = priv;
+ sq->tc = tc;
+
+ /* check if we should allocate a second packet buffer */
+ if (priv->params_ethtool.tx_bufring_disable == 0) {
+ sq->br = buf_ring_alloc(MLX5E_SQ_TX_QUEUE_SIZE, M_MLX5EN,
+ M_WAITOK, &sq->lock);
+ if (sq->br == NULL) {
+ if_printf(c->ifp, "%s: Failed allocating sq drbr buffer\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_free_sq_db;
+ }
+
+ sq->sq_tq = taskqueue_create_fast("mlx5e_que", M_WAITOK,
+ taskqueue_thread_enqueue, &sq->sq_tq);
+ if (sq->sq_tq == NULL) {
+ if_printf(c->ifp, "%s: Failed allocating taskqueue\n",
+ __func__);
+ err = -ENOMEM;
+ goto err_free_drbr;
+ }
+
+ TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
+#ifdef RSS
+ cpu_id = rss_getcpu(c->ix % rss_getnumbuckets());
+ CPU_SETOF(cpu_id, &cpu_mask);
+ taskqueue_start_threads_cpuset(&sq->sq_tq, 1, PI_NET, &cpu_mask,
+ "%s TX SQ%d.%d CPU%d", c->ifp->if_xname, c->ix, tc, cpu_id);
+#else
+ taskqueue_start_threads(&sq->sq_tq, 1, PI_NET,
+ "%s TX SQ%d.%d", c->ifp->if_xname, c->ix, tc);
+#endif
+ }
+ snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
+ mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ buffer, mlx5e_sq_stats_desc, MLX5E_SQ_STATS_NUM,
+ sq->stats.arg);
+
+ return (0);
+
+err_free_drbr:
+ buf_ring_free(sq->br, M_MLX5EN);
+err_free_sq_db:
+ mlx5e_free_sq_db(sq);
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &sq->uar);
+
+err_free_dma_tag:
+ bus_dma_tag_destroy(sq->dma_tag);
+done:
+ return (err);
+}
+
+static void
+mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+ /* destroy all sysctl nodes */
+ sysctl_ctx_free(&sq->stats.ctx);
+
+ mlx5e_free_sq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ mlx5_unmap_free_uar(sq->priv->mdev, &sq->uar);
+ if (sq->sq_tq != NULL) {
+ taskqueue_drain(sq->sq_tq, &sq->sq_task);
+ taskqueue_free(sq->sq_tq);
+ }
+ if (sq->br != NULL)
+ buf_ring_free(sq->br, M_MLX5EN);
+}
+
+int
+mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param,
+ int tis_num)
+{
+ void *in;
+ void *sqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+ MLX5_SET(sqc, sqc, tis_num_0, tis_num);
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf,
+ (__be64 *) MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(sq->priv->mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return (err);
+}
+
+int
+mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ MLX5_SET(modify_sq_in, in, sqn, sq->sqn);
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ err = mlx5_core_modify_sq(sq->priv->mdev, in, inlen);
+
+ kvfree(in);
+
+ return (err);
+}
+
+void
+mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+
+ mlx5_core_destroy_sq(sq->priv->mdev, sq->sqn);
+}
+
+static int
+mlx5e_open_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ int err;
+
+ err = mlx5e_create_sq(c, tc, param, sq);
+ if (err)
+ return (err);
+
+ err = mlx5e_enable_sq(sq, param, c->priv->tisn[tc]);
+ if (err)
+ goto err_destroy_sq;
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ if (err)
+ goto err_disable_sq;
+
+ atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_READY);
+
+ return (0);
+
+err_disable_sq:
+ mlx5e_disable_sq(sq);
+err_destroy_sq:
+ mlx5e_destroy_sq(sq);
+
+ return (err);
+}
+
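+/*
+ * Fill the remainder of the completion event interval with NOP WQEs,
+ * optionally sleeping until send queue space becomes available, and
+ * write out any pending doorbell.
+ */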
+static void
+mlx5e_sq_send_nops_locked(struct mlx5e_sq *sq, int can_sleep)
+{
+ /* fill up remainder with NOPs */
+ while (sq->cev_counter != 0) {
+ while (!mlx5e_sq_has_room_for(sq, 1)) {
+ if (can_sleep != 0) {
+ mtx_unlock(&sq->lock);
+ msleep(4);
+ mtx_lock(&sq->lock);
+ } else {
+ goto done;
+ }
+ }
+ /* send a single NOP */
+ mlx5e_send_nop(sq, 1);
+ wmb();
+ }
+done:
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
+}
+
+void
+mlx5e_sq_cev_timeout(void *arg)
+{
+ struct mlx5e_sq *sq = arg;
+
+ mtx_assert(&sq->lock, MA_OWNED);
+
+ /* check next state */
+ switch (sq->cev_next_state) {
+ case MLX5E_CEV_STATE_SEND_NOPS:
+ /* fill TX ring with NOPs, if any */
+ mlx5e_sq_send_nops_locked(sq, 0);
+
+ /* check if completed */
+ if (sq->cev_counter == 0) {
+ sq->cev_next_state = MLX5E_CEV_STATE_INITIAL;
+ return;
+ }
+ break;
+ default:
+ /* send NOPs on next timeout */
+ sq->cev_next_state = MLX5E_CEV_STATE_SEND_NOPS;
+ break;
+ }
+
+ /* restart timer */
+ callout_reset_curcpu(&sq->cev_callout, hz, mlx5e_sq_cev_timeout, sq);
+}
+
+void
+mlx5e_drain_sq(struct mlx5e_sq *sq)
+{
+ int error;
+
+ /*
+ * Check if already stopped.
+ *
+ * NOTE: The "stopped" variable is only written when both the
+ * priv's configuration lock and the SQ's lock are locked. It
+ * can therefore safely be read when only one of the two locks
+ * is locked. This function is always called when the priv's
+ * configuration lock is locked.
+ */
+ if (sq->stopped != 0)
+ return;
+
+ mtx_lock(&sq->lock);
+
+ /* don't put more packets into the SQ */
+ sq->stopped = 1;
+
+ /* teardown event factor timer, if any */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ callout_stop(&sq->cev_callout);
+
+ /* send dummy NOPs in order to flush the transmit ring */
+ mlx5e_sq_send_nops_locked(sq, 1);
+ mtx_unlock(&sq->lock);
+
+ /* make sure it is safe to free the callout */
+ callout_drain(&sq->cev_callout);
+
+ /* wait till SQ is empty or link is down */
+ mtx_lock(&sq->lock);
+ while (sq->cc != sq->pc &&
+ (sq->priv->media_status_last & IFM_ACTIVE) != 0) {
+ mtx_unlock(&sq->lock);
+ msleep(1);
+ sq->cq.mcq.comp(&sq->cq.mcq);
+ mtx_lock(&sq->lock);
+ }
+ mtx_unlock(&sq->lock);
+
+ /* error out remaining requests */
+ error = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ if (error != 0) {
+ if_printf(sq->ifp,
+ "mlx5e_modify_sq() from RDY to ERR failed: %d\n", error);
+ }
+
+ /* wait till SQ is empty */
+ mtx_lock(&sq->lock);
+ while (sq->cc != sq->pc) {
+ mtx_unlock(&sq->lock);
+ msleep(1);
+ sq->cq.mcq.comp(&sq->cq.mcq);
+ mtx_lock(&sq->lock);
+ }
+ mtx_unlock(&sq->lock);
+}
+
+static void
+mlx5e_close_sq_wait(struct mlx5e_sq *sq)
+{
+
+ mlx5e_drain_sq(sq);
+ mlx5e_disable_sq(sq);
+ mlx5e_destroy_sq(sq);
+}
+
+static int
+mlx5e_create_cq(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ mlx5e_cq_comp_t *comp,
+ int eq_ix)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+ u32 i;
+
+ param->wq.buf_numa_node = 0;
+ param->wq.db_numa_node = 0;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return (err);
+
+ mlx5_vector2eqn(mdev, eq_ix, &eqn_not_used, &irqn);
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = eq_ix;
+ mcq->comp = comp;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
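+ /* Mark all CQEs invalid so entries not yet written by hardware are ignored. */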
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->priv = priv;
+
+ return (0);
+}
+
+static void
+mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int
+mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param, int eq_ix)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ void *in;
+ void *cqc;
+ int inlen;
+ int irqn_not_used;
+ int eqn;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf,
+ (__be64 *) MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ mlx5_vector2eqn(cq->priv->mdev, eq_ix, &eqn, &irqn_not_used);
+
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(cq->priv->mdev, mcq, in, inlen);
+
+ kvfree(in);
+
+ if (err)
+ return (err);
+
+ mlx5e_cq_arm(cq, MLX5_GET_DOORBELL_LOCK(&cq->priv->doorbell_lock));
+
+ return (0);
+}
+
+static void
+mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+
+ mlx5_core_destroy_cq(cq->priv->mdev, &cq->mcq);
+}
+
+int
+mlx5e_open_cq(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ mlx5e_cq_comp_t *comp,
+ int eq_ix)
+{
+ int err;
+
+ err = mlx5e_create_cq(priv, param, cq, comp, eq_ix);
+ if (err)
+ return (err);
+
+ err = mlx5e_enable_cq(cq, param, eq_ix);
+ if (err)
+ goto err_destroy_cq;
+
+ return (0);
+
+err_destroy_cq:
+ mlx5e_destroy_cq(cq);
+
+ return (err);
+}
+
+void
+mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+ mlx5e_disable_cq(cq);
+ mlx5e_destroy_cq(cq);
+}
+
+static int
+mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ /* open completion queue */
+ err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
+ &mlx5e_tx_cq_comp, c->ix);
+ if (err)
+ goto err_close_tx_cqs;
+ }
+ return (0);
+
+err_close_tx_cqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_cq(&c->sq[tc].cq);
+
+ return (err);
+}
+
+static void
+mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int
+mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ if (err)
+ goto err_close_sqs;
+ }
+
+ return (0);
+
+err_close_sqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_sq_wait(&c->sq[tc]);
+
+ return (err);
+}
+
+static void
+mlx5e_close_sqs_wait(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_sq_wait(&c->sq[tc]);
+}
+
+static void
+mlx5e_chan_mtx_init(struct mlx5e_channel *c)
+{
+ int tc;
+
+ mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
+
+ callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ struct mlx5e_sq *sq = c->sq + tc;
+
+ mtx_init(&sq->lock, "mlx5tx",
+ MTX_NETWORK_LOCK " TX", MTX_DEF);
+ mtx_init(&sq->comp_lock, "mlx5comp",
+ MTX_NETWORK_LOCK " TX", MTX_DEF);
+
+ callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
+
+ sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
+
+ /* ensure the TX completion event factor is not zero */
+ if (sq->cev_factor == 0)
+ sq->cev_factor = 1;
+ }
+}
+
+static void
+mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
+{
+ int tc;
+
+ mtx_destroy(&c->rq.mtx);
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ mtx_destroy(&c->sq[tc].lock);
+ mtx_destroy(&c->sq[tc].comp_lock);
+ }
+}
+
+static int
+mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_channel *volatile *cp)
+{
+ struct mlx5e_channel *c;
+ int err;
+
+ c = malloc(sizeof(*c), M_MLX5EN, M_WAITOK | M_ZERO);
+ c->priv = priv;
+ c->ix = ix;
+ c->cpu = 0;
+ c->ifp = priv->ifp;
+ c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->num_tc = priv->num_tc;
+
+ /* init mutexes */
+ mlx5e_chan_mtx_init(c);
+
+ /* open transmit completion queue */
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_free;
+
+ /* open receive completion queue */
+ err = mlx5e_open_cq(c->priv, &cparam->rx_cq, &c->rq.cq,
+ &mlx5e_rx_cq_comp, c->ix);
+ if (err)
+ goto err_close_tx_cqs;
+
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_close_rx_cq;
+
+ err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ if (err)
+ goto err_close_sqs;
+
+ /* store channel pointer */
+ *cp = c;
+
+ /* poll receive queue initially */
+ c->rq.cq.mcq.comp(&c->rq.cq.mcq);
+
+ return (0);
+
+err_close_sqs:
+ mlx5e_close_sqs_wait(c);
+
+err_close_rx_cq:
+ mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+ mlx5e_close_tx_cqs(c);
+
+err_free:
+ /* destroy mutexes */
+ mlx5e_chan_mtx_destroy(c);
+ free(c, M_MLX5EN);
+ return (err);
+}
+
+static void
+mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
+{
+ struct mlx5e_channel *c = *pp;
+
+ /* check if channel is already closed */
+ if (c == NULL)
+ return;
+ mlx5e_close_rq(&c->rq);
+}
+
+static void
+mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
+{
+ struct mlx5e_channel *c = *pp;
+
+ /* check if channel is already closed */
+ if (c == NULL)
+ return;
+ /* ensure channel pointer is no longer used */
+ *pp = NULL;
+
+ mlx5e_close_rq_wait(&c->rq);
+ mlx5e_close_sqs_wait(c);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_tx_cqs(c);
+ /* destroy mutexes */
+ mlx5e_chan_mtx_destroy(c);
+ free(c, M_MLX5EN);
+}
+
+static void
+mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.buf_numa_node = 0;
+ param->wq.db_numa_node = 0;
+ param->wq.linear = 1;
+}
+
+static void
+mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.buf_numa_node = 0;
+ param->wq.db_numa_node = 0;
+ param->wq.linear = 1;
+}
+
+static void
+mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void
+mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+
+ /*
+ * TODO: The sysctl controlling this is a boolean for now, which means
+ * we only support CSUM; once HASH is implemented we'll need to address that.
+ */
+ if (priv->params.cqe_zipping_en) {
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_compression_en, 1);
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+ MLX5_SET(cqc, cqc, cq_period, priv->params.rx_cq_moderation_usec);
+ MLX5_SET(cqc, cqc, cq_max_count, priv->params.rx_cq_moderation_pkts);
+
+ switch (priv->params.rx_cq_moderation_mode) {
+ case 0:
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ break;
+ default:
+ if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ else
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ break;
+ }
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void
+mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+ MLX5_SET(cqc, cqc, cq_period, priv->params.tx_cq_moderation_usec);
+ MLX5_SET(cqc, cqc, cq_max_count, priv->params.tx_cq_moderation_pkts);
+
+ switch (priv->params.tx_cq_moderation_mode) {
+ case 0:
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ break;
+ default:
+ if (MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ else
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ break;
+ }
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void
+mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ memset(cparam, 0, sizeof(*cparam));
+
+ mlx5e_build_rq_param(priv, &cparam->rq);
+ mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int
+mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel_param cparam;
+ void *ptr;
+ int err;
+ int i;
+ int j;
+
+ priv->channel = malloc(priv->params.num_channels *
+ sizeof(struct mlx5e_channel *), M_MLX5EN, M_WAITOK | M_ZERO);
+
+ mlx5e_build_channel_param(priv, &cparam);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ if (err)
+ goto err_close_channels;
+ }
+
+ for (j = 0; j < priv->params.num_channels; j++) {
+ err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ if (err)
+ goto err_close_channels;
+ }
+
+ return (0);
+
+err_close_channels:
+ for (i--; i >= 0; i--) {
+ mlx5e_close_channel(&priv->channel[i]);
+ mlx5e_close_channel_wait(&priv->channel[i]);
+ }
+
+ /* remove "volatile" attribute from "channel" pointer */
+ ptr = __DECONST(void *, priv->channel);
+ priv->channel = NULL;
+
+ free(ptr, M_MLX5EN);
+
+ return (err);
+}
+
+static void
+mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+ void *ptr;
+ int i;
+
+ if (priv->channel == NULL)
+ return;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel(&priv->channel[i]);
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel_wait(&priv->channel[i]);
+
+ /* remove "volatile" attribute from "channel" pointer */
+ ptr = __DECONST(void *, priv->channel);
+ priv->channel = NULL;
+
+ free(ptr, M_MLX5EN);
+}
+
+static int
+mlx5e_refresh_sq_params(struct mlx5e_priv *priv, struct mlx5e_sq *sq)
+{
+
+ if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
+ uint8_t cq_mode;
+
+ switch (priv->params.tx_cq_moderation_mode) {
+ case 0:
+ cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ break;
+ default:
+ cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ }
+
+ return (mlx5_core_modify_cq_moderation_mode(priv->mdev, &sq->cq.mcq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts,
+ cq_mode));
+ }
+
+ return (mlx5_core_modify_cq_moderation(priv->mdev, &sq->cq.mcq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts));
+}
+
+static int
+mlx5e_refresh_rq_params(struct mlx5e_priv *priv, struct mlx5e_rq *rq)
+{
+
+ if (MLX5_CAP_GEN(priv->mdev, cq_period_mode_modify)) {
+ uint8_t cq_mode;
+ int retval;
+
+ switch (priv->params.rx_cq_moderation_mode) {
+ case 0:
+ cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ break;
+ default:
+ cq_mode = MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
+ break;
+ }
+
+ retval = mlx5_core_modify_cq_moderation_mode(priv->mdev, &rq->cq.mcq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts,
+ cq_mode);
+
+ return (retval);
+ }
+
+ return (mlx5_core_modify_cq_moderation(priv->mdev, &rq->cq.mcq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts));
+}
+
+static int
+mlx5e_refresh_channel_params_sub(struct mlx5e_priv *priv, struct mlx5e_channel *c)
+{
+ int err;
+ int i;
+
+ if (c == NULL)
+ return (EINVAL);
+
+ err = mlx5e_refresh_rq_params(priv, &c->rq);
+ if (err)
+ goto done;
+
+ for (i = 0; i != c->num_tc; i++) {
+ err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
+ if (err)
+ goto done;
+ }
+done:
+ return (err);
+}
+
+int
+mlx5e_refresh_channel_params(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (priv->channel == NULL)
+ return (EINVAL);
+
+ for (i = 0; i < priv->params.num_channels; i++) {
+ int err;
+
+ err = mlx5e_refresh_channel_params_sub(priv, priv->channel[i]);
+ if (err)
+ return (err);
+ }
+ return (0);
+}
+
+static int
+mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+ MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+
+ return (mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]));
+}
+
+static void
+mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int
+mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ err = mlx5e_open_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return (0);
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_tis(priv, tc);
+
+ return (err);
+}
+
+static void
+mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++)
+ mlx5e_close_tis(priv, tc);
+}
+
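+/*
+ * Create the RQ table, i.e. the RSS indirection table which maps hash
+ * buckets to receive queue numbers.
+ */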
+static int
+mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ void *rqtc;
+ int inlen;
+ int err;
+ int sz;
+ int i;
+
+ sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ for (i = 0; i < sz; i++) {
+ int ix;
+#ifdef RSS
+ ix = rss_get_indirection_to_bucket(i);
+#else
+ ix = i;
+#endif
+ /* ensure we don't overflow */
+ ix %= priv->params.num_channels;
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+ }
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ kvfree(in);
+
+ return (err);
+}
+
+static void
+mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+ mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
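+/*
+ * Fill out a TIR context: LRO parameters, direct or indirect dispatch
+ * and the RSS hash fields for the given traffic type.
+ */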
+static void
+mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+ __be32 *hkey;
+
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+ if (priv->params.hw_lro_en) {
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_msg_sz,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ /* TODO: add the option to choose timer value dynamically */
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[2]));
+ }
+
+ /* setup parameters for hashing TIR type, if any */
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn,
+ priv->channel[0]->rq.rqn);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ hkey = (__be32 *) MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+#ifdef RSS
+ /*
+ * The FreeBSD RSS implementation does not currently
+ * support symmetric Toeplitz hashes:
+ */
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 0);
+ rss_getkey((uint8_t *)hkey);
+#else
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
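+ /* Static Toeplitz hash key used when the kernel is built without RSS support. */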
+ hkey[0] = cpu_to_be32(0xD181C62C);
+ hkey[1] = cpu_to_be32(0xF7F4DB5B);
+ hkey[2] = cpu_to_be32(0x1983A2FC);
+ hkey[3] = cpu_to_be32(0x943E1ADB);
+ hkey[4] = cpu_to_be32(0xD9389E6B);
+ hkey[5] = cpu_to_be32(0xD1039C2C);
+ hkey[6] = cpu_to_be32(0xA74499AD);
+ hkey[7] = cpu_to_be32(0x593D56D9);
+ hkey[8] = cpu_to_be32(0xF3253C06);
+ hkey[9] = cpu_to_be32(0x2ADC1FFC);
+#endif
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV4)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_TCP_IPV6)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV4)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+#ifdef RSS
+ if (!(rss_gethashconfig() & RSS_HASHTYPE_RSS_UDP_IPV6)) {
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ } else
+#endif
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int
+mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, tir_context);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return (err);
+}
+
+static void
+mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int
+mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_open_tir(priv, i);
+ if (err)
+ goto err_close_tirs;
+ }
+
+ return (0);
+
+err_close_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_close_tir(priv, i);
+
+ return (err);
+}
+
+static void
+mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_close_tir(priv, i);
+}
+
+/*
+ * SW MTU does not include headers,
+ * HW MTU includes all headers and checksums.
+ */
+static int
+mlx5e_set_dev_port_mtu(struct ifnet *ifp, int sw_mtu)
+{
+ struct mlx5e_priv *priv = ifp->if_softc;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int hw_mtu;
+ int err;
+
+ err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(sw_mtu));
+ if (err) {
+ if_printf(ifp, "%s: mlx5_set_port_mtu failed setting %d, err=%d\n",
+ __func__, sw_mtu, err);
+ return (err);
+ }
+ err = mlx5_query_port_oper_mtu(mdev, &hw_mtu);
+ if (err) {
+ if_printf(ifp, "Query port MTU, after setting new "
+ "MTU value, failed\n");
+ } else if (MLX5E_HW2SW_MTU(hw_mtu) < sw_mtu) {
+ err = -E2BIG;
+ if_printf(ifp, "Port MTU %d is smaller than "
+ "ifp mtu %d\n", hw_mtu, sw_mtu);
+ } else if (MLX5E_HW2SW_MTU(hw_mtu) > sw_mtu) {
+ err = -EINVAL;
+ if_printf(ifp, "Port MTU %d is bigger than "
+ "ifp mtu %d\n", hw_mtu, sw_mtu);
+ }
+ ifp->if_mtu = sw_mtu;
+ return (err);
+}
+
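+/*
+ * Bring the interface up: create the TISes, allocate the vport queue
+ * counter, open the channels, RQ table, TIRs and flow table, and
+ * install the VLAN rules.
+ */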
+int
+mlx5e_open_locked(struct ifnet *ifp)
+{
+ struct mlx5e_priv *priv = ifp->if_softc;
+ int err;
+ u16 set_id;
+
+ /* check if already opened */
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
+ return (0);
+
+#ifdef RSS
+ if (rss_getnumbuckets() > priv->params.num_channels) {
+ if_printf(ifp, "NOTE: There are more RSS buckets(%u) than "
+ "channels(%u) available\n", rss_getnumbuckets(),
+ priv->params.num_channels);
+ }
+#endif
+ err = mlx5e_open_tises(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_open_tises failed, %d\n",
+ __func__, err);
+ return (err);
+ }
+ err = mlx5_vport_alloc_q_counter(priv->mdev,
+ MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
+ if (err) {
+ if_printf(priv->ifp,
+ "%s: mlx5_vport_alloc_q_counter failed: %d\n",
+ __func__, err);
+ goto err_close_tises;
+ }
+ /* store counter set ID */
+ priv->counter_set_id = set_id;
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+ goto err_dalloc_q_counter;
+ }
+ err = mlx5e_open_rqt(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_open_rqt failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+ err = mlx5e_open_tirs(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_open_tir failed, %d\n",
+ __func__, err);
+ goto err_close_rqls;
+ }
+ err = mlx5e_open_flow_table(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_open_flow_table failed, %d\n",
+ __func__, err);
+ goto err_close_tirs;
+ }
+ err = mlx5e_add_all_vlan_rules(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+ __func__, err);
+ goto err_close_flow_table;
+ }
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_update_carrier(priv);
+ mlx5e_set_rx_mode_core(priv);
+
+ return (0);
+
+err_close_flow_table:
+ mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+ mlx5e_close_tirs(priv);
+
+err_close_rqls:
+ mlx5e_close_rqt(priv);
+
+err_close_channels:
+ mlx5e_close_channels(priv);
+
+err_dalloc_q_counter:
+ mlx5_vport_dealloc_q_counter(priv->mdev,
+ MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
+
+err_close_tises:
+ mlx5e_close_tises(priv);
+
+ return (err);
+}
+
+static void
+mlx5e_open(void *arg)
+{
+ struct mlx5e_priv *priv = arg;
+
+ PRIV_LOCK(priv);
+ if (mlx5_set_port_status(priv->mdev, MLX5_PORT_UP))
+ if_printf(priv->ifp,
+ "%s: Setting port status to up failed\n",
+ __func__);
+
+ mlx5e_open_locked(priv->ifp);
+ priv->ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ PRIV_UNLOCK(priv);
+}
+
+int
+mlx5e_close_locked(struct ifnet *ifp)
+{
+ struct mlx5e_priv *priv = ifp->if_softc;
+
+ /* check if already closed */
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
+ return (0);
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_set_rx_mode_core(priv);
+ mlx5e_del_all_vlan_rules(priv);
+ if_link_state_change(priv->ifp, LINK_STATE_DOWN);
+ mlx5e_close_flow_table(priv);
+ mlx5e_close_tirs(priv);
+ mlx5e_close_rqt(priv);
+ mlx5e_close_channels(priv);
+ mlx5_vport_dealloc_q_counter(priv->mdev,
+ MLX5_INTERFACE_PROTOCOL_ETH, priv->counter_set_id);
+ mlx5e_close_tises(priv);
+
+ return (0);
+}
+
+#if (__FreeBSD_version >= 1100000)
+static uint64_t
+mlx5e_get_counter(struct ifnet *ifp, ift_counter cnt)
+{
+ struct mlx5e_priv *priv = ifp->if_softc;
+ u64 retval;
+
+ /* PRIV_LOCK(priv); XXX not allowed */
+ switch (cnt) {
+ case IFCOUNTER_IPACKETS:
+ retval = priv->stats.vport.rx_packets;
+ break;
+ case IFCOUNTER_IERRORS:
+ retval = priv->stats.vport.rx_error_packets +
+ priv->stats.pport.alignment_err +
+ priv->stats.pport.check_seq_err +
+ priv->stats.pport.crc_align_errors +
+ priv->stats.pport.in_range_len_errors +
+ priv->stats.pport.jabbers +
+ priv->stats.pport.out_of_range_len +
+ priv->stats.pport.oversize_pkts +
+ priv->stats.pport.symbol_err +
+ priv->stats.pport.too_long_errors +
+ priv->stats.pport.undersize_pkts +
+ priv->stats.pport.unsupported_op_rx;
+ break;
+ case IFCOUNTER_IQDROPS:
+ retval = priv->stats.vport.rx_out_of_buffer +
+ priv->stats.pport.drop_events;
+ break;
+ case IFCOUNTER_OPACKETS:
+ retval = priv->stats.vport.tx_packets;
+ break;
+ case IFCOUNTER_OERRORS:
+ retval = priv->stats.vport.tx_error_packets;
+ break;
+ case IFCOUNTER_IBYTES:
+ retval = priv->stats.vport.rx_bytes;
+ break;
+ case IFCOUNTER_OBYTES:
+ retval = priv->stats.vport.tx_bytes;
+ break;
+ case IFCOUNTER_IMCASTS:
+ retval = priv->stats.vport.rx_multicast_packets;
+ break;
+ case IFCOUNTER_OMCASTS:
+ retval = priv->stats.vport.tx_multicast_packets;
+ break;
+ case IFCOUNTER_OQDROPS:
+ retval = priv->stats.vport.tx_queue_dropped;
+ break;
+ case IFCOUNTER_COLLISIONS:
+ retval = priv->stats.pport.collisions;
+ break;
+ default:
+ retval = if_get_counter_default(ifp, cnt);
+ break;
+ }
+ /* PRIV_UNLOCK(priv); XXX not allowed */
+ return (retval);
+}
+#endif
+
+static void
+mlx5e_set_rx_mode(struct ifnet *ifp)
+{
+ struct mlx5e_priv *priv = ifp->if_softc;
+
+ schedule_work(&priv->set_rx_mode_work);
+}
+
+static int
+mlx5e_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct mlx5e_priv *priv;
+ struct ifreq *ifr;
+ struct ifi2creq i2c;
+ int error = 0;
+ int mask = 0;
+ int size_read = 0;
+ int module_num;
+ int max_mtu;
+ uint8_t read_addr;
+
+ priv = ifp->if_softc;
+
+ /* check if detaching */
+ if (priv == NULL || priv->gone != 0)
+ return (ENXIO);
+
+ switch (command) {
+ case SIOCSIFMTU:
+ ifr = (struct ifreq *)data;
+
+ PRIV_LOCK(priv);
+ mlx5_query_port_max_mtu(priv->mdev, &max_mtu);
+
+ if (ifr->ifr_mtu >= MLX5E_MTU_MIN &&
+ ifr->ifr_mtu <= MIN(MLX5E_MTU_MAX, max_mtu)) {
+ int was_opened;
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(ifp);
+
+ /* set new MTU */
+ mlx5e_set_dev_port_mtu(ifp, ifr->ifr_mtu);
+
+ if (was_opened)
+ mlx5e_open_locked(ifp);
+ } else {
+ error = EINVAL;
+ if_printf(ifp, "Invalid MTU value. Min val: %d, Max val: %d\n",
+ MLX5E_MTU_MIN, MIN(MLX5E_MTU_MAX, max_mtu));
+ }
+ PRIV_UNLOCK(priv);
+ break;
+ case SIOCSIFFLAGS:
+ if ((ifp->if_flags & IFF_UP) &&
+ (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ mlx5e_set_rx_mode(ifp);
+ break;
+ }
+ PRIV_LOCK(priv);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state) == 0)
+ mlx5e_open_locked(ifp);
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+ mlx5_set_port_status(priv->mdev, MLX5_PORT_UP);
+ }
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ mlx5_set_port_status(priv->mdev,
+ MLX5_PORT_DOWN);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state) != 0)
+ mlx5e_close_locked(ifp);
+ mlx5e_update_carrier(priv);
+ ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+ }
+ }
+ PRIV_UNLOCK(priv);
+ break;
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ mlx5e_set_rx_mode(ifp);
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ case SIOCGIFXMEDIA:
+ ifr = (struct ifreq *)data;
+ error = ifmedia_ioctl(ifp, ifr, &priv->media, command);
+ break;
+ case SIOCSIFCAP:
+ ifr = (struct ifreq *)data;
+ PRIV_LOCK(priv);
+ mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+
+ if (IFCAP_TSO4 & ifp->if_capenable &&
+ !(IFCAP_TXCSUM & ifp->if_capenable)) {
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ ifp->if_hwassist &= ~CSUM_IP_TSO;
+ if_printf(ifp,
+ "tso4 disabled due to -txcsum.\n");
+ }
+ }
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ ifp->if_capenable ^= IFCAP_TXCSUM_IPV6;
+ ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
+
+ if (IFCAP_TSO6 & ifp->if_capenable &&
+ !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
+ ifp->if_capenable &= ~IFCAP_TSO6;
+ ifp->if_hwassist &= ~CSUM_IP6_TSO;
+ if_printf(ifp,
+ "tso6 disabled due to -txcsum6.\n");
+ }
+ }
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_RXCSUM_IPV6)
+ ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+ if (mask & IFCAP_TSO4) {
+ if (!(IFCAP_TSO4 & ifp->if_capenable) &&
+ !(IFCAP_TXCSUM & ifp->if_capenable)) {
+ if_printf(ifp, "enable txcsum first.\n");
+ error = EAGAIN;
+ goto out;
+ }
+ ifp->if_capenable ^= IFCAP_TSO4;
+ ifp->if_hwassist ^= CSUM_IP_TSO;
+ }
+ if (mask & IFCAP_TSO6) {
+ if (!(IFCAP_TSO6 & ifp->if_capenable) &&
+ !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) {
+ if_printf(ifp, "enable txcsum6 first.\n");
+ error = EAGAIN;
+ goto out;
+ }
+ ifp->if_capenable ^= IFCAP_TSO6;
+ ifp->if_hwassist ^= CSUM_IP6_TSO;
+ }
+ if (mask & IFCAP_VLAN_HWFILTER) {
+ if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+ mlx5e_disable_vlan_filter(priv);
+ else
+ mlx5e_enable_vlan_filter(priv);
+
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ }
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_WOL_MAGIC)
+ ifp->if_capenable ^= IFCAP_WOL_MAGIC;
+
+ VLAN_CAPABILITIES(ifp);
+ /* turning off LRO also turns off HW LRO - if it is enabled */
+ if (mask & IFCAP_LRO) {
+ int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool need_restart = false;
+
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (!(ifp->if_capenable & IFCAP_LRO)) {
+ if (priv->params.hw_lro_en) {
+ priv->params.hw_lro_en = false;
+ need_restart = true;
+ /* Not sure this is the correct way */
+ priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
+ }
+ }
+ if (was_opened && need_restart) {
+ mlx5e_close_locked(ifp);
+ mlx5e_open_locked(ifp);
+ }
+ }
+out:
+ PRIV_UNLOCK(priv);
+ break;
+
+ case SIOCGI2C:
+ ifr = (struct ifreq *)data;
+
+ /*
+ * Copy from the user-space address ifr_data to the
+ * kernel-space address i2c
+ */
+ error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
+ if (error)
+ break;
+
+ if (i2c.len > sizeof(i2c.data)) {
+ error = EINVAL;
+ break;
+ }
+
+ PRIV_LOCK(priv);
+ /* Get module_num which is required for the query_eeprom */
+ error = mlx5_query_module_num(priv->mdev, &module_num);
+ if (error) {
+ if_printf(ifp, "Query module num failed, eeprom "
+ "reading is not supported\n");
+ error = EINVAL;
+ goto err_i2c;
+ }
+ /* Check if module is present before doing an access */
+ if (mlx5_query_module_status(priv->mdev, module_num) !=
+ MLX5_MODULE_STATUS_PLUGGED) {
+ error = EINVAL;
+ goto err_i2c;
+ }
+ /*
+ * Currently 0xA0 and 0xA2 are the only addresses permitted.
+ * The internal conversion is as follows:
+ */
+ if (i2c.dev_addr == 0xA0)
+ read_addr = MLX5E_I2C_ADDR_LOW;
+ else if (i2c.dev_addr == 0xA2)
+ read_addr = MLX5E_I2C_ADDR_HIGH;
+ else {
+ if_printf(ifp, "Query eeprom failed, "
+ "Invalid Address: %X\n", i2c.dev_addr);
+ error = EINVAL;
+ goto err_i2c;
+ }
+ error = mlx5_query_eeprom(priv->mdev,
+ read_addr, MLX5E_EEPROM_LOW_PAGE,
+ (uint32_t)i2c.offset, (uint32_t)i2c.len, module_num,
+ (uint32_t *)i2c.data, &size_read);
+ if (error) {
+ if_printf(ifp, "Query eeprom failed, eeprom "
+ "reading is not supported\n");
+ error = EINVAL;
+ goto err_i2c;
+ }
+
+ if (i2c.len > MLX5_EEPROM_MAX_BYTES) {
+ error = mlx5_query_eeprom(priv->mdev,
+ read_addr, MLX5E_EEPROM_LOW_PAGE,
+ (uint32_t)(i2c.offset + size_read),
+ (uint32_t)(i2c.len - size_read), module_num,
+ (uint32_t *)(i2c.data + size_read), &size_read);
+ }
+ if (error) {
+ if_printf(ifp, "Query eeprom failed, eeprom "
+ "reading is not supported\n");
+ error = EINVAL;
+ goto err_i2c;
+ }
+
+ error = copyout(&i2c, ifr->ifr_data, sizeof(i2c));
+err_i2c:
+ PRIV_UNLOCK(priv);
+ break;
+
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+ return (error);
+}
+
+static int
+mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ /*
+ * TODO: uncomment once FW really sets all these bits:
+ *
+ * if (!mdev->caps.eth.rss_ind_tbl_cap || !mdev->caps.eth.csum_cap ||
+ *     !mdev->caps.eth.max_lso_cap || !mdev->caps.eth.vlan_cap ||
+ *     !(mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD))
+ *         return (-ENOTSUPP);
+ */
+
+ /* TODO: add more must-have features */
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return (-ENODEV);
+
+ return (0);
+}
+
+static void
+mlx5e_build_ifp_priv(struct mlx5_core_dev *mdev,
+ struct mlx5e_priv *priv,
+ int num_comp_vectors)
+{
+ /*
+ * TODO: Consider link speed for setting "log_sq_size",
+ * "log_rq_size" and "cq_moderation_xxx":
+ */
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_rq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rx_cq_moderation_usec =
+ MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ priv->params.rx_cq_moderation_mode =
+ MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 1 : 0;
+ priv->params.rx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ priv->params.tx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ priv->params.tx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.min_rx_wqes =
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ priv->params.rx_hash_log_tbl_sz =
+ (order_base_2(num_comp_vectors) >
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+ order_base_2(num_comp_vectors) :
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+ priv->params.num_tc = 1;
+ priv->params.default_vlan_prio = 0;
+ priv->counter_set_id = -1;
+
+ /*
+ * HW LRO is currently turned off by default. Once that changes,
+ * the HW capability "!!MLX5_CAP_ETH(mdev, lro_cap)" should be
+ * taken into account here.
+ */
+ priv->params.hw_lro_en = false;
+ priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->params.cqe_zipping_en = !!MLX5_CAP_GEN(mdev, cqe_compression);
+
+ priv->mdev = mdev;
+ priv->params.num_channels = num_comp_vectors;
+ priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
+ priv->queue_mapping_channel_mask =
+ roundup_pow_of_two(num_comp_vectors) - 1;
+ priv->num_tc = priv->params.num_tc;
+ priv->default_vlan_prio = priv->params.default_vlan_prio;
+
+ INIT_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+}
+
+static int
+mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+ struct mlx5_core_mr *mr)
+{
+ struct ifnet *ifp = priv->ifp;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (in == NULL) {
+ if_printf(ifp, "%s: failed to allocate inbox\n", __func__);
+ return (-ENOMEM);
+ }
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ NULL);
+ if (err)
+ if_printf(ifp, "%s: mlx5_core_create_mkey failed, %d\n",
+ __func__, err);
+
+ kvfree(in);
+
+ return (err);
+}
+
+static const char *mlx5e_vport_stats_desc[] = {
+ MLX5E_VPORT_STATS(MLX5E_STATS_DESC)
+};
+
+static const char *mlx5e_pport_stats_desc[] = {
+ MLX5E_PPORT_STATS(MLX5E_STATS_DESC)
+};
+
+static void
+mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
+{
+ mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
+ sx_init(&priv->state_lock, "mlx5state");
+ callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
+ MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
+}
+
+static void
+mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
+{
+ mtx_destroy(&priv->async_events_mtx);
+ sx_destroy(&priv->state_lock);
+}
+
+static int
+sysctl_firmware(SYSCTL_HANDLER_ARGS)
+{
+ /*
+ * The string format is "%d.%d.%d".
+ * fw_rev_{maj,min,sub} each return a u16, and 2^16 = 65536
+ * needs at most 5 chars to store. Adding the two "." separators
+ * and the terminating NUL means we need at most 18 (5*3 + 3) chars.
+ */
+ char fw[18];
+ struct mlx5e_priv *priv = arg1;
+ int error;
+
+ snprintf(fw, sizeof(fw), "%d.%d.%d", fw_rev_maj(priv->mdev), fw_rev_min(priv->mdev),
+ fw_rev_sub(priv->mdev));
+ error = sysctl_handle_string(oidp, fw, sizeof(fw), req);
+ return (error);
+}
+
+static void
+mlx5e_add_hw_stats(struct mlx5e_priv *priv)
+{
+ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
+ OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD, priv, 0,
+ sysctl_firmware, "A", "HCA firmware version");
+
+ SYSCTL_ADD_STRING(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_hw),
+ OID_AUTO, "board_id", CTLFLAG_RD, priv->mdev->board_id, 0,
+ "Board ID");
+}
+
+static void
+mlx5e_setup_pauseframes(struct mlx5e_priv *priv)
+{
+#if (__FreeBSD_version < 1100000)
+ char path[64];
+
+#endif
+ /* Only receiving pauseframes is enabled by default */
+ priv->params.tx_pauseframe_control = 0;
+ priv->params.rx_pauseframe_control = 1;
+
+#if (__FreeBSD_version < 1100000)
+ /* compute path for sysctl */
+ snprintf(path, sizeof(path), "dev.mce.%d.tx_pauseframe_control",
+ device_get_unit(priv->mdev->pdev->dev.bsddev));
+
+ /* try to fetch tunable, if any */
+ TUNABLE_INT_FETCH(path, &priv->params.tx_pauseframe_control);
+
+ /* compute path for sysctl */
+ snprintf(path, sizeof(path), "dev.mce.%d.rx_pauseframe_control",
+ device_get_unit(priv->mdev->pdev->dev.bsddev));
+
+ /* try to fetch tunable, if any */
+ TUNABLE_INT_FETCH(path, &priv->params.rx_pauseframe_control);
+#endif
+
+ /* register pauseframe SYSCTLs */
+ SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ OID_AUTO, "tx_pauseframe_control", CTLFLAG_RDTUN,
+ &priv->params.tx_pauseframe_control, 0,
+ "Set to enable TX pause frames. Clear to disable.");
+
+ SYSCTL_ADD_INT(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ OID_AUTO, "rx_pauseframe_control", CTLFLAG_RDTUN,
+ &priv->params.rx_pauseframe_control, 0,
+ "Set to enable RX pause frames. Clear to disable.");
+
+ /* range check */
+ priv->params.tx_pauseframe_control =
+ priv->params.tx_pauseframe_control ? 1 : 0;
+ priv->params.rx_pauseframe_control =
+ priv->params.rx_pauseframe_control ? 1 : 0;
+
+ /* update firmware */
+ mlx5_set_port_pause(priv->mdev, 1,
+ priv->params.rx_pauseframe_control,
+ priv->params.tx_pauseframe_control);
+}
+
+static void *
+mlx5e_create_ifp(struct mlx5_core_dev *mdev)
+{
+ static volatile int mlx5_en_unit;
+ struct ifnet *ifp;
+ struct mlx5e_priv *priv;
+ u8 dev_addr[ETHER_ADDR_LEN] __aligned(4);
+ struct sysctl_oid_list *child;
+ int ncv = mdev->priv.eq_table.num_comp_vectors;
+ char unit[16];
+ int err;
+ int i;
+ u32 eth_proto_cap;
+
+ if (mlx5e_check_required_hca_cap(mdev)) {
+ mlx5_core_dbg(mdev, "mlx5e_check_required_hca_cap() failed\n");
+ return (NULL);
+ }
+ priv = malloc(sizeof(*priv), M_MLX5EN, M_WAITOK | M_ZERO);
+ mlx5e_priv_mtx_init(priv);
+
+ ifp = priv->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ mlx5_core_err(mdev, "if_alloc() failed\n");
+ goto err_free_priv;
+ }
+ ifp->if_softc = priv;
+ if_initname(ifp, "mce", atomic_fetchadd_int(&mlx5_en_unit, 1));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_init = mlx5e_open;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = mlx5e_ioctl;
+ ifp->if_transmit = mlx5e_xmit;
+ ifp->if_qflush = if_qflush;
+#if (__FreeBSD_version >= 1100000)
+ ifp->if_get_counter = mlx5e_get_counter;
+#endif
+ ifp->if_snd.ifq_maxlen = ifqmaxlen;
+ /*
+ * Set driver features
+ */
+ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
+ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
+ ifp->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
+ ifp->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_LRO;
+ ifp->if_capabilities |= IFCAP_TSO | IFCAP_VLAN_HWTSO;
+ ifp->if_capabilities |= IFCAP_HWSTATS;
+
+ /* set TSO limits so that we don't have to drop TX packets */
+ ifp->if_hw_tsomax = MLX5E_MAX_TX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+ ifp->if_hw_tsomaxsegcount = MLX5E_MAX_TX_MBUF_FRAGS - 1 /* hdr */;
+ ifp->if_hw_tsomaxsegsize = MLX5E_MAX_TX_MBUF_SIZE;
+
+ ifp->if_capenable = ifp->if_capabilities;
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);
+
+ /* ifnet sysctl tree */
+ sysctl_ctx_init(&priv->sysctl_ctx);
+ priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev),
+ OID_AUTO, ifp->if_dname, CTLFLAG_RD, 0, "MLX5 ethernet - interface name");
+ if (priv->sysctl_ifnet == NULL) {
+ mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
+ goto err_free_sysctl;
+ }
+ snprintf(unit, sizeof(unit), "%d", ifp->if_dunit);
+ priv->sysctl_ifnet = SYSCTL_ADD_NODE(&priv->sysctl_ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ OID_AUTO, unit, CTLFLAG_RD, 0, "MLX5 ethernet - interface unit");
+ if (priv->sysctl_ifnet == NULL) {
+ mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
+ goto err_free_sysctl;
+ }
+
+ /* HW sysctl tree */
+ child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
+ priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
+ OID_AUTO, "hw", CTLFLAG_RD, 0, "MLX5 ethernet dev hw");
+ if (priv->sysctl_hw == NULL) {
+ mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
+ goto err_free_sysctl;
+ }
+ mlx5e_build_ifp_priv(mdev, priv, ncv);
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ if (err) {
+ if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
+ __func__, err);
+ goto err_free_sysctl;
+ }
+ err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ if (err) {
+ if_printf(ifp, "%s: mlx5_core_alloc_pd failed, %d\n",
+ __func__, err);
+ goto err_unmap_free_uar;
+ }
+ err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+ if (err) {
+ if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",
+ __func__, err);
+ goto err_dealloc_transport_domain;
+ }
+ mlx5_query_nic_vport_mac_address(priv->mdev, 0, dev_addr);
+
+ /* check if we should generate a random MAC address */
+ if (MLX5_CAP_GEN(priv->mdev, vport_group_manager) == 0 &&
+ is_zero_ether_addr(dev_addr)) {
+ random_ether_addr(dev_addr);
+ if_printf(ifp, "Assigned random MAC address\n");
+ }
+
+ /* set default MTU */
+ mlx5e_set_dev_port_mtu(ifp, ifp->if_mtu);
+
+ /* Set desc */
+ device_set_desc(mdev->pdev->dev.bsddev, mlx5e_version);
+
+ /* Set default media status */
+ priv->media_status_last = IFM_AVALID;
+ priv->media_active_last = IFM_ETHER | IFM_AUTO |
+ IFM_ETH_RXPAUSE | IFM_FDX;
+
+ /* setup default pauseframes configuration */
+ mlx5e_setup_pauseframes(priv);
+
+ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (err) {
+ eth_proto_cap = 0;
+ if_printf(ifp, "%s: Query port media capability failed, %d\n",
+ __func__, err);
+ }
+
+ /* Setup supported medias */
+ ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
+ mlx5e_media_change, mlx5e_media_status);
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (mlx5e_mode_table[i].baudrate == 0)
+ continue;
+ if (MLX5E_PROT_MASK(i) & eth_proto_cap) {
+ ifmedia_add(&priv->media,
+ mlx5e_mode_table[i].subtype |
+ IFM_ETHER, 0, NULL);
+ ifmedia_add(&priv->media,
+ mlx5e_mode_table[i].subtype |
+ IFM_ETHER | IFM_FDX |
+ IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
+ }
+ }
+
+ ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
+ IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE, 0, NULL);
+
+ /* Set autoselect by default */
+ ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO | IFM_FDX |
+ IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE);
+ ether_ifattach(ifp, dev_addr);
+
+ /* Register for VLAN events */
+ priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+ mlx5e_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
+ priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+ mlx5e_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);
+
+ /* Link is down by default */
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+
+ mlx5e_enable_async_events(priv);
+
+ mlx5e_add_hw_stats(priv);
+
+ mlx5e_create_stats(&priv->stats.vport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ "vstats", mlx5e_vport_stats_desc, MLX5E_VPORT_STATS_NUM,
+ priv->stats.vport.arg);
+
+ mlx5e_create_stats(&priv->stats.pport.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
+ "pstats", mlx5e_pport_stats_desc, MLX5E_PPORT_STATS_NUM,
+ priv->stats.pport.arg);
+
+ mlx5e_create_ethtool(priv);
+
+ mtx_lock(&priv->async_events_mtx);
+ mlx5e_update_stats(priv);
+ mtx_unlock(&priv->async_events_mtx);
+
+ return (priv);
+
+err_dealloc_transport_domain:
+ mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_sysctl:
+ sysctl_ctx_free(&priv->sysctl_ctx);
+
+ if_free(ifp);
+
+err_free_priv:
+ mlx5e_priv_mtx_destroy(priv);
+ free(priv, M_MLX5EN);
+ return (NULL);
+}
+
+static void
+mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct ifnet *ifp = priv->ifp;
+
+ /* don't allow more IOCTLs */
+ priv->gone = 1;
+
+ /*
+ * Clear the device description to avoid use after free,
+ * because the bsddev is not destroyed when this module is
+ * unloaded:
+ */
+ device_set_desc(mdev->pdev->dev.bsddev, NULL);
+
+ /* XXX wait a bit to allow IOCTL handlers to complete */
+ pause("W", hz);
+
+ /* stop watchdog timer */
+ callout_drain(&priv->watchdog);
+
+ if (priv->vlan_attach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
+ if (priv->vlan_detach != NULL)
+ EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);
+
+ /* make sure device gets closed */
+ PRIV_LOCK(priv);
+ mlx5e_close_locked(ifp);
+ PRIV_UNLOCK(priv);
+
+ /* unregister device */
+ ifmedia_removeall(&priv->media);
+ ether_ifdetach(ifp);
+ if_free(ifp);
+
+ /* destroy all remaining sysctl nodes */
+ if (priv->sysctl_debug)
+ sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
+ sysctl_ctx_free(&priv->stats.vport.ctx);
+ sysctl_ctx_free(&priv->stats.pport.ctx);
+ sysctl_ctx_free(&priv->sysctl_ctx);
+
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
+ mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+ mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
+ mlx5e_priv_mtx_destroy(priv);
+ free(priv, M_MLX5EN);
+}
+
+static void *
+mlx5e_get_ifp(void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ return (priv->ifp);
+}
+
+static struct mlx5_interface mlx5e_interface = {
+ .add = mlx5e_create_ifp,
+ .remove = mlx5e_destroy_ifp,
+ .event = mlx5e_async_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+ .get_dev = mlx5e_get_ifp,
+};
+
+void
+mlx5e_init(void)
+{
+ mlx5_register_interface(&mlx5e_interface);
+}
+
+void
+mlx5e_cleanup(void)
+{
+ mlx5_unregister_interface(&mlx5e_interface);
+}
+
+module_init_order(mlx5e_init, SI_ORDER_THIRD);
+module_exit_order(mlx5e_cleanup, SI_ORDER_THIRD);
+
+#if (__FreeBSD_version >= 1100000)
+MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
+#endif
+MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
+MODULE_VERSION(mlx5en, 1);
Property changes on: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,454 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c 324523 2017-10-11 10:04:17Z hselasky $
+ */
+
+#include "en.h"
+#include <machine/in_cksum.h>
+
+static inline int
+mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ bus_dma_segment_t segs[1];
+ struct mbuf *mb;
+ int nsegs;
+ int err;
+
+ if (rq->mbuf[ix].mbuf != NULL)
+ return (0);
+
+ mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
+ if (unlikely(!mb))
+ return (-ENOMEM);
+
+ /* set initial mbuf length */
+ mb->m_pkthdr.len = mb->m_len = rq->wqe_sz;
+
+ /* get IP header aligned */
+ m_adj(mb, MLX5E_NET_IP_ALIGN);
+
+ err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
+ mb, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (err != 0)
+ goto err_free_mbuf;
+ if (unlikely(nsegs != 1)) {
+ bus_dmamap_unload(rq->dma_tag, rq->mbuf[ix].dma_map);
+ err = -ENOMEM;
+ goto err_free_mbuf;
+ }
+ wqe->data.addr = cpu_to_be64(segs[0].ds_addr);
+
+ rq->mbuf[ix].mbuf = mb;
+ rq->mbuf[ix].data = mb->m_data;
+
+ bus_dmamap_sync(rq->dma_tag, rq->mbuf[ix].dma_map,
+ BUS_DMASYNC_PREREAD);
+ return (0);
+
+err_free_mbuf:
+ m_freem(mb);
+ return (err);
+}
+
+static void
+mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+ if (unlikely(rq->enabled == 0))
+ return;
+
+ while (!mlx5_wq_ll_is_full(&rq->wq)) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, rq->wq.head);
+
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, rq->wq.head))) {
+ callout_reset_curcpu(&rq->watchdog, 1, (void *)&mlx5e_post_rx_wqes, rq);
+ break;
+ }
+ mlx5_wq_ll_push(&rq->wq, be16_to_cpu(wqe->next.next_wqe_index));
+ }
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ wmb();
+
+ mlx5_wq_ll_update_db_record(&rq->wq);
+}
+
+static void
+mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
+{
+ /* TODO: consider vlans, ip options, ... */
+ struct ether_header *eh;
+ uint16_t eh_type;
+ uint16_t tot_len;
+ struct ip6_hdr *ip6 = NULL;
+ struct ip *ip4 = NULL;
+ struct tcphdr *th;
+ uint32_t *ts_ptr;
+ uint8_t l4_hdr_type;
+ int tcp_ack;
+
+ eh = mtod(mb, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+
+ l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+ /* TODO: consider vlan */
+ tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;
+
+ switch (eh_type) {
+ case ETHERTYPE_IP:
+ ip4 = (struct ip *)(eh + 1);
+ th = (struct tcphdr *)(ip4 + 1);
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(eh + 1);
+ th = (struct tcphdr *)(ip6 + 1);
+ break;
+ default:
+ return;
+ }
+
+ ts_ptr = (uint32_t *)(th + 1);
+
+ if (get_cqe_lro_tcppsh(cqe))
+ th->th_flags |= TH_PUSH;
+
+ if (tcp_ack) {
+ th->th_flags |= TH_ACK;
+ th->th_ack = cqe->lro_ack_seq_num;
+ th->th_win = cqe->lro_tcp_win;
+
+ /*
+ * FreeBSD only handles a 32-bit aligned timestamp option placed
+ * right after the TCP header:
+ * +--------+--------+--------+--------+
+ * | NOP | NOP | TSopt | 10 |
+ * +--------+--------+--------+--------+
+ * | TSval timestamp |
+ * +--------+--------+--------+--------+
+ * | TSecr timestamp |
+ * +--------+--------+--------+--------+
+ */
+ if (get_cqe_lro_timestamp_valid(cqe) &&
+ (__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 |
+ TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
+ TCPOLEN_TIMESTAMP))) {
+ /*
+ * cqe->timestamp is 64 bits long.
+ * [0-31] - timestamp.
+ * [32-63] - timestamp echo reply.
+ */
+ ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
+ ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
+ }
+ }
+ if (ip4) {
+ ip4->ip_ttl = cqe->lro_min_ttl;
+ ip4->ip_len = cpu_to_be16(tot_len);
+ ip4->ip_sum = 0;
+ ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
+ } else {
+ ip6->ip6_hlim = cqe->lro_min_ttl;
+ ip6->ip6_plen = cpu_to_be16(tot_len -
+ sizeof(struct ip6_hdr));
+ }
+ /* TODO: handle tcp checksum */
+}
+
+static inline void
+mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq, struct mbuf *mb,
+ u32 cqe_bcnt)
+{
+ struct ifnet *ifp = rq->ifp;
+ int lro_num_seg; /* HW LRO session aggregated packets counter */
+
+ lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ if (lro_num_seg > 1) {
+ mlx5e_lro_update_hdr(mb, cqe);
+ rq->stats.lro_packets++;
+ rq->stats.lro_bytes += cqe_bcnt;
+ }
+
+ mb->m_pkthdr.len = mb->m_len = cqe_bcnt;
+ /* check if a Toeplitz hash was computed */
+ if (cqe->rss_hash_type != 0) {
+ mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
+#ifdef RSS
+ /* decode the RSS hash type */
+ switch (cqe->rss_hash_type &
+ (CQE_RSS_DST_HTYPE_L4 | CQE_RSS_DST_HTYPE_IP)) {
+ /* IPv4 */
+ case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV4):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV4);
+ break;
+ case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV4):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV4);
+ break;
+ case CQE_RSS_DST_HTYPE_IPV4:
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV4);
+ break;
+ /* IPv6 */
+ case (CQE_RSS_DST_HTYPE_TCP | CQE_RSS_DST_HTYPE_IPV6):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_TCP_IPV6);
+ break;
+ case (CQE_RSS_DST_HTYPE_UDP | CQE_RSS_DST_HTYPE_IPV6):
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_UDP_IPV6);
+ break;
+ case CQE_RSS_DST_HTYPE_IPV6:
+ M_HASHTYPE_SET(mb, M_HASHTYPE_RSS_IPV6);
+ break;
+ default: /* Other */
+ M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+ break;
+ }
+#else
+ M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+#endif
+ } else {
+ mb->m_pkthdr.flowid = rq->ix;
+ M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
+ }
+ mb->m_pkthdr.rcvif = ifp;
+
+ if (likely(ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) &&
+ ((cqe->hds_ip_ext & (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK)) ==
+ (CQE_L2_OK | CQE_L3_OK | CQE_L4_OK))) {
+ mb->m_pkthdr.csum_flags =
+ CSUM_IP_CHECKED | CSUM_IP_VALID |
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ mb->m_pkthdr.csum_data = htons(0xffff);
+ } else {
+ rq->stats.csum_none++;
+ }
+
+ if (cqe_has_vlan(cqe)) {
+ mb->m_pkthdr.ether_vtag = be16_to_cpu(cqe->vlan_info);
+ mb->m_flags |= M_VLANTAG;
+ }
+}
+
+static inline void
+mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
+{
+ memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, (cc & cq->wq.sz_m1)),
+ sizeof(struct mlx5_cqe64));
+}
+
+static inline void
+mlx5e_write_cqe_slot(struct mlx5e_cq *cq, u32 cc, void *data)
+{
+ memcpy(mlx5_cqwq_get_wqe(&cq->wq, cc & cq->wq.sz_m1),
+ data, sizeof(struct mlx5_cqe64));
+}
+
+static inline void
+mlx5e_decompress_cqe(struct mlx5e_cq *cq, struct mlx5_cqe64 *title,
+ struct mlx5_mini_cqe8 *mini,
+ u16 wqe_counter, int i)
+{
+ /*
+ * NOTE: The fields which are not set here are copied from the
+ * initial and common title. See memcpy() in
+ * mlx5e_write_cqe_slot().
+ */
+ title->byte_cnt = mini->byte_cnt;
+ title->wqe_counter = cpu_to_be16((wqe_counter + i) & cq->wq.sz_m1);
+ title->check_sum = mini->checksum;
+ title->op_own = (title->op_own & 0xf0) |
+ (((cq->wq.cc + i) >> cq->wq.log_sz) & 1);
+}
+
+#define MLX5E_MINI_ARRAY_SZ 8
+/* Make sure the structs are not packed differently */
+CTASSERT(sizeof(struct mlx5_cqe64) ==
+ sizeof(struct mlx5_mini_cqe8) * MLX5E_MINI_ARRAY_SZ);
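+/*
+ * Sketch of the compressed CQE layout as assumed by the code below
+ * (derived from how the fields are used here, not from any spec): the
+ * "title" CQE carries the fields common to the whole session and
+ * reuses byte_cnt as the number of compressed CQEs, while each
+ * following 64-byte slot holds MLX5E_MINI_ARRAY_SZ (8) mini CQEs of
+ * 8 bytes, each contributing only byte_cnt, checksum and an implicit
+ * wqe_counter offset. mlx5e_decompress_cqes() expands these back into
+ * full CQEs in place so the rest of the RX path can stay unchanged.
+ */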
+static void
+mlx5e_decompress_cqes(struct mlx5e_cq *cq)
+{
+ struct mlx5_mini_cqe8 mini_array[MLX5E_MINI_ARRAY_SZ];
+ struct mlx5_cqe64 title;
+ u32 cqe_count;
+ u32 i = 0;
+ u16 title_wqe_counter;
+
+ mlx5e_read_cqe_slot(cq, cq->wq.cc, &title);
+ title_wqe_counter = be16_to_cpu(title.wqe_counter);
+ cqe_count = be32_to_cpu(title.byte_cnt);
+
+ /* Make sure we won't overflow */
+ KASSERT(cqe_count <= cq->wq.sz_m1,
+ ("%s: cqe_count %u > cq->wq.sz_m1 %u", __func__,
+ cqe_count, cq->wq.sz_m1));
+
+ mlx5e_read_cqe_slot(cq, cq->wq.cc + 1, mini_array);
+ while (true) {
+ mlx5e_decompress_cqe(cq, &title,
+ &mini_array[i % MLX5E_MINI_ARRAY_SZ],
+ title_wqe_counter, i);
+ mlx5e_write_cqe_slot(cq, cq->wq.cc + i, &title);
+ i++;
+
+ if (i == cqe_count)
+ break;
+ if (i % MLX5E_MINI_ARRAY_SZ == 0)
+ mlx5e_read_cqe_slot(cq, cq->wq.cc + i, mini_array);
+ }
+}
+
+static int
+mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
+{
+#ifndef HAVE_TURBO_LRO
+ struct lro_entry *queued;
+#endif
+ int i;
+
+ for (i = 0; i < budget; i++) {
+ struct mlx5e_rx_wqe *wqe;
+ struct mlx5_cqe64 *cqe;
+ struct mbuf *mb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 byte_cnt;
+
+ cqe = mlx5e_get_cqe(&rq->cq);
+ if (!cqe)
+ break;
+
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)
+ mlx5e_decompress_cqes(&rq->cq);
+
+ mlx5_cqwq_pop(&rq->cq.wq);
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ byte_cnt = be32_to_cpu(cqe->byte_cnt);
+
+ bus_dmamap_sync(rq->dma_tag,
+ rq->mbuf[wqe_counter].dma_map,
+ BUS_DMASYNC_POSTREAD);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ goto wq_ll_pop;
+ }
+ if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
+ (mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
+ /* get IP header aligned */
+ mb->m_data += MLX5E_NET_IP_ALIGN;
+
+ bcopy(rq->mbuf[wqe_counter].data, mtod(mb, caddr_t),
+ byte_cnt);
+ } else {
+ mb = rq->mbuf[wqe_counter].mbuf;
+ rq->mbuf[wqe_counter].mbuf = NULL; /* safety clear */
+
+ bus_dmamap_unload(rq->dma_tag,
+ rq->mbuf[wqe_counter].dma_map);
+ }
+
+ mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
+ rq->stats.packets++;
+#ifdef HAVE_TURBO_LRO
+ if (mb->m_pkthdr.csum_flags == 0 ||
+ (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
+ rq->lro.mbuf == NULL) {
+ /* normal input */
+ rq->ifp->if_input(rq->ifp, mb);
+ } else {
+ tcp_tlro_rx(&rq->lro, mb);
+ }
+#else
+ if (mb->m_pkthdr.csum_flags == 0 ||
+ (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
+ rq->lro.lro_cnt == 0 ||
+ tcp_lro_rx(&rq->lro, mb, 0) != 0) {
+ rq->ifp->if_input(rq->ifp, mb);
+ }
+#endif
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+ }
+
+ mlx5_cqwq_update_db_record(&rq->cq.wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+#ifndef HAVE_TURBO_LRO
+ while ((queued = SLIST_FIRST(&rq->lro.lro_active)) != NULL) {
+ SLIST_REMOVE_HEAD(&rq->lro.lro_active, next);
+ tcp_lro_flush(&rq->lro, queued);
+ }
+#endif
+ return (i);
+}
+
+void
+mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_rq *rq = container_of(mcq, struct mlx5e_rq, cq.mcq);
+ int i = 0;
+
+#ifdef HAVE_PER_CQ_EVENT_PACKET
+ struct mbuf *mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
+
+ if (mb != NULL) {
+ /* this code is used for debugging purposes only */
+ mb->m_pkthdr.len = mb->m_len = 15;
+ memset(mb->m_data, 255, 14);
+ mb->m_data[14] = rq->ix;
+ mb->m_pkthdr.rcvif = rq->ifp;
+ rq->ifp->if_input(rq->ifp, mb);
+ }
+#endif
+
+ mtx_lock(&rq->mtx);
+
+ /*
+ * Polling the entire CQ without posting new WQEs can result in
+ * a lack of receive WQEs during heavy traffic scenarios.
+ */
+ while (1) {
+ if (mlx5e_poll_rx_cq(rq, MLX5E_RX_BUDGET_MAX) !=
+ MLX5E_RX_BUDGET_MAX)
+ break;
+ i += MLX5E_RX_BUDGET_MAX;
+ if (i >= MLX5E_BUDGET_MAX)
+ break;
+ mlx5e_post_rx_wqes(rq);
+ }
+ mlx5e_post_rx_wqes(rq);
+ mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
+#ifdef HAVE_TURBO_LRO
+ tcp_tlro_flush(&rq->lro, 1);
+#endif
+ mtx_unlock(&rq->mtx);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,594 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c 324523 2017-10-11 10:04:17Z hselasky $
+ */
+
+#include "en.h"
+#include <machine/atomic.h>
+
+static inline bool
+mlx5e_do_send_cqe(struct mlx5e_sq *sq)
+{
+ sq->cev_counter++;
+ /* interleave the CQEs */
+ if (sq->cev_counter >= sq->cev_factor) {
+ sq->cev_counter = 0;
+ return (1);
+ }
+ return (0);
+}
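+/*
+ * Example of the interleaving above (sketch, values are illustrative):
+ * with sq->cev_factor == 4 only every 4th send WQE sets
+ * MLX5_WQE_CTRL_CQ_UPDATE, so the hardware generates one completion
+ * per 4 WQEs and mlx5e_poll_tx_cq() frees cev_factor entries per CQE.
+ * With cev_factor == 1 every WQE requests its own completion.
+ */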
+
+void
+mlx5e_send_nop(struct mlx5e_sq *sq, u32 ds_cnt)
+{
+ u16 pi = sq->pc & sq->wq.sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+
+ memset(&wqe->ctrl, 0, sizeof(wqe->ctrl));
+
+ wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ if (mlx5e_do_send_cqe(sq))
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ else
+ wqe->ctrl.fm_ce_se = 0;
+
+ /* Copy data for doorbell */
+ memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
+
+ sq->mbuf[pi].mbuf = NULL;
+ sq->mbuf[pi].num_bytes = 0;
+ sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += sq->mbuf[pi].num_wqebbs;
+}
+
+#if (__FreeBSD_version >= 1100000)
+static uint32_t mlx5e_hash_value;
+
+static void
+mlx5e_hash_init(void *arg)
+{
+ mlx5e_hash_value = m_ether_tcpip_hash_init();
+}
+
+/* Make the kernel call mlx5e_hash_init() after the random stack has finished initializing */
+SYSINIT(mlx5e_hash_init, SI_SUB_RANDOM, SI_ORDER_ANY, &mlx5e_hash_init, NULL);
+#endif
+
+static struct mlx5e_sq *
+mlx5e_select_queue(struct ifnet *ifp, struct mbuf *mb)
+{
+ struct mlx5e_priv *priv = ifp->if_softc;
+ struct mlx5e_channel * volatile *ppch;
+ struct mlx5e_channel *pch;
+ u32 ch;
+ u32 tc;
+
+ ppch = priv->channel;
+
+ /* check if channels are successfully opened */
+ if (unlikely(ppch == NULL))
+ return (NULL);
+
+ /* obtain VLAN information if present */
+ if (mb->m_flags & M_VLANTAG) {
+ tc = (mb->m_pkthdr.ether_vtag >> 13);
+ if (tc >= priv->num_tc)
+ tc = priv->default_vlan_prio;
+ } else {
+ tc = priv->default_vlan_prio;
+ }
+
+ ch = priv->params.num_channels;
+
+ /* check if flowid is set */
+ if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE) {
+#ifdef RSS
+ u32 temp;
+
+ if (rss_hash2bucket(mb->m_pkthdr.flowid,
+ M_HASHTYPE_GET(mb), &temp) == 0)
+ ch = temp % ch;
+ else
+#endif
+ ch = (mb->m_pkthdr.flowid % 128) % ch;
+ } else {
+#if (__FreeBSD_version >= 1100000)
+ ch = m_ether_tcpip_hash(MBUF_HASHFLAG_L3 |
+ MBUF_HASHFLAG_L4, mb, mlx5e_hash_value) % ch;
+#else
+ /*
+ * m_ether_tcpip_hash not present in stable, so just
+ * throw unhashed mbufs on queue 0
+ */
+ ch = 0;
+#endif
+ }
+
+ /* check if channel is allocated and not stopped */
+ pch = ppch[ch];
+ if (likely(pch != NULL && pch->sq[tc].stopped == 0))
+ return (&pch->sq[tc]);
+ return (NULL);
+}
+
+static inline u16
+mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct mbuf *mb)
+{
+ return (MIN(MLX5E_MAX_TX_INLINE, mb->m_len));
+}
+
+static int
+mlx5e_get_header_size(struct mbuf *mb)
+{
+ struct ether_vlan_header *eh;
+ struct tcphdr *th;
+ struct ip *ip;
+ int ip_hlen, tcp_hlen;
+ struct ip6_hdr *ip6;
+ uint16_t eth_type;
+ int eth_hdr_len;
+
+ eh = mtod(mb, struct ether_vlan_header *);
+ if (mb->m_len < ETHER_HDR_LEN)
+ return (0);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ eth_type = ntohs(eh->evl_proto);
+ eth_hdr_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+ } else {
+ eth_type = ntohs(eh->evl_encap_proto);
+ eth_hdr_len = ETHER_HDR_LEN;
+ }
+ if (mb->m_len < eth_hdr_len)
+ return (0);
+ switch (eth_type) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(mb->m_data + eth_hdr_len);
+ if (mb->m_len < eth_hdr_len + sizeof(*ip))
+ return (0);
+ if (ip->ip_p != IPPROTO_TCP)
+ return (0);
+ ip_hlen = ip->ip_hl << 2;
+ eth_hdr_len += ip_hlen;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(mb->m_data + eth_hdr_len);
+ if (mb->m_len < eth_hdr_len + sizeof(*ip6))
+ return (0);
+ if (ip6->ip6_nxt != IPPROTO_TCP)
+ return (0);
+ eth_hdr_len += sizeof(*ip6);
+ break;
+ default:
+ return (0);
+ }
+ if (mb->m_len < eth_hdr_len + sizeof(*th))
+ return (0);
+ th = (struct tcphdr *)(mb->m_data + eth_hdr_len);
+ tcp_hlen = th->th_off << 2;
+ eth_hdr_len += tcp_hlen;
+ if (mb->m_len < eth_hdr_len)
+ return (0);
+ return (eth_hdr_len);
+}
+
+/*
+ * The return value is not passed back up to the network stack
+ * directly, because of the drbr.
+ */
+static int
+mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
+{
+ bus_dma_segment_t segs[MLX5E_MAX_TX_MBUF_FRAGS];
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5e_tx_wqe *wqe;
+ struct ifnet *ifp;
+ int nsegs;
+ int err;
+ int x;
+ struct mbuf *mb = *mbp;
+ u16 ds_cnt;
+ u16 ihs;
+ u16 pi;
+ u8 opcode;
+
+ /*
+ * Return ENOBUFS if the queue is full, this may trigger reinsertion
+ * of the mbuf into the drbr (see mlx5e_xmit_locked)
+ */
+ if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
+ return (ENOBUFS);
+ }
+
+ /* Align SQ edge with NOPs to avoid WQE wrap around */
+ pi = ((~sq->pc) & sq->wq.sz_m1);
+ if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
+ /* Send one multi NOP message instead of many */
+ mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS);
+ pi = ((~sq->pc) & sq->wq.sz_m1);
+ if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1))
+ return (ENOMEM);
+ }
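+ /*
+ * Worked example of the alignment above (illustrative values): with
+ * a ring of 64 WQEBBs (sq->wq.sz_m1 == 63) and sq->pc == 62, pi
+ * becomes (~62 & 63) == 1, i.e. only two WQEBBs remain before the
+ * wrap. Since that is less than MLX5_SEND_WQE_MAX_WQEBBS - 1, a
+ * single NOP spanning (pi + 1) WQEBBs is posted so the next real
+ * WQE starts at the ring origin and can never wrap in the middle.
+ */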
+
+ /* Setup local variables */
+ pi = sq->pc & sq->wq.sz_m1;
+ wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ ifp = sq->ifp;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ /* Send a copy of the frame to the BPF listener, if any */
+ if (ifp != NULL && ifp->if_bpf != NULL)
+ ETHER_BPF_MTAP(ifp, mb);
+
+ if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
+ wqe->eth.cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ }
+ if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
+ wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ if (wqe->eth.cs_flags == 0) {
+ sq->stats.csum_offload_none++;
+ }
+ if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
+ u32 payload_len;
+ u32 mss = mb->m_pkthdr.tso_segsz;
+ u32 num_pkts;
+
+ wqe->eth.mss = cpu_to_be16(mss);
+ opcode = MLX5_OPCODE_LSO;
+ ihs = mlx5e_get_header_size(mb);
+ payload_len = mb->m_pkthdr.len - ihs;
+ if (payload_len == 0)
+ num_pkts = 1;
+ else
+ num_pkts = DIV_ROUND_UP(payload_len, mss);
+ sq->mbuf[pi].num_bytes = payload_len + (num_pkts * ihs);
+
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += payload_len;
+ } else {
+ opcode = MLX5_OPCODE_SEND;
+ ihs = mlx5e_get_inline_hdr_size(sq, mb);
+ sq->mbuf[pi].num_bytes = max_t (unsigned int,
+ mb->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
+ }
+ if (mb->m_flags & M_VLANTAG) {
+ struct ether_vlan_header *eh =
+ (struct ether_vlan_header *)wqe->eth.inline_hdr_start;
+
+ /* Range checks */
+ if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
+ ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
+ else if (ihs < ETHER_HDR_LEN) {
+ err = EINVAL;
+ goto tx_drop;
+ }
+ m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
+ m_adj(mb, ETHER_HDR_LEN);
+ /* Insert 4 bytes VLAN tag into data stream */
+ eh->evl_proto = eh->evl_encap_proto;
+ eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
+ eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
+ /* Copy rest of header data, if any */
+ m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
+ m_adj(mb, ihs - ETHER_HDR_LEN);
+ /* Extend header by 4 bytes */
+ ihs += ETHER_VLAN_ENCAP_LEN;
+ } else {
+ m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
+ m_adj(mb, ihs);
+ }
+
+ wqe->eth.inline_hdr_sz = cpu_to_be16(ihs);
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ if (likely(ihs > sizeof(wqe->eth.inline_hdr_start))) {
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(wqe->eth.inline_hdr_start),
+ MLX5_SEND_WQE_DS);
+ }
+ dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
+
+ /* Trim off empty mbufs */
+ while (mb->m_len == 0) {
+ mb = m_free(mb);
+ /* Check if all data has been inlined */
+ if (mb == NULL)
+ goto skip_dma;
+ }
+
+ err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
+ mb, segs, &nsegs, BUS_DMA_NOWAIT);
+ if (err == EFBIG) {
+ /*
+ * Update *mbp before defrag in case it was trimmed in the
+ * loop above
+ */
+ *mbp = mb;
+ /* Update statistics */
+ sq->stats.defragged++;
+ /* Too many mbuf fragments */
+ mb = m_defrag(*mbp, M_NOWAIT);
+ if (mb == NULL) {
+ mb = *mbp;
+ goto tx_drop;
+ }
+ /* Try again */
+ err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
+ mb, segs, &nsegs, BUS_DMA_NOWAIT);
+ }
+ /* Catch errors */
+ if (err != 0)
+ goto tx_drop;
+
+ for (x = 0; x != nsegs; x++) {
+ if (segs[x].ds_len == 0)
+ continue;
+ dseg->addr = cpu_to_be64((uint64_t)segs[x].ds_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32((uint32_t)segs[x].ds_len);
+ dseg++;
+ }
+skip_dma:
+ ds_cnt = (dseg - ((struct mlx5_wqe_data_seg *)&wqe->ctrl));
+
+ wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ if (mlx5e_do_send_cqe(sq))
+ wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ else
+ wqe->ctrl.fm_ce_se = 0;
+
+ /* Copy data for doorbell */
+ memcpy(sq->doorbell.d32, &wqe->ctrl, sizeof(sq->doorbell.d32));
+
+ /* Store pointer to mbuf */
+ sq->mbuf[pi].mbuf = mb;
+ sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += sq->mbuf[pi].num_wqebbs;
+
+ /* Make sure all mbuf data is written to RAM */
+ if (mb != NULL)
+ bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);
+
+ sq->stats.packets++;
+ *mbp = NULL; /* safety clear */
+ return (0);
+
+tx_drop:
+ sq->stats.dropped++;
+ *mbp = NULL;
+ m_freem(mb);
+ return err;
+}
+
+static void
+mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
+{
+ u16 sqcc;
+
+ /*
+ * sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ while (budget > 0) {
+ struct mlx5_cqe64 *cqe;
+ struct mbuf *mb;
+ u16 x;
+ u16 ci;
+
+ cqe = mlx5e_get_cqe(&sq->cq);
+ if (!cqe)
+ break;
+
+ mlx5_cqwq_pop(&sq->cq.wq);
+
+ /* update budget according to the event factor */
+ budget -= sq->cev_factor;
+
+ for (x = 0; x != sq->cev_factor; x++) {
+ ci = sqcc & sq->wq.sz_m1;
+ mb = sq->mbuf[ci].mbuf;
+ sq->mbuf[ci].mbuf = NULL; /* Safety clear */
+
+ if (mb == NULL) {
+ if (sq->mbuf[ci].num_bytes == 0) {
+ /* NOP */
+ sq->stats.nop++;
+ }
+ } else {
+ bus_dmamap_sync(sq->dma_tag, sq->mbuf[ci].dma_map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sq->dma_tag, sq->mbuf[ci].dma_map);
+
+ /* Free transmitted mbuf */
+ m_freem(mb);
+ }
+ sqcc += sq->mbuf[ci].num_wqebbs;
+ }
+ }
+
+ mlx5_cqwq_update_db_record(&sq->cq.wq);
+
+ /* Ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+
+ if (sq->sq_tq != NULL &&
+ atomic_cmpset_int(&sq->queue_state, MLX5E_SQ_FULL, MLX5E_SQ_READY))
+ taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
+}
+
+static int
+mlx5e_xmit_locked(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
+{
+ struct mbuf *next;
+ int err = 0;
+
+ if (likely(mb != NULL)) {
+ /*
+ * If we can't insert the mbuf into the drbr, try to xmit anyway.
+ * We keep the error so we can return it after the xmit attempt.
+ */
+ err = drbr_enqueue(ifp, sq->br, mb);
+ }
+
+ /*
+ * Check if the network interface is closed or if the SQ is
+ * being stopped:
+ */
+ if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ sq->stopped != 0))
+ return (err);
+
+ /* Process the queue */
+ while ((next = drbr_peek(ifp, sq->br)) != NULL) {
+ if (mlx5e_sq_xmit(sq, &next) != 0) {
+ if (next == NULL) {
+ drbr_advance(ifp, sq->br);
+ } else {
+ drbr_putback(ifp, sq->br, next);
+ atomic_store_rel_int(&sq->queue_state, MLX5E_SQ_FULL);
+ }
+ break;
+ }
+ drbr_advance(ifp, sq->br);
+ }
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
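+ /*
+ * Note (sketch of the rationale, not stated in this commit): the
+ * doorbell data captured from the last posted WQE is written to the
+ * hardware only once per burst here, instead of once per packet in
+ * mlx5e_sq_xmit(), which keeps the number of MMIO doorbell writes
+ * down to one per batch of transmitted mbufs.
+ */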
+ /*
+ * Check if we need to start the event timer which flushes the
+ * transmit ring on timeout:
+ */
+ if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
+ sq->cev_factor != 1)) {
+ /* start the timer */
+ mlx5e_sq_cev_timeout(sq);
+ } else {
+ /* don't send NOPs yet */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ }
+ return (err);
+}
+
+static int
+mlx5e_xmit_locked_no_br(struct ifnet *ifp, struct mlx5e_sq *sq, struct mbuf *mb)
+{
+ int err = 0;
+
+ if (unlikely((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
+ sq->stopped != 0)) {
+ m_freem(mb);
+ return (ENETDOWN);
+ }
+
+ /* Do transmit */
+ if (mlx5e_sq_xmit(sq, &mb) != 0) {
+ /* NOTE: m_freem() is NULL safe */
+ m_freem(mb);
+ err = ENOBUFS;
+ }
+
+ /* Check if we need to write the doorbell */
+ if (likely(sq->doorbell.d64 != 0)) {
+ mlx5e_tx_notify_hw(sq, sq->doorbell.d32, 0);
+ sq->doorbell.d64 = 0;
+ }
+
+ /*
+ * Check if we need to start the event timer which flushes the
+ * transmit ring on timeout:
+ */
+ if (unlikely(sq->cev_next_state == MLX5E_CEV_STATE_INITIAL &&
+ sq->cev_factor != 1)) {
+ /* start the timer */
+ mlx5e_sq_cev_timeout(sq);
+ } else {
+ /* don't send NOPs yet */
+ sq->cev_next_state = MLX5E_CEV_STATE_HOLD_NOPS;
+ }
+ return (err);
+}
+
+int
+mlx5e_xmit(struct ifnet *ifp, struct mbuf *mb)
+{
+ struct mlx5e_sq *sq;
+ int ret;
+
+ sq = mlx5e_select_queue(ifp, mb);
+ if (unlikely(sq == NULL)) {
+ /* Invalid send queue */
+ m_freem(mb);
+ return (ENXIO);
+ }
+
+ if (unlikely(sq->br == NULL)) {
+ /* rate limited traffic */
+ mtx_lock(&sq->lock);
+ ret = mlx5e_xmit_locked_no_br(ifp, sq, mb);
+ mtx_unlock(&sq->lock);
+ } else if (mtx_trylock(&sq->lock)) {
+ ret = mlx5e_xmit_locked(ifp, sq, mb);
+ mtx_unlock(&sq->lock);
+ } else {
+ ret = drbr_enqueue(ifp, sq->br, mb);
+ taskqueue_enqueue(sq->sq_tq, &sq->sq_task);
+ }
+
+ return (ret);
+}
+
+void
+mlx5e_tx_cq_comp(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_sq *sq = container_of(mcq, struct mlx5e_sq, cq.mcq);
+
+ mtx_lock(&sq->comp_lock);
+ mlx5e_poll_tx_cq(sq, MLX5E_BUDGET_MAX);
+ mlx5e_cq_arm(&sq->cq, MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
+ mtx_unlock(&sq->comp_lock);
+}
+
+void
+mlx5e_tx_que(void *context, int pending)
+{
+ struct mlx5e_sq *sq = context;
+ struct ifnet *ifp = sq->ifp;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ mtx_lock(&sq->lock);
+ if (!drbr_empty(ifp, sq->br))
+ mlx5e_xmit_locked(ifp, sq, NULL);
+ mtx_unlock(&sq->lock);
+ }
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,54 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c 306245 2016-09-23 08:29:27Z hselasky $
+ */
+
+#include "en.h"
+
+struct mlx5_cqe64 *
+mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));
+
+ if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
+ return (NULL);
+
+ /* ensure cqe content is read after cqe ownership bit */
+ rmb();
+
+ return (cqe);
+}
+
+void
+mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+ if_printf(cq->priv->ifp, "%s: cqn=0x%.6x event=0x%.2x\n",
+ __func__, mcq->cqn, event);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_en/mlx5_en_txrx.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,698 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/tcp_tlro.c 291184 2015-11-23 09:32:32Z hselasky $");
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
+#include <sys/param.h>
+#include <sys/libkern.h>
+#include <sys/mbuf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/endian.h>
+#include <sys/socket.h>
+#include <sys/sockopt.h>
+#include <sys/smp.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/ethernet.h>
+
+#if defined(INET) || defined(INET6)
+#include <netinet/in.h>
+#endif
+
+#ifdef INET
+#include <netinet/ip.h>
+#endif
+
+#ifdef INET6
+#include <netinet/ip6.h>
+#endif
+
+#include <netinet/tcp_var.h>
+
+#include "tcp_tlro.h"
+
+#ifndef M_HASHTYPE_LRO_TCP
+#ifndef KLD_MODULE
+#warning "M_HASHTYPE_LRO_TCP is not defined"
+#endif
+#define M_HASHTYPE_LRO_TCP 254
+#endif
+
+static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, tlro,
+ CTLFLAG_RW, 0, "TCP turbo LRO parameters");
+
+static MALLOC_DEFINE(M_TLRO, "TLRO", "Turbo LRO");
+
+static int tlro_min_rate = 20; /* Hz */
+
+SYSCTL_INT(_net_inet_tcp_tlro, OID_AUTO, min_rate, CTLFLAG_RWTUN,
+ &tlro_min_rate, 0, "Minimum serving rate in Hz");
+
+static int tlro_max_packet = IP_MAXPACKET;
+
+SYSCTL_INT(_net_inet_tcp_tlro, OID_AUTO, max_packet, CTLFLAG_RWTUN,
+ &tlro_max_packet, 0, "Maximum packet size in bytes");
+
+typedef struct {
+ uint32_t value;
+} __packed uint32_p_t;
+
+static uint16_t
+tcp_tlro_csum(const uint32_p_t *p, size_t l)
+{
+ const uint32_p_t *pend = p + (l / 4);
+ uint64_t cs;
+
+ for (cs = 0; p != pend; p++)
+ cs += le32toh(p->value);
+ while (cs > 0xffff)
+ cs = (cs >> 16) + (cs & 0xffff);
+ return (cs);
+}
+
+static void *
+tcp_tlro_get_header(const struct mbuf *m, const u_int off,
+ const u_int len)
+{
+ if (m->m_len < (off + len))
+ return (NULL);
+ return (mtod(m, char *) + off);
+}
+
+static uint8_t
+tcp_tlro_info_save_timestamp(struct tlro_mbuf_data *pinfo)
+{
+ struct tcphdr *tcp = pinfo->tcp;
+ uint32_t *ts_ptr;
+
+ if (tcp->th_off < ((TCPOLEN_TSTAMP_APPA + sizeof(*tcp)) >> 2))
+ return (0);
+
+ ts_ptr = (uint32_t *)(tcp + 1);
+ if (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
+ return (0);
+
+ /* Save timestamps */
+ pinfo->tcp_ts = ts_ptr[1];
+ pinfo->tcp_ts_reply = ts_ptr[2];
+ return (1);
+}
+
+static void
+tcp_tlro_info_restore_timestamp(struct tlro_mbuf_data *pinfoa,
+ struct tlro_mbuf_data *pinfob)
+{
+ struct tcphdr *tcp = pinfoa->tcp;
+ uint32_t *ts_ptr;
+
+ if (tcp->th_off < ((TCPOLEN_TSTAMP_APPA + sizeof(*tcp)) >> 2))
+ return;
+
+ ts_ptr = (uint32_t *)(tcp + 1);
+ if (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
+ return;
+
+ /* Restore timestamps */
+ ts_ptr[1] = pinfob->tcp_ts;
+ ts_ptr[2] = pinfob->tcp_ts_reply;
+}
+
+static void
+tcp_tlro_extract_header(struct tlro_mbuf_data *pinfo, struct mbuf *m, int seq)
+{
+ uint8_t *phdr = (uint8_t *)pinfo->buf;
+ struct ether_header *eh;
+ struct ether_vlan_header *vlan;
+#ifdef INET
+ struct ip *ip;
+#endif
+#ifdef INET6
+ struct ip6_hdr *ip6;
+#endif
+ struct tcphdr *tcp;
+ uint16_t etype;
+ int diff;
+ int off;
+
+ /* Fill in information */
+ pinfo->head = m;
+ pinfo->last_tick = ticks;
+ pinfo->sequence = seq;
+ pinfo->pprev = &m_last(m)->m_next;
+
+ off = sizeof(*eh);
+ if (m->m_len < off)
+ goto error;
+ eh = tcp_tlro_get_header(m, 0, sizeof(*eh));
+ if (eh == NULL)
+ goto error;
+ memcpy(phdr, &eh->ether_dhost, ETHER_ADDR_LEN);
+ phdr += ETHER_ADDR_LEN;
+ memcpy(phdr, &eh->ether_type, sizeof(eh->ether_type));
+ phdr += sizeof(eh->ether_type);
+ etype = ntohs(eh->ether_type);
+
+ if (etype == ETHERTYPE_VLAN) {
+ vlan = tcp_tlro_get_header(m, off, sizeof(*vlan));
+ if (vlan == NULL)
+ goto error;
+ memcpy(phdr, &vlan->evl_tag, sizeof(vlan->evl_tag) +
+ sizeof(vlan->evl_proto));
+ phdr += sizeof(vlan->evl_tag) + sizeof(vlan->evl_proto);
+ etype = ntohs(vlan->evl_proto);
+ off += sizeof(*vlan) - sizeof(*eh);
+ }
+ switch (etype) {
+#ifdef INET
+ case ETHERTYPE_IP:
+ /*
+ * Cannot LRO:
+ * - Non-IP packets
+ * - Fragmented packets
+ * - Packets with IPv4 options
+ * - Non-TCP packets
+ */
+ ip = tcp_tlro_get_header(m, off, sizeof(*ip));
+ if (ip == NULL ||
+ (ip->ip_off & htons(IP_MF | IP_OFFMASK)) != 0 ||
+ (ip->ip_p != IPPROTO_TCP) ||
+ (ip->ip_hl << 2) != sizeof(*ip))
+ goto error;
+
+ /* Legacy IP has a header checksum that needs to be correct */
+ if (!(m->m_pkthdr.csum_flags & CSUM_IP_CHECKED)) {
+ /* Verify IP header */
+ if (tcp_tlro_csum((uint32_p_t *)ip, sizeof(*ip)) != 0xFFFF)
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
+ else
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
+ CSUM_IP_VALID;
+ }
+ /* Only accept valid checksums */
+ if (!(m->m_pkthdr.csum_flags & CSUM_IP_VALID) ||
+ !(m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
+ goto error;
+ memcpy(phdr, &ip->ip_src, sizeof(ip->ip_src) +
+ sizeof(ip->ip_dst));
+ phdr += sizeof(ip->ip_src) + sizeof(ip->ip_dst);
+ if (M_HASHTYPE_GET(m) == M_HASHTYPE_LRO_TCP)
+ pinfo->ip_len = m->m_pkthdr.len - off;
+ else
+ pinfo->ip_len = ntohs(ip->ip_len);
+ pinfo->ip_hdrlen = sizeof(*ip);
+ pinfo->ip.v4 = ip;
+ pinfo->ip_version = 4;
+ off += sizeof(*ip);
+ break;
+#endif
+#ifdef INET6
+ case ETHERTYPE_IPV6:
+ /*
+ * Cannot LRO:
+ * - Non-IP packets
+ * - Packets with IPv6 options
+ * - Non-TCP packets
+ */
+ ip6 = tcp_tlro_get_header(m, off, sizeof(*ip6));
+ if (ip6 == NULL || ip6->ip6_nxt != IPPROTO_TCP)
+ goto error;
+ if (!(m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
+ goto error;
+ memcpy(phdr, &ip6->ip6_src, sizeof(struct in6_addr) +
+ sizeof(struct in6_addr));
+ phdr += sizeof(struct in6_addr) + sizeof(struct in6_addr);
+ if (M_HASHTYPE_GET(m) == M_HASHTYPE_LRO_TCP)
+ pinfo->ip_len = m->m_pkthdr.len - off;
+ else
+ pinfo->ip_len = ntohs(ip6->ip6_plen) + sizeof(*ip6);
+ pinfo->ip_hdrlen = sizeof(*ip6);
+ pinfo->ip.v6 = ip6;
+ pinfo->ip_version = 6;
+ off += sizeof(*ip6);
+ break;
+#endif
+ default:
+ goto error;
+ }
+ tcp = tcp_tlro_get_header(m, off, sizeof(*tcp));
+ if (tcp == NULL)
+ goto error;
+ memcpy(phdr, &tcp->th_sport, sizeof(tcp->th_sport) +
+ sizeof(tcp->th_dport));
+ phdr += sizeof(tcp->th_sport) +
+ sizeof(tcp->th_dport);
+ /* Store TCP header length */
+ *phdr++ = tcp->th_off;
+ if (tcp->th_off < (sizeof(*tcp) >> 2))
+ goto error;
+
+ /* Compute offset to data payload */
+ pinfo->tcp_len = (tcp->th_off << 2);
+ off += pinfo->tcp_len;
+
+ /* Store more info */
+ pinfo->data_off = off;
+ pinfo->tcp = tcp;
+
+ /* Try to save timestamp, if any */
+ *phdr++ = tcp_tlro_info_save_timestamp(pinfo);
+
+ /* Verify offset and IP/TCP length */
+ if (off > m->m_pkthdr.len ||
+ pinfo->ip_len < pinfo->tcp_len)
+ goto error;
+
+ /* Compute data payload length */
+ pinfo->data_len = (pinfo->ip_len - pinfo->tcp_len - pinfo->ip_hdrlen);
+
+ /* Trim any padded data */
+ diff = (m->m_pkthdr.len - off) - pinfo->data_len;
+ if (diff != 0) {
+ if (diff < 0)
+ goto error;
+ else
+ m_adj(m, -diff);
+ }
+ /* Compute header length */
+ pinfo->buf_length = phdr - (uint8_t *)pinfo->buf;
+ /* Zero-pad rest of buffer */
+ memset(phdr, 0, TLRO_MAX_HEADER - pinfo->buf_length);
+ return;
+error:
+ pinfo->buf_length = 0;
+}
+
+static int
+tcp_tlro_cmp64(const uint64_t *pa, const uint64_t *pb)
+{
+ int64_t diff = 0;
+ unsigned x;
+
+ for (x = 0; x != TLRO_MAX_HEADER / 8; x++) {
+ /*
+ * NOTE: Endianness does not matter in this
+ * comparison:
+ */
+ diff = pa[x] - pb[x];
+ if (diff != 0)
+ goto done;
+ }
+done:
+ if (diff < 0)
+ return (-1);
+ else if (diff > 0)
+ return (1);
+ return (0);
+}
+
+static int
+tcp_tlro_compare_header(const void *_ppa, const void *_ppb)
+{
+ const struct tlro_mbuf_ptr *ppa = _ppa;
+ const struct tlro_mbuf_ptr *ppb = _ppb;
+ struct tlro_mbuf_data *pinfoa = ppa->data;
+ struct tlro_mbuf_data *pinfob = ppb->data;
+ int ret;
+
+ ret = (pinfoa->head == NULL) - (pinfob->head == NULL);
+ if (ret != 0)
+ goto done;
+
+ ret = pinfoa->buf_length - pinfob->buf_length;
+ if (ret != 0)
+ goto done;
+ if (pinfoa->buf_length != 0) {
+ ret = tcp_tlro_cmp64(pinfoa->buf, pinfob->buf);
+ if (ret != 0)
+ goto done;
+ ret = ntohl(pinfoa->tcp->th_seq) - ntohl(pinfob->tcp->th_seq);
+ if (ret != 0)
+ goto done;
+ ret = ntohl(pinfoa->tcp->th_ack) - ntohl(pinfob->tcp->th_ack);
+ if (ret != 0)
+ goto done;
+ ret = pinfoa->sequence - pinfob->sequence;
+ if (ret != 0)
+ goto done;
+ }
+done:
+ return (ret);
+}
+
+static void
+tcp_tlro_sort(struct tlro_ctrl *tlro)
+{
+ if (tlro->curr == 0)
+ return;
+
+ qsort(tlro->mbuf, tlro->curr, sizeof(struct tlro_mbuf_ptr),
+ &tcp_tlro_compare_header);
+}
+
+static int
+tcp_tlro_get_ticks(void)
+{
+ int to = tlro_min_rate;
+
+ if (to < 1)
+ to = 1;
+ to = hz / to;
+ if (to < 1)
+ to = 1;
+ return (to);
+}
+
+static void
+tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
+{
+ struct tlro_mbuf_data *pinfoa;
+ struct tlro_mbuf_data *pinfob;
+ uint32_t cs;
+ int curr_ticks = ticks;
+ int ticks_limit = tcp_tlro_get_ticks();
+ unsigned x;
+ unsigned y;
+ unsigned z;
+ int temp;
+
+ if (tlro->curr == 0)
+ return;
+
+ for (y = 0; y != tlro->curr;) {
+ struct mbuf *m;
+
+ pinfoa = tlro->mbuf[y].data;
+ for (x = y + 1; x != tlro->curr; x++) {
+ pinfob = tlro->mbuf[x].data;
+ if (pinfoa->buf_length != pinfob->buf_length ||
+ tcp_tlro_cmp64(pinfoa->buf, pinfob->buf) != 0)
+ break;
+ }
+ if (pinfoa->buf_length == 0) {
+ /* Forward traffic which cannot be combined */
+ for (z = y; z != x; z++) {
+ /* Just forward packets */
+ pinfob = tlro->mbuf[z].data;
+
+ m = pinfob->head;
+
+ /* Reset info structure */
+ pinfob->head = NULL;
+ pinfob->buf_length = 0;
+
+ /* Do stats */
+ tlro->lro_flushed++;
+
+ /* Input packet to network layer */
+ (*tlro->ifp->if_input) (tlro->ifp, m);
+ }
+ y = z;
+ continue;
+ }
+
+ /* Compute the current checksum with some header parts subtracted */
+ temp = (pinfoa->ip_len - pinfoa->ip_hdrlen);
+ cs = ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
+ tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len);
+
+ /* Append all fragments into one block */
+ for (z = y + 1; z != x; z++) {
+
+ pinfob = tlro->mbuf[z].data;
+
+ /* Check for command packets */
+ if ((pinfoa->tcp->th_flags & ~(TH_ACK | TH_PUSH)) ||
+ (pinfob->tcp->th_flags & ~(TH_ACK | TH_PUSH)))
+ break;
+
+ /* Check if there is enough space */
+ if ((pinfoa->ip_len + pinfob->data_len) > tlro_max_packet)
+ break;
+
+ /* Try to append the new segment */
+ temp = ntohl(pinfoa->tcp->th_seq) + pinfoa->data_len;
+ if (temp != (int)ntohl(pinfob->tcp->th_seq))
+ break;
+
+ temp = pinfob->ip_len - pinfob->ip_hdrlen;
+ cs += ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
+ tcp_tlro_csum((uint32_p_t *)pinfob->tcp, pinfob->tcp_len);
+ /* Remove fields which appear twice */
+ cs += (IPPROTO_TCP << 8);
+ if (pinfob->ip_version == 4) {
+ cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_src, 4);
+ cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_dst, 4);
+ } else {
+ cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_src, 16);
+ cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_dst, 16);
+ }
+ /* Remainder computation */
+ while (cs > 0xffff)
+ cs = (cs >> 16) + (cs & 0xffff);
+
+ /* Update window and ack sequence number */
+ pinfoa->tcp->th_ack = pinfob->tcp->th_ack;
+ pinfoa->tcp->th_win = pinfob->tcp->th_win;
+
+ /* Check if we should restore the timestamp */
+ tcp_tlro_info_restore_timestamp(pinfoa, pinfob);
+
+ /* Accumulate TCP flags */
+ pinfoa->tcp->th_flags |= pinfob->tcp->th_flags;
+
+ /* update lengths */
+ pinfoa->ip_len += pinfob->data_len;
+ pinfoa->data_len += pinfob->data_len;
+
+ /* Clear mbuf pointer - packet is accumulated */
+ m = pinfob->head;
+
+ /* Reset info structure */
+ pinfob->head = NULL;
+ pinfob->buf_length = 0;
+
+ /* Append data to mbuf [y] */
+ m_adj(m, pinfob->data_off);
+ /* Delete mbuf tags, if any */
+ m_tag_delete_chain(m, NULL);
+ /* Clear packet header flag */
+ m->m_flags &= ~M_PKTHDR;
+
+ /* Concat mbuf(s) to end of list */
+ pinfoa->pprev[0] = m;
+ m = m_last(m);
+ pinfoa->pprev = &m->m_next;
+ pinfoa->head->m_pkthdr.len += pinfob->data_len;
+ }
+ /* Compute new TCP header checksum */
+ pinfoa->tcp->th_sum = 0;
+
+ temp = pinfoa->ip_len - pinfoa->ip_hdrlen;
+ cs = (cs ^ 0xFFFF) +
+ tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len) +
+ ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8);
+
+ /* Remainder computation */
+ while (cs > 0xffff)
+ cs = (cs >> 16) + (cs & 0xffff);
+
+ /* Update new checksum */
+ pinfoa->tcp->th_sum = ~htole16(cs);
+
+ /* Update IP length, if any */
+ if (pinfoa->ip_version == 4) {
+ if (pinfoa->ip_len > IP_MAXPACKET) {
+ M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
+ pinfoa->ip.v4->ip_len = htons(IP_MAXPACKET);
+ } else {
+ pinfoa->ip.v4->ip_len = htons(pinfoa->ip_len);
+ }
+ } else {
+ if (pinfoa->ip_len > (IP_MAXPACKET + sizeof(*pinfoa->ip.v6))) {
+ M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
+ pinfoa->ip.v6->ip6_plen = htons(IP_MAXPACKET);
+ } else {
+ temp = pinfoa->ip_len - sizeof(*pinfoa->ip.v6);
+ pinfoa->ip.v6->ip6_plen = htons(temp);
+ }
+ }
+
+ temp = curr_ticks - pinfoa->last_tick;
+ /* Check if packet should be forwarded */
+ if (force != 0 || z != x || temp >= ticks_limit ||
+ pinfoa->data_len == 0) {
+
+ /* Compute new IPv4 header checksum */
+ if (pinfoa->ip_version == 4) {
+ pinfoa->ip.v4->ip_sum = 0;
+ cs = tcp_tlro_csum((uint32_p_t *)pinfoa->ip.v4,
+ sizeof(*pinfoa->ip.v4));
+ pinfoa->ip.v4->ip_sum = ~htole16(cs);
+ }
+ /* Forward packet */
+ m = pinfoa->head;
+
+ /* Reset info structure */
+ pinfoa->head = NULL;
+ pinfoa->buf_length = 0;
+
+ /* Do stats */
+ tlro->lro_flushed++;
+
+ /* Input packet to network layer */
+ (*tlro->ifp->if_input) (tlro->ifp, m);
+ }
+ y = z;
+ }
+
+ /* Cleanup all NULL heads */
+ for (y = 0; y != tlro->curr; y++) {
+ if (tlro->mbuf[y].data->head == NULL) {
+ for (z = y + 1; z != tlro->curr; z++) {
+ struct tlro_mbuf_ptr ptemp;
+ if (tlro->mbuf[z].data->head == NULL)
+ continue;
+ ptemp = tlro->mbuf[y];
+ tlro->mbuf[y] = tlro->mbuf[z];
+ tlro->mbuf[z] = ptemp;
+ y++;
+ }
+ break;
+ }
+ }
+ tlro->curr = y;
+}
+
+static void
+tcp_tlro_cleanup(struct tlro_ctrl *tlro)
+{
+ while (tlro->curr != 0 &&
+ tlro->mbuf[tlro->curr - 1].data->head == NULL)
+ tlro->curr--;
+}
+
+void
+tcp_tlro_flush(struct tlro_ctrl *tlro, int force)
+{
+ if (tlro->curr == 0)
+ return;
+
+ tcp_tlro_sort(tlro);
+ tcp_tlro_cleanup(tlro);
+ tcp_tlro_combine(tlro, force);
+}
+
+int
+tcp_tlro_init(struct tlro_ctrl *tlro, struct ifnet *ifp,
+ int max_mbufs)
+{
+ ssize_t size;
+ uint32_t x;
+
+ /* Set zero defaults */
+ memset(tlro, 0, sizeof(*tlro));
+
+ /* Compute size needed for data */
+ size = (sizeof(struct tlro_mbuf_ptr) * max_mbufs) +
+ (sizeof(struct tlro_mbuf_data) * max_mbufs);
+
+ /* Range check */
+ if (max_mbufs <= 0 || size <= 0 || ifp == NULL)
+ return (EINVAL);
+
+ /* Setup tlro control structure */
+ tlro->mbuf = malloc(size, M_TLRO, M_WAITOK | M_ZERO);
+ tlro->max = max_mbufs;
+ tlro->ifp = ifp;
+
+ /* Setup pointer array */
+ for (x = 0; x != tlro->max; x++) {
+ tlro->mbuf[x].data = ((struct tlro_mbuf_data *)
+ &tlro->mbuf[max_mbufs]) + x;
+ }
+ return (0);
+}
+
+void
+tcp_tlro_free(struct tlro_ctrl *tlro)
+{
+ struct tlro_mbuf_data *pinfo;
+ struct mbuf *m;
+ uint32_t y;
+
+ /* Check if not setup */
+ if (tlro->mbuf == NULL)
+ return;
+ /* Free MBUF array and any leftover MBUFs */
+ for (y = 0; y != tlro->max; y++) {
+
+ pinfo = tlro->mbuf[y].data;
+
+ m = pinfo->head;
+
+ /* Reset info structure */
+ pinfo->head = NULL;
+ pinfo->buf_length = 0;
+
+ m_freem(m);
+ }
+ free(tlro->mbuf, M_TLRO);
+ /* Reset buffer */
+ memset(tlro, 0, sizeof(*tlro));
+}
+
+void
+tcp_tlro_rx(struct tlro_ctrl *tlro, struct mbuf *m)
+{
+ if (m->m_len > 0 && tlro->curr < tlro->max) {
+ /* do stats */
+ tlro->lro_queued++;
+
+ /* extract header */
+ tcp_tlro_extract_header(tlro->mbuf[tlro->curr++].data,
+ m, tlro->sequence++);
+ } else if (tlro->ifp != NULL) {
+ /* do stats */
+ tlro->lro_flushed++;
+
+ /* input packet to network layer */
+ (*tlro->ifp->if_input) (tlro->ifp, m);
+ } else {
+ /* packet drop */
+ m_freem(m);
+ }
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
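
[Editor's sketch] tcp_tlro_combine() above maintains the TCP checksum incrementally in 16-bit one's-complement arithmetic: per-segment sums are added into a wide accumulator and periodically folded back into 16 bits. The standalone helper below is a sketch of that folding step only; it is not part of the driver.

#include <stdint.h>

/* Fold a wide one's-complement accumulator into 16 bits, mirroring the
 * "while (cs > 0xffff) cs = (cs >> 16) + (cs & 0xffff);" loops above. */
static uint16_t
csum_fold(uint64_t cs)
{
	while (cs > 0xffff)
		cs = (cs >> 16) + (cs & 0xffff);
	return ((uint16_t)cs);
}

Adding a new segment's contribution and folding again keeps the running sum equivalent to recomputing it from scratch, which is what lets the combine path patch up th_sum without walking the payload.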
Added: trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,84 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_en/tcp_tlro.h 291184 2015-11-23 09:32:32Z hselasky $
+ */
+
+#ifndef _TCP_TLRO_H_
+#define _TCP_TLRO_H_
+
+#define TLRO_MAX_HEADER 64 /* bytes */
+
+struct ip;
+struct ip6_hdr;
+struct tcphdr;
+
+struct tlro_mbuf_data {
+ union {
+#ifdef INET
+ struct ip *v4;
+#endif
+#ifdef INET6
+ struct ip6_hdr *v6;
+#endif
+ } ip;
+ struct tcphdr *tcp;
+ struct mbuf *head;
+ struct mbuf **pprev;
+ int last_tick;
+ int sequence;
+ int data_len;
+ int data_off;
+ int ip_hdrlen;
+ int ip_len;
+ uint32_t tcp_ts;
+ uint32_t tcp_ts_reply;
+ uint16_t tcp_len;
+ uint8_t ip_version;
+ uint8_t buf_length; /* in bytes */
+ uint64_t buf[TLRO_MAX_HEADER / 8];
+} __aligned(256);
+
+struct tlro_mbuf_ptr {
+ struct tlro_mbuf_data *data;
+};
+
+/* NB: This is part of driver structs */
+struct tlro_ctrl {
+ struct ifnet *ifp;
+ struct tlro_mbuf_ptr *mbuf;
+ uint64_t lro_queued;
+ uint64_t lro_flushed;
+ uint32_t max;
+ uint32_t curr;
+ int sequence;
+};
+
+int tcp_tlro_init(struct tlro_ctrl *, struct ifnet *, int);
+void tcp_tlro_free(struct tlro_ctrl *);
+void tcp_tlro_flush(struct tlro_ctrl *, int);
+void tcp_tlro_rx(struct tlro_ctrl *, struct mbuf *);
+
+#endif /* _TCP_TLRO_H_ */
Property changes on: trunk/sys/dev/mlx5/mlx5_en/tcp_tlro.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
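
[Editor's sketch] For context, a minimal sketch of how a receive path might drive the four entry points declared above; the softc layout, queue depth and function names are illustrative only and do not come from this commit.

struct my_rxq {
	struct tlro_ctrl lro;
	/* ... other per-queue state ... */
};

static int
my_rxq_attach(struct my_rxq *rq, struct ifnet *ifp)
{
	/* Reserve room for up to 64 queued mbufs per receive queue. */
	return (tcp_tlro_init(&rq->lro, ifp, 64));
}

static void
my_rxq_input(struct my_rxq *rq, struct mbuf *mb)
{
	/* Each completed packet is queued for possible aggregation. */
	tcp_tlro_rx(&rq->lro, mb);
}

static void
my_rxq_poll_done(struct my_rxq *rq)
{
	/* Force out whatever was aggregated at the end of the poll loop. */
	tcp_tlro_flush(&rq->lro, 1);
}

static void
my_rxq_detach(struct my_rxq *rq)
{
	tcp_tlro_free(&rq->lro);	/* frees the array and any leftover mbufs */
}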
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,792 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib.h 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#ifndef MLX5_IB_H
+#define MLX5_IB_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_addr.h>
+#include <dev/mlx5/device.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/cq.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/srq.h>
+#include <linux/types.h>
+#include <dev/mlx5/mlx5_core/transobj.h>
+
+#define mlx5_ib_dbg(dev, format, arg...) \
+pr_debug("mlx5_dbg:%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
+ __LINE__, curthread->td_proc->p_pid, ##arg)
+
+#define mlx5_ib_err(dev, format, arg...) \
+printf("mlx5_ib: ERR: ""mlx5_err:%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
+ __LINE__, curthread->td_proc->p_pid, ##arg)
+
+#define mlx5_ib_warn(dev, format, arg...) \
+printf("mlx5_ib: WARN: ""mlx5_warn:%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
+ __LINE__, curthread->td_proc->p_pid, ##arg)
+#define BF_ENABLE 0
+
+extern struct workqueue_struct *mlx5_ib_wq;
+
+enum {
+ MLX5_IB_MMAP_CMD_SHIFT = 8,
+ MLX5_IB_MMAP_CMD_MASK = 0xff,
+};
+
+enum mlx5_ib_mmap_cmd {
+ MLX5_IB_MMAP_REGULAR_PAGE = 0,
+ MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
+ MLX5_IB_MMAP_MAP_DC_INFO_PAGE = 4,
+
+ /* Use EXP mmap commands until they are pushed upstream */
+ MLX5_IB_EXP_MMAP_CORE_CLOCK = 0xFB,
+ MLX5_IB_EXP_MMAP_GET_CONTIGUOUS_PAGES_CPU_NUMA = 0xFC,
+ MLX5_IB_EXP_MMAP_GET_CONTIGUOUS_PAGES_DEV_NUMA = 0xFD,
+ MLX5_IB_EXP_ALLOC_N_MMAP_WC = 0xFE,
+};
+
+enum {
+ MLX5_RES_SCAT_DATA32_CQE = 0x1,
+ MLX5_RES_SCAT_DATA64_CQE = 0x2,
+ MLX5_REQ_SCAT_DATA32_CQE = 0x11,
+ MLX5_REQ_SCAT_DATA64_CQE = 0x22,
+};
+
+enum {
+ MLX5_DCT_CS_RES_64 = 2,
+ MLX5_CNAK_RX_POLL_CQ_QUOTA = 256,
+};
+
+enum mlx5_ib_latency_class {
+ MLX5_IB_LATENCY_CLASS_LOW,
+ MLX5_IB_LATENCY_CLASS_MEDIUM,
+ MLX5_IB_LATENCY_CLASS_HIGH,
+ MLX5_IB_LATENCY_CLASS_FAST_PATH
+};
+
+enum mlx5_ib_mad_ifc_flags {
+ MLX5_MAD_IFC_IGNORE_MKEY = 1,
+ MLX5_MAD_IFC_IGNORE_BKEY = 2,
+ MLX5_MAD_IFC_NET_VIEW = 4,
+};
+
+enum {
+ MLX5_CROSS_CHANNEL_UUAR = 0,
+};
+
+enum {
+ MLX5_IB_MAX_CTX_DYNAMIC_UARS = 256,
+ MLX5_IB_INVALID_UAR_INDEX = -1U
+};
+
+enum {
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES = 13,
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES = 6,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES = 16,
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES = 9,
+};
+
+struct mlx5_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ struct list_head db_page_list;
+
+ /* protect doorbell record alloc/free
+ */
+ struct mutex db_page_mutex;
+ struct mlx5_uuar_info uuari;
+ u32 dynamic_wc_uar_index[MLX5_IB_MAX_CTX_DYNAMIC_UARS];
+ /* Transport Domain number */
+ u32 tdn;
+};
+
+static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
+}
+
+struct mlx5_ib_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ u32 pa_lkey;
+};
+
+struct wr_list {
+ u16 opcode;
+ u16 next;
+};
+
+struct mlx5_swr_ctx {
+ u64 wrid;
+ u32 wr_data;
+ struct wr_list w_list;
+ u32 wqe_head;
+ u8 sig_piped;
+ u8 rsvd[11];
+};
+
+struct mlx5_rwr_ctx {
+ u64 wrid;
+};
+
+struct mlx5_ib_wq {
+ union {
+ struct mlx5_swr_ctx *swr_ctx;
+ struct mlx5_rwr_ctx *rwr_ctx;
+ };
+ u16 unsig_count;
+
+ /* serialize post to the work queue
+ */
+ spinlock_t lock;
+ int wqe_cnt;
+ int max_post;
+ int max_gs;
+ int offset;
+ int wqe_shift;
+ unsigned head;
+ unsigned tail;
+ u16 cur_post;
+ u16 last_poll;
+ void *qend;
+};
+
+enum {
+ MLX5_QP_USER,
+ MLX5_QP_KERNEL,
+ MLX5_QP_EMPTY
+};
+
+enum {
+ MLX5_WQ_USER,
+ MLX5_WQ_KERNEL
+};
+
+struct mlx5_ib_sqd {
+ struct mlx5_ib_qp *qp;
+ struct work_struct work;
+};
+
+struct mlx5_ib_mc_flows_list {
+ struct list_head flows_list;
+ /* Protect the flows_list */
+ struct mutex lock;
+};
+
+struct mlx5_ib_qp {
+ struct ib_qp ibqp;
+ struct mlx5_core_qp mqp;
+ struct mlx5_core_qp mrq;
+ struct mlx5_core_qp msq;
+ u32 tisn;
+ u32 tirn;
+ struct mlx5_buf buf;
+
+ struct mlx5_db db;
+ struct mlx5_ib_wq rq;
+
+ u32 doorbell_qpn;
+ u8 sq_signal_bits;
+ u8 fm_cache;
+ int sq_max_wqes_per_wr;
+ int sq_spare_wqes;
+ struct mlx5_ib_wq sq;
+
+ struct ib_umem *umem;
+ int buf_size;
+ /* Raw Ethernet QP's SQ is allocated separately
+ * from the RQ's buffer in user-space.
+ */
+ struct ib_umem *sq_umem;
+ int sq_buf_size;
+ u64 sq_buf_addr;
+ int allow_mp_wqe;
+
+ /* serialize qp state modifications
+ */
+ struct mutex mutex;
+ u16 xrcdn;
+ u32 flags;
+ u8 port;
+ u8 alt_port;
+ u8 atomic_rd_en;
+ u8 resp_depth;
+ u8 state;
+ /* Raw Ethernet QP's SQ and RQ states */
+ u8 rq_state;
+ u8 sq_state;
+ int mlx_type;
+ int wq_sig;
+ int scat_cqe;
+ int max_inline_data;
+ struct mlx5_bf *bf;
+ int has_rq;
+
+ /* only for user space QPs. For kernel
+ * we have it from the bf object
+ */
+ int uuarn;
+
+ int create_type;
+ u32 pa_lkey;
+
+ /* Store signature errors */
+ bool signature_en;
+
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
+
+ struct mlx5_ib_mc_flows_list mc_flows_list;
+};
+
+struct mlx5_ib_cq_buf {
+ struct mlx5_buf buf;
+ struct ib_umem *umem;
+ int cqe_size;
+ int nent;
+};
+
+enum mlx5_ib_qp_flags {
+ MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0,
+ MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1,
+ MLX5_IB_QP_CAP_RX_END_PADDING = 1 << 5,
+};
+
+struct mlx5_umr_wr {
+ union {
+ u64 virt_addr;
+ u64 offset;
+ } target;
+ struct ib_pd *pd;
+ unsigned int page_shift;
+ unsigned int npages;
+ u64 length;
+ int access_flags;
+ u32 mkey;
+};
+
+struct mlx5_shared_mr_info {
+ int mr_id;
+ struct ib_umem *umem;
+};
+
+struct mlx5_ib_cq {
+ struct ib_cq ibcq;
+ struct mlx5_core_cq mcq;
+ struct mlx5_ib_cq_buf buf;
+ struct mlx5_db db;
+
+ /* serialize access to the CQ
+ */
+ spinlock_t lock;
+
+ /* protect resize cq
+ */
+ struct mutex resize_mutex;
+ struct mlx5_ib_cq_buf *resize_buf;
+ struct ib_umem *resize_umem;
+ int cqe_size;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
+};
+
+struct mlx5_ib_srq {
+ struct ib_srq ibsrq;
+ struct mlx5_core_srq msrq;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+ u64 *wrid;
+ /* protect SRQ handling
+ */
+ spinlock_t lock;
+ int head;
+ int tail;
+ u16 wqe_ctr;
+ struct ib_umem *umem;
+ /* serialize arming a SRQ
+ */
+ struct mutex mutex;
+ int wq_sig;
+};
+
+struct mlx5_ib_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+};
+
+enum mlx5_ib_mtt_access_flags {
+ MLX5_IB_MTT_READ = (1 << 0),
+ MLX5_IB_MTT_WRITE = (1 << 1),
+};
+
+#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+
+struct mlx5_ib_mr {
+ struct ib_mr ibmr;
+ struct mlx5_core_mr mmr;
+ struct ib_umem *umem;
+ struct mlx5_shared_mr_info *smr_info;
+ struct list_head list;
+ int order;
+ int umred;
+ dma_addr_t dma;
+ int npages;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_create_mkey_mbox_out out;
+ struct mlx5_core_sig_ctx *sig;
+ u32 max_reg_descriptors;
+ u64 size;
+ u64 page_count;
+ struct mlx5_ib_mr **children;
+ int nchild;
+};
+
+struct mlx5_ib_fast_reg_page_list {
+ struct ib_fast_reg_page_list ibfrpl;
+ __be64 *mapped_page_list;
+ dma_addr_t map;
+};
+
+struct mlx5_ib_umr_context {
+ enum ib_wc_status status;
+ struct completion done;
+};
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+ context->status = -1;
+ init_completion(&context->done);
+}
+
+struct umr_common {
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+};
+
+enum {
+ MLX5_FMR_INVALID,
+ MLX5_FMR_VALID,
+ MLX5_FMR_BUSY,
+};
+
+struct mlx5_ib_fmr {
+ struct ib_fmr ibfmr;
+ struct mlx5_core_mr mr;
+ int access_flags;
+ int state;
+ /* protect fmr state
+ */
+ spinlock_t lock;
+ u64 wrid;
+ struct ib_send_wr wr[2];
+ u8 page_shift;
+ struct ib_fast_reg_page_list page_list;
+};
+
+struct cache_order {
+ struct kobject kobj;
+ int order;
+ int index;
+ struct mlx5_ib_dev *dev;
+};
+
+struct mlx5_cache_ent {
+ struct list_head head;
+ /* sync access to the cache entry
+ */
+ spinlock_t lock;
+
+
+ u32 order;
+ u32 size;
+ u32 cur;
+ u32 miss;
+ u32 limit;
+
+ struct mlx5_ib_dev *dev;
+ struct work_struct work;
+ struct delayed_work dwork;
+ int pending;
+ struct cache_order co;
+};
+
+struct mlx5_mr_cache {
+ struct workqueue_struct *wq;
+ struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+ int stopped;
+ struct dentry *root;
+ int last_add;
+ int rel_timeout;
+ int rel_imm;
+};
+
+struct mlx5_ib_resources {
+ struct ib_cq *c0;
+ struct ib_xrcd *x0;
+ struct ib_xrcd *x1;
+ struct ib_pd *p0;
+ struct ib_srq *s0;
+ struct ib_srq *s1;
+};
+
+struct mlx5_dc_tracer {
+ struct page *pg;
+ dma_addr_t dma;
+ int size;
+ int order;
+};
+
+struct mlx5_dc_desc {
+ dma_addr_t dma;
+ void *buf;
+};
+
+enum mlx5_op {
+ MLX5_WR_OP_MLX = 1,
+};
+
+struct mlx5_mlx_wr {
+ u8 sl;
+ u16 dlid;
+ int icrc;
+};
+
+struct mlx5_send_wr {
+ struct ib_send_wr wr;
+ union {
+ struct mlx5_mlx_wr mlx;
+ } sel;
+};
+
+struct mlx5_dc_data {
+ struct ib_mr *mr;
+ struct ib_qp *dcqp;
+ struct ib_cq *rcq;
+ struct ib_cq *scq;
+ unsigned int rx_npages;
+ unsigned int tx_npages;
+ struct mlx5_dc_desc *rxdesc;
+ struct mlx5_dc_desc *txdesc;
+ unsigned int max_wqes;
+ unsigned int cur_send;
+ unsigned int last_send_completed;
+ int tx_pending;
+ struct mlx5_ib_dev *dev;
+ int port;
+ int initialized;
+ struct kobject kobj;
+ unsigned long connects;
+ unsigned long cnaks;
+ unsigned long discards;
+ struct ib_wc wc_tbl[MLX5_CNAK_RX_POLL_CQ_QUOTA];
+};
+
+struct mlx5_ib_port_sysfs_group {
+ struct kobject kobj;
+ bool enabled;
+ struct attribute_group counters;
+};
+
+#define MLX5_IB_GID_MAX 16
+
+struct mlx5_ib_port {
+ struct mlx5_ib_dev *dev;
+ u8 port_num; /* 0 based */
+ u8 port_gone; /* set when gone */
+ u16 q_cnt_id;
+ struct mlx5_ib_port_sysfs_group group;
+ union ib_gid gid_table[MLX5_IB_GID_MAX];
+};
+
+struct mlx5_ib_dev {
+ struct ib_device ib_dev;
+ struct mlx5_core_dev *mdev;
+ MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
+ int num_ports;
+ /* serialize update of capability mask
+ */
+ struct mutex cap_mask_mutex;
+ bool ib_active;
+ struct umr_common umrc;
+ /* sync used page count stats
+ */
+ struct mlx5_ib_resources devr;
+ struct mutex slow_path_mutex;
+ int enable_atomic_resp;
+ enum ib_atomic_cap atomic_cap;
+ struct mlx5_mr_cache cache;
+ struct kobject mr_cache;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
+ struct timer_list delay_timer;
+ int fill_delay;
+ struct mlx5_dc_tracer dctr;
+ struct mlx5_dc_data dcd[MLX5_MAX_PORTS];
+ struct kobject *dc_kobj;
+ /* Array with num_ports elements */
+ struct mlx5_ib_port *port;
+ struct kobject *ports_parent;
+};
+
+static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
+{
+ return container_of(mcq, struct mlx5_ib_cq, mcq);
+}
+
+static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
+}
+
+static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+}
+
+static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
+{
+ return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
+}
+
+static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct mlx5_ib_cq, ibcq);
+}
+
+static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
+{
+ return container_of(mqp, struct mlx5_ib_qp, mqp);
+}
+
+static inline struct mlx5_ib_qp *sq_to_mibqp(struct mlx5_core_qp *msq)
+{
+ return container_of(msq, struct mlx5_ib_qp, msq);
+}
+
+static inline struct mlx5_ib_qp *rq_to_mibqp(struct mlx5_core_qp *mrq)
+{
+ return container_of(mrq, struct mlx5_ib_qp, mrq);
+}
+
+static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+{
+ return container_of(mmr, struct mlx5_ib_mr, mmr);
+}
+
+static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct mlx5_ib_pd, ibpd);
+}
+
+static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
+}
+
+static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct mlx5_ib_qp, ibqp);
+}
+
+static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
+{
+ return container_of(msrq, struct mlx5_ib_srq, msrq);
+}
+
+static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct mlx5_ib_mr, ibmr);
+}
+
+static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
+{
+ return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
+}
+
+struct mlx5_ib_ah {
+ struct ib_ah ibah;
+ struct mlx5_av av;
+};
+
+static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct mlx5_ib_ah, ibah);
+}
+
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
+ struct mlx5_db *db);
+void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
+void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
+void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
+void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
+int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
+ u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ void *in_mad, void *response_mad);
+int mlx5_ib_resolve_grh(const struct ib_ah_attr *ah_attr, u8 *mac, int *is_mcast);
+struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev, struct ib_ah_attr *ah_attr,
+ struct mlx5_ib_ah *ah, enum rdma_link_layer ll);
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
+int mlx5_ib_destroy_ah(struct ib_ah *ah);
+struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
+int mlx5_ib_destroy_srq(struct ib_srq *srq);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
+struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
+ int entries, int vector,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_cq(struct ib_cq *cq);
+int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata, int mr_id);
+struct ib_mr *mlx5_ib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int access_flags,
+ u64 *virt_addr);
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
+int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
+struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
+ int max_page_list_len);
+struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
+ int page_list_len);
+void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
+
+struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
+ struct ib_fmr_attr *fmr_attr);
+int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
+ int npages, u64 iova);
+int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
+int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad);
+struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
+int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
+ struct ib_smp *out_mad);
+int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
+ __be64 *sys_image_guid);
+int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
+ u16 *max_pkeys);
+int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
+ u32 *vendor_id);
+int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey);
+int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc);
+int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid);
+int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid);
+int mlx5_query_port_mad_ifc(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
+void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+ int *ncont, int *order);
+void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, __be64 *pas, int umr);
+void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
+int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
+void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
+int mlx5_query_port_roce(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props);
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port, int index,
+ __be16 ah_udp_s_port);
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port,
+ int index, int *gid_type);
+struct net_device *mlx5_ib_get_netdev(struct ib_device *ib_dev, u8 port);
+int modify_gid_roce(struct ib_device *ib_dev, u8 port, unsigned int index,
+ const union ib_gid *gid, struct net_device *ndev);
+int query_gid_roce(struct ib_device *ib_dev, u8 port, int index,
+ union ib_gid *gid);
+int mlx5_process_mad_mad_ifc(struct ib_device *ibdev, int mad_flags,
+ u8 port_num, struct ib_wc *in_wc,
+ struct ib_grh *in_grh, struct ib_mad *in_mad,
+ struct ib_mad *out_mad);
+
+static inline void init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = 1;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
+
+static inline u8 convert_access(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ;
+}
+
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+
+#endif /* MLX5_IB_H */
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
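
[Editor's sketch] Most of the to_*() helpers above follow the same embed-and-downcast pattern: the driver structure embeds the core or verbs object, and container_of() maps a callback argument back to the enclosing driver object. A small sketch using the declarations above; the callback itself is illustrative, not driver code.

static void
my_cq_event_cb(struct mlx5_core_cq *mcq, int type)
{
	/* mcq is the member embedded in struct mlx5_ib_cq ... */
	struct mlx5_ib_cq *cq = to_mibcq(mcq);
	/* ... and ibcq is the object that was handed out through the verbs API. */
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);

	mlx5_ib_warn(dev, "event 0x%x on CQ 0x%x\n", type, mcq->cqn);
}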
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_ah.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_ah.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_ah.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,147 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_ah.c 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#include "mlx5_ib.h"
+
+#define IPV6_DEFAULT_HOPLIMIT 64
+
+int mlx5_ib_resolve_grh(const struct ib_ah_attr *ah_attr, u8 *mac, int *is_mcast)
+{
+ struct in6_addr in6;
+
+ if (is_mcast != NULL)
+ *is_mcast = 0;
+
+ memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
+ if (rdma_link_local_addr(&in6)) {
+ rdma_get_ll_mac(&in6, mac);
+ } else if (rdma_is_multicast_addr(&in6)) {
+ rdma_get_mcast_mac(&in6, mac);
+ if (is_mcast != NULL)
+ *is_mcast = 1;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+struct ib_ah *create_ib_ah(struct mlx5_ib_dev *dev,
+ struct ib_ah_attr *ah_attr,
+ struct mlx5_ib_ah *ah, enum rdma_link_layer ll)
+{
+ int err;
+ int gid_type;
+
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ memcpy(ah->av.rgid, &ah_attr->grh.dgid, 16);
+ ah->av.grh_gid_fl = cpu_to_be32(ah_attr->grh.flow_label |
+ (1 << 30) |
+ ah_attr->grh.sgid_index << 20);
+ ah->av.hop_limit = ah_attr->grh.hop_limit;
+ ah->av.tclass = ah_attr->grh.traffic_class;
+ }
+
+ ah->av.stat_rate_sl = (ah_attr->static_rate << 4);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ err = mlx5_get_roce_gid_type(dev, ah_attr->port_num,
+ ah_attr->grh.sgid_index,
+ &gid_type);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_resolve_grh(ah_attr, ah->av.rmac, NULL);
+ ah->av.udp_sport = mlx5_get_roce_udp_sport(
+ dev,
+ ah_attr->port_num,
+ ah_attr->grh.sgid_index,
+ 0);
+ ah->av.stat_rate_sl |= (ah_attr->sl & 0x7) << 1;
+ ah->av.hop_limit = ah_attr->grh.hop_limit;
+ /* TODO: initialize other eth fields */
+ } else {
+ ah->av.rlid = cpu_to_be16(ah_attr->dlid);
+ ah->av.fl_mlid = ah_attr->src_path_bits & 0x7f;
+ ah->av.stat_rate_sl |= (ah_attr->sl & 0xf);
+ }
+
+ return &ah->ibah;
+}
+
+struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
+{
+ struct mlx5_ib_ah *ah;
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ enum rdma_link_layer ll;
+ struct ib_ah *ret = ERR_PTR(-EINVAL);
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ if (!ah)
+ return ERR_PTR(-ENOMEM);
+
+ ll = pd->device->get_link_layer(pd->device, ah_attr->port_num);
+
+ if (ll == IB_LINK_LAYER_ETHERNET && !(ah_attr->ah_flags & IB_AH_GRH))
+ goto err_kfree_ah;
+
+ return create_ib_ah(dev, ah_attr, ah, ll); /* never fails */
+
+err_kfree_ah:
+ kfree(ah);
+ return ret;
+}
+
+int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+{
+ struct mlx5_ib_ah *ah = to_mah(ibah);
+ u32 tmp;
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+
+ tmp = be32_to_cpu(ah->av.grh_gid_fl);
+ if (tmp & (1 << 30)) {
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->grh.sgid_index = (tmp >> 20) & 0xff;
+ ah_attr->grh.flow_label = tmp & 0xfffff;
+ memcpy(&ah_attr->grh.dgid, ah->av.rgid, 16);
+ ah_attr->grh.hop_limit = ah->av.hop_limit;
+ ah_attr->grh.traffic_class = ah->av.tclass;
+ }
+ ah_attr->dlid = be16_to_cpu(ah->av.rlid);
+ ah_attr->static_rate = ah->av.stat_rate_sl >> 4;
+ ah_attr->sl = ah->av.stat_rate_sl & 0xf;
+
+ return 0;
+}
+
+int mlx5_ib_destroy_ah(struct ib_ah *ah)
+{
+ kfree(to_mah(ah));
+ return 0;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_ah.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
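
[Editor's sketch] create_ib_ah() and mlx5_ib_query_ah() above pack the GRH state into the single 32-bit av.grh_gid_fl word: bit 30 flags GRH presence, bits 27:20 carry the SGID index and bits 19:0 the flow label. A small round-trip sketch of that encoding in host byte order; names and values are illustrative.

#include <stdint.h>

static uint32_t
pack_grh_gid_fl(uint32_t flow_label, uint32_t sgid_index)
{
	/* bit 30 = GRH present, bits 27:20 = SGID index, bits 19:0 = flow label */
	return (flow_label | (1U << 30) | (sgid_index << 20));
}

static void
unpack_grh_gid_fl(uint32_t tmp, uint32_t *flow_label, uint32_t *sgid_index)
{
	*flow_label = 0;
	*sgid_index = 0;
	if (tmp & (1U << 30)) {		/* GRH present */
		*sgid_index = (tmp >> 20) & 0xff;
		*flow_label = tmp & 0xfffff;
	}
}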
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1301 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#include <linux/compiler.h>
+#include <linux/kref.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include "mlx5_ib.h"
+#include "user.h"
+
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
+{
+ struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
+
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
+}
+
+static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, int type)
+{
+ struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct ib_cq *ibcq = &cq->ibcq;
+ struct ib_event event;
+
+ if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
+ mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
+ type, mcq->cqn);
+ return;
+ }
+
+ if (ibcq->event_handler) {
+ event.device = &dev->ib_dev;
+ event.event = IB_EVENT_CQ_ERR;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+}
+
+static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
+{
+ return mlx5_buf_offset(&buf->buf, n * size);
+}
+
+static void *get_cqe(struct mlx5_ib_cq *cq, int n)
+{
+ return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
+}
+
+static u8 sw_ownership_bit(int n, int nent)
+{
+ return (n & nent) ? 1 : 0;
+}
+
+static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
+{
+ void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
+ if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
+ return cqe;
+ } else {
+ return NULL;
+ }
+}
+
+static void *next_cqe_sw(struct mlx5_ib_cq *cq)
+{
+ return get_sw_cqe(cq, cq->mcq.cons_index);
+}
+
+static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
+{
+ switch (wq->swr_ctx[idx].wr_data) {
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+
+ case IB_WR_FAST_REG_MR:
+ return IB_WC_FAST_REG_MR;
+
+ default:
+ printf("mlx5_ib: WARN: ""unknown completion status\n");
+ return 0;
+ }
+}
+
+static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ struct mlx5_ib_wq *wq, int idx)
+{
+ wc->wc_flags = 0;
+ switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ case MLX5_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case MLX5_OPCODE_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ case MLX5_OPCODE_NOP:
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_INVAL:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case MLX5_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = be32_to_cpu(cqe->byte_cnt);
+ break;
+ case MLX5_OPCODE_ATOMIC_CS:
+ wc->opcode = IB_WC_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_ATOMIC_FA:
+ wc->opcode = IB_WC_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_ATOMIC_MASKED_CS:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_ATOMIC_MASKED_FA:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_BIND_MW:
+ wc->opcode = IB_WC_BIND_MW;
+ break;
+ case MLX5_OPCODE_UMR:
+ wc->opcode = get_umr_comp(wq, idx);
+ break;
+ }
+}
+
+enum {
+ MLX5_GRH_IN_BUFFER = 1,
+ MLX5_GRH_IN_CQE = 2,
+};
+
+static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ struct mlx5_ib_srq *srq;
+ struct mlx5_ib_wq *wq;
+ u16 wqe_ctr;
+ u8 g;
+#if defined(DX_ROCE_V1_5) || defined(DX_WINDOWS)
+ u8 udp_header_valid;
+#endif
+
+ if (qp->ibqp.srq || qp->ibqp.xrcd) {
+ struct mlx5_core_srq *msrq = NULL;
+
+ if (qp->ibqp.xrcd) {
+ msrq = mlx5_core_get_srq(dev->mdev,
+ be32_to_cpu(cqe->srqn));
+ srq = to_mibsrq(msrq);
+ } else {
+ srq = to_msrq(qp->ibqp.srq);
+ }
+ if (srq) {
+ wqe_ctr = be16_to_cpu(cqe->wqe_counter);
+ wc->wr_id = srq->wrid[wqe_ctr];
+ mlx5_ib_free_srq_wqe(srq, wqe_ctr);
+ if (msrq && atomic_dec_and_test(&msrq->refcount))
+ complete(&msrq->free);
+ }
+ } else {
+ wq = &qp->rq;
+ wc->wr_id = wq->rwr_ctx[wq->tail & (wq->wqe_cnt - 1)].wrid;
+ ++wq->tail;
+ }
+ wc->byte_len = be32_to_cpu(cqe->byte_cnt);
+
+ switch (cqe->op_own >> 4) {
+ case MLX5_CQE_RESP_WR_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->imm_inval_pkey;
+ break;
+ case MLX5_CQE_RESP_SEND:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+ break;
+ case MLX5_CQE_RESP_SEND_IMM:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->imm_inval_pkey;
+ break;
+ case MLX5_CQE_RESP_SEND_INV:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+ break;
+ }
+ wc->slid = be16_to_cpu(cqe->slid);
+ wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
+ wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
+ wc->dlid_path_bits = cqe->ml_path;
+ g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ wc->wc_flags |= g ? IB_WC_GRH : 0;
+ wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+
+#if defined(DX_ROCE_V1_5) || defined(DX_WINDOWS)
+ udp_header_valid = wc->sl & 0x8;
+ if (udp_header_valid)
+ wc->wc_flags |= IB_WC_WITH_UDP_HDR;
+
+#endif
+}
+
+static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
+{
+ __be32 *p = (__be32 *)cqe;
+ int i;
+
+ mlx5_ib_warn(dev, "dump error cqe\n");
+ for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
+ printf("mlx5_ib: INFO: ""%08x %08x %08x %08x\n", be32_to_cpu(p[0]), be32_to_cpu(p[1]), be32_to_cpu(p[2]), be32_to_cpu(p[3]));
+}
+
+static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_err_cqe *cqe,
+ struct ib_wc *wc)
+{
+ int dump = 1;
+
+ switch (cqe->syndrome) {
+ case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
+ dump = 0;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_MW_BIND_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ dump = 0;
+ break;
+ case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ dump = 0;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
+ wc->status = IB_WC_REM_ABORT_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ wc->vendor_err = cqe->vendor_err_synd;
+ if (dump)
+ dump_cqe(dev, cqe);
+}
+
+static int is_atomic_response(struct mlx5_ib_qp *qp, u16 idx)
+{
+	/* TBD: waiting decision */
+ return 0;
+}
+
+static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, u16 idx)
+{
+ struct mlx5_wqe_data_seg *dpseg;
+ void *addr;
+
+ dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_atomic_seg);
+ addr = (void *)(uintptr_t)be64_to_cpu(dpseg->addr);
+ return addr;
+}
+
+static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+ u16 idx)
+{
+ void *addr;
+ int byte_count;
+ int i;
+
+ if (!is_atomic_response(qp, idx))
+ return;
+
+ byte_count = be32_to_cpu(cqe64->byte_cnt);
+ addr = mlx5_get_atomic_laddr(qp, idx);
+
+ if (byte_count == 4) {
+ *(u32 *)addr = be32_to_cpu(*((__be32 *)addr));
+ } else {
+ for (i = 0; i < byte_count; i += 8) {
+ *(u64 *)addr = be64_to_cpu(*((__be64 *)addr));
+ addr += 8;
+ }
+ }
+
+ return;
+}
+
+static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+ u16 tail, u16 head)
+{
+ u16 idx;
+
+ do {
+ idx = tail & (qp->sq.wqe_cnt - 1);
+ handle_atomic(qp, cqe64, idx);
+ if (idx == head)
+ break;
+
+ tail = qp->sq.swr_ctx[idx].w_list.next;
+ } while (1);
+ tail = qp->sq.swr_ctx[idx].w_list.next;
+ qp->sq.last_poll = tail;
+}
+
+static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
+{
+ mlx5_buf_free(dev->mdev, &buf->buf);
+}
+
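+/*
+ * Descriptive note (added): software completion helpers. When the device is
+ * in internal error state the hardware no longer produces CQEs, so the
+ * outstanding send and receive WQEs are completed here with
+ * IB_WC_WR_FLUSH_ERR.
+ */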
+static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned cur;
+ unsigned idx;
+ int np;
+ int i;
+
+ wq = &qp->sq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ idx = wq->last_poll & (wq->wqe_cnt - 1);
+ wc->wr_id = wq->swr_ctx[idx].wrid;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ wq->last_poll = wq->swr_ctx[idx].w_list.next;
+ }
+ *npolled = np;
+}
+
+static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned cur;
+ int np;
+ int i;
+
+ wq = &qp->rq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ wc->wr_id = wq->rwr_ctx[wq->tail & (wq->wqe_cnt - 1)].wrid;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+ *npolled = np;
+}
+
+static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_qp *qp;
+
+ *npolled = 0;
+	/* Find uncompleted WQEs belonging to that CQ and return software-generated flush completions that mimic them */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ sw_send_comp(qp, num_entries, wc + *npolled, npolled);
+ if (*npolled >= num_entries)
+ return;
+ }
+
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
+ if (*npolled >= num_entries)
+ return;
+ }
+}
+
+static inline u32 mlx5_ib_base_mkey(const u32 key)
+{
+ return key & 0xffffff00u;
+}
+
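+/*
+ * Descriptive note (added): poll a single CQE. The consumer index is
+ * advanced, the owning QP is resolved (and cached in *cur_qp across calls)
+ * and the CQE is dispatched on its opcode. Resize and signature-error CQEs
+ * are consumed internally and polling continues with the next entry.
+ */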
+static int mlx5_poll_one(struct mlx5_ib_cq *cq,
+ struct mlx5_ib_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_err_cqe *err_cqe;
+ struct mlx5_cqe64 *cqe64;
+ struct mlx5_core_qp *mqp;
+ struct mlx5_ib_wq *wq;
+ struct mlx5_sig_err_cqe *sig_err_cqe;
+ struct mlx5_core_mr *mmr;
+ struct mlx5_ib_mr *mr;
+ unsigned long flags;
+ u8 opcode;
+ u32 qpn;
+ u16 wqe_ctr;
+ void *cqe;
+ int idx;
+
+repoll:
+ cqe = next_cqe_sw(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
+ ++cq->mcq.cons_index;
+
+ /* Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rmb();
+
+ opcode = cqe64->op_own >> 4;
+ if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
+ if (likely(cq->resize_buf)) {
+ free_cq_buf(dev, &cq->buf);
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ goto repoll;
+ } else {
+ mlx5_ib_warn(dev, "unexpected resize cqe\n");
+ }
+ }
+
+ qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
+ if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
+ /* We do not have to take the QP table lock here,
+ * because CQs will be locked while QPs are removed
+ * from the table.
+ */
+ mqp = __mlx5_qp_lookup(dev->mdev, qpn);
+ if (unlikely(!mqp)) {
+ mlx5_ib_warn(dev, "CQE at CQ %06x for unknown QPN %6x\n",
+ cq->mcq.cqn, qpn);
+ return -EINVAL;
+ }
+
+ *cur_qp = to_mibqp(mqp);
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+ switch (opcode) {
+ case MLX5_CQE_REQ:
+ wq = &(*cur_qp)->sq;
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ idx = wqe_ctr & (wq->wqe_cnt - 1);
+ handle_good_req(wc, cqe64, wq, idx);
+ handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
+ wc->wr_id = wq->swr_ctx[idx].wrid;
+ wq->tail = wq->swr_ctx[idx].wqe_head + 1;
+ if (unlikely(wq->swr_ctx[idx].w_list.opcode &
+ MLX5_OPCODE_SIGNATURE_CANCELED))
+ wc->status = IB_WC_GENERAL_ERR;
+ else
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case MLX5_CQE_RESP_WR_IMM:
+ case MLX5_CQE_RESP_SEND:
+ case MLX5_CQE_RESP_SEND_IMM:
+ case MLX5_CQE_RESP_SEND_INV:
+ handle_responder(wc, cqe64, *cur_qp);
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case MLX5_CQE_RESIZE_CQ:
+ break;
+ case MLX5_CQE_REQ_ERR:
+ case MLX5_CQE_RESP_ERR:
+ err_cqe = (struct mlx5_err_cqe *)cqe64;
+ mlx5_handle_error_cqe(dev, err_cqe, wc);
+ mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
+ opcode == MLX5_CQE_REQ_ERR ?
+ "Requestor" : "Responder", cq->mcq.cqn);
+ mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ wq = &(*cur_qp)->sq;
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ idx = wqe_ctr & (wq->wqe_cnt - 1);
+ wc->wr_id = wq->swr_ctx[idx].wrid;
+ wq->tail = wq->swr_ctx[idx].wqe_head + 1;
+ } else {
+ struct mlx5_ib_srq *srq;
+
+ if ((*cur_qp)->ibqp.srq) {
+ srq = to_msrq((*cur_qp)->ibqp.srq);
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ wc->wr_id = srq->wrid[wqe_ctr];
+ mlx5_ib_free_srq_wqe(srq, wqe_ctr);
+ } else {
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->rwr_ctx[wq->tail & (wq->wqe_cnt - 1)].wrid;
+ ++wq->tail;
+ }
+ }
+ break;
+ case MLX5_CQE_SIG_ERR:
+ sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
+
+ spin_lock_irqsave(&dev->mdev->priv.mr_table.lock, flags);
+ mmr = __mlx5_mr_lookup(dev->mdev,
+ mlx5_ib_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ if (unlikely(!mmr)) {
+ spin_unlock_irqrestore(&dev->mdev->priv.mr_table.lock, flags);
+ mlx5_ib_warn(dev, "CQE at CQ %06x for unknown MR %6x\n",
+ cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
+ return -EINVAL;
+ }
+
+ mr = to_mibmr(mmr);
+ mr->sig->sig_err_exists = true;
+ mr->sig->sigerr_count++;
+
+ mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR\n", cq->mcq.cqn);
+
+ spin_unlock_irqrestore(&dev->mdev->priv.mr_table.lock, flags);
+ goto repoll;
+ }
+
+ return 0;
+}
+
+int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ struct mlx5_ib_qp *cur_qp = NULL;
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int npolled;
+ int err = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
+ goto out;
+ }
+
+ for (npolled = 0; npolled < num_entries; npolled++) {
+ err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
+ if (err)
+ break;
+ }
+
+ if (npolled)
+ mlx5_cq_set_ci(&cq->mcq);
+out:
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ if (err == 0 || err == -EAGAIN)
+ return npolled;
+ else
+ return err;
+}
+
+int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+ void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+
+
+ mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
+ uar_page,
+ MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
+ to_mcq(ibcq)->mcq.cons_index);
+
+ return 0;
+}
+
+static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
+ int nent, int cqe_size)
+{
+ int err;
+
+ err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
+ PAGE_SIZE * 2, &buf->buf);
+ if (err) {
+ mlx5_ib_err(dev, "alloc failed\n");
+ return err;
+ }
+
+ buf->cqe_size = cqe_size;
+ buf->nent = nent;
+
+ return 0;
+}
+
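+/*
+ * Descriptive note (added): user-space CQ creation path. The create command
+ * is copied from udata, the user CQ buffer and doorbell page are pinned, and
+ * the PAS list for the firmware create command is built. CQE compression is
+ * enabled when requested and supported.
+ */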
+static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
+ struct ib_ucontext *context, struct mlx5_ib_cq *cq,
+ int entries, struct mlx5_create_cq_mbox_in **cqb,
+ int *cqe_size, int *index, int *inlen)
+{
+ struct mlx5_exp_ib_create_cq ucmd;
+ size_t ucmdlen;
+ int page_shift;
+ int npages;
+ int ncont;
+ int err;
+
+ memset(&ucmd, 0, sizeof(ucmd));
+
+ ucmdlen =
+ (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
+ sizeof(struct mlx5_ib_create_cq)) ?
+ (sizeof(struct mlx5_ib_create_cq) - sizeof(ucmd.reserved)) :
+ sizeof(struct mlx5_ib_create_cq);
+
+ if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
+ mlx5_ib_err(dev, "copy failed\n");
+ return -EFAULT;
+ }
+
+ if (ucmdlen == sizeof(ucmd) && ucmd.reserved != 0) {
+ mlx5_ib_err(dev, "command corrupted\n");
+ return -EINVAL;
+ }
+
+ if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128) {
+ mlx5_ib_warn(dev, "wrong CQE size %d\n", ucmd.cqe_size);
+ return -EINVAL;
+ }
+
+ *cqe_size = ucmd.cqe_size;
+
+ cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
+ entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(cq->buf.umem)) {
+ err = PTR_ERR(cq->buf.umem);
+ return err;
+ }
+
+ err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
+ &cq->db);
+ if (err) {
+ mlx5_ib_warn(dev, "map failed\n");
+ goto err_umem;
+ }
+
+ mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
+ &ncont, NULL);
+ mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
+ (unsigned long long)ucmd.buf_addr, entries * ucmd.cqe_size,
+ npages, page_shift, ncont);
+
+ *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
+ *cqb = mlx5_vzalloc(*inlen);
+ if (!*cqb) {
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
+ (*cqb)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+
+ *index = to_mucontext(context)->uuari.uars[0].index;
+
+ if (*cqe_size == 64 && MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+ if (ucmd.exp_data.cqe_comp_en == 1 &&
+ (ucmd.exp_data.comp_mask & MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_EN)) {
+ MLX5_SET(cqc, &(*cqb)->ctx, cqe_compression_en, 1);
+ if (ucmd.exp_data.cqe_comp_recv_type ==
+ MLX5_IB_CQE_FORMAT_CSUM &&
+ (ucmd.exp_data.comp_mask &
+ MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_RECV_TYPE))
+ MLX5_SET(cqc, &(*cqb)->ctx, mini_cqe_res_format,
+ MLX5_IB_CQE_FORMAT_CSUM);
+ }
+ }
+
+ return 0;
+
+err_db:
+ mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+
+err_umem:
+ ib_umem_release(cq->buf.umem);
+ return err;
+}
+
+static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
+{
+
+ mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
+ ib_umem_release(cq->buf.umem);
+}
+
+static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
+{
+ int i;
+ void *cqe;
+ struct mlx5_cqe64 *cqe64;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
+static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, int cqe_size,
+ struct mlx5_create_cq_mbox_in **cqb,
+ int *index, int *inlen)
+{
+ int err;
+
+ err = mlx5_db_alloc(dev->mdev, &cq->db);
+ if (err)
+ return err;
+
+ cq->mcq.set_ci_db = cq->db.db;
+ cq->mcq.arm_db = cq->db.db + 1;
+ cq->mcq.cqe_sz = cqe_size;
+
+ err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
+ if (err)
+ goto err_db;
+
+ init_cq_buf(cq, &cq->buf);
+
+ *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
+ *cqb = mlx5_vzalloc(*inlen);
+ if (!*cqb) {
+ err = -ENOMEM;
+ goto err_buf;
+ }
+ mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
+
+ (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ *index = dev->mdev->priv.uuari.uars[0].index;
+
+ return 0;
+
+err_buf:
+ free_cq_buf(dev, &cq->buf);
+
+err_db:
+ mlx5_db_free(dev->mdev, &cq->db);
+ return err;
+}
+
+static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+ free_cq_buf(dev, &cq->buf);
+ mlx5_db_free(dev->mdev, &cq->db);
+}
+
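+/*
+ * Descriptive note (added): common CQ creation entry point. The requested
+ * size is rounded up to a power of two (one extra entry is reserved), the
+ * buffer is set up from either user memory or kernel pages, and the firmware
+ * CREATE_CQ command is issued.
+ */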
+struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
+ int entries, int vector,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct mlx5_create_cq_mbox_in *cqb = NULL;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_cq *cq;
+ int uninitialized_var(index);
+ int uninitialized_var(inlen);
+ int cqe_size;
+ int irqn;
+ int eqn;
+ int err;
+
+ if (entries < 0 || roundup_pow_of_two(entries + 1) >
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d(%ld), max %d\n",
+ entries, roundup_pow_of_two(entries + 1),
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
+ return ERR_PTR(-EINVAL);
+ }
+
+ entries = roundup_pow_of_two(entries + 1);
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return ERR_PTR(-ENOMEM);
+
+ cq->ibcq.cqe = entries - 1;
+ mutex_init(&cq->resize_mutex);
+ spin_lock_init(&cq->lock);
+ cq->resize_buf = NULL;
+ cq->resize_umem = NULL;
+
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
+
+ if (context) {
+ err = create_cq_user(dev, udata, context, cq, entries,
+ &cqb, &cqe_size, &index, &inlen);
+ if (err)
+ goto err_create;
+ } else {
+ cqe_size = (cache_line_size() >= 128 ? 128 : 64);
+ err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
+ &index, &inlen);
+ if (err)
+ goto err_create;
+ }
+
+ cq->cqe_size = cqe_size;
+ cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+ cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
+ err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
+ if (err)
+ goto err_cqb;
+
+ cqb->ctx.c_eqn = cpu_to_be16(eqn);
+ cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
+
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
+ if (err)
+ goto err_cqb;
+
+ mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
+ cq->mcq.irqn = irqn;
+ cq->mcq.comp = mlx5_ib_cq_comp;
+ cq->mcq.event = mlx5_ib_cq_event;
+
+ if (context)
+ if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
+ err = -EFAULT;
+ goto err_cmd;
+ }
+
+
+ kvfree(cqb);
+ return &cq->ibcq;
+
+err_cmd:
+ mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
+
+err_cqb:
+ kvfree(cqb);
+ if (context)
+ destroy_cq_user(cq, context);
+ else
+ destroy_cq_kernel(dev, cq);
+
+err_create:
+ kfree(cq);
+
+ return ERR_PTR(err);
+}
+
+
+int mlx5_ib_destroy_cq(struct ib_cq *cq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ struct ib_ucontext *context = NULL;
+
+ if (cq->uobject)
+ context = cq->uobject->context;
+
+ mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ if (context)
+ destroy_cq_user(mcq, context);
+ else
+ destroy_cq_kernel(dev, mcq);
+
+ kfree(mcq);
+
+ return 0;
+}
+
+static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
+{
+ return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
+}
+
+void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
+{
+ struct mlx5_cqe64 *cqe64, *dest64;
+ void *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ u8 owner_bit;
+
+ if (!cq)
+ return;
+
+ /* First we need to find the current producer index, so we
+ * know where to start cleaning from. It doesn't matter if HW
+ * adds new entries after this loop -- the QP we're worried
+ * about is already in RESET, so the new entries won't come
+ * from our QP and therefore don't need to be checked.
+ */
+ for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
+ if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
+ break;
+
+ /* Now sweep backwards through the CQ, removing CQ entries
+ * that match our QP by copying older entries on top of them.
+ */
+ while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
+ cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+ if (is_equal_rsn(cqe64, rsn)) {
+ if (srq && (ntohl(cqe64->srqn) & 0xffffff))
+ mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+ dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
+ owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
+ memcpy(dest, cqe, cq->mcq.cqe_sz);
+ dest64->op_own = owner_bit |
+ (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
+ }
+ }
+
+ if (nfreed) {
+ cq->mcq.cons_index += nfreed;
+ /* Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+ mlx5_cq_set_ci(&cq->mcq);
+ }
+}
+
+void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
+{
+ if (!cq)
+ return;
+
+ spin_lock_irq(&cq->lock);
+ __mlx5_ib_cq_clean(cq, qpn, srq);
+ spin_unlock_irq(&cq->lock);
+}
+
+int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ struct mlx5_modify_cq_mbox_in *in;
+ struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+
+ int err;
+ u32 fsel = 0;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->cqn = cpu_to_be32(mcq->mcq.cqn);
+ if (1) {
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ fsel |= (MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+ if (cq_period & 0xf000) {
+				/* A value higher than 0xfff was requested;
+				 * clamp to the largest supported value. */
+ cq_period = 0xfff;
+ printf("mlx5_ib: INFO: ""period supported is limited to 12 bits\n");
+ }
+
+ in->ctx.cq_period = cpu_to_be16(cq_period);
+ in->ctx.cq_max_count = cpu_to_be16(cq_count);
+ } else {
+ err = -ENOSYS;
+ goto out;
+ }
+ }
+ in->field_select = cpu_to_be32(fsel);
+ err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
+
+out:
+ kfree(in);
+ if (err)
+ mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
+
+ return err;
+}
+
+static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, struct ib_udata *udata, int *npas,
+ int *page_shift, int *cqe_size)
+{
+ struct mlx5_ib_resize_cq ucmd;
+ struct ib_umem *umem;
+ int err;
+ int npages;
+ struct ib_ucontext *context = cq->buf.umem->context;
+
+ err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+ if (err)
+ return err;
+
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift,
+ npas, NULL);
+
+ cq->resize_umem = umem;
+ *cqe_size = ucmd.cqe_size;
+
+ return 0;
+}
+
+static void un_resize_user(struct mlx5_ib_cq *cq)
+{
+ ib_umem_release(cq->resize_umem);
+}
+
+static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, int cqe_size)
+{
+ int err;
+
+ cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
+ if (!cq->resize_buf)
+ return -ENOMEM;
+
+ err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+ if (err)
+ goto ex;
+
+ init_cq_buf(cq, cq->resize_buf);
+
+ return 0;
+
+ex:
+ kfree(cq->resize_buf);
+ return err;
+}
+
+static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+ free_cq_buf(dev, cq->resize_buf);
+ cq->resize_buf = NULL;
+}
+
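+/*
+ * Descriptive note (added): for a kernel CQ resize, copy the still-valid
+ * CQEs from the old buffer into the resize buffer, fixing up the software
+ * ownership bit, until the RESIZE_CQ marker CQE is reached.
+ */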
+static int copy_resize_cqes(struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_cqe64 *scqe64;
+ struct mlx5_cqe64 *dcqe64;
+ void *start_cqe;
+ void *scqe;
+ void *dcqe;
+ int ssize;
+ int dsize;
+ int i;
+ u8 sw_own;
+
+ ssize = cq->buf.cqe_size;
+ dsize = cq->resize_buf->cqe_size;
+ if (ssize != dsize) {
+ mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
+ return -EINVAL;
+ }
+
+ i = cq->mcq.cons_index;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ start_cqe = scqe;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+ dcqe = get_cqe_from_buf(cq->resize_buf,
+ (i + 1) & (cq->resize_buf->nent),
+ dsize);
+ dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
+ sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
+ memcpy(dcqe, scqe, dsize);
+ dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
+
+ ++i;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ if (scqe == start_cqe) {
+ printf("mlx5_ib: WARN: ""resize CQ failed to get resize CQE, CQN 0x%x\n", cq->mcq.cqn);
+ return -ENOMEM;
+ }
+ }
+ ++cq->mcq.cons_index;
+ return 0;
+}
+
+int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ struct mlx5_modify_cq_mbox_in *in;
+ int err;
+ int npas;
+ int page_shift;
+ int inlen;
+ int uninitialized_var(cqe_size);
+ unsigned long flags;
+
+ if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
+ mlx5_ib_warn(dev, "Firmware does not support resize CQ\n");
+ return -ENOSYS;
+ }
+
+ if (entries < 1 || roundup_pow_of_two(entries + 1) >
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d(%ld), max %d\n",
+ entries, roundup_pow_of_two(entries + 1),
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
+ return -EINVAL;
+ }
+
+ entries = roundup_pow_of_two(entries + 1);
+
+ if (entries == ibcq->cqe + 1)
+ return 0;
+
+ mutex_lock(&cq->resize_mutex);
+ if (udata) {
+ err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
+ &cqe_size);
+ } else {
+ cqe_size = 64;
+ err = resize_kernel(dev, cq, entries, cqe_size);
+ if (!err) {
+ npas = cq->resize_buf->buf.npages;
+ page_shift = cq->resize_buf->buf.page_shift;
+ }
+ }
+
+ if (err) {
+ mlx5_ib_warn(dev, "resize failed: %d\n", err);
+ goto ex;
+ }
+
+ inlen = sizeof(*in) + npas * sizeof(in->pas[0]);
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto ex_resize;
+ }
+
+ if (udata)
+ mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
+ in->pas, 0);
+ else
+ mlx5_fill_page_array(&cq->resize_buf->buf, in->pas);
+
+ in->field_select = cpu_to_be32(MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+ in->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ in->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
+ in->ctx.page_offset = 0;
+ in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(entries) << 24);
+ in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
+ in->cqn = cpu_to_be32(cq->mcq.cqn);
+
+ err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
+ if (err) {
+ mlx5_ib_warn(dev, "modify cq failed: %d\n", err);
+ goto ex_alloc;
+ }
+
+ if (udata) {
+ cq->ibcq.cqe = entries - 1;
+ ib_umem_release(cq->buf.umem);
+ cq->buf.umem = cq->resize_umem;
+ cq->resize_umem = NULL;
+ } else {
+ struct mlx5_ib_cq_buf tbuf;
+ int resized = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (cq->resize_buf) {
+ err = copy_resize_cqes(cq);
+ if (!err) {
+ tbuf = cq->buf;
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ resized = 1;
+ }
+ }
+ cq->ibcq.cqe = entries - 1;
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (resized)
+ free_cq_buf(dev, &tbuf);
+ }
+ mutex_unlock(&cq->resize_mutex);
+
+ kvfree(in);
+ return 0;
+
+ex_alloc:
+ kvfree(in);
+
+ex_resize:
+ if (udata)
+ un_resize_user(cq);
+ else
+ un_resize_kernel(dev, cq);
+ex:
+ mutex_unlock(&cq->resize_mutex);
+ return err;
+}
+
+int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+{
+ struct mlx5_ib_cq *cq;
+
+ if (!ibcq)
+ return 128;
+
+ cq = to_mcq(ibcq);
+ return cq->cqe_size;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,98 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#include <linux/compiler.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <rdma/ib_umem.h>
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_user_db_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ uintptr_t user_virt;
+ int refcnt;
+};
+
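+/*
+ * Descriptive note (added): map a user doorbell record. Doorbell pages are
+ * shared per user context, so an already pinned page covering the same
+ * virtual page is reused and reference counted instead of pinning it again.
+ */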
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, uintptr_t virt,
+ struct mlx5_db *db)
+{
+ struct mlx5_ib_user_db_page *page;
+ struct ib_umem_chunk *chunk;
+ int err = 0;
+
+ mutex_lock(&context->db_page_mutex);
+
+ list_for_each_entry(page, &context->db_page_list, list)
+ if (page->user_virt == (virt & PAGE_MASK))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ page->user_virt = (virt & PAGE_MASK);
+ page->refcnt = 0;
+ page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
+ PAGE_SIZE, 0, 0);
+ if (IS_ERR(page->umem)) {
+ err = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+
+ list_add(&page->list, &context->db_page_list);
+
+found:
+ chunk = list_entry(page->umem->chunk_list.next,
+ struct ib_umem_chunk, list);
+ db->dma = sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
+ db->u.user_page = page;
+ ++page->refcnt;
+
+out:
+ mutex_unlock(&context->db_page_mutex);
+
+ return err;
+}
+
+void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
+{
+ mutex_lock(&context->db_page_mutex);
+
+ if (!--db->u.user_page->refcnt) {
+ list_del(&db->u.user_page->list);
+ ib_umem_release(db->u.user_page->umem);
+ kfree(db->u.user_page);
+ }
+
+ mutex_unlock(&context->db_page_mutex);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_doorbell.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mad.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mad.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mad.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,543 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_mad.c 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
+#include "mlx5_ib.h"
+#include <dev/mlx5/vport.h>
+
+#define MAX_U32 0xffffffffULL
+#define MAX_U16 0xffffUL
+
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_32BIT_COUNTER(counter, value) do { \
+ if ((value) > MAX_U32) \
+ counter = cpu_to_be32(MAX_U32); \
+ else \
+ counter = cpu_to_be32(value); \
+} while (0)
+
+/* Counters should saturate once they reach their maximum value */
+#define ASSIGN_16BIT_COUNTER(counter, value) do { \
+ if ((value) > MAX_U16) \
+ counter = cpu_to_be16(MAX_U16); \
+ else \
+ counter = cpu_to_be16(value); \
+} while (0)
+
+enum {
+ MLX5_IB_VENDOR_CLASS1 = 0x9,
+ MLX5_IB_VENDOR_CLASS2 = 0xa
+};
+
+int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
+ u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ void *in_mad, void *response_mad)
+{
+ u8 op_modifier = 0;
+
+ /* Key check traps can't be generated unless we have in_wc to
+ * tell us where to send the trap.
+ */
+ if (ignore_mkey || !in_wc)
+ op_modifier |= 0x1;
+ if (ignore_bkey || !in_wc)
+ op_modifier |= 0x2;
+
+ return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
+}
+
+static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ u16 slid;
+ int err;
+
+ slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
+
+ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
+ if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
+ in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /* Don't process SMInfo queries -- the SMA can't handle them.
+ */
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
+ in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
+ in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
+ if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
+ in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } else {
+ return IB_MAD_RESULT_SUCCESS;
+ }
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev),
+ mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY,
+ port_num, in_wc, in_grh, in_mad, out_mad);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* set return bit in status of directed route responses */
+ if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
+ struct mlx5_vport_counters *vc)
+{
+ pma_cnt_ext->port_xmit_data = cpu_to_be64((vc->transmitted_ib_unicast.octets +
+ vc->transmitted_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_rcv_data = cpu_to_be64((vc->received_ib_unicast.octets +
+ vc->received_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_xmit_packets = cpu_to_be64(vc->transmitted_ib_unicast.packets +
+ vc->transmitted_ib_multicast.packets);
+ pma_cnt_ext->port_rcv_packets = cpu_to_be64(vc->received_ib_unicast.packets +
+ vc->received_ib_multicast.packets);
+ pma_cnt_ext->port_unicast_xmit_packets = cpu_to_be64(vc->transmitted_ib_unicast.packets);
+ pma_cnt_ext->port_unicast_rcv_packets = cpu_to_be64(vc->received_ib_unicast.packets);
+ pma_cnt_ext->port_multicast_xmit_packets = cpu_to_be64(vc->transmitted_ib_multicast.packets);
+ pma_cnt_ext->port_multicast_rcv_packets = cpu_to_be64(vc->received_ib_multicast.packets);
+}
+
+static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ struct mlx5_vport_counters *vc)
+{
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+ (vc->transmitted_ib_unicast.octets +
+ vc->transmitted_ib_multicast.octets) >> 2);
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+ (vc->received_ib_unicast.octets +
+ vc->received_ib_multicast.octets) >> 2);
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+ vc->transmitted_ib_unicast.packets +
+ vc->transmitted_ib_multicast.packets);
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+ vc->received_ib_unicast.packets +
+ vc->received_ib_multicast.packets);
+}
+
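+/*
+ * Descriptive note (added): handle a performance management (PMA) counter
+ * query by reading the vport counters from firmware and translating them
+ * into the standard PortCounters or PortCountersExtended MAD layout.
+ */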
+static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_vport_counters *vc;
+ int err;
+ int ext;
+
+ vc = kzalloc(sizeof(*vc), GFP_KERNEL);
+ if (!vc)
+ return -ENOMEM;
+
+ ext = in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT;
+
+ err = mlx5_get_vport_counters(dev->mdev, port_num, vc);
+ if (!err) {
+ if (ext) {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+
+ pma_cnt_ext_assign(pma_cnt_ext, vc);
+ } else {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)(out_mad->data + 40);
+
+ ASSIGN_16BIT_COUNTER(pma_cnt->port_rcv_errors,
+ (u16)vc->received_errors.packets);
+
+ pma_cnt_assign(pma_cnt, vc);
+ }
+ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ }
+
+ kfree(vc);
+ return err;
+}
+
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ memset(out_mad->data, 0, sizeof(out_mad->data));
+
+ if (MLX5_CAP_GEN(mdev, vport_counters) &&
+ in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
+ /* TBD: read error counters from the PPCNT */
+ return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+ } else {
+ return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+ in_mad, out_mad);
+ }
+}
+
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+ u16 packet_error;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+
+ packet_error = be16_to_cpu(out_mad->status);
+
+ dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_smp_attr_node_info_mad_ifc(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
+{
+ struct ib_smp *in_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ if (!in_mad)
+ return -ENOMEM;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
+ out_mad);
+
+ kfree(in_mad);
+ return err;
+}
+
+int mlx5_query_system_image_guid_mad_ifc(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(sys_image_guid, out_mad->data + 4, 8);
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_max_pkeys_mad_ifc(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_vendor_id_mad_ifc(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_smp_attr_node_info_mad_ifc(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_node_desc_mad_ifc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_desc, out_mad->data, 64);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_node_guid_mad_ifc(struct mlx5_ib_dev *dev, u64 *node_guid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_pkey_mad_ifc(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+ in_mad->attr_mod = cpu_to_be32(index / 32);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_gids_mad_ifc(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw, out_mad->data + 8, 8);
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+ in_mad->attr_mod = cpu_to_be32(index / 8);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
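+/*
+ * Descriptive note (added): query port attributes through the MAD interface
+ * (SMP PortInfo), used when the device runs in MAD/SMI mode. Extended speeds
+ * (FDR/EDR) and FDR-10 are decoded from the extended port info fields.
+ */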
+int mlx5_query_port_mad_ifc(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int ext_active_speed;
+ int err = -ENOMEM;
+
+ if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
+ mlx5_ib_warn(dev, "invalid port number %d\n", port);
+ return -EINVAL;
+ }
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ memset(props, 0, sizeof(*props));
+
+ init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto out;
+ }
+
+ props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
+ props->lmc = out_mad->data[34] & 0x7;
+ props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
+ props->sm_sl = out_mad->data[36] & 0xf;
+ props->state = out_mad->data[32] & 0xf;
+ props->phys_state = out_mad->data[33] >> 4;
+ props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
+ props->gid_tbl_len = out_mad->data[50];
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
+ props->active_width = out_mad->data[31] & 0xf;
+ props->active_speed = out_mad->data[35] >> 4;
+ props->max_mtu = out_mad->data[41] & 0xf;
+ props->active_mtu = out_mad->data[36] >> 4;
+ props->subnet_timeout = out_mad->data[51] & 0x1f;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = out_mad->data[41] >> 4;
+
+ /* Check if extended speeds (EDR/FDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+ ext_active_speed = out_mad->data[62] >> 4;
+
+ switch (ext_active_speed) {
+ case 1:
+ props->active_speed = 16; /* FDR */
+ break;
+ case 2:
+ props->active_speed = 32; /* EDR */
+ break;
+ }
+ }
+
+	/* If the reported active speed is QDR, check whether it is actually FDR-10 */
+ if (props->active_speed == 4) {
+ if (mdev->port_caps[port - 1].ext_port_cap &
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+ init_query_mad(in_mad);
+ in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port,
+ NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ /* Checking LinkSpeedActive for FDR-10 */
+ if (out_mad->data[15] & 0x1)
+ props->active_speed = 8;
+ }
+ }
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+
+ return err;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mad.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,2343 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 325611 2017-11-09 19:00:11Z hselasky $
+ */
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/io-mapping.h>
+#include <linux/sched.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ipv6.h>
+#include <linux/list.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/vport.h>
+#include <asm/pgtable.h>
+#include <linux/fs.h>
+#undef inode
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include "user.h"
+#include "mlx5_ib.h"
+
+#include <sys/unistd.h>
+
+#define DRIVER_NAME "mlx5_ib"
+#define DRIVER_VERSION "3.2-rc1"
+#define DRIVER_RELDATE "May 2016"
+
+#undef MODULE_VERSION
+#include <sys/module.h>
+
+MODULE_AUTHOR("Eli Cohen <eli at mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEPEND(mlx5ib, mlx5, 1, 1, 1);
+MODULE_DEPEND(mlx5ib, ibcore, 1, 1, 1);
+MODULE_VERSION(mlx5ib, 1);
+
+static int deprecated_prof_sel = 2;
+module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
+
+enum {
+ MLX5_STANDARD_ATOMIC_SIZE = 0x8,
+};
+
+struct workqueue_struct *mlx5_ib_wq;
+
+static char mlx5_version[] =
+ DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
+ DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+
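+/*
+ * Descriptive note (added): derive the reported atomic capabilities from the
+ * device capability bits. 8-byte compare-swap/fetch-add must be supported
+ * and the requester endianness mode must be compatible with the host,
+ * otherwise atomics are reported as IB_ATOMIC_NONE.
+ */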
+static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ int tmp;
+ u8 atomic_operations;
+ u8 atomic_size_qp;
+ u8 atomic_req_endianess;
+
+ atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+ atomic_req_endianess = MLX5_CAP_ATOMIC(dev->mdev,
+ atomic_req_8B_endianess_mode) ||
+ !mlx5_host_is_le();
+
+ tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
+ if (((atomic_operations & tmp) == tmp)
+ && (atomic_size_qp & 8)) {
+ if (atomic_req_endianess) {
+ props->atomic_cap = IB_ATOMIC_HCA;
+ } else {
+ props->atomic_cap = IB_ATOMIC_NONE;
+ }
+ } else {
+ props->atomic_cap = IB_ATOMIC_NONE;
+ }
+
+ tmp = MLX5_ATOMIC_OPS_MASKED_CMP_SWAP | MLX5_ATOMIC_OPS_MASKED_FETCH_ADD;
+ if (((atomic_operations & tmp) == tmp)
+ &&(atomic_size_qp & 8)) {
+ if (atomic_req_endianess)
+ props->masked_atomic_cap = IB_ATOMIC_HCA;
+ else {
+ props->masked_atomic_cap = IB_ATOMIC_NONE;
+ }
+ } else {
+ props->masked_atomic_cap = IB_ATOMIC_NONE;
+ }
+}
+
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
+ case MLX5_CAP_PORT_TYPE_IB:
+ return IB_LINK_LAYER_INFINIBAND;
+ case MLX5_CAP_PORT_TYPE_ETH:
+ return IB_LINK_LAYER_ETHERNET;
+ default:
+ return IB_LINK_LAYER_UNSPECIFIED;
+ }
+}
+
+static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+{
+ return !dev->mdev->issi;
+}
+
+enum {
+ MLX5_VPORT_ACCESS_METHOD_MAD,
+ MLX5_VPORT_ACCESS_METHOD_HCA,
+ MLX5_VPORT_ACCESS_METHOD_NIC,
+};
+
+static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+{
+ if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+ return MLX5_VPORT_ACCESS_METHOD_MAD;
+
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET)
+ return MLX5_VPORT_ACCESS_METHOD_NIC;
+
+ return MLX5_VPORT_ACCESS_METHOD_HCA;
+}
+
+static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_system_image_guid_mad_ifc(ibdev,
+ sys_image_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ if (!err)
+ *sys_image_guid = cpu_to_be64(tmp);
+ return err;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ if (!err)
+ *sys_image_guid = cpu_to_be64(tmp);
+ return err;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_max_pkeys_mad_ifc(ibdev, max_pkeys);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+ pkey_table_size));
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_vendor_id_mad_ifc(ibdev, vendor_id);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+ __be64 *node_guid)
+{
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_node_guid_mad_ifc(dev, node_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+ if (!err)
+ *node_guid = cpu_to_be64(tmp);
+ return err;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
+ if (!err)
+ *node_guid = cpu_to_be64(tmp);
+ return err;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+struct mlx5_reg_node_desc {
+ u8 desc[64];
+};
+
+static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct mlx5_reg_node_desc in;
+
+ if (mlx5_use_mad_ifc(dev))
+ return mlx5_query_node_desc_mad_ifc(dev, node_desc);
+
+ memset(&in, 0, sizeof(in));
+
+ return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+ sizeof(struct mlx5_reg_node_desc),
+ MLX5_REG_NODE_DESC, 0, 0);
+}
+
+static int mlx5_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int max_sq_desc;
+ int max_rq_sg;
+ int max_sq_sg;
+ int err;
+
+
+ memset(props, 0, sizeof(*props));
+
+ err = mlx5_query_system_image_guid(ibdev,
+ &props->sys_image_guid);
+ if (err)
+ return err;
+
+ err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
+ if (err)
+ return err;
+
+ err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+ if (err)
+ return err;
+
+ props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+ ((u64)fw_rev_min(dev->mdev) << 16) |
+ fw_rev_sub(dev->mdev);
+ props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
+ IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_SYS_IMAGE_GUID |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+
+ if (MLX5_CAP_GEN(mdev, pkv))
+ props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
+ if (MLX5_CAP_GEN(mdev, qkv))
+ props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
+ if (MLX5_CAP_GEN(mdev, apm))
+ props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
+ props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
+ if (MLX5_CAP_GEN(mdev, xrc))
+ props->device_cap_flags |= IB_DEVICE_XRC;
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (MLX5_CAP_GEN(mdev, block_lb_mc))
+ props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+
+ props->vendor_part_id = mdev->pdev->device;
+ props->hw_ver = mdev->pdev->revision;
+
+ props->max_mr_size = ~0ull;
+ props->page_size_cap = ~(u32)((1ull << MLX5_CAP_GEN(mdev, log_pg_sz)) -1);
+ props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+ props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
+ max_sq_desc = min((int)MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+ max_sq_sg = (max_sq_desc -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) / sizeof(struct mlx5_wqe_data_seg);
+ props->max_sge = min(max_rq_sg, max_sq_sg);
+ props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+ props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+ props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+ props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+ props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+ props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
+ props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
+ props->max_srq_sge = max_rq_sg - 1;
+ props->max_fast_reg_page_list_len = (unsigned int)-1;
+ get_atomic_caps(dev, props);
+ props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+ props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+ props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
+ props->max_ah = INT_MAX;
+
+ return 0;
+}
+
+enum mlx5_ib_width {
+ MLX5_IB_WIDTH_1X = 1 << 0,
+ MLX5_IB_WIDTH_2X = 1 << 1,
+ MLX5_IB_WIDTH_4X = 1 << 2,
+ MLX5_IB_WIDTH_8X = 1 << 3,
+ MLX5_IB_WIDTH_12X = 1 << 4
+};
+
+static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+ u8 *ib_width)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ int err = 0;
+
+ if (active_width & MLX5_IB_WIDTH_1X) {
+ *ib_width = IB_WIDTH_1X;
+ } else if (active_width & MLX5_IB_WIDTH_2X) {
+ mlx5_ib_warn(dev, "active_width %d is not supported by IB spec\n",
+ (int)active_width);
+ err = -EINVAL;
+ } else if (active_width & MLX5_IB_WIDTH_4X) {
+ *ib_width = IB_WIDTH_4X;
+ } else if (active_width & MLX5_IB_WIDTH_8X) {
+ *ib_width = IB_WIDTH_8X;
+ } else if (active_width & MLX5_IB_WIDTH_12X) {
+ *ib_width = IB_WIDTH_12X;
+ } else {
+ mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+ (int)active_width);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+/*
+ * TODO: Move to IB core
+ */
+enum ib_max_vl_num {
+ __IB_MAX_VL_0 = 1,
+ __IB_MAX_VL_0_1 = 2,
+ __IB_MAX_VL_0_3 = 3,
+ __IB_MAX_VL_0_7 = 4,
+ __IB_MAX_VL_0_14 = 5,
+};
+
+enum mlx5_vl_hw_cap {
+ MLX5_VL_HW_0 = 1,
+ MLX5_VL_HW_0_1 = 2,
+ MLX5_VL_HW_0_2 = 3,
+ MLX5_VL_HW_0_3 = 4,
+ MLX5_VL_HW_0_4 = 5,
+ MLX5_VL_HW_0_5 = 6,
+ MLX5_VL_HW_0_6 = 7,
+ MLX5_VL_HW_0_7 = 8,
+ MLX5_VL_HW_0_14 = 15
+};
+
+static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
+ u8 *max_vl_num)
+{
+ switch (vl_hw_cap) {
+ case MLX5_VL_HW_0:
+ *max_vl_num = __IB_MAX_VL_0;
+ break;
+ case MLX5_VL_HW_0_1:
+ *max_vl_num = __IB_MAX_VL_0_1;
+ break;
+ case MLX5_VL_HW_0_3:
+ *max_vl_num = __IB_MAX_VL_0_3;
+ break;
+ case MLX5_VL_HW_0_7:
+ *max_vl_num = __IB_MAX_VL_0_7;
+ break;
+ case MLX5_VL_HW_0_14:
+ *max_vl_num = __IB_MAX_VL_0_14;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_query_port_ib(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u32 *rep;
+ int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+ struct mlx5_ptys_reg *ptys;
+ struct mlx5_pmtu_reg *pmtu;
+ struct mlx5_pvlc_reg pvlc;
+ void *ctx;
+ int err;
+
+ rep = mlx5_vzalloc(outlen);
+ ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
+ pmtu = kzalloc(sizeof(*pmtu), GFP_KERNEL);
+ if (!rep || !ptys || !pmtu) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ memset(props, 0, sizeof(*props));
+
+	/* what if I am a PF with dual ports */
+ err = mlx5_query_hca_vport_context(mdev, port, 0, rep, outlen);
+ if (err)
+ goto out;
+
+ ctx = MLX5_ADDR_OF(query_hca_vport_context_out, rep, hca_vport_context);
+
+ props->lid = MLX5_GET(hca_vport_context, ctx, lid);
+ props->lmc = MLX5_GET(hca_vport_context, ctx, lmc);
+ props->sm_lid = MLX5_GET(hca_vport_context, ctx, sm_lid);
+ props->sm_sl = MLX5_GET(hca_vport_context, ctx, sm_sl);
+ props->state = MLX5_GET(hca_vport_context, ctx, vport_state);
+ props->phys_state = MLX5_GET(hca_vport_context, ctx,
+ port_physical_state);
+ props->port_cap_flags = MLX5_GET(hca_vport_context, ctx, cap_mask1);
+ props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
+ props->bad_pkey_cntr = MLX5_GET(hca_vport_context, ctx,
+ pkey_violation_counter);
+ props->qkey_viol_cntr = MLX5_GET(hca_vport_context, ctx,
+ qkey_violation_counter);
+ props->subnet_timeout = MLX5_GET(hca_vport_context, ctx,
+ subnet_timeout);
+ props->init_type_reply = MLX5_GET(hca_vport_context, ctx,
+ init_type_reply);
+
+ ptys->proto_mask |= MLX5_PTYS_IB;
+ ptys->local_port = port;
+ err = mlx5_core_access_ptys(mdev, ptys, 0);
+ if (err)
+ goto out;
+
+ err = translate_active_width(ibdev, ptys->ib_link_width_oper,
+ &props->active_width);
+ if (err)
+ goto out;
+
+ props->active_speed = (u8)ptys->ib_proto_oper;
+
+ pmtu->local_port = port;
+ err = mlx5_core_access_pmtu(mdev, pmtu, 0);
+ if (err)
+ goto out;
+
+ props->max_mtu = pmtu->max_mtu;
+ props->active_mtu = pmtu->oper_mtu;
+
+ memset(&pvlc, 0, sizeof(pvlc));
+ pvlc.local_port = port;
+ err = mlx5_core_access_pvlc(mdev, &pvlc, 0);
+ if (err)
+ goto out;
+
+ err = translate_max_vl_num(ibdev, pvlc.vl_hw_cap,
+ &props->max_vl_num);
+out:
+ kvfree(rep);
+ kfree(ptys);
+ kfree(pmtu);
+ return err;
+}
+
+int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_port_mad_ifc(ibdev, port, props);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_port_ib(ibdev, port, props);
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_query_port_roce(ibdev, port, props);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static void
+mlx5_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+{
+ if (dev->if_addrlen != ETH_ALEN)
+ return;
+
+ memcpy(eui, IF_LLADDR(dev), 3);
+ memcpy(eui + 5, IF_LLADDR(dev) + 3, 3);
+
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ }
+ eui[0] ^= 2;
+}
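+
+/*
+ * Example: for a MAC address of 00:25:90:aa:bb:cc and no VLAN (vlan_id
+ * passed as 0xFFFF, as mlx5_make_default_gid() does below), the code above
+ * yields the interface ID 02:25:90:ff:fe:aa:bb:cc - the usual modified
+ * EUI-64 mapping with ff:fe inserted in the middle and the universal/local
+ * bit flipped. A VLAN id below 0x1000 is stored in those two middle bytes
+ * instead of ff:fe.
+ */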
+
+static void
+mlx5_make_default_gid(struct net_device *dev, union ib_gid *gid)
+{
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx5_addrconf_ifid_eui48(&gid->raw[8], 0xFFFF, dev);
+}
+
+static void
+mlx5_ib_roce_port_update(void *arg)
+{
+ struct mlx5_ib_port *port = (struct mlx5_ib_port *)arg;
+ struct mlx5_ib_dev *dev = port->dev;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct net_device *xdev[MLX5_IB_GID_MAX];
+ struct net_device *idev;
+ struct net_device *ndev;
+ union ib_gid gid_temp;
+
+ while (port->port_gone == 0) {
+ int update = 0;
+ int gid_index = 0;
+ int j;
+ int error;
+
+ ndev = mlx5_get_protocol_dev(mdev, MLX5_INTERFACE_PROTOCOL_ETH);
+ if (ndev == NULL) {
+ pause("W", hz);
+ continue;
+ }
+
+ CURVNET_SET_QUIET(ndev->if_vnet);
+
+ memset(&gid_temp, 0, sizeof(gid_temp));
+ mlx5_make_default_gid(ndev, &gid_temp);
+ if (bcmp(&gid_temp, &port->gid_table[gid_index], sizeof(gid_temp))) {
+ port->gid_table[gid_index] = gid_temp;
+ update = 1;
+ }
+ xdev[gid_index] = ndev;
+ gid_index++;
+
+ IFNET_RLOCK();
+ TAILQ_FOREACH(idev, &V_ifnet, if_link) {
+ if (idev == ndev)
+ break;
+ }
+ if (idev != NULL) {
+ TAILQ_FOREACH(idev, &V_ifnet, if_link) {
+ u16 vid;
+
+ if (idev != ndev) {
+ if (idev->if_type != IFT_L2VLAN)
+ continue;
+ if (ndev != rdma_vlan_dev_real_dev(idev))
+ continue;
+ }
+
+ /* setup valid MAC-based GID */
+ memset(&gid_temp, 0, sizeof(gid_temp));
+ gid_temp.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ vid = rdma_vlan_dev_vlan_id(idev);
+ mlx5_addrconf_ifid_eui48(&gid_temp.raw[8], vid, idev);
+
+ /* check for existing entry */
+ for (j = 0; j != gid_index; j++) {
+ if (bcmp(&gid_temp, &port->gid_table[j], sizeof(gid_temp)) == 0)
+ break;
+ }
+
+ /* check if new entry should be added */
+ if (j == gid_index && gid_index < MLX5_IB_GID_MAX) {
+ if (bcmp(&gid_temp, &port->gid_table[gid_index], sizeof(gid_temp))) {
+ port->gid_table[gid_index] = gid_temp;
+ update = 1;
+ }
+ xdev[gid_index] = idev;
+ gid_index++;
+ }
+ }
+ }
+ IFNET_RUNLOCK();
+ CURVNET_RESTORE();
+
+ if (update != 0 &&
+ mlx5_ib_port_link_layer(&dev->ib_dev, 1) == IB_LINK_LAYER_ETHERNET) {
+ struct ib_event event = {
+ .device = &dev->ib_dev,
+ .element.port_num = port->port_num + 1,
+ .event = IB_EVENT_GID_CHANGE,
+ };
+
+ /* add new entries, if any */
+ for (j = 0; j != gid_index; j++) {
+ error = modify_gid_roce(&dev->ib_dev, port->port_num, j,
+ port->gid_table + j, xdev[j]);
+ if (error != 0)
+ printf("mlx5_ib: Failed to update ROCE GID table: %d\n", error);
+ }
+ memset(&gid_temp, 0, sizeof(gid_temp));
+
+ /* clear old entries, if any */
+ for (; j != MLX5_IB_GID_MAX; j++) {
+ if (bcmp(&gid_temp, port->gid_table + j, sizeof(gid_temp)) == 0)
+ continue;
+ port->gid_table[j] = gid_temp;
+ (void) modify_gid_roce(&dev->ib_dev, port->port_num, j,
+ port->gid_table + j, ndev);
+ }
+
+ /* make sure ibcore gets updated */
+ ib_dispatch_event(&event);
+ }
+ pause("W", hz);
+ }
+ do {
+ struct ib_event event = {
+ .device = &dev->ib_dev,
+ .element.port_num = port->port_num + 1,
+ .event = IB_EVENT_GID_CHANGE,
+ };
+ /* make sure ibcore gets updated */
+ ib_dispatch_event(&event);
+
+ /* wait a bit */
+ pause("W", hz);
+ } while (0);
+ port->port_gone = 2;
+ kthread_exit();
+}
+
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+ union ib_gid *gid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_gids_mad_ifc(ibdev, port, index, gid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_hca_vport_gid(mdev, port, 0, index, gid);
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ if (port == 0 || port > MLX5_CAP_GEN(mdev, num_ports) ||
+ index < 0 || index >= MLX5_IB_GID_MAX ||
+ dev->port[port - 1].port_gone != 0)
+ memset(gid, 0, sizeof(*gid));
+ else
+ *gid = dev->port[port - 1].gid_table[index];
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_pkey_mad_ifc(ibdev, port, index, pkey);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
+ pkey);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_reg_node_desc in;
+ struct mlx5_reg_node_desc out;
+ int err;
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+ return 0;
+
+ /*
+ * If possible, pass node desc to FW, so it can generate
+ * a 144 trap. If cmd fails, just ignore.
+ */
+ memcpy(&in, props->node_desc, 64);
+ err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
+ sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
+ if (err)
+ return err;
+
+ memcpy(ibdev->node_desc, props->node_desc, 64);
+
+ return err;
+}
+
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
+ struct ib_port_modify *props)
+{
+ u8 is_eth = (mlx5_ib_port_link_layer(ibdev, port) ==
+ IB_LINK_LAYER_ETHERNET);
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct ib_port_attr attr;
+ u32 tmp;
+ int err;
+
+ /* return OK if this is RoCE. CM calls ib_modify_port() regardless
+ * of whether port link layer is ETH or IB. For ETH ports, qkey
+ * violations and port capabilities are not valid.
+ */
+ if (is_eth)
+ return 0;
+
+ mutex_lock(&dev->cap_mask_mutex);
+
+ err = mlx5_ib_query_port(ibdev, port, &attr);
+ if (err)
+ goto out;
+
+ tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
+ ~props->clr_port_cap_mask;
+
+ err = mlx5_set_port_caps(dev->mdev, port, tmp);
+
+out:
+ mutex_unlock(&dev->cap_mask_mutex);
+ return err;
+}
+
+enum mlx5_cap_flags {
+ MLX5_CAP_COMPACT_AV = 1 << 0,
+};
+
+static void set_mlx5_flags(u32 *flags, struct mlx5_core_dev *dev)
+{
+ *flags |= MLX5_CAP_GEN(dev, compact_address_vector) ?
+ MLX5_CAP_COMPACT_AV : 0;
+}
+
+static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
+ struct mlx5_ib_alloc_ucontext_resp resp;
+ struct mlx5_ib_ucontext *context;
+ struct mlx5_uuar_info *uuari;
+ struct mlx5_uar *uars;
+ int gross_uuars;
+ int num_uars;
+ int ver;
+ int uuarn;
+ int err;
+ int i;
+ size_t reqlen;
+
+ if (!dev->ib_active)
+ return ERR_PTR(-EAGAIN);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else {
+ mlx5_ib_err(dev, "request malformed, reqlen: %ld\n", (long)reqlen);
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
+ if (err) {
+ mlx5_ib_err(dev, "copy failed\n");
+ return ERR_PTR(err);
+ }
+
+ if (req.reserved) {
+ mlx5_ib_err(dev, "request corrupted\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (req.total_num_uuars == 0 || req.total_num_uuars > MLX5_MAX_UUARS) {
+ mlx5_ib_warn(dev, "wrong num_uuars: %d\n", req.total_num_uuars);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req.total_num_uuars = ALIGN(req.total_num_uuars,
+ MLX5_NON_FP_BF_REGS_PER_PAGE);
+ if (req.num_low_latency_uuars > req.total_num_uuars - 1) {
+		mlx5_ib_warn(dev, "wrong num_low_latency_uuars: %d ( > %d)\n",
+			     req.num_low_latency_uuars, req.total_num_uuars - 1);
+ return ERR_PTR(-EINVAL);
+ }
+
+ num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
+ gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
+ resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+ if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
+ resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+ resp.cache_line_size = L1_CACHE_BYTES;
+ resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+ resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+ resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ set_mlx5_flags(&resp.flags, dev->mdev);
+
+ if (offsetof(struct mlx5_ib_alloc_ucontext_resp, max_desc_sz_sq_dc) < udata->outlen)
+ resp.max_desc_sz_sq_dc = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq_dc);
+
+ if (offsetof(struct mlx5_ib_alloc_ucontext_resp, atomic_arg_sizes_dc) < udata->outlen)
+ resp.atomic_arg_sizes_dc = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return ERR_PTR(-ENOMEM);
+
+ uuari = &context->uuari;
+ mutex_init(&uuari->lock);
+ uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
+ if (!uars) {
+ err = -ENOMEM;
+ goto out_ctx;
+ }
+
+ uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
+ sizeof(*uuari->bitmap),
+ GFP_KERNEL);
+ if (!uuari->bitmap) {
+ err = -ENOMEM;
+ goto out_uar_ctx;
+ }
+ /*
+ * clear all fast path uuars
+ */
+ for (i = 0; i < gross_uuars; i++) {
+ uuarn = i & 3;
+ if (uuarn == 2 || uuarn == 3)
+ set_bit(i, uuari->bitmap);
+ }
+
+ uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
+ if (!uuari->count) {
+ err = -ENOMEM;
+ goto out_bitmap;
+ }
+
+ for (i = 0; i < num_uars; i++) {
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
+ if (err) {
+ mlx5_ib_err(dev, "uar alloc failed at %d\n", i);
+ goto out_uars;
+ }
+ }
+ for (i = 0; i < MLX5_IB_MAX_CTX_DYNAMIC_UARS; i++)
+ context->dynamic_wc_uar_index[i] = MLX5_IB_INVALID_UAR_INDEX;
+
+ INIT_LIST_HEAD(&context->db_page_list);
+ mutex_init(&context->db_page_mutex);
+
+ resp.tot_uuars = req.total_num_uuars;
+ resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
+ err = ib_copy_to_udata(udata, &resp,
+ min_t(size_t, udata->outlen, sizeof(resp)));
+ if (err)
+ goto out_uars;
+
+ uuari->ver = ver;
+ uuari->num_low_latency_uuars = req.num_low_latency_uuars;
+ uuari->uars = uars;
+ uuari->num_uars = num_uars;
+
+ if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx5_alloc_transport_domain(dev->mdev, &context->tdn);
+ if (err)
+ goto out_uars;
+ }
+
+ return &context->ibucontext;
+
+out_uars:
+ for (i--; i >= 0; i--)
+ mlx5_cmd_free_uar(dev->mdev, uars[i].index);
+ kfree(uuari->count);
+
+out_bitmap:
+ kfree(uuari->bitmap);
+
+out_uar_ctx:
+ kfree(uars);
+
+out_ctx:
+ kfree(context);
+ return ERR_PTR(err);
+}
+
+static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
+ struct mlx5_uuar_info *uuari = &context->uuari;
+ int i;
+
+ if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET)
+ mlx5_dealloc_transport_domain(dev->mdev, context->tdn);
+
+ for (i = 0; i < uuari->num_uars; i++) {
+ if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
+ mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
+ }
+ for (i = 0; i < MLX5_IB_MAX_CTX_DYNAMIC_UARS; i++) {
+ if (context->dynamic_wc_uar_index[i] != MLX5_IB_INVALID_UAR_INDEX)
+ mlx5_cmd_free_uar(dev->mdev, context->dynamic_wc_uar_index[i]);
+ }
+
+ kfree(uuari->count);
+ kfree(uuari->bitmap);
+ kfree(uuari->uars);
+ kfree(context);
+
+ return 0;
+}
+
+static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
+{
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
+}
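+
+/*
+ * The UAR doorbell pages are laid out back to back at the start of the
+ * device's BAR 0, so UAR index N corresponds to the N'th PAGE_SIZE page of
+ * that BAR; uar_mmap() below turns this PFN back into a user mapping.
+ */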
+
+static int get_command(unsigned long offset)
+{
+ return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
+}
+
+static int get_arg(unsigned long offset)
+{
+ return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
+}
+
+static int get_index(unsigned long offset)
+{
+ return get_arg(offset);
+}
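+
+/*
+ * The mmap offset (vm_pgoff, counted in pages) is an encoded request: the
+ * bits at and above MLX5_IB_MMAP_CMD_SHIFT select the command and the low
+ * bits carry its argument, here the UAR page index. For example, assuming
+ * a command shift of 8, a vm_pgoff of 0x0103 would decode to command 1
+ * with index 3.
+ */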
+
+static int uar_mmap(struct vm_area_struct *vma, pgprot_t prot, bool is_wc,
+ struct mlx5_uuar_info *uuari, struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
+{
+ unsigned long idx;
+ phys_addr_t pfn;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE) {
+ mlx5_ib_warn(dev, "wrong size, expected PAGE_SIZE(%ld) got %ld\n",
+ (long)PAGE_SIZE, (long)(vma->vm_end - vma->vm_start));
+ return -EINVAL;
+ }
+
+ idx = get_index(vma->vm_pgoff);
+ if (idx >= uuari->num_uars) {
+ mlx5_ib_warn(dev, "wrong offset, idx:%ld num_uars:%d\n",
+ idx, uuari->num_uars);
+ return -EINVAL;
+ }
+
+ pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+ mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
+ (unsigned long long)pfn);
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ mlx5_ib_err(dev, "io remap failed\n");
+ return -EAGAIN;
+ }
+
+ mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA 0x%llx\n", is_wc ? "WC" : "NC",
+ (long)vma->vm_start, (unsigned long long)pfn << PAGE_SHIFT);
+
+ return 0;
+}
+
+static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
+ struct mlx5_uuar_info *uuari = &context->uuari;
+ unsigned long command;
+
+ command = get_command(vma->vm_pgoff);
+ switch (command) {
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ return uar_mmap(vma, pgprot_writecombine(vma->vm_page_prot),
+ true,
+ uuari, dev, context);
+
+ break;
+
+ case MLX5_IB_MMAP_WC_PAGE:
+ return uar_mmap(vma, pgprot_writecombine(vma->vm_page_prot),
+ true, uuari, dev, context);
+ break;
+
+ case MLX5_IB_MMAP_NC_PAGE:
+ return uar_mmap(vma, pgprot_noncached(vma->vm_page_prot),
+ false, uuari, dev, context);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
+{
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_mkey_seg *seg;
+ struct mlx5_core_mr mr;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ seg = &in->seg;
+ seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
+ seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ seg->start_addr = 0;
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
+ NULL, NULL, NULL);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
+ goto err_in;
+ }
+
+ kfree(in);
+ *key = mr.key;
+
+ return 0;
+
+err_in:
+ kfree(in);
+
+ return err;
+}
+
+static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
+{
+ struct mlx5_core_mr mr;
+ int err;
+
+ memset(&mr, 0, sizeof(mr));
+ mr.key = key;
+ err = mlx5_core_destroy_mkey(dev->mdev, &mr);
+ if (err)
+ mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
+}
+
+static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_alloc_pd_resp resp;
+ struct mlx5_ib_pd *pd;
+ int err;
+
+ pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
+ if (err) {
+ mlx5_ib_warn(dev, "pd alloc failed\n");
+ kfree(pd);
+ return ERR_PTR(err);
+ }
+
+ if (context) {
+ resp.pdn = pd->pdn;
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ mlx5_ib_err(dev, "copy failed\n");
+ mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
+ kfree(pd);
+ return ERR_PTR(-EFAULT);
+ }
+ } else {
+ err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
+ if (err) {
+ mlx5_ib_err(dev, "alloc mkey failed\n");
+ mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
+ kfree(pd);
+ return ERR_PTR(err);
+ }
+ }
+
+ return &pd->ibpd;
+}
+
+static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(pd->device);
+ struct mlx5_ib_pd *mpd = to_mpd(pd);
+
+ if (!pd->uobject)
+ free_pa_mkey(mdev, mpd->pa_lkey);
+
+ mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
+ kfree(mpd);
+
+ return 0;
+}
+
+static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ int err;
+
+ if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ err = -EOPNOTSUPP;
+ else
+ err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
+ if (err)
+ mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
+ ibqp->qp_num, gid->raw);
+
+ return err;
+}
+
+static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ int err;
+
+ if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+ err = -EOPNOTSUPP;
+ else
+ err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
+ if (err)
+ mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
+ ibqp->qp_num, gid->raw);
+
+ return err;
+}
+
+static int init_node_data(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
+ if (err)
+ return err;
+
+ return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
+}
+
+static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+
+ return sprintf(buf, "%lld\n", (long long)dev->mdev->priv.fw_pages);
+}
+
+static ssize_t show_reg_pages(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+
+ return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
+}
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
+}
+
+static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev),
+ fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
+}
+
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ return sprintf(buf, "%x\n", (unsigned)dev->mdev->pdev->revision);
+}
+
+static ssize_t show_board(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(device, struct mlx5_ib_dev, ib_dev.dev);
+ return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
+ dev->mdev->board_id);
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
+static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
+
+static struct device_attribute *mlx5_class_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type,
+ &dev_attr_board_id,
+ &dev_attr_fw_pages,
+ &dev_attr_reg_pages,
+};
+
+static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_qp *mqp;
+ struct mlx5_ib_cq *send_mcq, *recv_mcq;
+ struct mlx5_core_cq *mcq;
+ struct list_head cq_armed_list;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ unsigned long flags;
+
+ mlx5_ib_warn(ibdev, " started\n");
+ INIT_LIST_HEAD(&cq_armed_list);
+
+	/* Go over the QP list residing on this ibdev, synced with QP create/destroy. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+	/*
+	 * At this point all in-flight post sends have been flushed by the
+	 * lock/unlock cycles above. Now arm all involved CQs.
+	 */
+ list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
+ mcq->comp(mcq);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+ mlx5_ib_warn(ibdev, " ended\n");
+ return;
+}
+
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
+ struct ib_event ibev;
+
+ u8 port = 0;
+
+ switch (event) {
+ case MLX5_DEV_EVENT_SYS_ERROR:
+ ibdev->ib_active = false;
+ ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
+ break;
+
+ case MLX5_DEV_EVENT_PORT_UP:
+ ibev.event = IB_EVENT_PORT_ACTIVE;
+ port = (u8)param;
+ break;
+
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ case MLX5_DEV_EVENT_PORT_INITIALIZED:
+ ibev.event = IB_EVENT_PORT_ERR;
+ port = (u8)param;
+ break;
+
+ case MLX5_DEV_EVENT_LID_CHANGE:
+ ibev.event = IB_EVENT_LID_CHANGE;
+ port = (u8)param;
+ break;
+
+ case MLX5_DEV_EVENT_PKEY_CHANGE:
+ ibev.event = IB_EVENT_PKEY_CHANGE;
+ port = (u8)param;
+ break;
+
+ case MLX5_DEV_EVENT_GUID_CHANGE:
+ ibev.event = IB_EVENT_GID_CHANGE;
+ port = (u8)param;
+ break;
+
+ case MLX5_DEV_EVENT_CLIENT_REREG:
+ ibev.event = IB_EVENT_CLIENT_REREGISTER;
+ port = (u8)param;
+ break;
+
+ default:
+ break;
+ }
+
+ ibev.device = &ibdev->ib_dev;
+ ibev.element.port_num = port;
+
+ if ((event != MLX5_DEV_EVENT_SYS_ERROR) &&
+ (port < 1 || port > ibdev->num_ports)) {
+ mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+ return;
+ }
+
+ if (ibdev->ib_active)
+ ib_dispatch_event(&ibev);
+}
+
+static void get_ext_port_caps(struct mlx5_ib_dev *dev)
+{
+ int port;
+
+ for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
+ mlx5_query_ext_port_caps(dev, port);
+}
+
+static void config_atomic_responder(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ enum ib_atomic_cap cap = props->atomic_cap;
+
+#if 0
+ if (cap == IB_ATOMIC_HCA ||
+ cap == IB_ATOMIC_GLOB)
+#endif
+ dev->enable_atomic_resp = 1;
+
+ dev->atomic_cap = cap;
+}
+
+enum mlx5_addr_align {
+ MLX5_ADDR_ALIGN_0 = 0,
+ MLX5_ADDR_ALIGN_64 = 64,
+ MLX5_ADDR_ALIGN_128 = 128,
+};
+
+static int get_port_caps(struct mlx5_ib_dev *dev)
+{
+ struct ib_device_attr *dprops = NULL;
+ struct ib_port_attr *pprops = NULL;
+ int err = -ENOMEM;
+ int port;
+
+ pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
+ if (!pprops)
+ goto out;
+
+ dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
+ if (!dprops)
+ goto out;
+
+ err = mlx5_ib_query_device(&dev->ib_dev, dprops);
+ if (err) {
+ mlx5_ib_warn(dev, "query_device failed %d\n", err);
+ goto out;
+ }
+ config_atomic_responder(dev, dprops);
+
+ for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
+ err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
+ if (err) {
+ mlx5_ib_warn(dev, "query_port %d failed %d\n",
+ port, err);
+ break;
+ }
+ dev->mdev->port_caps[port - 1].pkey_table_len = dprops->max_pkeys;
+ dev->mdev->port_caps[port - 1].gid_table_len = pprops->gid_tbl_len;
+ mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
+ dprops->max_pkeys, pprops->gid_tbl_len);
+ }
+
+out:
+ kfree(pprops);
+ kfree(dprops);
+
+ return err;
+}
+
+static void destroy_umrc_res(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ err = mlx5_mr_cache_cleanup(dev);
+ if (err)
+ mlx5_ib_warn(dev, "mr cache cleanup failed\n");
+
+ ib_dereg_mr(dev->umrc.mr);
+ ib_dealloc_pd(dev->umrc.pd);
+}
+
+enum {
+ MAX_UMR_WR = 128,
+};
+
+static int create_umr_res(struct mlx5_ib_dev *dev)
+{
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ int ret;
+
+ pd = ib_alloc_pd(&dev->ib_dev);
+ if (IS_ERR(pd)) {
+ mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
+ ret = PTR_ERR(pd);
+ goto error_0;
+ }
+
+ mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(mr)) {
+ mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
+ ret = PTR_ERR(mr);
+ goto error_1;
+ }
+
+ dev->umrc.mr = mr;
+ dev->umrc.pd = pd;
+
+ ret = mlx5_mr_cache_init(dev);
+ if (ret) {
+ mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+ goto error_4;
+ }
+
+ return 0;
+
+error_4:
+ ib_dereg_mr(mr);
+error_1:
+ ib_dealloc_pd(pd);
+error_0:
+ return ret;
+}
+
+static int create_dev_resources(struct mlx5_ib_resources *devr)
+{
+ struct ib_srq_init_attr attr;
+ struct mlx5_ib_dev *dev;
+ int ret = 0;
+
+ dev = container_of(devr, struct mlx5_ib_dev, devr);
+
+ devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
+ if (IS_ERR(devr->p0)) {
+ ret = PTR_ERR(devr->p0);
+ goto error0;
+ }
+ devr->p0->device = &dev->ib_dev;
+ devr->p0->uobject = NULL;
+ atomic_set(&devr->p0->usecnt, 0);
+
+ devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
+ if (IS_ERR(devr->c0)) {
+ ret = PTR_ERR(devr->c0);
+ goto error1;
+ }
+ devr->c0->device = &dev->ib_dev;
+ devr->c0->uobject = NULL;
+ devr->c0->comp_handler = NULL;
+ devr->c0->event_handler = NULL;
+ devr->c0->cq_context = NULL;
+ atomic_set(&devr->c0->usecnt, 0);
+
+ devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+ if (IS_ERR(devr->x0)) {
+ ret = PTR_ERR(devr->x0);
+ goto error2;
+ }
+ devr->x0->device = &dev->ib_dev;
+ devr->x0->inode = NULL;
+ atomic_set(&devr->x0->usecnt, 0);
+ mutex_init(&devr->x0->tgt_qp_mutex);
+ INIT_LIST_HEAD(&devr->x0->tgt_qp_list);
+
+ devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
+ if (IS_ERR(devr->x1)) {
+ ret = PTR_ERR(devr->x1);
+ goto error3;
+ }
+ devr->x1->device = &dev->ib_dev;
+ devr->x1->inode = NULL;
+ atomic_set(&devr->x1->usecnt, 0);
+ mutex_init(&devr->x1->tgt_qp_mutex);
+ INIT_LIST_HEAD(&devr->x1->tgt_qp_list);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_XRC;
+ attr.ext.xrc.cq = devr->c0;
+ attr.ext.xrc.xrcd = devr->x0;
+
+ devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
+ if (IS_ERR(devr->s0)) {
+ ret = PTR_ERR(devr->s0);
+ goto error4;
+ }
+ devr->s0->device = &dev->ib_dev;
+ devr->s0->pd = devr->p0;
+ devr->s0->uobject = NULL;
+ devr->s0->event_handler = NULL;
+ devr->s0->srq_context = NULL;
+ devr->s0->srq_type = IB_SRQT_XRC;
+ devr->s0->ext.xrc.xrcd = devr->x0;
+ devr->s0->ext.xrc.cq = devr->c0;
+ atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
+ atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
+ atomic_inc(&devr->p0->usecnt);
+ atomic_set(&devr->s0->usecnt, 0);
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_BASIC;
+ devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
+ if (IS_ERR(devr->s1)) {
+ ret = PTR_ERR(devr->s1);
+ goto error5;
+ }
+ devr->s1->device = &dev->ib_dev;
+ devr->s1->pd = devr->p0;
+ devr->s1->uobject = NULL;
+ devr->s1->event_handler = NULL;
+ devr->s1->srq_context = NULL;
+ devr->s1->srq_type = IB_SRQT_BASIC;
+ devr->s1->ext.xrc.cq = devr->c0;
+ atomic_inc(&devr->p0->usecnt);
+ atomic_set(&devr->s1->usecnt, 0);
+
+ return 0;
+
+error5:
+ mlx5_ib_destroy_srq(devr->s0);
+error4:
+ mlx5_ib_dealloc_xrcd(devr->x1);
+error3:
+ mlx5_ib_dealloc_xrcd(devr->x0);
+error2:
+ mlx5_ib_destroy_cq(devr->c0);
+error1:
+ mlx5_ib_dealloc_pd(devr->p0);
+error0:
+ return ret;
+}
+
+static void destroy_dev_resources(struct mlx5_ib_resources *devr)
+{
+ mlx5_ib_destroy_srq(devr->s1);
+ mlx5_ib_destroy_srq(devr->s0);
+ mlx5_ib_dealloc_xrcd(devr->x0);
+ mlx5_ib_dealloc_xrcd(devr->x1);
+ mlx5_ib_destroy_cq(devr->c0);
+ mlx5_ib_dealloc_pd(devr->p0);
+}
+
+static u32 get_core_cap_flags(struct ib_device *ibdev)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
+ u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
+ u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ u32 ret = 0;
+
+ if (ll == IB_LINK_LAYER_INFINIBAND)
+ return RDMA_CORE_PORT_IBA_IB;
+
+ ret = RDMA_CORE_PORT_RAW_PACKET;
+
+ if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
+ return ret;
+
+ if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
+ return ret;
+
+ if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
+ ret |= RDMA_CORE_PORT_IBA_ROCE;
+
+ if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
+ ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ return ret;
+}
+
+static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
+ int err;
+
+ immutable->core_cap_flags = get_core_cap_flags(ibdev);
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = get_core_cap_flags(ibdev);
+ if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void enable_dc_tracer(struct mlx5_ib_dev *dev)
+{
+ struct device *device = dev->ib_dev.dma_device;
+ struct mlx5_dc_tracer *dct = &dev->dctr;
+ int order;
+ void *tmp;
+ int size;
+ int err;
+
+ size = MLX5_CAP_GEN(dev->mdev, num_ports) * 4096;
+ if (size <= PAGE_SIZE)
+ order = 0;
+ else
+ order = 1;
+
+ dct->pg = alloc_pages(GFP_KERNEL, order);
+ if (!dct->pg) {
+		mlx5_ib_err(dev, "failed to allocate order %d pages\n", order);
+ return;
+ }
+
+ tmp = page_address(dct->pg);
+ memset(tmp, 0xff, size);
+
+ dct->size = size;
+ dct->order = order;
+ dct->dma = dma_map_page(device, dct->pg, 0, size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(device, dct->dma)) {
+ mlx5_ib_err(dev, "dma mapping error\n");
+ goto map_err;
+ }
+
+ err = mlx5_core_set_dc_cnak_trace(dev->mdev, 1, dct->dma);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to enable DC tracer\n");
+ goto cmd_err;
+ }
+
+ return;
+
+cmd_err:
+ dma_unmap_page(device, dct->dma, size, DMA_FROM_DEVICE);
+map_err:
+ __free_pages(dct->pg, dct->order);
+ dct->pg = NULL;
+}
+
+static void disable_dc_tracer(struct mlx5_ib_dev *dev)
+{
+ struct device *device = dev->ib_dev.dma_device;
+ struct mlx5_dc_tracer *dct = &dev->dctr;
+ int err;
+
+ if (!dct->pg)
+ return;
+
+ err = mlx5_core_set_dc_cnak_trace(dev->mdev, 0, dct->dma);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to disable DC tracer\n");
+ return;
+ }
+
+ dma_unmap_page(device, dct->dma, dct->size, DMA_FROM_DEVICE);
+ __free_pages(dct->pg, dct->order);
+ dct->pg = NULL;
+}
+
+enum {
+ MLX5_DC_CNAK_SIZE = 128,
+ MLX5_NUM_BUF_IN_PAGE = PAGE_SIZE / MLX5_DC_CNAK_SIZE,
+ MLX5_CNAK_TX_CQ_SIGNAL_FACTOR = 128,
+ MLX5_DC_CNAK_SL = 0,
+ MLX5_DC_CNAK_VL = 0,
+};
+
+static int init_dc_improvements(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_core_is_pf(dev->mdev))
+ return 0;
+
+ if (!(MLX5_CAP_GEN(dev->mdev, dc_cnak_trace)))
+ return 0;
+
+ enable_dc_tracer(dev);
+
+ return 0;
+}
+
+static void cleanup_dc_improvements(struct mlx5_ib_dev *dev)
+{
+
+ disable_dc_tracer(dev);
+}
+
+static void mlx5_ib_dealloc_q_port_counter(struct mlx5_ib_dev *dev, u8 port_num)
+{
+ mlx5_vport_dealloc_q_counter(dev->mdev,
+ MLX5_INTERFACE_PROTOCOL_IB,
+ dev->port[port_num].q_cnt_id);
+ dev->port[port_num].q_cnt_id = 0;
+}
+
+static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_ports; i++)
+ mlx5_ib_dealloc_q_port_counter(dev, i);
+}
+
+static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ ret = mlx5_vport_alloc_q_counter(dev->mdev,
+ MLX5_INTERFACE_PROTOCOL_IB,
+ &dev->port[i].q_cnt_id);
+ if (ret) {
+ mlx5_ib_warn(dev,
+ "couldn't allocate queue counter for port %d\n",
+ i + 1);
+ goto dealloc_counters;
+ }
+ }
+
+ return 0;
+
+dealloc_counters:
+ while (--i >= 0)
+ mlx5_ib_dealloc_q_port_counter(dev, i);
+
+ return ret;
+}
+
+struct port_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mlx5_ib_port *,
+ struct port_attribute *, char *buf);
+ ssize_t (*store)(struct mlx5_ib_port *,
+ struct port_attribute *,
+ const char *buf, size_t count);
+};
+
+struct port_counter_attribute {
+ struct port_attribute attr;
+ size_t offset;
+};
+
+static ssize_t port_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct port_attribute *port_attr =
+ container_of(attr, struct port_attribute, attr);
+ struct mlx5_ib_port_sysfs_group *p =
+ container_of(kobj, struct mlx5_ib_port_sysfs_group,
+ kobj);
+ struct mlx5_ib_port *mibport = container_of(p, struct mlx5_ib_port,
+ group);
+
+ if (!port_attr->show)
+ return -EIO;
+
+ return port_attr->show(mibport, port_attr, buf);
+}
+
+static ssize_t show_port_counter(struct mlx5_ib_port *p,
+ struct port_attribute *port_attr,
+ char *buf)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+ struct port_counter_attribute *counter_attr =
+ container_of(port_attr, struct port_counter_attribute, attr);
+ void *out;
+ int ret;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_vport_query_q_counter(p->dev->mdev,
+ p->q_cnt_id, 0,
+ out, outlen);
+ if (ret)
+ goto free;
+
+ ret = sprintf(buf, "%d\n",
+ be32_to_cpu(*(__be32 *)(out + counter_attr->offset)));
+
+free:
+ kfree(out);
+ return ret;
+}
+
+#define PORT_COUNTER_ATTR(_name) \
+struct port_counter_attribute port_counter_attr_##_name = { \
+ .attr = __ATTR(_name, S_IRUGO, show_port_counter, NULL), \
+ .offset = MLX5_BYTE_OFF(query_q_counter_out, _name) \
+}
+
+static PORT_COUNTER_ATTR(rx_write_requests);
+static PORT_COUNTER_ATTR(rx_read_requests);
+static PORT_COUNTER_ATTR(rx_atomic_requests);
+static PORT_COUNTER_ATTR(rx_dct_connect);
+static PORT_COUNTER_ATTR(out_of_buffer);
+static PORT_COUNTER_ATTR(out_of_sequence);
+static PORT_COUNTER_ATTR(duplicate_request);
+static PORT_COUNTER_ATTR(rnr_nak_retry_err);
+static PORT_COUNTER_ATTR(packet_seq_err);
+static PORT_COUNTER_ATTR(implied_nak_seq_err);
+static PORT_COUNTER_ATTR(local_ack_timeout_err);
+
+static struct attribute *counter_attrs[] = {
+ &port_counter_attr_rx_write_requests.attr.attr,
+ &port_counter_attr_rx_read_requests.attr.attr,
+ &port_counter_attr_rx_atomic_requests.attr.attr,
+ &port_counter_attr_rx_dct_connect.attr.attr,
+ &port_counter_attr_out_of_buffer.attr.attr,
+ &port_counter_attr_out_of_sequence.attr.attr,
+ &port_counter_attr_duplicate_request.attr.attr,
+ &port_counter_attr_rnr_nak_retry_err.attr.attr,
+ &port_counter_attr_packet_seq_err.attr.attr,
+ &port_counter_attr_implied_nak_seq_err.attr.attr,
+ &port_counter_attr_local_ack_timeout_err.attr.attr,
+ NULL
+};
+
+static struct attribute_group port_counters_group = {
+ .name = "counters",
+ .attrs = counter_attrs
+};
+
+static const struct sysfs_ops port_sysfs_ops = {
+ .show = port_attr_show
+};
+
+static struct kobj_type port_type = {
+ .sysfs_ops = &port_sysfs_ops,
+};
+
+static int add_port_attrs(struct mlx5_ib_dev *dev,
+ struct kobject *parent,
+ struct mlx5_ib_port_sysfs_group *port,
+ u8 port_num)
+{
+ int ret;
+
+ ret = kobject_init_and_add(&port->kobj, &port_type,
+ parent,
+ "%d", port_num);
+ if (ret)
+ return ret;
+
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
+ MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+ ret = sysfs_create_group(&port->kobj, &port_counters_group);
+ if (ret)
+ goto put_kobj;
+ }
+
+ port->enabled = true;
+ return ret;
+
+put_kobj:
+ kobject_put(&port->kobj);
+ return ret;
+}
+
+static void destroy_ports_attrs(struct mlx5_ib_dev *dev,
+ unsigned int num_ports)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_ports; i++) {
+ struct mlx5_ib_port_sysfs_group *port =
+ &dev->port[i].group;
+
+ if (!port->enabled)
+ continue;
+
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
+ MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
+ sysfs_remove_group(&port->kobj,
+ &port_counters_group);
+ kobject_put(&port->kobj);
+ port->enabled = false;
+ }
+
+ if (dev->ports_parent) {
+ kobject_put(dev->ports_parent);
+ dev->ports_parent = NULL;
+ }
+}
+
+static int create_port_attrs(struct mlx5_ib_dev *dev)
+{
+ int ret = 0;
+ unsigned int i = 0;
+ struct device *device = &dev->ib_dev.dev;
+
+ dev->ports_parent = kobject_create_and_add("mlx5_ports",
+ &device->kobj);
+ if (!dev->ports_parent)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ ret = add_port_attrs(dev,
+ dev->ports_parent,
+ &dev->port[i].group,
+ i + 1);
+
+ if (ret)
+ goto _destroy_ports_attrs;
+ }
+
+ return 0;
+
+_destroy_ports_attrs:
+ destroy_ports_attrs(dev, i);
+ return ret;
+}
+
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_dev *dev;
+ int err;
+ int i;
+
+ printk_once(KERN_INFO "%s", mlx5_version);
+
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->mdev = mdev;
+
+ dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port)
+ goto err_dealloc;
+
+ for (i = 0; i < MLX5_CAP_GEN(mdev, num_ports); i++) {
+ dev->port[i].dev = dev;
+ dev->port[i].port_num = i;
+ dev->port[i].port_gone = 0;
+ memset(dev->port[i].gid_table, 0, sizeof(dev->port[i].gid_table));
+ }
+
+ err = get_port_caps(dev);
+ if (err)
+ goto err_free_port;
+
+ if (mlx5_use_mad_ifc(dev))
+ get_ext_port_caps(dev);
+
+ if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ if (MLX5_CAP_GEN(mdev, roce)) {
+ err = mlx5_nic_vport_enable_roce(mdev);
+ if (err)
+ goto err_free_port;
+ } else {
+ goto err_free_port;
+ }
+ }
+
+ MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
+
+ strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
+ dev->ib_dev.owner = THIS_MODULE;
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = mdev->special_contexts.resd_lkey;
+ dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.num_comp_vectors =
+ dev->mdev->priv.eq_table.num_comp_vectors;
+ dev->ib_dev.dma_device = &mdev->pdev->dev;
+
+ dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
+ dev->ib_dev.uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
+ (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+
+ dev->ib_dev.query_device = mlx5_ib_query_device;
+ dev->ib_dev.query_port = mlx5_ib_query_port;
+ dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
+ dev->ib_dev.query_gid = mlx5_ib_query_gid;
+ dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
+ dev->ib_dev.modify_device = mlx5_ib_modify_device;
+ dev->ib_dev.modify_port = mlx5_ib_modify_port;
+ dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
+ dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
+ dev->ib_dev.mmap = mlx5_ib_mmap;
+ dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
+ dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
+ dev->ib_dev.create_ah = mlx5_ib_create_ah;
+ dev->ib_dev.query_ah = mlx5_ib_query_ah;
+ dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
+ dev->ib_dev.create_srq = mlx5_ib_create_srq;
+ dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
+ dev->ib_dev.query_srq = mlx5_ib_query_srq;
+ dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
+ dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
+ dev->ib_dev.create_qp = mlx5_ib_create_qp;
+ dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
+ dev->ib_dev.query_qp = mlx5_ib_query_qp;
+ dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
+ dev->ib_dev.post_send = mlx5_ib_post_send;
+ dev->ib_dev.post_recv = mlx5_ib_post_recv;
+ dev->ib_dev.create_cq = mlx5_ib_create_cq;
+ dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
+ dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
+ dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
+ dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
+ dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
+ dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
+ dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
+ dev->ib_dev.reg_phys_mr = mlx5_ib_reg_phys_mr;
+ dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
+ dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
+ dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
+ dev->ib_dev.process_mad = mlx5_ib_process_mad;
+ dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr;
+ dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
+ dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
+
+ if (MLX5_CAP_GEN(mdev, xrc)) {
+ dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
+ dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
+ dev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
+ (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
+ }
+
+ err = init_node_data(dev);
+ if (err)
+ goto err_disable_roce;
+
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+
+ err = create_dev_resources(&dev->devr);
+ if (err)
+ goto err_disable_roce;
+
+ err = mlx5_ib_alloc_q_counters(dev);
+ if (err)
+ goto err_odp;
+
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
+ goto err_q_cnt;
+
+ err = create_umr_res(dev);
+ if (err)
+ goto err_dev;
+
+ if (MLX5_CAP_GEN(dev->mdev, port_type) ==
+ MLX5_CAP_PORT_TYPE_IB) {
+ if (init_dc_improvements(dev))
+ mlx5_ib_dbg(dev, "init_dc_improvements - continuing\n");
+ }
+
+ err = create_port_attrs(dev);
+ if (err)
+ goto err_dc;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
+ err = device_create_file(&dev->ib_dev.dev,
+ mlx5_class_attributes[i]);
+ if (err)
+ goto err_port_attrs;
+ }
+
+ if (1) {
+ struct thread *rl_thread = NULL;
+ struct proc *rl_proc = NULL;
+
+ for (i = 0; i < MLX5_CAP_GEN(mdev, num_ports); i++) {
+ (void) kproc_kthread_add(mlx5_ib_roce_port_update, dev->port + i, &rl_proc, &rl_thread,
+ RFHIGHPID, 0, "mlx5-ib-roce-port", "mlx5-ib-roce_port-%d", i);
+ }
+ }
+
+ dev->ib_active = true;
+
+ return dev;
+
+err_port_attrs:
+ destroy_ports_attrs(dev, dev->num_ports);
+
+err_dc:
+ if (MLX5_CAP_GEN(dev->mdev, port_type) ==
+ MLX5_CAP_PORT_TYPE_IB)
+ cleanup_dc_improvements(dev);
+ destroy_umrc_res(dev);
+
+err_dev:
+ ib_unregister_device(&dev->ib_dev);
+
+err_q_cnt:
+ mlx5_ib_dealloc_q_counters(dev);
+
+err_odp:
+ destroy_dev_resources(&dev->devr);
+
+err_disable_roce:
+ if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET && MLX5_CAP_GEN(mdev, roce))
+ mlx5_nic_vport_disable_roce(mdev);
+err_free_port:
+ kfree(dev->port);
+
+err_dealloc:
+ ib_dealloc_device((struct ib_device *)dev);
+
+ return NULL;
+}
+
+static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
+{
+ struct mlx5_ib_dev *dev = context;
+ int i;
+
+ for (i = 0; i < MLX5_CAP_GEN(mdev, num_ports); i++) {
+ dev->port[i].port_gone = 1;
+ while (dev->port[i].port_gone != 2)
+ pause("W", hz);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
+ device_remove_file(&dev->ib_dev.dev,
+ mlx5_class_attributes[i]);
+ }
+
+ destroy_ports_attrs(dev, dev->num_ports);
+ if (MLX5_CAP_GEN(dev->mdev, port_type) ==
+ MLX5_CAP_PORT_TYPE_IB)
+ cleanup_dc_improvements(dev);
+ mlx5_ib_dealloc_q_counters(dev);
+ ib_unregister_device(&dev->ib_dev);
+ destroy_umrc_res(dev);
+ destroy_dev_resources(&dev->devr);
+
+ if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
+ IB_LINK_LAYER_ETHERNET && MLX5_CAP_GEN(mdev, roce))
+ mlx5_nic_vport_disable_roce(mdev);
+
+ kfree(dev->port);
+ ib_dealloc_device(&dev->ib_dev);
+}
+
+static struct mlx5_interface mlx5_ib_interface = {
+ .add = mlx5_ib_add,
+ .remove = mlx5_ib_remove,
+ .event = mlx5_ib_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_IB,
+};
+
+static int __init mlx5_ib_init(void)
+{
+ int err;
+
+ if (deprecated_prof_sel != 2)
+ printf("mlx5_ib: WARN: ""prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+
+ err = mlx5_register_interface(&mlx5_ib_interface);
+ if (err)
+ goto clean_odp;
+
+ mlx5_ib_wq = create_singlethread_workqueue("mlx5_ib_wq");
+ if (!mlx5_ib_wq) {
+ printf("mlx5_ib: ERR: ""%s: failed to create mlx5_ib_wq\n", __func__);
+ goto err_unreg;
+ }
+
+ return err;
+
+err_unreg:
+ mlx5_unregister_interface(&mlx5_ib_interface);
+
+clean_odp:
+ return err;
+}
+
+static void __exit mlx5_ib_cleanup(void)
+{
+ destroy_workqueue(mlx5_ib_wq);
+ mlx5_unregister_interface(&mlx5_ib_interface);
+}
+
+module_init_order(mlx5_ib_init, SI_ORDER_THIRD);
+module_exit_order(mlx5_ib_cleanup, SI_ORDER_THIRD);
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,194 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#include <linux/module.h>
+#include <rdma/ib_umem.h>
+#include "mlx5_ib.h"
+
+CTASSERT(sizeof(uintptr_t) == sizeof(unsigned long));
+
+/* @umem: umem object to scan
+ * @addr: ib virtual address requested by the user
+ * @count: number of PAGE_SIZE pages covered by umem
+ * @shift: page shift for the compound pages found in the region
+ * @ncont: number of compound pages
+ * @order: log2 of the number of compound pages
+ */
+
+void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
+ int *ncont, int *order)
+{
+ struct ib_umem_chunk *chunk;
+ unsigned long tmp;
+ unsigned long m;
+ int i, j, k;
+ u64 base = 0;
+ int p = 0;
+ int skip;
+ int mask;
+ u64 len;
+ u64 pfn;
+ struct scatterlist *sg;
+ unsigned long page_shift = ilog2(umem->page_size);
+
+ addr = addr >> page_shift;
+ tmp = (uintptr_t)addr;
+ m = find_first_bit(&tmp, 8 * sizeof(tmp));
+ skip = 1 << m;
+ mask = skip - 1;
+ i = 0;
+
+ list_for_each_entry(chunk, &umem->chunk_list, list) {
+ for (j = 0; j < chunk->nmap; j++) {
+ sg = chunk->page_list + j;
+ len = sg_dma_len(sg) >> page_shift;
+ pfn = sg_dma_address(sg) >> page_shift;
+ for (k = 0; k < len; k++) {
+ if (!(i & mask)) {
+ tmp = (uintptr_t)pfn;
+ m = min_t(unsigned long, m,
+ find_first_bit(&tmp, 8 * sizeof(tmp)));
+ skip = 1 << m;
+ mask = skip - 1;
+ base = pfn;
+ p = 0;
+ } else {
+ if (base + p != pfn) {
+ tmp = (uintptr_t)p;
+ m = find_first_bit(&tmp, 8 * sizeof(tmp));
+ skip = 1 << m;
+ mask = skip - 1;
+ base = pfn;
+ p = 0;
+ }
+ }
+ p++;
+ i++;
+ }
+ }
+ }
+
+ if (i) {
+ m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
+
+ if (order)
+ *order = ilog2(roundup_pow_of_two(i) >> m);
+
+ *ncont = DIV_ROUND_UP(i, (1 << m));
+ } else {
+ m = 0;
+
+ if (order)
+ *order = 0;
+
+ *ncont = 0;
+ }
+ *shift = page_shift + m;
+ *count = i;
+}
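+
+/*
+ * Worked example: a 64KB umem built from sixteen 4KB pages that happen to
+ * be physically contiguous, with both the virtual and the DMA address 64KB
+ * aligned, ends up with m = 4, so *shift = 16, *ncont = 1, *order = 0 and
+ * *count = 16 - the region collapses into a single 64KB compound page.
+ * If alignment or contiguity is only 4KB, m stays 0 and *ncont == *count.
+ */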
+
+/*
+ * Populate the given array with bus addresses from the umem.
+ *
+ * dev - mlx5_ib device
+ * umem - umem to use to fill the pages
+ * page_shift - determines the page size used in the resulting array
+ * offset - offset into the umem to start from,
+ * only implemented for ODP umems
+ * num_pages - total number of pages to fill
+ * pas - bus addresses array to fill
+ * access_flags - access flags to set on all present pages.
+ *                use enum mlx5_ib_mtt_access_flags for this.
+ */
+static void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, size_t offset,
+ __be64 *pas, int access_flags)
+{
+ unsigned long umem_page_shift = ilog2(umem->page_size);
+ struct ib_umem_chunk *chunk;
+ int shift = page_shift - umem_page_shift;
+ int mask = (1 << shift) - 1;
+ int i, j, k;
+ u64 cur = 0;
+ u64 base;
+ int len;
+ struct scatterlist *sg;
+
+ i = 0;
+ list_for_each_entry(chunk, &umem->chunk_list, list) {
+ for (j = 0; j < chunk->nmap; j++) {
+ sg = chunk->page_list + j;
+ len = sg_dma_len(sg) >> umem_page_shift;
+ base = sg_dma_address(sg);
+ for (k = 0; k < len; k++) {
+ if (!(i & mask)) {
+ cur = base + (k << umem_page_shift);
+ cur |= access_flags;
+
+ pas[i >> shift] = cpu_to_be64(cur);
+ mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
+ i >> shift, (unsigned long long)
+ be64_to_cpu(pas[i >> shift]));
+ } else
+ mlx5_ib_dbg(dev, "=====> 0x%llx\n",
+ (unsigned long long)
+ (base + (k << umem_page_shift)));
+ i++;
+ }
+ }
+ }
+}
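+
+/*
+ * Example: with page_shift = 16 and a 4KB umem page size, shift = 4 and
+ * mask = 15, so each group of sixteen 4KB DMA fragments is folded into one
+ * 64KB entry of pas[]; only the bus address of the first fragment of every
+ * group (i & mask == 0) is recorded, with the access flags OR'ed into its
+ * low bits.
+ */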
+
+void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+ int page_shift, __be64 *pas, int access_flags)
+{
+ return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,
+ pas,
+ access_flags);
+}
+
+int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
+{
+ u64 page_size;
+ u64 page_mask;
+ u64 off_size;
+ u64 off_mask;
+ u64 buf_off;
+
+ page_size = (u64)1 << page_shift;
+ page_mask = page_size - 1;
+ buf_off = addr & page_mask;
+ off_size = page_size >> 6;
+ off_mask = off_size - 1;
+
+ if (buf_off & off_mask)
+ return -EINVAL;
+
+ *offset = (u32)(buf_off >> ilog2(off_size));
+ return 0;
+}
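+
+/*
+ * Example: with page_shift = 12, page_size is 4096 and off_size is 64, so
+ * the buffer must start on a 64-byte boundary within its page; an addr
+ * whose low 12 bits are 0x040 gives *offset = 1 (one 64-byte unit into the
+ * page), while low bits of 0x050 are rejected with -EINVAL.
+ */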
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,1311 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c 323223 2017-09-06 15:33:23Z hselasky $
+ */
+
+#include <linux/compiler.h>
+#include <linux/kref.h>
+#include <linux/random.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <rdma/ib_umem.h>
+#include "mlx5_ib.h"
+
+CTASSERT((uintptr_t)PAGE_MASK > (uintptr_t)PAGE_SIZE);
+
+enum {
+ MAX_PENDING_REG_MR = 8,
+ MAX_MR_RELEASE_TIMEOUT = (60 * 20) /* Allow release timeout up to 20 min */
+};
+
+#define MLX5_UMR_ALIGN 2048
+
+static int mlx5_mr_sysfs_init(struct mlx5_ib_dev *dev);
+static void mlx5_mr_sysfs_cleanup(struct mlx5_ib_dev *dev);
+
+static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+{
+ int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+
+ return err;
+}
+
+static int order2idx(struct mlx5_ib_dev *dev, int order)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+
+ if (order < cache->ent[0].order)
+ return 0;
+ else
+ return order - cache->ent[0].order;
+}
+
+static void reg_mr_callback(int status, void *context)
+{
+ struct mlx5_ib_mr *mr = context;
+ struct mlx5_ib_dev *dev = mr->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int c = order2idx(dev, mr->order);
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_core_mr *mmr = &mr->mmr;
+ struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
+ unsigned long flags;
+ int err;
+ u8 key;
+
+ spin_lock_irqsave(&ent->lock, flags);
+ ent->pending--;
+ spin_unlock_irqrestore(&ent->lock, flags);
+ if (status) {
+ mlx5_ib_warn(dev, "async reg mr failed. status %d, order %d\n", status, ent->order);
+ kfree(mr);
+ dev->fill_delay = 1;
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ if (mr->out.hdr.status) {
+		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
+ mr->out.hdr.status,
+ be32_to_cpu(mr->out.hdr.syndrome));
+ kfree(mr);
+ dev->fill_delay = 1;
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
+ key = dev->mdev->priv.mkey_key++;
+ spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
+ mmr->key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+ mlx5_ib_dbg(dev, "callbacked mkey 0x%x created\n",
+ be32_to_cpu(mr->out.mkey));
+
+ cache->last_add = jiffies;
+
+ spin_lock_irqsave(&ent->lock, flags);
+ list_add_tail(&mr->list, &ent->head);
+ ent->cur++;
+ ent->size++;
+ spin_unlock_irqrestore(&ent->lock, flags);
+
+ spin_lock_irqsave(&table->lock, flags);
+ err = radix_tree_insert(&table->tree, mlx5_mkey_to_idx(mmr->key), mmr);
+ spin_unlock_irqrestore(&table->lock, flags);
+ if (err) {
+ mlx5_ib_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
+ mmr->key, err);
+ mlx5_core_destroy_mkey(mdev, mmr);
+ }
+}
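
A small sketch of the mkey composition in the callback above, assuming the usual mlx5 encoding (mlx5_idx_to_mkey() places the 24-bit index in the upper bits, leaving the low byte for the rotating variant key); the index and key values here are hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mkey_index = 0x000123;   /* hypothetical: mr->out.mkey & 0xffffff */
	uint8_t  key = 0x07;              /* hypothetical: priv.mkey_key++         */
	uint32_t mkey = (mkey_index << 8) | key;

	printf("mkey = 0x%08x, index back = 0x%06x\n",
	    mkey, mkey >> 8);             /* 0x00012307, 0x000123 */
	return 0;
}
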
+
+static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_ib_mr *mr;
+ int npages = 1 << ent->order;
+ int err = 0;
+ int i;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++) {
+ if (ent->pending >= MAX_PENDING_REG_MR) {
+ err = -EAGAIN;
+ break;
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ break;
+ }
+ mr->order = ent->order;
+ mr->umred = 1;
+ mr->dev = dev;
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
+ in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
+ in->seg.log2_page_size = 12;
+
+ spin_lock_irq(&ent->lock);
+ ent->pending++;
+ spin_unlock_irq(&ent->lock);
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
+ sizeof(*in), reg_mr_callback,
+ mr, &mr->out);
+ if (err) {
+ spin_lock_irq(&ent->lock);
+ ent->pending--;
+ spin_unlock_irq(&ent->lock);
+ mlx5_ib_warn(dev, "create mkey failed %d\n", err);
+ kfree(mr);
+ break;
+ }
+ }
+
+ kfree(in);
+ return err;
+}
+
+static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ struct mlx5_ib_mr *mr;
+ int err;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ spin_lock_irq(&ent->lock);
+ if (list_empty(&ent->head)) {
+ spin_unlock_irq(&ent->lock);
+ return;
+ }
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->cur--;
+ ent->size--;
+ spin_unlock_irq(&ent->lock);
+ err = destroy_mkey(dev, mr);
+ if (err)
+ mlx5_ib_warn(dev, "failed destroy mkey\n");
+ else
+ kfree(mr);
+ }
+}
+
+static int someone_adding(struct mlx5_mr_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ if (cache->ent[i].cur < cache->ent[i].limit)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int someone_releasing(struct mlx5_mr_cache *cache)
+{
+ int i;
+
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ if (cache->ent[i].cur > 2 * cache->ent[i].limit)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void __cache_work_func(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_ib_dev *dev = ent->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int i = order2idx(dev, ent->order);
+ int err;
+ s64 dtime;
+
+ if (cache->stopped)
+ return;
+
+ ent = &dev->cache.ent[i];
+ if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
+ err = add_keys(dev, i, 1);
+ if (ent->cur < 2 * ent->limit) {
+ if (err == -EAGAIN) {
+ mlx5_ib_dbg(dev, "returned eagain, order %d\n",
+ i + 2);
+ cancel_delayed_work(&ent->dwork);
+ if (!queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(3)))
+ mlx5_ib_warn(dev, "failed queueing delayed work\n");
+ } else if (err) {
+ mlx5_ib_warn(dev, "command failed order %d, err %d\n",
+ i + 2, err);
+ cancel_delayed_work(&ent->dwork);
+ if (!queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(1000)))
+ mlx5_ib_warn(dev, "failed queueing delayed work\n");
+ } else {
+ if (!queue_work(cache->wq, &ent->work))
+ mlx5_ib_warn(dev, "failed queueing work\n");
+ }
+ }
+ } else if (ent->cur > 2 * ent->limit) {
+ dtime = (cache->last_add + (s64)cache->rel_timeout * HZ) - jiffies;
+ if (cache->rel_imm ||
+ (cache->rel_timeout >= 0 && !someone_adding(cache) && dtime <= 0)) {
+ remove_keys(dev, i, 1);
+ if (ent->cur > ent->limit)
+ if (!queue_work(cache->wq, &ent->work))
+ mlx5_ib_warn(dev, "failed queueing work\n");
+ } else if (cache->rel_timeout >= 0) {
+ dtime = max_t(s64, dtime, 0);
+ dtime = min_t(s64, dtime, (MAX_MR_RELEASE_TIMEOUT * HZ));
+ cancel_delayed_work(&ent->dwork);
+ if (!queue_delayed_work(cache->wq, &ent->dwork, dtime))
+ mlx5_ib_warn(dev, "failed queueing delayed work\n");
+ }
+ } else if (cache->rel_imm && !someone_releasing(cache)) {
+ cache->rel_imm = 0;
+ }
+}
+
+static void delayed_cache_work_func(struct work_struct *work)
+{
+ struct mlx5_cache_ent *ent;
+
+ ent = container_of(work, struct mlx5_cache_ent, dwork.work);
+ __cache_work_func(ent);
+}
+
+static void cache_work_func(struct work_struct *work)
+{
+ struct mlx5_cache_ent *ent;
+
+ ent = container_of(work, struct mlx5_cache_ent, work);
+ __cache_work_func(ent);
+}
+
+static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int shrink = 0;
+ int c;
+
+ c = order2idx(dev, mr->order);
+ if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
+ mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
+ return;
+ }
+ ent = &cache->ent[c];
+ spin_lock_irq(&ent->lock);
+ list_add_tail(&mr->list, &ent->head);
+ ent->cur++;
+ if (ent->cur > 2 * ent->limit)
+ shrink = 1;
+ spin_unlock_irq(&ent->lock);
+
+ if (shrink)
+ if (!queue_work(cache->wq, &ent->work))
+ mlx5_ib_warn(dev, "failed queueing work\n");
+}
+
+static void clean_keys(struct mlx5_ib_dev *dev, int c)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ struct mlx5_ib_mr *mr;
+ int err;
+
+ cancel_delayed_work(&ent->dwork);
+ while (1) {
+ spin_lock_irq(&ent->lock);
+ if (list_empty(&ent->head)) {
+ spin_unlock_irq(&ent->lock);
+ return;
+ }
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->cur--;
+ ent->size--;
+ spin_unlock_irq(&ent->lock);
+ err = destroy_mkey(dev, mr);
+ if (err)
+ mlx5_ib_warn(dev, "failed destroy mkey 0x%x from order %d\n",
+ mr->mmr.key, ent->order);
+ else
+ kfree(mr);
+ }
+}
+
+static void delay_time_func(unsigned long ctx)
+{
+ struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+
+ dev->fill_delay = 0;
+}
+
+enum {
+ MLX5_VF_MR_LIMIT = 2,
+};
+
+int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int limit;
+ int err;
+ int i;
+
+ mutex_init(&dev->slow_path_mutex);
+ cache->rel_timeout = 300;
+ cache->wq = create_singlethread_workqueue("mkey_cache");
+ if (!cache->wq) {
+ mlx5_ib_warn(dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
+ setup_timer(&dev->delay_timer, delay_time_func, (uintptr_t)dev);
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ INIT_LIST_HEAD(&cache->ent[i].head);
+ spin_lock_init(&cache->ent[i].lock);
+
+ ent = &cache->ent[i];
+ INIT_LIST_HEAD(&ent->head);
+ spin_lock_init(&ent->lock);
+ ent->order = i + 2;
+ ent->dev = dev;
+
+ if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) {
+ if (mlx5_core_is_pf(dev->mdev))
+ limit = dev->mdev->profile->mr_cache[i].limit;
+ else
+ limit = MLX5_VF_MR_LIMIT;
+ } else {
+ limit = 0;
+ }
+
+ INIT_WORK(&ent->work, cache_work_func);
+ INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
+ ent->limit = limit;
+ if (!queue_work(cache->wq, &ent->work))
+ mlx5_ib_warn(dev, "failed queueing work\n");
+ }
+
+ err = mlx5_mr_sysfs_init(dev);
+ if (err)
+ mlx5_ib_warn(dev, "failed to init mr cache sysfs\n");
+
+ return 0;
+}
+
+static void wait_for_async_commands(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int total = 0;
+ int i;
+ int j;
+
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ for (j = 0 ; j < 1000; j++) {
+ if (!ent->pending)
+ break;
+ msleep(50);
+ }
+ }
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ total += ent->pending;
+ }
+
+ if (total)
+ mlx5_ib_dbg(dev, "aborted, %d pending requests\n", total);
+ else
+ mlx5_ib_dbg(dev, "done with all pending requests\n");
+}
+
+int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+{
+ int i;
+
+ dev->cache.stopped = 1;
+ flush_workqueue(dev->cache.wq);
+ mlx5_mr_sysfs_cleanup(dev);
+
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+ clean_keys(dev, i);
+
+ destroy_workqueue(dev->cache.wq);
+ wait_for_async_commands(dev);
+ del_timer_sync(&dev->delay_timer);
+ return 0;
+}
+
+struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_mkey_seg *seg;
+ struct mlx5_ib_mr *mr;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ seg = &in->seg;
+ seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
+ seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
+ seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ seg->start_addr = 0;
+
+ err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+ NULL);
+ if (err)
+ goto err_in;
+
+ kfree(in);
+ mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.rkey = mr->mmr.key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_in:
+ kfree(in);
+
+err_free:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
+static int get_octo_len(u64 addr, u64 len, u64 page_size)
+{
+ u64 offset;
+ int npages;
+
+ offset = addr & (page_size - 1ULL);
+ npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+ return (npages + 1) / 2;
+}
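
A worked example of get_octo_len() with assumed inputs: each MTT entry is 8 bytes and the translation table is sized in 16-byte octwords, so the page count is halved and rounded up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t page_size = 4096;                 /* assumed page size           */
	uint64_t addr = 0x10200, len = 16384;      /* hypothetical buffer         */
	uint64_t offset = addr & (page_size - 1);  /* 0x200                       */
	uint64_t npages = (len + offset + page_size - 1) / page_size;  /* 5      */

	/* two 8-byte MTT entries fit per 16-byte octword, rounded up */
	printf("npages=%llu octo_len=%llu\n",
	    (unsigned long long)npages,
	    (unsigned long long)((npages + 1) / 2));                   /* 3      */
	return 0;
}
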
+
+void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+{
+ struct mlx5_ib_umr_context *context;
+ struct ib_wc wc;
+ int err;
+
+ while (1) {
+ err = ib_poll_cq(cq, 1, &wc);
+ if (err < 0) {
+ printf("mlx5_ib: WARN: ""poll cq error %d\n", err);
+ return;
+ }
+ if (err == 0)
+ break;
+
+ context = (struct mlx5_ib_umr_context *)(uintptr_t)wc.wr_id;
+ context->status = wc.status;
+ complete(&context->done);
+ }
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+}
+
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
+ u64 length, struct ib_umem *umem,
+ int npages, int page_shift,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_ib_mr *mr;
+ int inlen;
+ int err;
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_1;
+ }
+ mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+
+ /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ * in the page list submitted with the command. */
+ in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
+ in->seg.flags = convert_access(access_flags) |
+ MLX5_ACCESS_MODE_MTT;
+ in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ in->seg.start_addr = cpu_to_be64(virt_addr);
+ in->seg.len = cpu_to_be64(length);
+ in->seg.bsfs_octo_size = 0;
+ in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
+ in->seg.log2_page_size = page_shift;
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
+ 1 << page_shift));
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
+ NULL, NULL);
+ if (err) {
+ mlx5_ib_warn(dev, "create mkey failed\n");
+ goto err_2;
+ }
+ mr->umem = umem;
+ mr->dev = dev;
+ kvfree(in);
+
+ mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
+
+ return mr;
+
+err_2:
+ kvfree(in);
+
+err_1:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
+enum {
+ MLX5_MAX_REG_ORDER = MAX_MR_CACHE_ENTRIES + 1,
+ MLX5_MAX_REG_SIZE = 2ul * 1024 * 1024 * 1024,
+};
+
+static int clean_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ int umred = mr->umred;
+ int err;
+ int i;
+
+ if (!umred) {
+ for (i = 0; i < mr->nchild; ++i) {
+ free_cached_mr(dev, mr->children[i]);
+ }
+ kfree(mr->children);
+
+ err = destroy_mkey(dev, mr);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
+ mr->mmr.key, err);
+ return err;
+ }
+ }
+ return 0;
+}
+
+struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata, int mr_id)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct ib_umem *umem;
+ int page_shift;
+ int npages;
+ int ncont;
+ int order;
+ int err;
+
+ mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ (unsigned long long)start, (unsigned long long)virt_addr,
+ (unsigned long long)length, access_flags);
+ umem = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+ if (IS_ERR(umem)) {
+ mlx5_ib_warn(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+ return (void *)umem;
+ }
+
+ mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
+ if (!npages) {
+ mlx5_ib_warn(dev, "avoid zero region\n");
+ err = -EINVAL;
+ goto error;
+ }
+
+ mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
+ npages, ncont, order, page_shift);
+
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift, access_flags);
+ mutex_unlock(&dev->slow_path_mutex);
+
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ mr = NULL;
+ goto error;
+ }
+
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
+
+ mr->umem = umem;
+ mr->npages = npages;
+ atomic_add(npages, &dev->mdev->priv.reg_pages);
+ mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.rkey = mr->mmr.key;
+
+ return &mr->ibmr;
+
+error:
+ /*
+ * Destroy the umem *before* destroying the MR, to ensure we
+ * will not have any in-flight notifiers when destroying the
+ * MR.
+ *
+ * As the MR is completely invalid to begin with, and this
+ * error path is only taken if we can't push the mr entry into
+ * the pagefault tree, this is safe.
+ */
+
+ ib_umem_release(umem);
+ return ERR_PTR(err);
+}
+
+CTASSERT(sizeof(((struct ib_phys_buf *)0)->size) == 8);
+
+struct ib_mr *
+mlx5_ib_reg_phys_mr(struct ib_pd *pd,
+ struct ib_phys_buf *buffer_list,
+ int num_phys_buf,
+ int access_flags,
+ u64 *virt_addr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_ib_mr *mr;
+ u64 total_size;
+ u32 octo_len;
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
+ unsigned long mask;
+ int shift;
+ int npages;
+ int inlen;
+ int err;
+ int i, j, n;
+
+ mask = buffer_list[0].addr ^ *virt_addr;
+ total_size = 0;
+ for (i = 0; i < num_phys_buf; ++i) {
+ if (i != 0)
+ mask |= buffer_list[i].addr;
+ if (i != num_phys_buf - 1)
+ mask |= buffer_list[i].addr + buffer_list[i].size;
+
+ total_size += buffer_list[i].size;
+ }
+
+ if (mask & ~PAGE_MASK)
+ return ERR_PTR(-EINVAL);
+
+ shift = __ffs(mask | 1 << 31);
+
+ buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
+ buffer_list[0].addr &= ~0ULL << shift;
+
+ npages = 0;
+ for (i = 0; i < num_phys_buf; ++i)
+ npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
+
+ if (!npages) {
+ mlx5_ib_warn(dev, "avoid zero region\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof *mr, GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ octo_len = get_octo_len(*virt_addr, total_size, 1ULL << shift);
+ octo_len = ALIGN(octo_len, 4);
+
+ inlen = sizeof(*in) + (octo_len * 16);
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(mr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ n = 0;
+ for (i = 0; i < num_phys_buf; ++i) {
+ for (j = 0;
+ j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
+ ++j) {
+ u64 temp = buffer_list[i].addr + ((u64) j << shift);
+ if (pg_cap)
+ temp |= MLX5_IB_MTT_PRESENT;
+ in->pas[n++] = cpu_to_be64(temp);
+ }
+ }
+
+ /* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
+ * in the page list submitted with the command. */
+ in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
+ in->seg.flags = convert_access(access_flags) |
+ MLX5_ACCESS_MODE_MTT;
+ in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ in->seg.start_addr = cpu_to_be64(*virt_addr);
+ in->seg.len = cpu_to_be64(total_size);
+ in->seg.bsfs_octo_size = 0;
+ in->seg.xlt_oct_size = cpu_to_be32(octo_len);
+ in->seg.log2_page_size = shift;
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ in->xlat_oct_act_size = cpu_to_be32(octo_len);
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
+ NULL, NULL);
+ mr->umem = NULL;
+ mr->dev = dev;
+ mr->npages = npages;
+ mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.rkey = mr->mmr.key;
+
+ kvfree(in);
+
+ if (err) {
+ kfree(mr);
+ return ERR_PTR(err);
+ }
+ return &mr->ibmr;
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct ib_umem *umem = mr->umem;
+ int npages = mr->npages;
+ int umred = mr->umred;
+ int err;
+
+ err = clean_mr(mr);
+ if (err)
+ return err;
+
+ if (umem) {
+ ib_umem_release(umem);
+ atomic_sub(npages, &dev->mdev->priv.reg_pages);
+ }
+
+ if (umred)
+ free_cached_mr(dev, mr);
+ else
+ kfree(mr);
+
+ return 0;
+}
+
+int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int err;
+
+ if (mr->sig) {
+ if (mlx5_core_destroy_psv(dev->mdev,
+ mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(dev->mdev,
+ mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+ kfree(mr->sig);
+ }
+
+ err = destroy_mkey(dev, mr);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
+ mr->mmr.key, err);
+ return err;
+ }
+
+ kfree(mr);
+
+ return err;
+}
+
+struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
+ int max_page_list_len)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_ib_mr *mr;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ in->seg.status = MLX5_MKEY_STATUS_FREE;
+ in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
+ in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+ /*
+ * TBD not needed - issue 197292 */
+ in->seg.log2_page_size = PAGE_SHIFT;
+
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+ NULL, NULL);
+ kfree(in);
+ if (err) {
+ mlx5_ib_warn(dev, "failed create mkey\n");
+ goto err_free;
+ }
+
+ mr->ibmr.lkey = mr->mmr.key;
+ mr->ibmr.rkey = mr->mmr.key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
+ int page_list_len)
+{
+ struct mlx5_ib_fast_reg_page_list *mfrpl;
+ int size = page_list_len * sizeof(u64);
+
+ mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
+ if (!mfrpl)
+ return ERR_PTR(-ENOMEM);
+
+ mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
+ if (!mfrpl->ibfrpl.page_list)
+ goto err_free;
+
+ mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
+ size, &mfrpl->map,
+ GFP_KERNEL);
+ if (!mfrpl->mapped_page_list)
+ goto err_free;
+
+ WARN_ON(mfrpl->map & 0x3f);
+
+ return &mfrpl->ibfrpl;
+
+err_free:
+ kfree(mfrpl->ibfrpl.page_list);
+ kfree(mfrpl);
+ return ERR_PTR(-ENOMEM);
+}
+
+void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
+{
+ struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
+ struct mlx5_ib_dev *dev = to_mdev(page_list->device);
+ int size = page_list->max_page_list_len * sizeof(u64);
+
+ dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
+ mfrpl->map);
+ kfree(mfrpl->ibfrpl.page_list);
+ kfree(mfrpl);
+}
+
+struct order_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct cache_order *, struct order_attribute *, char *buf);
+ ssize_t (*store)(struct cache_order *, struct order_attribute *,
+ const char *buf, size_t count);
+};
+
+static ssize_t cur_show(struct cache_order *co, struct order_attribute *oa,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ int err;
+
+ err = snprintf(buf, 20, "%d\n", ent->cur);
+ return err;
+}
+
+static ssize_t limit_show(struct cache_order *co, struct order_attribute *oa,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ int err;
+
+ err = snprintf(buf, 20, "%d\n", ent->limit);
+ return err;
+}
+
+static ssize_t limit_store(struct cache_order *co, struct order_attribute *oa,
+ const char *buf, size_t count)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ u32 var;
+ int err;
+
+#define kstrtouint(a,b,c) ({*(c) = strtol(a,0,b); 0;})
+#define kstrtoint(a,b,c) ({*(c) = strtol(a,0,b); 0;})
+
+ if (kstrtouint(buf, 0, &var))
+ return -EINVAL;
+
+ if (var > ent->size)
+ return -EINVAL;
+
+ ent->limit = var;
+
+ if (ent->cur < ent->limit) {
+ err = add_keys(dev, co->index, 2 * ent->limit - ent->cur);
+ if (err)
+ return err;
+ }
+
+ return count;
+}
+
+static ssize_t miss_show(struct cache_order *co, struct order_attribute *oa,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ int err;
+
+ err = snprintf(buf, 20, "%d\n", ent->miss);
+ return err;
+}
+
+static ssize_t miss_store(struct cache_order *co, struct order_attribute *oa,
+ const char *buf, size_t count)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ u32 var;
+
+ if (kstrtouint(buf, 0, &var))
+ return -EINVAL;
+
+ if (var != 0)
+ return -EINVAL;
+
+ ent->miss = var;
+
+ return count;
+}
+
+static ssize_t size_show(struct cache_order *co, struct order_attribute *oa,
+ char *buf)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ int err;
+
+ err = snprintf(buf, 20, "%d\n", ent->size);
+ return err;
+}
+
+static ssize_t size_store(struct cache_order *co, struct order_attribute *oa,
+ const char *buf, size_t count)
+{
+ struct mlx5_ib_dev *dev = co->dev;
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[co->index];
+ u32 var;
+ int err;
+
+ if (kstrtouint(buf, 0, &var))
+ return -EINVAL;
+
+ if (var < ent->limit)
+ return -EINVAL;
+
+ if (var > ent->size) {
+ do {
+ err = add_keys(dev, co->index, var - ent->size);
+ if (err && err != -EAGAIN)
+ return err;
+
+ usleep_range(3000, 5000);
+ } while (err);
+ } else if (var < ent->size) {
+ remove_keys(dev, co->index, ent->size - var);
+ }
+
+ return count;
+}
+
+static ssize_t order_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct order_attribute *oa =
+ container_of(attr, struct order_attribute, attr);
+ struct cache_order *co = container_of(kobj, struct cache_order, kobj);
+
+ if (!oa->show)
+ return -EIO;
+
+ return oa->show(co, oa, buf);
+}
+
+static ssize_t order_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t size)
+{
+ struct order_attribute *oa =
+ container_of(attr, struct order_attribute, attr);
+ struct cache_order *co = container_of(kobj, struct cache_order, kobj);
+
+ if (!oa->store)
+ return -EIO;
+
+ return oa->store(co, oa, buf, size);
+}
+
+static const struct sysfs_ops order_sysfs_ops = {
+ .show = order_attr_show,
+ .store = order_attr_store,
+};
+
+#define ORDER_ATTR(_name) struct order_attribute order_attr_##_name = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+#define ORDER_ATTR_RO(_name) struct order_attribute order_attr_##_name = \
+ __ATTR(_name, 0444, _name##_show, NULL)
+
+static ORDER_ATTR_RO(cur);
+static ORDER_ATTR(limit);
+static ORDER_ATTR(miss);
+static ORDER_ATTR(size);
+
+static struct attribute *order_default_attrs[] = {
+ &order_attr_cur.attr,
+ &order_attr_limit.attr,
+ &order_attr_miss.attr,
+ &order_attr_size.attr,
+ NULL
+};
+
+static struct kobj_type order_type = {
+ .sysfs_ops = &order_sysfs_ops,
+ .default_attrs = order_default_attrs
+};
+
+
+
+struct cache_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct mlx5_ib_dev *dev, char *buf);
+ ssize_t (*store)(struct mlx5_ib_dev *dev, const char *buf, size_t count);
+};
+
+static ssize_t rel_imm_show(struct mlx5_ib_dev *dev, char *buf)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int err;
+
+ err = snprintf(buf, 20, "%d\n", cache->rel_imm);
+ return err;
+}
+
+static ssize_t rel_imm_store(struct mlx5_ib_dev *dev, const char *buf, size_t count)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ u32 var;
+ int i;
+ int found = 0;
+
+ if (kstrtouint(buf, 0, &var))
+ return -EINVAL;
+
+ if (var > 1)
+ return -EINVAL;
+
+ if (var == cache->rel_imm)
+ return count;
+
+ cache->rel_imm = var;
+ if (cache->rel_imm == 1) {
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ if (cache->ent[i].cur > 2 * cache->ent[i].limit) {
+ queue_work(cache->wq, &cache->ent[i].work);
+ found = 1;
+ }
+ }
+ if (!found)
+ cache->rel_imm = 0;
+ }
+
+ return count;
+}
+static ssize_t rel_timeout_show(struct mlx5_ib_dev *dev, char *buf)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int err;
+
+ err = snprintf(buf, 20, "%d\n", cache->rel_timeout);
+ return err;
+}
+
+static ssize_t rel_timeout_store(struct mlx5_ib_dev *dev, const char *buf, size_t count)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ int var;
+ int i;
+
+ if (kstrtoint(buf, 0, &var))
+ return -EINVAL;
+
+ if (var < -1 || var > MAX_MR_RELEASE_TIMEOUT)
+ return -EINVAL;
+
+ if (var == cache->rel_timeout)
+ return count;
+
+ if (cache->rel_timeout == -1 || (var < cache->rel_timeout && var != -1)) {
+ cache->rel_timeout = var;
+ for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+ if (cache->ent[i].cur > 2 * cache->ent[i].limit)
+ queue_work(cache->wq, &cache->ent[i].work);
+ }
+ } else {
+ cache->rel_timeout = var;
+ }
+
+ return count;
+}
+
+static ssize_t cache_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cache_attribute *ca =
+ container_of(attr, struct cache_attribute, attr);
+ struct mlx5_ib_dev *dev = container_of(kobj, struct mlx5_ib_dev, mr_cache);
+
+ if (!ca->show)
+ return -EIO;
+
+ return ca->show(dev, buf);
+}
+
+static ssize_t cache_attr_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t size)
+{
+ struct cache_attribute *ca =
+ container_of(attr, struct cache_attribute, attr);
+ struct mlx5_ib_dev *dev = container_of(kobj, struct mlx5_ib_dev, mr_cache);
+
+ if (!ca->store)
+ return -EIO;
+
+ return ca->store(dev, buf, size);
+}
+
+static const struct sysfs_ops cache_sysfs_ops = {
+ .show = cache_attr_show,
+ .store = cache_attr_store,
+};
+
+#define CACHE_ATTR(_name) struct cache_attribute cache_attr_##_name = \
+ __ATTR(_name, 0644, _name##_show, _name##_store)
+
+static CACHE_ATTR(rel_imm);
+static CACHE_ATTR(rel_timeout);
+
+static struct attribute *cache_default_attrs[] = {
+ &cache_attr_rel_imm.attr,
+ &cache_attr_rel_timeout.attr,
+ NULL
+};
+
+static struct kobj_type cache_type = {
+ .sysfs_ops = &cache_sysfs_ops,
+ .default_attrs = cache_default_attrs
+};
+
+static int mlx5_mr_sysfs_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct device *device = &dev->ib_dev.dev;
+ struct cache_order *co;
+ int o;
+ int i;
+ int err;
+
+ err = kobject_init_and_add(&dev->mr_cache, &cache_type,
+ &device->kobj, "mr_cache");
+ if (err)
+ return -ENOMEM;
+
+ for (o = 2, i = 0; i < MAX_MR_CACHE_ENTRIES; o++, i++) {
+ co = &cache->ent[i].co;
+ co->order = o;
+ co->index = i;
+ co->dev = dev;
+ err = kobject_init_and_add(&co->kobj, &order_type,
+ &dev->mr_cache, "%d", o);
+ if (err)
+ goto err_put;
+ }
+
+ return 0;
+
+err_put:
+ for (; i >= 0; i--) {
+ co = &cache->ent[i].co;
+ kobject_put(&co->kobj);
+ }
+ kobject_put(&dev->mr_cache);
+
+ return err;
+}
+
+static void mlx5_mr_sysfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mr_cache *cache = &dev->cache;
+ struct cache_order *co;
+ int i;
+
+ for (i = MAX_MR_CACHE_ENTRIES - 1; i >= 0; i--) {
+ co = &cache->ent[i].co;
+ kobject_put(&co->kobj);
+ }
+ kobject_put(&dev->mr_cache);
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_mr.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,2992 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c 325611 2017-11-09 19:00:11Z hselasky $
+ */
+
+#include <linux/module.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_umem.h>
+#include "mlx5_ib.h"
+#include "user.h"
+#include <dev/mlx5/mlx5_core/transobj.h>
+#include <sys/priv.h>
+
+#define IPV6_DEFAULT_HOPLIMIT 64
+
+
+static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ enum ib_qp_state cur_state, enum ib_qp_state new_state);
+
+/* not supported currently */
+static int workqueue_signature;
+
+enum {
+ MLX5_IB_ACK_REQ_FREQ = 8,
+};
+
+enum {
+ MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
+ MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
+ MLX5_IB_LINK_TYPE_IB = 0,
+ MLX5_IB_LINK_TYPE_ETH = 1
+};
+
+enum {
+ MLX5_IB_SQ_STRIDE = 6,
+ MLX5_IB_CACHE_LINE_SIZE = 64,
+};
+
+enum {
+ MLX5_RQ_NUM_STATE = MLX5_RQC_STATE_ERR + 1,
+ MLX5_SQ_NUM_STATE = MLX5_SQC_STATE_ERR + 1,
+ MLX5_QP_STATE = MLX5_QP_NUM_STATE + 1,
+ MLX5_QP_STATE_BAD = MLX5_QP_STATE + 1,
+};
+
+static const u32 mlx5_ib_opcode[] = {
+ [IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
+ [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
+ [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
+ [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
+ [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
+ [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
+};
+
+struct umr_wr {
+ u64 virt_addr;
+ struct ib_pd *pd;
+ unsigned int page_shift;
+ unsigned int npages;
+ u32 length;
+ int access_flags;
+ u32 mkey;
+};
+
+static int is_qp0(enum ib_qp_type qp_type)
+{
+ return qp_type == IB_QPT_SMI;
+}
+
+static int is_qp1(enum ib_qp_type qp_type)
+{
+ return qp_type == IB_QPT_GSI;
+}
+
+static int is_sqp(enum ib_qp_type qp_type)
+{
+ return is_qp0(qp_type) || is_qp1(qp_type);
+}
+
+static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
+{
+ return mlx5_buf_offset(&qp->buf, offset);
+}
+
+static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
+{
+ return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
+}
+
+void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
+{
+ return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
+}
+
+
+static int
+query_wqe_idx(struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ struct mlx5_query_qp_mbox_out *outb;
+ struct mlx5_qp_context *context;
+ int ret;
+
+ outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+ if (!outb)
+ return -ENOMEM;
+
+ context = &outb->ctx;
+
+ mutex_lock(&qp->mutex);
+ ret = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
+ if (ret)
+ goto out_free;
+
+ ret = be16_to_cpu(context->hw_sq_wqe_counter) & (qp->sq.wqe_cnt - 1);
+
+out_free:
+ mutex_unlock(&qp->mutex);
+ kfree(outb);
+
+ return ret;
+}
+
+static int mlx5_handle_sig_pipelining(struct mlx5_ib_qp *qp)
+{
+ int wqe_idx;
+
+ wqe_idx = query_wqe_idx(qp);
+ if (wqe_idx < 0) {
+ printf("mlx5_ib: ERR: ""Failed to query QP 0x%x wqe index\n", qp->mqp.qpn);
+ return wqe_idx;
+ }
+
+ if (qp->sq.swr_ctx[wqe_idx].sig_piped) {
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ struct mlx5_wqe_ctrl_seg *cwqe;
+
+ cwqe = mlx5_get_send_wqe(qp, wqe_idx);
+ cwqe->opmod_idx_opcode = cpu_to_be32(be32_to_cpu(cwqe->opmod_idx_opcode) & 0xffffff00);
+ qp->sq.swr_ctx[wqe_idx].w_list.opcode |= MLX5_OPCODE_SIGNATURE_CANCELED;
+ mlx5_ib_dbg(dev, "Cancel QP 0x%x wqe_index 0x%x\n",
+ qp->mqp.qpn, wqe_idx);
+ }
+
+ return 0;
+}
+
+static void mlx5_ib_sqd_work(struct work_struct *work)
+{
+ struct mlx5_ib_sqd *sqd;
+ struct mlx5_ib_qp *qp;
+ struct ib_qp_attr qp_attr;
+
+ sqd = container_of(work, struct mlx5_ib_sqd, work);
+ qp = sqd->qp;
+
+ if (mlx5_handle_sig_pipelining(qp))
+ goto out;
+
+ mutex_lock(&qp->mutex);
+ if (__mlx5_ib_modify_qp(&qp->ibqp, &qp_attr, 0, IB_QPS_SQD, IB_QPS_RTS))
+ printf("mlx5_ib: ERR: ""Failed to resume QP 0x%x\n", qp->mqp.qpn);
+ mutex_unlock(&qp->mutex);
+out:
+ kfree(sqd);
+}
+
+static void mlx5_ib_sigerr_sqd_event(struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_sqd *sqd;
+
+ sqd = kzalloc(sizeof(*sqd), GFP_ATOMIC);
+ if (!sqd)
+ return;
+
+ sqd->qp = qp;
+ INIT_WORK(&sqd->work, mlx5_ib_sqd_work);
+ queue_work(mlx5_ib_wq, &sqd->work);
+}
+
+static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
+{
+ struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+ struct ib_event event;
+
+ if (type == MLX5_EVENT_TYPE_SQ_DRAINED &&
+ to_mibqp(qp)->state != IB_QPS_SQD) {
+ mlx5_ib_sigerr_sqd_event(to_mibqp(qp));
+ return;
+ }
+
+ if (type == MLX5_EVENT_TYPE_PATH_MIG)
+ to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
+
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (type) {
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX5_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ printf("mlx5_ib: WARN: ""mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
+ return;
+ }
+
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
+ int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
+{
+ int wqe_size;
+ int wq_size;
+
+ /* Sanity check RQ size before proceeding */
+ if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ if (!has_rq) {
+ qp->rq.max_gs = 0;
+ qp->rq.wqe_cnt = 0;
+ qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
+ } else {
+ if (ucmd) {
+ qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_post = qp->rq.wqe_cnt;
+ } else {
+ wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
+ wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
+ wqe_size = roundup_pow_of_two(wqe_size);
+ wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
+ wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
+ qp->rq.wqe_cnt = wq_size / wqe_size;
+ if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
+ mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
+ wqe_size,
+ MLX5_CAP_GEN(dev->mdev,
+ max_wqe_sz_rq));
+ return -EINVAL;
+ }
+ qp->rq.wqe_shift = ilog2(wqe_size);
+ qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ qp->rq.max_post = qp->rq.wqe_cnt;
+ }
+ }
+
+ return 0;
+}
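
A standalone sketch of the kernel-path RQ sizing above, under assumed numbers (max_recv_wr = 100, max_recv_sge = 3, no WQE signature, a 16-byte mlx5_wqe_data_seg and a 64-byte basic block): the per-WQE size and the total work queue size are both rounded up to powers of two before deriving wqe_cnt and max_gs.

#include <stdio.h>

static int roundup_pow_of_two(int x)
{
	int r = 1;
	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	int wqe_size = roundup_pow_of_two(3 * 16);          /* 48 -> 64      */
	int wq_size  = roundup_pow_of_two(100) * wqe_size;  /* 128 * 64      */

	printf("wqe_cnt=%d max_gs=%d\n",
	    wq_size / wqe_size,                             /* 128 entries   */
	    wqe_size / 16);                                 /* 4 sges        */
	return 0;
}
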
+
+static int sq_overhead(enum ib_qp_type qp_type)
+{
+ int size = 0;
+
+ switch (qp_type) {
+ case IB_QPT_XRC_INI:
+ size += sizeof(struct mlx5_wqe_xrc_seg);
+ /* fall through */
+ case IB_QPT_RC:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
+ break;
+
+ case IB_QPT_XRC_TGT:
+ return 0;
+
+ case IB_QPT_UC:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
+ break;
+
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_datagram_seg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static int calc_send_wqe(struct ib_qp_init_attr *attr)
+{
+ int inl_size = 0;
+ int size;
+
+ size = sq_overhead(attr->qp_type);
+ if (size < 0)
+ return size;
+
+ if (attr->cap.max_inline_data) {
+ inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
+ attr->cap.max_inline_data;
+ }
+
+ size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
+ return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
+}
+
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+ int max_sge;
+
+ if (attr->qp_type == IB_QPT_RC)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else if (attr->qp_type == IB_QPT_XRC_INI)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_xrc_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else
+ max_sge = (wqe_size - sq_overhead(attr->qp_type)) /
+ sizeof(struct mlx5_wqe_data_seg);
+
+ return min_t(int, max_sge, wqe_size - sq_overhead(attr->qp_type) /
+ sizeof(struct mlx5_wqe_data_seg));
+}
+
+static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ struct mlx5_ib_qp *qp)
+{
+ int wqe_size;
+ int wq_size;
+
+ if (!attr->cap.max_send_wr)
+ return 0;
+
+ wqe_size = calc_send_wqe(attr);
+ mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
+ if (wqe_size < 0)
+ return wqe_size;
+
+ if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
+ mlx5_ib_warn(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+ wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
+ return -EINVAL;
+ }
+
+ qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
+ sizeof(struct mlx5_wqe_inline_seg);
+ attr->cap.max_inline_data = qp->max_inline_data;
+
+ wq_size = roundup_pow_of_two(attr->cap.max_send_wr * (u64)wqe_size);
+ qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+ if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
+ mlx5_ib_warn(dev, "wqe count(%d) exceeds limits(%d)\n",
+ qp->sq.wqe_cnt,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
+ return -ENOMEM;
+ }
+ qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+ qp->sq.max_gs = get_send_sge(attr, wqe_size);
+ if (qp->sq.max_gs < attr->cap.max_send_sge) {
+ mlx5_ib_warn(dev, "max sge(%d) exceeds limits(%d)\n",
+ qp->sq.max_gs, attr->cap.max_send_sge);
+ return -ENOMEM;
+ }
+
+ attr->cap.max_send_sge = qp->sq.max_gs;
+ qp->sq.max_post = wq_size / wqe_size;
+ attr->cap.max_send_wr = qp->sq.max_post;
+
+ return wq_size;
+}
+
+static int set_user_buf_size(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_ib_create_qp *ucmd,
+ struct ib_qp_init_attr *attr)
+{
+ int desc_sz = 1 << qp->sq.wqe_shift;
+
+ if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
+ mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
+ desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
+ return -EINVAL;
+ }
+
+ if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
+ mlx5_ib_warn(dev, "sq_wqe_count %d, sq_wqe_count %d\n",
+ ucmd->sq_wqe_count, ucmd->sq_wqe_count);
+ return -EINVAL;
+ }
+
+ qp->sq.wqe_cnt = ucmd->sq_wqe_count;
+
+ if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
+ mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
+ qp->sq.wqe_cnt,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
+ return -EINVAL;
+ }
+
+
+ if (attr->qp_type == IB_QPT_RAW_PACKET) {
+ qp->buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+ qp->sq_buf_size = qp->sq.wqe_cnt << 6;
+ } else {
+ qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+ (qp->sq.wqe_cnt << 6);
+ qp->sq_buf_size = 0;
+ }
+
+ return 0;
+}
+
+static int qp_has_rq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
+ !attr->cap.max_recv_wr)
+ return 0;
+
+ return 1;
+}
+
+static int first_med_uuar(void)
+{
+ return 1;
+}
+
+static int next_uuar(int n)
+{
+ n++;
+
+ while (((n % 4) & 2))
+ n++;
+
+ return n;
+}
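
For illustration: starting from first_med_uuar() == 1, the skip in next_uuar() produces the index sequence 1, 4, 5, 8, 9, 12, ..., i.e. it avoids two slots in every group of four (presumably the blue-flame registers per UAR page that are not available for regular allocation). A minimal standalone copy shows the visited indices.

#include <stdio.h>

static int next_uuar(int n)
{
	n++;
	while ((n % 4) & 2)	/* skip indices 2 and 3 within each group of 4 */
		n++;
	return n;
}

int main(void)
{
	for (int i = 1, count = 0; count < 6; i = next_uuar(i), count++)
		printf("%d ", i);	/* prints: 1 4 5 8 9 12 */
	printf("\n");
	return 0;
}
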
+
+static int num_med_uuar(struct mlx5_uuar_info *uuari)
+{
+ int n;
+
+ n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
+ uuari->num_low_latency_uuars - 1;
+
+ return n >= 0 ? n : 0;
+}
+
+static int max_uuari(struct mlx5_uuar_info *uuari)
+{
+ return uuari->num_uars * 4;
+}
+
+static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+{
+ int med;
+ int i;
+ int t;
+
+ med = num_med_uuar(uuari);
+ for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
+ t++;
+ if (t == med)
+ return next_uuar(i);
+ }
+
+ return 0;
+}
+
+static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
+{
+ int i;
+
+ for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
+ if (!test_bit(i, uuari->bitmap)) {
+ set_bit(i, uuari->bitmap);
+ uuari->count[i]++;
+ return i;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
+{
+ int minidx = first_med_uuar();
+ int i;
+
+ for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
+ if (uuari->count[i] < uuari->count[minidx])
+ minidx = i;
+ }
+
+ uuari->count[minidx]++;
+
+ return minidx;
+}
+
+static int alloc_uuar(struct mlx5_uuar_info *uuari,
+ enum mlx5_ib_latency_class lat)
+{
+ int uuarn = -EINVAL;
+
+ mutex_lock(&uuari->lock);
+ switch (lat) {
+ case MLX5_IB_LATENCY_CLASS_LOW:
+ uuarn = 0;
+ uuari->count[uuarn]++;
+ break;
+
+ case MLX5_IB_LATENCY_CLASS_MEDIUM:
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
+ break;
+
+ case MLX5_IB_LATENCY_CLASS_HIGH:
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
+ break;
+
+ case MLX5_IB_LATENCY_CLASS_FAST_PATH:
+ uuarn = 2;
+ break;
+ }
+ mutex_unlock(&uuari->lock);
+
+ return uuarn;
+}
+
+static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+{
+ clear_bit(uuarn, uuari->bitmap);
+ --uuari->count[uuarn];
+}
+
+static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+{
+ clear_bit(uuarn, uuari->bitmap);
+ --uuari->count[uuarn];
+}
+
+static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+{
+ int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
+ int high_uuar = nuuars - uuari->num_low_latency_uuars;
+
+ mutex_lock(&uuari->lock);
+ if (uuarn == 0) {
+ --uuari->count[uuarn];
+ goto out;
+ }
+
+ if (uuarn < high_uuar) {
+ free_med_class_uuar(uuari, uuarn);
+ goto out;
+ }
+
+ free_high_class_uuar(uuari, uuarn);
+
+out:
+ mutex_unlock(&uuari->lock);
+}
+
+static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET: return MLX5_QP_STATE_RST;
+ case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
+ case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
+ case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
+ case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
+ case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
+ case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
+ default: return -1;
+ }
+}
+
+static int to_mlx5_st(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_RC: return MLX5_QP_ST_RC;
+ case IB_QPT_UC: return MLX5_QP_ST_UC;
+ case IB_QPT_UD: return MLX5_QP_ST_UD;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
+ case IB_QPT_SMI: return MLX5_QP_ST_QP0;
+ case IB_QPT_GSI: return MLX5_QP_ST_QP1;
+ case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
+ case IB_QPT_MAX:
+ default: return -EINVAL;
+ }
+}
+
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+
+static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
+{
+ return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
+}
+
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ struct ib_qp_init_attr *attr,
+ struct mlx5_create_qp_mbox_in **in,
+ int *inlen,
+ struct mlx5_exp_ib_create_qp *ucmd)
+{
+ struct mlx5_exp_ib_create_qp_resp resp;
+ struct mlx5_ib_ucontext *context;
+ int page_shift = 0;
+ int uar_index;
+ int npages;
+ u32 offset = 0;
+ int uuarn;
+ int ncont = 0;
+ int err;
+
+ context = to_mucontext(pd->uobject->context);
+ memset(&resp, 0, sizeof(resp));
+ resp.size_of_prefix = offsetof(struct mlx5_exp_ib_create_qp_resp, prefix_reserved);
+ /*
+ * TBD: should come from the verbs when we have the API
+ */
+ if (ucmd->exp.comp_mask & MLX5_EXP_CREATE_QP_MASK_WC_UAR_IDX) {
+ if (ucmd->exp.wc_uar_index == MLX5_EXP_CREATE_QP_DB_ONLY_UUAR) {
+ /* Assign LATENCY_CLASS_LOW (DB only UUAR) to this QP */
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+ if (uuarn < 0) {
+ mlx5_ib_warn(dev, "DB only uuar allocation failed\n");
+ return uuarn;
+ }
+ uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
+ } else if (ucmd->exp.wc_uar_index >= MLX5_IB_MAX_CTX_DYNAMIC_UARS ||
+ context->dynamic_wc_uar_index[ucmd->exp.wc_uar_index] ==
+ MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_warn(dev, "dynamic uuar allocation failed\n");
+ return -EINVAL;
+ } else {
+ uar_index = context->dynamic_wc_uar_index[ucmd->exp.wc_uar_index];
+ uuarn = MLX5_EXP_INVALID_UUAR;
+ }
+ } else {
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
+ if (uuarn < 0) {
+ mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
+ mlx5_ib_dbg(dev, "reverting to medium latency\n");
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
+ if (uuarn < 0) {
+ mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+ mlx5_ib_dbg(dev, "reverting to high latency\n");
+ uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
+ if (uuarn < 0) {
+ mlx5_ib_warn(dev, "uuar allocation failed\n");
+ return uuarn;
+ }
+ }
+ }
+ uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
+ }
+ mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
+
+ qp->rq.offset = 0;
+ qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+ qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+
+ err = set_user_buf_size(dev, qp, (struct mlx5_ib_create_qp *)ucmd, attr);
+ if (err)
+ goto err_uuar;
+
+ if (ucmd->buf_addr && qp->buf_size) {
+ qp->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
+ qp->buf_size, 0, 0);
+ if (IS_ERR(qp->umem)) {
+ mlx5_ib_warn(dev, "umem_get failed\n");
+ err = PTR_ERR(qp->umem);
+ goto err_uuar;
+ }
+ } else {
+ qp->umem = NULL;
+ }
+
+ if (qp->umem) {
+ mlx5_ib_cont_pages(qp->umem, ucmd->buf_addr, &npages, &page_shift,
+ &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, &offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+ mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
+ (unsigned long long)ucmd->buf_addr, qp->buf_size,
+ npages, page_shift, ncont, offset);
+ }
+
+ *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
+ *in = mlx5_vzalloc(*inlen);
+ if (!*in) {
+ err = -ENOMEM;
+ goto err_umem;
+ }
+ if (qp->umem)
+ mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
+ (*in)->ctx.log_pg_sz_remote_qpn =
+ cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
+ (*in)->ctx.params2 = cpu_to_be32(offset << 6);
+
+ (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
+ resp.uuar_index = uuarn;
+ qp->uuarn = uuarn;
+
+ err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
+ if (err) {
+ mlx5_ib_warn(dev, "map failed\n");
+ goto err_free;
+ }
+
+ err = ib_copy_to_udata(udata, &resp, sizeof(struct mlx5_ib_create_qp_resp));
+ if (err) {
+ mlx5_ib_err(dev, "copy failed\n");
+ goto err_unmap;
+ }
+ qp->create_type = MLX5_QP_USER;
+
+ return 0;
+
+err_unmap:
+ mlx5_ib_db_unmap_user(context, &qp->db);
+
+err_free:
+ kvfree(*in);
+
+err_umem:
+ if (qp->umem)
+ ib_umem_release(qp->umem);
+
+err_uuar:
+ free_uuar(&context->uuari, uuarn);
+ return err;
+}
+
+static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_ucontext *context;
+
+ context = to_mucontext(pd->uobject->context);
+ mlx5_ib_db_unmap_user(context, &qp->db);
+ if (qp->umem)
+ ib_umem_release(qp->umem);
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ /*
+ * Free only the UUARs handled by the kernel.
+ * UUARs of UARs allocated dynamically are handled by user.
+ */
+ if (qp->uuarn != MLX5_EXP_INVALID_UUAR)
+ free_uuar(&context->uuari, qp->uuarn);
+}
+
+static int create_kernel_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_mbox_in **in, int *inlen)
+{
+ enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
+ struct mlx5_uuar_info *uuari;
+ int uar_index;
+ int uuarn;
+ int err;
+
+ uuari = &dev->mdev->priv.uuari;
+ if (init_attr->create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
+ return -EINVAL;
+
+ uuarn = alloc_uuar(uuari, lc);
+ if (uuarn < 0) {
+ mlx5_ib_warn(dev, "\n");
+ return -ENOMEM;
+ }
+
+ qp->bf = &uuari->bfs[uuarn];
+ uar_index = qp->bf->uar->index;
+
+ err = calc_sq_size(dev, init_attr, qp);
+ if (err < 0) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto err_uuar;
+ }
+
+ qp->rq.offset = 0;
+ qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+ qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
+
+ err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto err_uuar;
+ }
+
+ qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
+ *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
+ *in = mlx5_vzalloc(*inlen);
+ if (!*in) {
+ err = -ENOMEM;
+ goto err_buf;
+ }
+ (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
+ (*in)->ctx.log_pg_sz_remote_qpn =
+ cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
+ /* Set "fast registration enabled" for all kernel QPs */
+ (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
+ (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
+
+ mlx5_fill_page_array(&qp->buf, (*in)->pas);
+
+ err = mlx5_db_alloc(dev->mdev, &qp->db);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto err_free;
+ }
+
+ qp->sq.swr_ctx = kcalloc(qp->sq.wqe_cnt, sizeof(*qp->sq.swr_ctx),
+ GFP_KERNEL);
+ qp->rq.rwr_ctx = kcalloc(qp->rq.wqe_cnt, sizeof(*qp->rq.rwr_ctx),
+ GFP_KERNEL);
+ if (!qp->sq.swr_ctx || !qp->rq.rwr_ctx) {
+ err = -ENOMEM;
+ goto err_wrid;
+ }
+ qp->create_type = MLX5_QP_KERNEL;
+
+ return 0;
+
+err_wrid:
+ mlx5_db_free(dev->mdev, &qp->db);
+ kfree(qp->sq.swr_ctx);
+ kfree(qp->rq.rwr_ctx);
+
+err_free:
+ kvfree(*in);
+
+err_buf:
+ mlx5_buf_free(dev->mdev, &qp->buf);
+
+err_uuar:
+ free_uuar(&dev->mdev->priv.uuari, uuarn);
+ return err;
+}
+
+static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+ mlx5_db_free(dev->mdev, &qp->db);
+ kfree(qp->sq.swr_ctx);
+ kfree(qp->rq.rwr_ctx);
+ mlx5_buf_free(dev->mdev, &qp->buf);
+ free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
+}
+
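+/*
+ * get_rx_type() picks the receive-queue flavour encoded into the QP
+ * context: SRQ-backed for QPs attached to an SRQ and for XRC QPs, a
+ * zero-length RQ when the QP has no receive queue at all, and a regular
+ * RQ otherwise.
+ */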
+static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+{
+ enum ib_qp_type qt = attr->qp_type;
+
+ if (attr->srq || (qt == IB_QPT_XRC_TGT) || (qt == IB_QPT_XRC_INI))
+ return cpu_to_be32(MLX5_SRQ_RQ);
+ else if (!qp->has_rq)
+ return cpu_to_be32(MLX5_ZERO_LEN_RQ);
+ else
+ return cpu_to_be32(MLX5_NON_ZERO_RQ);
+}
+
+static int is_connected(enum ib_qp_type qp_type)
+{
+ if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+ return 1;
+
+ return 0;
+}
+
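+/*
+ * get_cqs() resolves the CQs that actually back a QP: XRC targets use
+ * neither, XRC initiators only a send CQ, and every other type uses
+ * whatever send/receive CQs the caller supplied.
+ */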
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+ struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
+{
+ switch (qp_type) {
+ case IB_QPT_XRC_TGT:
+ *send_cq = NULL;
+ *recv_cq = NULL;
+ break;
+ case IB_QPT_XRC_INI:
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = NULL;
+ break;
+
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ case IB_QPT_RAW_IPV6:
+ case IB_QPT_RAW_ETHERTYPE:
+ case IB_QPT_RAW_PACKET:
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
+ break;
+
+ case IB_QPT_MAX:
+ default:
+ *send_cq = NULL;
+ *recv_cq = NULL;
+ break;
+ }
+}
+
+enum {
+ MLX5_QP_END_PAD_MODE_ALIGN = MLX5_WQ_END_PAD_MODE_ALIGN,
+ MLX5_QP_END_PAD_MODE_NONE = MLX5_WQ_END_PAD_MODE_NONE,
+};
+
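+/*
+ * create_qp_common() is the shared creation path.  It validates the
+ * create flags, copies in the user command (for userspace QPs), sizes
+ * the RQ, delegates buffer setup to create_user_qp()/create_kernel_qp(),
+ * fills the CREATE_QP mailbox context (service type, PD, end padding,
+ * scatter-to-CQE sizes, RQ stride, default SRQ/XRCD and CQ numbers), and
+ * executes the firmware command.  On success the QP is linked onto the
+ * device and CQ lists under reset_flow_resource_lock so the reset flow
+ * can find it, and doorbell_qpn is precomputed for the send path.  Raw
+ * packet (Ethernet) QPs are not wired up here and fail with -EOPNOTSUPP.
+ */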
+static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata, struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_create_qp_mbox_in *in = NULL;
+ struct mlx5_exp_ib_create_qp ucmd;
+ struct mlx5_ib_create_qp *pucmd = NULL;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ int inlen = sizeof(*in);
+ size_t ucmd_size;
+ int err;
+ int st;
+ u32 uidx;
+ void *qpc;
+
+ mutex_init(&qp->mutex);
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
+ if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
+ mlx5_ib_warn(dev, "block multicast loopback isn't supported\n");
+ return -EINVAL;
+ } else {
+ qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ }
+ }
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ if (pd && pd->uobject) {
+ memset(&ucmd, 0, sizeof(ucmd));
+ ucmd_size = sizeof(struct mlx5_ib_create_qp);
+ if (ucmd_size > offsetof(struct mlx5_exp_ib_create_qp, size_of_prefix)) {
+ mlx5_ib_warn(dev, "mlx5_ib_create_qp is too big to fit as prefix of mlx5_exp_ib_create_qp\n");
+ return -EINVAL;
+ }
+ err = ib_copy_from_udata(&ucmd, udata, min(udata->inlen, ucmd_size));
+ if (err) {
+ mlx5_ib_err(dev, "copy failed\n");
+ return err;
+ }
+ pucmd = (struct mlx5_ib_create_qp *)&ucmd;
+ if (ucmd.exp.comp_mask & MLX5_EXP_CREATE_QP_MASK_UIDX)
+ uidx = ucmd.exp.uidx;
+ else
+ uidx = 0xffffff;
+
+ qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
+ } else {
+ qp->wq_sig = !!workqueue_signature;
+ uidx = 0xffffff;
+ }
+
+ qp->has_rq = qp_has_rq(init_attr);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
+ qp, (pd && pd->uobject) ? pucmd : NULL);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ return err;
+ }
+
+ if (pd) {
+ if (pd->uobject) {
+ __u32 max_wqes =
+ 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
+ if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
+ mlx5_ib_warn(dev, "invalid rq params\n");
+ return -EINVAL;
+ }
+ if (ucmd.sq_wqe_count > max_wqes) {
+ mlx5_ib_warn(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
+ ucmd.sq_wqe_count, max_wqes);
+ return -EINVAL;
+ }
+ err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
+ &inlen, &ucmd);
+ if (err)
+ mlx5_ib_warn(dev, "err %d\n", err);
+ } else {
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
+ mlx5_ib_warn(dev, "Raw Eth QP is disabled for Kernel consumers\n");
+ return -EINVAL;
+ }
+ err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
+ if (err)
+ mlx5_ib_warn(dev, "err %d\n", err);
+ else
+ qp->pa_lkey = to_mpd(pd)->pa_lkey;
+ }
+
+ if (err)
+ return err;
+ } else {
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ qp->create_type = MLX5_QP_EMPTY;
+ }
+
+ if (is_sqp(init_attr->qp_type))
+ qp->port = init_attr->port_num;
+
+ st = to_mlx5_st(init_attr->qp_type);
+ if (st < 0) {
+ mlx5_ib_warn(dev, "invalid service type\n");
+ err = st;
+ goto err_create;
+ }
+ in->ctx.flags |= cpu_to_be32(st << 16 | MLX5_QP_PM_MIGRATED << 11);
+
+ in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
+
+ if (qp->wq_sig)
+ in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
+
+ if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+ in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);
+
+ if (qp->flags & MLX5_IB_QP_CAP_RX_END_PADDING)
+ in->ctx.flags |= cpu_to_be32(MLX5_QP_END_PAD_MODE_ALIGN << 2);
+ else
+ in->ctx.flags |= cpu_to_be32(MLX5_QP_END_PAD_MODE_NONE << 2);
+
+ if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
+ int rcqe_sz;
+ int scqe_sz;
+
+ rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
+ scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
+
+ if (rcqe_sz == 128) {
+ in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
+ } else {
+ in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
+ }
+
+ if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
+ in->ctx.cs_req = 0;
+ } else {
+ if (scqe_sz == 128)
+ in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
+ else
+ in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
+ }
+ }
+
+ if (qp->rq.wqe_cnt) {
+ in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
+ in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
+ }
+
+ in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
+
+ if (qp->sq.wqe_cnt)
+ in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
+ else
+ in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
+
+ /* Set default resources */
+ switch (init_attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
+ in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
+ in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
+ break;
+ case IB_QPT_XRC_INI:
+ in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
+ in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
+ in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
+ break;
+ default:
+ if (init_attr->srq) {
+ in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
+ in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
+ } else {
+ in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
+ in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
+ }
+ }
+
+ if (init_attr->send_cq)
+ in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
+
+ if (init_attr->recv_cq)
+ in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+ in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
+
+ if (MLX5_CAP_GEN(mdev, cqe_version)) {
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(qpc, qpc, user_index, uidx);
+ }
+
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
+ mlx5_ib_warn(dev, "Raw Ethernet QP is allowed only for Ethernet link layer\n");
+ return -ENOSYS;
+ }
+ if (ucmd.exp.comp_mask & MLX5_EXP_CREATE_QP_MASK_SQ_BUFF_ADD) {
+ qp->sq_buf_addr = ucmd.exp.sq_buf_addr;
+ } else {
+ mlx5_ib_warn(dev, "Raw Ethernet QP needs SQ buff address\n");
+ return -EINVAL;
+ }
+ err = -EOPNOTSUPP;
+ } else {
+ err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
+ qp->mqp.event = mlx5_ib_qp_event;
+ }
+
+ if (err) {
+ mlx5_ib_warn(dev, "create qp failed\n");
+ goto err_create;
+ }
+
+ kvfree(in);
+ /* Hardware wants QPN written in big-endian order (after
+ * shifting) for send doorbell. Precompute this value to save
+ * a little bit when posting sends.
+ */
+ qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
+
+ get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+	/* Keep the device-to-QP link; the reset flow needs it for
+	 * further handling.
+	 */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+	/* Likewise keep the CQ-to-QP links for the reset flow. */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ if (qp->create_type == MLX5_QP_USER)
+ destroy_qp_user(pd, qp);
+ else if (qp->create_type == MLX5_QP_KERNEL)
+ destroy_qp_kernel(dev, qp);
+
+ kvfree(in);
+ return err;
+}
+
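+/*
+ * The CQ lock helpers always take the two CQ locks in ascending CQN
+ * order (and only once when send and receive CQ are the same), which
+ * keeps the locking order consistent across QPs and so avoids ABBA
+ * deadlocks.  The __acquire()/__release() calls only balance the sparse
+ * annotations for the cases where a lock is not actually taken.
+ */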
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
+{
+ if (send_cq) {
+ if (recv_cq) {
+ if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_lock(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock,
+ SINGLE_DEPTH_NESTING);
+ } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else {
+ spin_lock(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock,
+ SINGLE_DEPTH_NESTING);
+ }
+ } else {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ }
+ } else if (recv_cq) {
+ spin_lock(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else {
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ }
+}
+
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
+{
+ if (send_cq) {
+ if (recv_cq) {
+ if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ }
+ } else {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ }
+ } else if (recv_cq) {
+ __release(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ } else {
+ __release(&recv_cq->lock);
+ __release(&send_cq->lock);
+ }
+}
+
+static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
+{
+ return to_mpd(qp->ibqp.pd);
+}
+
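+/*
+ * destroy_qp_common() first forces the QP to RESET if it is not there
+ * already, unlinks it from the device and CQ lists under the reset-flow
+ * lock, flushes any stale CQEs for kernel-owned QPs, destroys the
+ * firmware QP object, and finally releases the user or kernel resources
+ * allocated at create time.
+ */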
+static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_cq *send_cq, *recv_cq;
+ struct mlx5_modify_qp_mbox_in *in;
+ unsigned long flags;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return;
+
+ if (qp->state != IB_QPS_RESET) {
+ if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
+ if (mlx5_core_qp_modify(dev->mdev, MLX5_CMD_OP_2RST_QP, in, 0,
+ &qp->mqp))
+ mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
+ qp->mqp.qpn);
+ }
+ }
+
+ get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ if (send_cq)
+ list_del(&qp->cq_send_list);
+
+ if (recv_cq)
+ list_del(&qp->cq_recv_list);
+
+ if (qp->create_type == MLX5_QP_KERNEL) {
+ __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+ }
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ } else {
+ err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
+ if (err)
+ mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
+ qp->mqp.qpn);
+ }
+
+ kfree(in);
+
+ if (qp->create_type == MLX5_QP_KERNEL)
+ destroy_qp_kernel(dev, qp);
+ else if (qp->create_type == MLX5_QP_USER)
+ destroy_qp_user(&get_pd(qp)->ibpd, qp);
+}
+
+static const char *ib_qp_type_str(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_SMI:
+ return "IB_QPT_SMI";
+ case IB_QPT_GSI:
+ return "IB_QPT_GSI";
+ case IB_QPT_RC:
+ return "IB_QPT_RC";
+ case IB_QPT_UC:
+ return "IB_QPT_UC";
+ case IB_QPT_UD:
+ return "IB_QPT_UD";
+ case IB_QPT_RAW_IPV6:
+ return "IB_QPT_RAW_IPV6";
+ case IB_QPT_RAW_ETHERTYPE:
+ return "IB_QPT_RAW_ETHERTYPE";
+ case IB_QPT_XRC_INI:
+ return "IB_QPT_XRC_INI";
+ case IB_QPT_XRC_TGT:
+ return "IB_QPT_XRC_TGT";
+ case IB_QPT_RAW_PACKET:
+ return "IB_QPT_RAW_PACKET";
+ case IB_QPT_MAX:
+ default:
+ return "Invalid QP type";
+ }
+}
+
+struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_qp *qp;
+ u16 xrcdn = 0;
+ int err;
+ u32 rcqn;
+ u32 scqn;
+
+ init_attr->qpg_type = IB_QPG_NONE;
+
+ if (pd) {
+ dev = to_mdev(pd->device);
+ } else {
+ /* being cautious here */
+ if (init_attr->qp_type != IB_QPT_XRC_TGT) {
+ printf("mlx5_ib: WARN: ""%s: no PD for transport %s\n", __func__, ib_qp_type_str(init_attr->qp_type));
+ return ERR_PTR(-EINVAL);
+ }
+ dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
+ }
+
+ switch (init_attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_XRC_INI:
+ if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
+ mlx5_ib_warn(dev, "XRC not supported\n");
+ return ERR_PTR(-ENOSYS);
+ }
+ init_attr->recv_cq = NULL;
+ if (init_attr->qp_type == IB_QPT_XRC_TGT) {
+ xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
+ init_attr->send_cq = NULL;
+ }
+
+ /* fall through */
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+ case IB_QPT_RAW_ETHERTYPE:
+ case IB_QPT_RAW_PACKET:
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return ERR_PTR(-ENOMEM);
+
+ err = create_qp_common(dev, pd, init_attr, udata, qp);
+ if (err) {
+ mlx5_ib_warn(dev, "create_qp_common failed\n");
+ kfree(qp);
+ return ERR_PTR(err);
+ }
+
+ if (is_qp0(init_attr->qp_type))
+ qp->ibqp.qp_num = 0;
+ else if (is_qp1(init_attr->qp_type))
+ qp->ibqp.qp_num = 1;
+ else
+ qp->ibqp.qp_num = qp->mqp.qpn;
+
+ rcqn = init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1;
+ scqn = init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1;
+ mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
+ qp->ibqp.qp_num, qp->mqp.qpn, rcqn, scqn);
+
+ qp->xrcdn = xrcdn;
+
+ break;
+
+ case IB_QPT_RAW_IPV6:
+ case IB_QPT_MAX:
+ default:
+ mlx5_ib_warn(dev, "unsupported qp type %d\n",
+ init_attr->qp_type);
+ /* Don't support raw QPs */
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &qp->ibqp;
+}
+
+int mlx5_ib_destroy_qp(struct ib_qp *qp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+
+ destroy_qp_common(dev, mqp);
+
+ kfree(mqp);
+
+ return 0;
+}
+
+static u32 atomic_mode_qp(struct mlx5_ib_dev *dev)
+{
+ unsigned long mask;
+ unsigned long tmp;
+
+ mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp) &
+ MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+
+ tmp = find_last_bit(&mask, BITS_PER_LONG);
+ if (tmp < 2 || tmp >= BITS_PER_LONG)
+ return MLX5_ATOMIC_MODE_NONE;
+
+ if (tmp == 2)
+ return MLX5_ATOMIC_MODE_CX;
+
+ return tmp << MLX5_ATOMIC_MODE_OFF;
+}
+
+static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ u32 hw_access_flags = 0;
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ dest_rd_atomic = attr->max_dest_rd_atomic;
+ else
+ dest_rd_atomic = qp->resp_depth;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ access_flags = attr->qp_access_flags;
+ else
+ access_flags = qp->atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ if (access_flags & IB_ACCESS_REMOTE_READ)
+ hw_access_flags |= MLX5_QP_BIT_RRE;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ hw_access_flags |= (MLX5_QP_BIT_RAE |
+ atomic_mode_qp(dev));
+ if (access_flags & IB_ACCESS_REMOTE_WRITE)
+ hw_access_flags |= MLX5_QP_BIT_RWE;
+
+ return cpu_to_be32(hw_access_flags);
+}
+
+enum {
+ MLX5_PATH_FLAG_FL = 1 << 0,
+ MLX5_PATH_FLAG_FREE_AR = 1 << 1,
+ MLX5_PATH_FLAG_COUNTER = 1 << 2,
+};
+
+static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+{
+ if (rate == IB_RATE_PORT_CURRENT) {
+ return 0;
+ } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+ return -EINVAL;
+ } else {
+ while (rate != IB_RATE_2_5_GBPS &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ --rate;
+ }
+
+ return rate + MLX5_STAT_RATE_OFFSET;
+}
+
+static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
+ struct mlx5_qp_path *path, u8 port, int attr_mask,
+ u32 path_flags, const struct ib_qp_attr *attr,
+ int alt)
+{
+ enum rdma_link_layer ll = dev->ib_dev.get_link_layer(&dev->ib_dev,
+ port);
+ int err;
+ int gid_type;
+
+ if ((ll == IB_LINK_LAYER_ETHERNET) || (ah->ah_flags & IB_AH_GRH)) {
+ int len = dev->mdev->port_caps[port - 1].gid_table_len;
+ if (ah->grh.sgid_index >= len) {
+ printf("mlx5_ib: ERR: ""sgid_index (%u) too large. max is %d\n", ah->grh.sgid_index, len - 1);
+ return -EINVAL;
+ }
+ }
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ if (!(ah->ah_flags & IB_AH_GRH))
+ return -EINVAL;
+
+ err = mlx5_get_roce_gid_type(dev, port, ah->grh.sgid_index,
+ &gid_type);
+ if (err)
+ return err;
+ err = mlx5_ib_resolve_grh(ah, path->rmac, NULL);
+ if (err)
+ return err;
+ path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
+ ah->grh.sgid_index,
+ 0);
+ path->dci_cfi_prio_sl = (ah->sl & 0xf) << 4;
+ } else {
+ path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
+ path->grh_mlid = ah->src_path_bits & 0x7f;
+ path->rlid = cpu_to_be16(ah->dlid);
+ if (ah->ah_flags & IB_AH_GRH)
+ path->grh_mlid |= 1 << 7;
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ path->pkey_index = cpu_to_be16(alt ?
+ attr->alt_pkey_index :
+ attr->pkey_index);
+
+ path->dci_cfi_prio_sl = ah->sl & 0xf;
+ }
+
+ path->fl_free_ar |= (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
+
+ if (ah->ah_flags & IB_AH_GRH) {
+ path->mgid_index = ah->grh.sgid_index;
+ path->hop_limit = ah->grh.hop_limit;
+ path->tclass_flowlabel =
+ cpu_to_be32((ah->grh.traffic_class << 20) |
+ (ah->grh.flow_label));
+ memcpy(path->rgid, ah->grh.dgid.raw, 16);
+ }
+
+ err = ib_rate_to_mlx5(dev, ah->static_rate);
+ if (err < 0)
+ return err;
+ path->static_rate = err;
+ path->port = port;
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ path->ackto_lt = alt ? attr->alt_timeout << 3 : attr->timeout << 3;
+
+ return 0;
+}
+
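+/*
+ * opt_mask[][][] records, for each (current state, next state, service
+ * type) triple, which optional QP-context parameters the corresponding
+ * MODIFY_QP command is allowed to carry.  ib_nr_to_mlx5_nr() and
+ * ib_mask_to_mlx5_opt() below translate an IB attr_mask into the same
+ * bit space; the intersection of the two is what ends up in optparam.
+ */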
+static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY |
+ MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_DC_KEY |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_RAE,
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX,
+ [MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_DC_KEY,
+ },
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_DC_KEY |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RAE,
+ },
+ },
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
+ MLX5_QP_OPTPAR_SRQN |
+ MLX5_QP_OPTPAR_CQN_RCV,
+ [MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_DC_KEY |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RAE,
+ },
+ },
+ [MLX5_QP_STATE_SQER] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ [MLX5_QP_ST_DCI] = MLX5_QP_OPTPAR_DC_KEY |
+ MLX5_QP_OPTPAR_RAE,
+
+ },
+ },
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ },
+ },
+};
+
+static int ib_nr_to_mlx5_nr(int ib_mask)
+{
+ switch (ib_mask) {
+ case IB_QP_STATE:
+ return 0;
+ case IB_QP_CUR_STATE:
+ return 0;
+ case IB_QP_EN_SQD_ASYNC_NOTIFY:
+ return 0;
+ case IB_QP_ACCESS_FLAGS:
+ return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE;
+ case IB_QP_PKEY_INDEX:
+ return MLX5_QP_OPTPAR_PKEY_INDEX;
+ case IB_QP_PORT:
+ return MLX5_QP_OPTPAR_PRI_PORT;
+ case IB_QP_QKEY:
+ return MLX5_QP_OPTPAR_Q_KEY;
+ case IB_QP_AV:
+ return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
+ MLX5_QP_OPTPAR_PRI_PORT;
+ case IB_QP_PATH_MTU:
+ return 0;
+ case IB_QP_TIMEOUT:
+ return MLX5_QP_OPTPAR_ACK_TIMEOUT;
+ case IB_QP_RETRY_CNT:
+ return MLX5_QP_OPTPAR_RETRY_COUNT;
+ case IB_QP_RNR_RETRY:
+ return MLX5_QP_OPTPAR_RNR_RETRY;
+ case IB_QP_RQ_PSN:
+ return 0;
+ case IB_QP_MAX_QP_RD_ATOMIC:
+ return MLX5_QP_OPTPAR_SRA_MAX;
+ case IB_QP_ALT_PATH:
+ return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
+ case IB_QP_MIN_RNR_TIMER:
+ return MLX5_QP_OPTPAR_RNR_TIMEOUT;
+ case IB_QP_SQ_PSN:
+ return 0;
+ case IB_QP_MAX_DEST_RD_ATOMIC:
+ return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
+ case IB_QP_PATH_MIG_STATE:
+ return MLX5_QP_OPTPAR_PM_STATE;
+ case IB_QP_CAP:
+ return 0;
+ case IB_QP_DEST_QPN:
+ return 0;
+ }
+ return 0;
+}
+
+static int ib_mask_to_mlx5_opt(int ib_mask)
+{
+ int result = 0;
+ int i;
+
+ for (i = 0; i < 8 * sizeof(int); i++) {
+ if ((1 << i) & ib_mask)
+ result |= ib_nr_to_mlx5_nr(1 << i);
+ }
+
+ return result;
+}
+
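+/*
+ * __mlx5_ib_modify_qp() performs one state transition: optab[] maps the
+ * (current, next) state pair to the firmware opcode, the mailbox context
+ * is filled from the attributes selected by attr_mask, and the optional-
+ * parameter mask is clamped by opt_mask[] above.  When a kernel QP is
+ * moved back to RESET, its CQs are cleaned and the ring indices and
+ * doorbell records are rewound so the QP can be reused.
+ */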
+static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ enum ib_qp_state cur_state, enum ib_qp_state new_state)
+{
+ static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
+ [MLX5_QP_STATE_RST] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
+ },
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
+ [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
+ },
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
+ },
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP,
+ },
+ [MLX5_QP_STATE_SQER] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
+ },
+ [MLX5_QP_STATE_ERR] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ }
+ };
+
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_cq *send_cq, *recv_cq;
+ struct mlx5_qp_context *context;
+ struct mlx5_modify_qp_mbox_in *in;
+ struct mlx5_ib_pd *pd;
+ enum mlx5_qp_state mlx5_cur, mlx5_new;
+ enum mlx5_qp_optpar optpar;
+ int sqd_event;
+ int mlx5_st;
+ int err;
+ u16 op;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ context = &in->ctx;
+ err = to_mlx5_st(ibqp->qp_type);
+ if (err < 0)
+ goto out;
+
+ context->flags = cpu_to_be32(err << 16);
+
+ if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
+ context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ } else {
+ switch (attr->path_mig_state) {
+ case IB_MIG_MIGRATED:
+ context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ break;
+ case IB_MIG_REARM:
+ context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
+ break;
+ case IB_MIG_ARMED:
+ context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
+ break;
+ }
+ }
+
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+ context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
+ } else if (ibqp->qp_type == IB_QPT_UD) {
+ context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
+ } else if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
+ err = -EINVAL;
+ goto out;
+ }
+ context->mtu_msgmax = (attr->path_mtu << 5) |
+ (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
+
+ /* todo implement counter_index functionality */
+
+ if (is_sqp(ibqp->qp_type))
+ context->pri_path.port = qp->port;
+
+ if (attr_mask & IB_QP_PORT)
+ context->pri_path.port = attr->port_num;
+
+ if (attr_mask & IB_QP_AV) {
+ err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
+ attr_mask, 0, attr, 0);
+ if (err)
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ context->pri_path.ackto_lt |= attr->timeout << 3;
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
+ 0, attr, 1);
+ if (err)
+ goto out;
+ }
+
+ pd = get_pd(qp);
+ get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
+
+ context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
+ context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
+ context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic)
+ context->params1 |=
+ cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
+ }
+
+ if (attr_mask & IB_QP_SQ_PSN)
+ context->next_send_psn = cpu_to_be32(attr->sq_psn & 0xffffff);
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic)
+ context->params2 |=
+ cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
+ }
+
+ if ((attr_mask & IB_QP_ACCESS_FLAGS) &&
+ (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ !dev->enable_atomic_resp) {
+ mlx5_ib_warn(dev, "atomic responder is not supported\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn & 0xffffff);
+
+ if (attr_mask & IB_QP_QKEY)
+ context->qkey = cpu_to_be32(attr->qkey);
+
+ if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ context->db_rec_addr = cpu_to_be64(qp->db.dma);
+
+ if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
+ attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
+ sqd_event = 1;
+ else
+ sqd_event = 0;
+
+ if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ context->sq_crq_size |= cpu_to_be16(1 << 4);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port) - 1;
+ struct mlx5_ib_port *mibport = &dev->port[port_num];
+
+ context->qp_counter_set_usr_page |=
+ cpu_to_be32(mibport->q_cnt_id << 24);
+ }
+
+ mlx5_cur = to_mlx5_state(cur_state);
+ mlx5_new = to_mlx5_state(new_state);
+ mlx5_st = to_mlx5_st(ibqp->qp_type);
+ if (mlx5_st < 0)
+ goto out;
+
+ if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
+ !optab[mlx5_cur][mlx5_new])
+ return -EINVAL;
+
+ op = optab[mlx5_cur][mlx5_new];
+ optpar = ib_mask_to_mlx5_opt(attr_mask);
+ optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
+ in->optparam = cpu_to_be32(optpar);
+
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+ err = -EOPNOTSUPP;
+ else
+ err = mlx5_core_qp_modify(dev->mdev, op, in, sqd_event,
+ &qp->mqp);
+ if (err)
+ goto out;
+
+ qp->state = new_state;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->atomic_rd_en = attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->alt_port = attr->alt_port_num;
+
+ /*
+ * If we moved a kernel QP to RESET, clean up all old CQ
+ * entries and reinitialize the QP.
+ */
+ if (new_state == IB_QPS_RESET && !ibqp->uobject) {
+ mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
+ ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ if (send_cq != recv_cq)
+ mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->sq.cur_post = 0;
+ qp->sq.last_poll = 0;
+ if (qp->db.db) {
+ qp->db.db[MLX5_RCV_DBR] = 0;
+ qp->db.db[MLX5_SND_DBR] = 0;
+ }
+ }
+
+out:
+ kfree(in);
+ return err;
+}
+
+static int ignored_ts_check(enum ib_qp_type qp_type)
+{
+ return 0;
+}
+
+int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ enum ib_qp_state cur_state, new_state;
+ int err = -EINVAL;
+ int port;
+
+ mutex_lock(&qp->mutex);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (!ignored_ts_check(ibqp->qp_type) &&
+ !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+ goto out;
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 ||
+ attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
+ goto out;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+ if (attr->pkey_index >=
+ dev->mdev->port_caps[port - 1].pkey_table_len)
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic >
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
+ goto out;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic >
+ (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
+ goto out;
+
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ err = 0;
+ goto out;
+ }
+
+ err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
+
+out:
+ mutex_unlock(&qp->mutex);
+ return err;
+}
+
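+/*
+ * mlx5_wq_overflow() checks whether nreq more WQEs fit in the ring.  The
+ * head/tail difference is first sampled locklessly; only when that looks
+ * full is it re-read under the CQ lock, since completions running in
+ * parallel may have advanced the tail in the meantime.
+ */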
+static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
+{
+ struct mlx5_ib_cq *cq;
+ unsigned cur;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max_post))
+ return 0;
+
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max_post;
+}
+
+static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
+ struct ib_send_wr *wr)
+{
+ memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
+}
+
+static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static __be16 get_klm_octo(int npages)
+{
+ return cpu_to_be16(ALIGN(npages, 8) / 2);
+}
+
+static __be64 frwr_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_A |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct ib_send_wr *wr, int li)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ if (li) {
+ umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr->flags = 1 << 7;
+ return;
+ }
+
+ umr->flags = (1 << 5); /* fail if not free */
+ umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
+ umr->mkey_mask = frwr_mkey_mask();
+}
+
+static u8 get_umr_flags(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+}
+
+static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
+ int li, int *writ)
+{
+ memset(seg, 0, sizeof(*seg));
+ if (li) {
+ seg->status = MLX5_MKEY_STATUS_FREE;
+ return;
+ }
+
+ seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
+ MLX5_ACCESS_MODE_MTT;
+ *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
+ seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
+ seg->len = cpu_to_be64(wr->wr.fast_reg.length);
+ seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
+ seg->log2_page_size = wr->wr.fast_reg.page_shift;
+}
+
+static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
+ struct ib_send_wr *wr,
+ struct mlx5_core_dev *mdev,
+ struct mlx5_ib_pd *pd,
+ int writ)
+{
+ struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
+ u64 *page_list = wr->wr.fast_reg.page_list->page_list;
+ u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
+ int i;
+
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
+ mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
+ dseg->addr = cpu_to_be64(mfrpl->map);
+ dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
+ dseg->lkey = cpu_to_be32(pd->pa_lkey);
+}
+
+static __be32 send_ieth(struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return wr->ex.imm_data;
+
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_be32(wr->ex.invalidate_rkey);
+
+ default:
+ return 0;
+ }
+}
+
+static u8 calc_sig(void *wqe, int size)
+{
+ u8 *p = wqe;
+ u8 res = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ res ^= p[i];
+
+ return ~res;
+}
+
+static u8 calc_wq_sig(void *wqe)
+{
+ return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+}
+
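+/*
+ * set_data_inl_seg() copies the scatter list straight into the WQE as
+ * inline data.  The copy wraps back to the start of the send queue when
+ * it reaches qend, the total is bounded by max_inline_data, and the
+ * resulting segment size is reported in 16-byte units.
+ */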
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
+ void *wqe, int *sz)
+{
+ struct mlx5_wqe_inline_seg *seg;
+ void *qend = qp->sq.qend;
+ void *addr;
+ int inl = 0;
+ int copy;
+ int len;
+ int i;
+
+ seg = wqe;
+ wqe += sizeof(*seg);
+ for (i = 0; i < wr->num_sge; i++) {
+ addr = (void *)(uintptr_t)(wr->sg_list[i].addr);
+ len = wr->sg_list[i].length;
+ inl += len;
+
+ if (unlikely(inl > qp->max_inline_data))
+ return -ENOMEM;
+
+ if (unlikely(wqe + len > qend)) {
+ copy = (int)(qend - wqe);
+ memcpy(wqe, addr, copy);
+ addr += copy;
+ len -= copy;
+ wqe = mlx5_get_send_wqe(qp, 0);
+ }
+ memcpy(wqe, addr, len);
+ wqe += len;
+ }
+
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+
+ *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
+
+ return 0;
+}
+
+static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
+ struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
+{
+ int writ = 0;
+ int li;
+
+ li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
+ if (unlikely(wr->send_flags & IB_SEND_INLINE))
+ return -EINVAL;
+
+ set_frwr_umr_segment(*seg, wr, li);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ if (unlikely((*seg == qp->sq.qend)))
+ *seg = mlx5_get_send_wqe(qp, 0);
+ set_mkey_segment(*seg, wr, li, &writ);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ if (unlikely((*seg == qp->sq.qend)))
+ *seg = mlx5_get_send_wqe(qp, 0);
+ if (!li) {
+ if (unlikely(wr->wr.fast_reg.page_list_len >
+ wr->wr.fast_reg.page_list->max_page_list_len))
+ return -ENOMEM;
+
+ set_frwr_pages(*seg, wr, mdev, pd, writ);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
+ return 0;
+}
+
+static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
+{
+ __be32 *p = NULL;
+ int tidx = idx;
+ int i, j;
+
+ pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
+ for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
+ if ((i & 0xf) == 0) {
+ void *buf = mlx5_get_send_wqe(qp, tidx);
+ tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
+ p = buf;
+ j = 0;
+ }
+ pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
+ be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
+ be32_to_cpu(p[j + 3]));
+ }
+}
+
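+/*
+ * mlx5_bf_copy() streams the WQE to the BlueFlame register in 64-byte
+ * bursts, wrapping the source pointer at the end of the send queue.  The
+ * post-send path below uses it only for a single small inline WQE that
+ * fits in the BlueFlame buffer; otherwise it rings the regular doorbell.
+ */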
+static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
+ unsigned bytecnt, struct mlx5_ib_qp *qp)
+{
+ while (bytecnt > 0) {
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ __iowrite64_copy(dst++, src++, 8);
+ bytecnt -= 64;
+ if (unlikely(src == qp->sq.qend))
+ src = mlx5_get_send_wqe(qp, 0);
+ }
+}
+
+static u8 get_fence(u8 fence, struct ib_send_wr *wr)
+{
+ if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
+ wr->send_flags & IB_SEND_FENCE))
+ return MLX5_FENCE_MODE_STRONG_ORDERING;
+
+ if (unlikely(fence)) {
+ if (wr->send_flags & IB_SEND_FENCE)
+ return MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ return fence;
+
+ } else {
+ return 0;
+ }
+}
+
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ struct ib_send_wr *wr, unsigned *idx,
+ int *size, int nreq)
+{
+ int err = 0;
+
+ if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device), "work queue overflow\n");
+ err = -ENOMEM;
+ return err;
+ }
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_get_send_wqe(qp, *idx);
+ *ctrl = *seg;
+ *(u32 *)(*seg + 8) = 0;
+ (*ctrl)->imm = send_ieth(wr);
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (wr->send_flags & IB_SEND_SIGNALED ?
+ MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (wr->send_flags & IB_SEND_SOLICITED ?
+ MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
+ *size = sizeof(**ctrl) / 16;
+
+ return err;
+}
+
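+/*
+ * finish_wqe() stamps the control segment with the opcode and current
+ * producer index, records the WQE size in 16-byte units together with
+ * the QP number, applies the fence bits and (optionally) the WQE
+ * signature, saves wr_id and the expected completion head in swr_ctx,
+ * and advances cur_post by the number of basic blocks consumed.
+ */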
+static void finish_wqe(struct mlx5_ib_qp *qp,
+ struct mlx5_wqe_ctrl_seg *ctrl,
+ u8 size, unsigned idx,
+ struct ib_send_wr *wr,
+ int nreq, u8 fence, u8 next_fence,
+ u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ qp->fm_cache = next_fence;
+ if (unlikely(qp->wq_sig))
+ ctrl->signature = calc_wq_sig(ctrl);
+
+ qp->sq.swr_ctx[idx].wrid = wr->wr_id;
+ qp->sq.swr_ctx[idx].w_list.opcode = mlx5_opcode;
+ qp->sq.swr_ctx[idx].wqe_head = qp->sq.head + nreq;
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.swr_ctx[idx].w_list.next = qp->sq.cur_post;
+ qp->sq.swr_ctx[idx].sig_piped = 0;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+ struct ib_send_wr **bad_wr)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *dpseg;
+ struct mlx5_wqe_xrc_seg *xrc;
+ struct mlx5_bf *bf = qp->bf;
+ int uninitialized_var(size);
+ void *qend = qp->sq.qend;
+ unsigned long flags;
+ unsigned idx;
+ int err = 0;
+ int inl = 0;
+ int num_sge;
+ void *seg;
+ int nreq;
+ int i;
+ u8 next_fence = 0;
+ u8 fence;
+
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
+ mlx5_ib_warn(dev, "Invalid opcode 0x%x\n", wr->opcode);
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ fence = qp->fm_cache;
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "Max gs exceeded %d (max = %d)\n", wr->num_sge, qp->sq.max_gs);
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to prepare WQE\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ switch (ibqp->qp_type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+ xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
+ seg += sizeof(*xrc);
+ size += sizeof(*xrc) / 16;
+ /* fall through */
+ case IB_QPT_RC:
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ set_raddr_seg(seg, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
+ seg += sizeof(struct mlx5_wqe_raddr_seg);
+ size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -ENOSYS;
+ *bad_wr = wr;
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ qp->sq.swr_ctx[idx].wr_data = IB_WR_LOCAL_INV;
+ ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to prepare LOCAL_INV WQE\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ num_sge = 0;
+ break;
+
+ case IB_WR_FAST_REG_MR:
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ qp->sq.swr_ctx[idx].wr_data = IB_WR_FAST_REG_MR;
+ ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
+ err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to prepare FAST_REG_MR WQE\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ num_sge = 0;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case IB_QPT_UC:
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ set_raddr_seg(seg, wr->wr.rdma.remote_addr,
+ wr->wr.rdma.rkey);
+ seg += sizeof(struct mlx5_wqe_raddr_seg);
+ size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case IB_QPT_SMI:
+ if (!mlx5_core_is_pf(mdev)) {
+ err = -EINVAL;
+ mlx5_ib_warn(dev, "Only physical function is allowed to send SMP MADs\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ set_datagram_seg(seg, wr);
+ seg += sizeof(struct mlx5_wqe_datagram_seg);
+ size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ if (unlikely((seg == qend)))
+ seg = mlx5_get_send_wqe(qp, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (wr->send_flags & IB_SEND_INLINE && num_sge) {
+ int uninitialized_var(sz);
+
+ err = set_data_inl_seg(qp, wr, seg, &sz);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "Failed to prepare inline data segment\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ inl = 1;
+ size += sz;
+ } else {
+ dpseg = seg;
+ for (i = 0; i < num_sge; i++) {
+ if (unlikely(dpseg == qend)) {
+ seg = mlx5_get_send_wqe(qp, 0);
+ dpseg = seg;
+ }
+ if (likely(wr->sg_list[i].length)) {
+ set_data_ptr_seg(dpseg, wr->sg_list + i);
+ size += sizeof(struct mlx5_wqe_data_seg) / 16;
+ dpseg++;
+ }
+ }
+ }
+
+ finish_wqe(qp, ctrl, size, idx, wr, nreq,
+ get_fence(fence, wr), next_fence,
+ mlx5_ib_opcode[wr->opcode]);
+ if (0)
+ dump_wqe(qp, idx, size);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->sq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell */
+ wmb();
+
+ if (bf->need_lock)
+ spin_lock(&bf->lock);
+ else
+ __acquire(&bf->lock);
+
+ /* TBD enable WC */
+ if (BF_ENABLE && nreq == 1 && bf->uuarn && inl && size > 1 &&
+ size <= bf->buf_size / 16) {
+ mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
+ /* wc_wmb(); */
+ } else {
+ mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
+ MLX5_GET_DOORBELL_LOCK(&bf->lock32));
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ mmiowb();
+ }
+ bf->offset ^= bf->buf_size;
+ if (bf->need_lock)
+ spin_unlock(&bf->lock);
+ else
+ __release(&bf->lock);
+ }
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
+static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
+{
+ sig->signature = calc_sig(sig, size);
+}
+
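+/*
+ * mlx5_ib_post_recv() fills one receive WQE per request at the slot
+ * derived from rq.head.  When wq_sig is enabled the first segment is
+ * reserved for the WQE signature, short scatter lists are terminated
+ * with an MLX5_INVALID_LKEY sentinel, and the new head is published to
+ * the receive doorbell record only after a write barrier.
+ */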
+int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
+ ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = get_recv_wqe(qp, ind);
+ if (qp->wq_sig)
+ scat++;
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_ptr_seg(scat + i, wr->sg_list + i);
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+
+ if (qp->wq_sig) {
+ sig = (struct mlx5_rwqe_sig *)scat;
+ set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
+ }
+
+ qp->rq.rwr_ctx[ind].wrid = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
+
+static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
+{
+ switch (mlx5_state) {
+ case MLX5_QP_STATE_RST: return IB_QPS_RESET;
+ case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
+ case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
+ case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
+ case MLX5_QP_STATE_SQ_DRAINING:
+ case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
+ case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
+ case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
+{
+ switch (mlx5_mig_state) {
+ case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
+ case MLX5_QP_PM_REARM: return IB_MIG_REARM;
+ case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
+ default: return -1;
+ }
+}
+
+static int to_ib_qp_access_flags(int mlx5_flags)
+{
+ int ib_flags = 0;
+
+ if (mlx5_flags & MLX5_QP_BIT_RRE)
+ ib_flags |= IB_ACCESS_REMOTE_READ;
+ if (mlx5_flags & MLX5_QP_BIT_RWE)
+ ib_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (mlx5_flags & MLX5_QP_BIT_RAE)
+ ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return ib_flags;
+}
+
+static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
+ struct mlx5_qp_path *path)
+{
+ struct mlx5_core_dev *dev = ibdev->mdev;
+
+ memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
+ ib_ah_attr->port_num = path->port;
+
+ if (ib_ah_attr->port_num == 0 ||
+ ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
+ return;
+
+ ib_ah_attr->sl = path->dci_cfi_prio_sl & 0xf;
+
+ ib_ah_attr->dlid = be16_to_cpu(path->rlid);
+ ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
+ ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
+ ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
+ if (ib_ah_attr->ah_flags) {
+ ib_ah_attr->grh.sgid_index = path->mgid_index;
+ ib_ah_attr->grh.hop_limit = path->hop_limit;
+ ib_ah_attr->grh.traffic_class =
+ (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
+ ib_ah_attr->grh.flow_label =
+ be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
+ memcpy(ib_ah_attr->grh.dgid.raw,
+ path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
+ }
+}
+
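+/*
+ * mlx5_ib_query_qp() reads the QP context back through a QUERY_QP
+ * mailbox and translates the hardware encodings (state, path MTU, PSNs,
+ * access flags, address vectors) into struct ib_qp_attr.  Raw packet QPs
+ * are rejected with -EOPNOTSUPP, and the capability fields are filled
+ * from the driver's own bookkeeping rather than the firmware.
+ */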
+int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_query_qp_mbox_out *outb;
+ struct mlx5_qp_context *context;
+ int mlx5_state;
+ int err = 0;
+
+ mutex_lock(&qp->mutex);
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ outb = kzalloc(sizeof(*outb), GFP_KERNEL);
+ if (!outb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ context = &outb->ctx;
+ err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb,
+ sizeof(*outb));
+ if (err) {
+ kfree(outb);
+ goto out;
+ }
+
+ mlx5_state = be32_to_cpu(context->flags) >> 28;
+
+ qp->state = to_ib_qp_state(mlx5_state);
+ qp_attr->path_mtu = context->mtu_msgmax >> 5;
+ qp_attr->path_mig_state =
+ to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
+ qp_attr->qkey = be32_to_cpu(context->qkey);
+ qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
+ qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
+ qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
+ qp_attr->qp_access_flags =
+ to_ib_qp_access_flags(be32_to_cpu(context->params2));
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
+ to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+ to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+ qp_attr->alt_pkey_index = be16_to_cpu(context->alt_path.pkey_index);
+ qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
+ }
+
+ qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index);
+ qp_attr->port_num = context->pri_path.port;
+
+ /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
+ qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
+
+ qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
+
+ qp_attr->max_dest_rd_atomic =
+ 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
+ qp_attr->min_rnr_timer =
+ (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
+ qp_attr->timeout = context->pri_path.ackto_lt >> 3;
+ qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
+ qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
+ qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
+
+
+ kfree(outb);
+ }
+
+ qp_attr->qp_state = qp->state;
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ qp_init_attr->qp_type = ibqp->qp_type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ qp_init_attr->create_flags = 0;
+ if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
+ qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
+
+ qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+
+out:
+ mutex_unlock(&qp->mutex);
+ return err;
+}
+
+struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_xrcd *xrcd;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ return ERR_PTR(-ENOSYS);
+
+ xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
+ if (!xrcd)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
+ if (err) {
+ kfree(xrcd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return &xrcd->ibxrcd;
+}
+
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
+{
+ struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
+ u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+ int err;
+
+ err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
+ return err;
+ }
+
+ kfree(xrcd);
+
+ return 0;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_qp.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_roce.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_roce.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_roce.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,253 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_roce.c 322810 2017-08-23 12:09:37Z hselasky $
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <dev/mlx5/vport.h>
+#include <net/ipv6.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include "mlx5_ib.h"
+
+struct net_device *mlx5_ib_get_netdev(struct ib_device *ib_dev, u8 port)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+
+ return mlx5_get_protocol_dev(dev->mdev, MLX5_INTERFACE_PROTOCOL_ETH);
+}
+
+
+static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
+ struct net_device *ndev,
+ void *mlx5_addr)
+{
+#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
+ char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
+ source_l3_address);
+ void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
+ source_mac_47_32);
+ union ib_gid zgid;
+ u16 vtag;
+
+ memset(&zgid, 0, sizeof(zgid));
+ if (0 == memcmp(gid, &zgid, sizeof(zgid)))
+ return;
+
+ ether_addr_copy(mlx5_addr_mac, IF_LLADDR(ndev));
+
+ if (VLAN_TAG(ndev, &vtag) == 0) {
+ MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
+ MLX5_SET_RA(mlx5_addr, vlan_id, vtag);
+ }
+
+#ifndef MLX5_USE_ROCE_VERSION_2
+ MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
+
+ memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
+#else
+ MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
+
+ if (ipv6_addr_v4mapped((void *)gid)) {
+ MLX5_SET_RA(mlx5_addr, roce_l3_type,
+ MLX5_ROCE_L3_TYPE_IPV4);
+ memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
+ } else {
+ MLX5_SET_RA(mlx5_addr, roce_l3_type,
+ MLX5_ROCE_L3_TYPE_IPV6);
+ memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
+ }
+#endif
+}
+
+int modify_gid_roce(struct ib_device *ib_dev, u8 port, unsigned int index,
+ const union ib_gid *gid, struct net_device *ndev)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+ u32 in[MLX5_ST_SZ_DW(set_roce_address_in)];
+ u32 out[MLX5_ST_SZ_DW(set_roce_address_out)];
+ void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
+
+ memset(in, 0, sizeof(in));
+
+ ib_gid_to_mlx5_roce_addr(gid, ndev, in_addr);
+
+ MLX5_SET(set_roce_address_in, in, roce_address_index, index);
+ MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+ u8 *active_width)
+{
+ switch (eth_proto_oper) {
+ case MLX5_PROT_MASK(MLX5_1000BASE_CX_SGMII):
+ case MLX5_PROT_MASK(MLX5_1000BASE_KX):
+ case MLX5_PROT_MASK(MLX5_100BASE_TX):
+ case MLX5_PROT_MASK(MLX5_1000BASE_T):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ break;
+ case MLX5_PROT_MASK(MLX5_10GBASE_T):
+ case MLX5_PROT_MASK(MLX5_10GBASE_CX4):
+ case MLX5_PROT_MASK(MLX5_10GBASE_KX4):
+ case MLX5_PROT_MASK(MLX5_10GBASE_KR):
+ case MLX5_PROT_MASK(MLX5_10GBASE_CR):
+ case MLX5_PROT_MASK(MLX5_10GBASE_SR):
+ case MLX5_PROT_MASK(MLX5_10GBASE_ER):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5_PROT_MASK(MLX5_25GBASE_CR):
+ case MLX5_PROT_MASK(MLX5_25GBASE_KR):
+ case MLX5_PROT_MASK(MLX5_25GBASE_SR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5_PROT_MASK(MLX5_40GBASE_CR4):
+ case MLX5_PROT_MASK(MLX5_40GBASE_KR4):
+ case MLX5_PROT_MASK(MLX5_40GBASE_SR4):
+ case MLX5_PROT_MASK(MLX5_40GBASE_LR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5_PROT_MASK(MLX5_50GBASE_CR2):
+ case MLX5_PROT_MASK(MLX5_50GBASE_KR2):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_FDR;
+ break;
+ case MLX5_PROT_MASK(MLX5_56GBASE_R4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR;
+ break;
+ case MLX5_PROT_MASK(MLX5_100GBASE_CR4):
+ case MLX5_PROT_MASK(MLX5_100GBASE_SR4):
+ case MLX5_PROT_MASK(MLX5_100GBASE_KR4):
+ case MLX5_PROT_MASK(MLX5_100GBASE_LR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_query_roce_port_ptys(struct ib_device *ib_dev,
+ struct ib_port_attr *props, u8 port)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ptys_reg *ptys;
+ int err;
+
+ ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
+ if (!ptys)
+ return -ENOMEM;
+
+ ptys->proto_mask |= MLX5_PTYS_EN;
+ ptys->local_port = port;
+
+ err = mlx5_core_access_ptys(mdev, ptys, 0);
+ if (err)
+ goto out;
+
+ err = translate_eth_proto_oper(ptys->eth_proto_oper,
+ &props->active_speed,
+ &props->active_width);
+out:
+ kfree(ptys);
+ return err;
+}
+
+int mlx5_query_port_roce(struct ib_device *ib_dev, u8 port,
+ struct ib_port_attr *props)
+{
+ struct net_device *netdev = mlx5_ib_get_netdev(ib_dev, port);
+ struct mlx5_ib_dev *dev = to_mdev(ib_dev);
+ enum ib_mtu netdev_ib_mtu;
+
+ memset(props, 0, sizeof(*props));
+
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+
+ props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
+ roce_address_table_size);
+ props->max_mtu = IB_MTU_4096;
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ props->pkey_tbl_len = 1;
+ props->state = IB_PORT_DOWN;
+ props->phys_state = 3;
+
+ if (mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev,
+ (u16 *)&props->qkey_viol_cntr))
+ printf("mlx5_ib: WARN: ""%s failed to query qkey violations counter\n", __func__);
+
+
+ if (!netdev)
+ return 0;
+
+ if (netif_running(netdev) && netif_carrier_ok(netdev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = 5;
+ }
+
+ netdev_ib_mtu = iboe_get_mtu(netdev->if_mtu);
+ props->active_mtu = min(props->max_mtu, netdev_ib_mtu);
+
+ mlx5_query_roce_port_ptys(ib_dev, props, port);
+
+ return 0;
+}
+
+__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port,
+ int index, __be16 ah_s_udp_port)
+{
+#ifndef MLX5_USE_ROCE_VERSION_2
+ return 0;
+#else
+ return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
+#endif
+}
+
+int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port,
+ int index, int *gid_type)
+{
+ union ib_gid gid;
+ int ret;
+
+ ret = ib_get_cached_gid(&dev->ib_dev, port, index, &gid);
+
+ if (!ret)
+ *gid_type = -1;
+
+ return ret;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_roce.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
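
Note (illustrative only, not part of the committed sources): translate_eth_proto_oper() above maps each Ethernet protocol to an (IB width, IB per-lane speed) pair whose product approximates the Ethernet rate, e.g. 40GBASE-CR4 becomes 4X at QDR because 4 lanes x ~10 Gb/s per lane is ~40 Gb/s, and 100GBASE-CR4 becomes 4X at EDR (4 x ~25 Gb/s). The helper below is a hypothetical stand-alone illustration of that arithmetic, not driver code:

	/* 4 lanes at QDR (~10 Gb/s/lane) ~= 40 Gb/s, matching 40GBASE-*4. */
	static unsigned int
	ib_link_rate_gbps(unsigned int lanes, unsigned int gbps_per_lane)
	{
		return (lanes * gbps_per_lane);
	}
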
Added: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,504 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c 322810 2017-08-23 12:09:37Z hselasky $
+ */
+
+#include <linux/module.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/srq.h>
+#include <linux/slab.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "mlx5_ib.h"
+#include "user.h"
+
+/* not supported currently */
+static int srq_signature;
+
+static void *get_wqe(struct mlx5_ib_srq *srq, int n)
+{
+ return mlx5_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
+}
+
+static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, int type)
+{
+ struct ib_event event;
+ struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
+
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ event.event = IB_EVENT_SRQ_ERR;
+ break;
+ default:
+ printf("mlx5_ib: WARN: ""mlx5_ib: Unexpected event type %d on SRQ %06x\n", type, srq->srqn);
+ return;
+ }
+
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+ }
+}
+
+static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct mlx5_create_srq_mbox_in **in,
+ struct ib_udata *udata, int buf_size, int *inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_srq ucmd;
+ size_t ucmdlen;
+ void *xsrqc;
+ int err;
+ int npages;
+ int page_shift;
+ int ncont;
+ int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ u32 offset;
+
+ ucmdlen = (drv_data < sizeof(ucmd)) ?
+ drv_data : sizeof(ucmd);
+
+ if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
+ mlx5_ib_err(dev, "failed copy udata\n");
+ return -EFAULT;
+ }
+
+ if (ucmdlen == sizeof(ucmd) &&
+ ucmd.reserved1 != 0) {
+ mlx5_ib_warn(dev, "corrupted ucmd\n");
+ return -EINVAL;
+ }
+
+ srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
+
+ srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
+ 0, 0);
+ if (IS_ERR(srq->umem)) {
+ mlx5_ib_warn(dev, "failed umem get, size %d\n", buf_size);
+ err = PTR_ERR(srq->umem);
+ return err;
+ }
+
+ mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages,
+ &page_shift, &ncont, NULL);
+ err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
+ &offset);
+ if (err) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ goto err_umem;
+ }
+
+ *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
+ *in = mlx5_vzalloc(*inlen);
+ if (!(*in)) {
+ mlx5_ib_err(dev, "failed allocate mbox\n");
+ err = -ENOMEM;
+ goto err_umem;
+ }
+
+ mlx5_ib_populate_pas(dev, srq->umem, page_shift, (*in)->pas, 0);
+
+ err = mlx5_ib_db_map_user(to_mucontext(pd->uobject->context),
+ ucmd.db_addr, &srq->db);
+ if (err) {
+ mlx5_ib_warn(dev, "map doorbell failed\n");
+ goto err_in;
+ }
+
+ (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
+
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version)) {
+ xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
+ xrc_srq_context_entry);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (drv_data > offsetof(struct mlx5_ib_create_srq, uidx))
+ MLX5_SET(xrc_srqc, xsrqc, user_index, ucmd.uidx);
+ else
+ MLX5_SET(xrc_srqc, xsrqc, user_index, 0xffffff);
+ }
+
+ return 0;
+
+err_in:
+ kvfree(*in);
+
+err_umem:
+ ib_umem_release(srq->umem);
+
+ return err;
+}
+
+static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+ struct mlx5_create_srq_mbox_in **in, int buf_size,
+ int *inlen)
+{
+ int err;
+ int i;
+ struct mlx5_wqe_srq_next_seg *next;
+ int page_shift;
+ void *xsrqc;
+
+ err = mlx5_db_alloc(dev->mdev, &srq->db);
+ if (err) {
+ mlx5_ib_warn(dev, "alloc dbell rec failed\n");
+ return err;
+ }
+
+ if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+ mlx5_ib_err(dev, "buf alloc failed\n");
+ err = -ENOMEM;
+ goto err_db;
+ }
+ page_shift = srq->buf.page_shift;
+
+ srq->head = 0;
+ srq->tail = srq->msrq.max - 1;
+ srq->wqe_ctr = 0;
+
+ for (i = 0; i < srq->msrq.max; i++) {
+ next = get_wqe(srq, i);
+ next->next_wqe_index =
+ cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+ }
+
+ *inlen = sizeof(**in) + sizeof(*(*in)->pas) * srq->buf.npages;
+ *in = mlx5_vzalloc(*inlen);
+ if (!*in) {
+ mlx5_ib_err(dev, "failed allocate mbox\n");
+ err = -ENOMEM;
+ goto err_buf;
+ }
+ mlx5_fill_page_array(&srq->buf, (*in)->pas);
+
+ srq->wrid = kmalloc(srq->msrq.max * sizeof(u64), GFP_KERNEL);
+ if (!srq->wrid) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+ srq->wq_sig = !!srq_signature;
+
+ (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version)) {
+ xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
+ xrc_srq_context_entry);
+ /* 0xffffff means we ask to work with cqe version 0 */
+ MLX5_SET(xrc_srqc, xsrqc, user_index, 0xffffff);
+ }
+
+ return 0;
+
+err_in:
+ kvfree(*in);
+
+err_buf:
+ mlx5_buf_free(dev->mdev, &srq->buf);
+
+err_db:
+ mlx5_db_free(dev->mdev, &srq->db);
+ return err;
+}
+
+static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
+{
+ mlx5_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
+ ib_umem_release(srq->umem);
+}
+
+
+static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
+{
+ kfree(srq->wrid);
+ mlx5_buf_free(dev->mdev, &srq->buf);
+ mlx5_db_free(dev->mdev, &srq->db);
+}
+
+struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_srq *srq;
+ int desc_size;
+ int buf_size;
+ int err;
+ struct mlx5_create_srq_mbox_in *uninitialized_var(in);
+ int uninitialized_var(inlen);
+ int is_xrc;
+ u32 flgs, xrcdn;
+ __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+
+ /* Sanity check SRQ size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes) {
+ mlx5_ib_warn(dev, "max_wr %d, cap %d\n",
+ init_attr->attr.max_wr,
+ max_srq_wqes);
+ return ERR_PTR(-EINVAL);
+ }
+
+ srq = kmalloc(sizeof(*srq), GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+ srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->msrq.max_gs = init_attr->attr.max_sge;
+
+ desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
+ srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+ desc_size = roundup_pow_of_two(desc_size);
+ desc_size = max_t(int, 32, desc_size);
+ srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ srq->msrq.wqe_shift = ilog2(desc_size);
+ buf_size = srq->msrq.max * desc_size;
+ mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
+ desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
+ srq->msrq.max_avail_gather);
+
+ if (pd->uobject)
+ err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+ else
+ err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+
+ if (err) {
+ mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
+ pd->uobject ? "user" : "kernel", err);
+ goto err_srq;
+ }
+
+ is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+ in->ctx.state_log_sz = ilog2(srq->msrq.max);
+ flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
+ xrcdn = 0;
+ if (is_xrc) {
+ xrcdn = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
+ in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(init_attr->ext.xrc.cq)->mcq.cqn);
+ } else if (init_attr->srq_type == IB_SRQT_BASIC) {
+ xrcdn = to_mxrcd(dev->devr.x0)->xrcdn;
+ in->ctx.pgoff_cqn |= cpu_to_be32(to_mcq(dev->devr.c0)->mcq.cqn);
+ }
+
+ in->ctx.flags_xrcd = cpu_to_be32((flgs & 0xFF000000) | (xrcdn & 0xFFFFFF));
+
+ in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
+ in->ctx.db_record = cpu_to_be64(srq->db.dma);
+ err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen, is_xrc);
+ kvfree(in);
+ if (err) {
+ mlx5_ib_warn(dev, "create SRQ failed, err %d\n", err);
+ goto err_usr_kern_srq;
+ }
+
+ mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
+
+ srq->msrq.event = mlx5_ib_srq_event;
+ srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
+
+ if (pd->uobject)
+ if (ib_copy_to_udata(udata, &srq->msrq.srqn, sizeof(__u32))) {
+ mlx5_ib_err(dev, "copy to user failed\n");
+ err = -EFAULT;
+ goto err_core;
+ }
+
+ init_attr->attr.max_wr = srq->msrq.max - 1;
+
+ return &srq->ibsrq;
+
+err_core:
+ mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
+ if (pd->uobject)
+ destroy_srq_user(pd, srq);
+ else
+ destroy_srq_kernel(dev, srq);
+
+err_srq:
+ kfree(srq);
+
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ibsrq);
+ int ret;
+
+ /* We don't support resizing SRQs yet */
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit >= srq->msrq.max)
+ return -EINVAL;
+
+ mutex_lock(&srq->mutex);
+ ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
+ mutex_unlock(&srq->mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ibsrq);
+ int ret;
+ struct mlx5_query_srq_mbox_out *out;
+
+ out = kzalloc(sizeof(*out), GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
+ if (ret)
+ goto out_box;
+
+ srq_attr->srq_limit = be16_to_cpu(out->ctx.lwm);
+ srq_attr->max_wr = srq->msrq.max - 1;
+ srq_attr->max_sge = srq->msrq.max_gs;
+
+out_box:
+ kfree(out);
+ return ret;
+}
+
+int mlx5_ib_destroy_srq(struct ib_srq *srq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(srq->device);
+ struct mlx5_ib_srq *msrq = to_msrq(srq);
+
+ mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
+
+ if (srq->uobject) {
+ mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
+ ib_umem_release(msrq->umem);
+ } else {
+ destroy_srq_kernel(dev, msrq);
+ }
+
+ kfree(srq);
+ return 0;
+}
+
+void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
+{
+ struct mlx5_wqe_srq_next_seg *next;
+
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ next = get_wqe(srq, srq->tail);
+ next->next_wqe_index = cpu_to_be16(wqe_index);
+ srq->tail = wqe_index;
+
+ spin_unlock(&srq->lock);
+}
+
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_srq *srq = to_msrq(ibsrq);
+ struct mlx5_wqe_srq_next_seg *next;
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ nreq = 0;
+ goto out;
+ }
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely(srq->head == srq->tail)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ srq->wrid[srq->head] = wr->wr_id;
+
+ next = get_wqe(srq, srq->head);
+ srq->head = be16_to_cpu(next->next_wqe_index);
+ scat = (struct mlx5_wqe_data_seg *)(next + 1);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
+ scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
+ scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
+ }
+
+ if (i < srq->msrq.max_avail_gather) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+ }
+
+ if (likely(nreq)) {
+ srq->wqe_ctr += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *srq->db.db = cpu_to_be32(srq->wqe_ctr);
+ }
+out:
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return err;
+}
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/mlx5_ib_srq.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
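
Note (illustrative only, not part of the committed sources): create_srq_kernel() above chains the SRQ work-queue entries into a free list by storing the index of the next entry in each WQE. Because msrq.max is rounded up to a power of two, "(i + 1) & (max - 1)" wraps the last entry back to index 0; the driver additionally byte-swaps the stored index to big endian with cpu_to_be16(). A minimal stand-alone version of that ring initialization, with init_wqe_ring() as an invented name:

	#include <stdint.h>

	static void
	init_wqe_ring(uint16_t *next_index, unsigned int max /* power of two */)
	{
		unsigned int i;

		for (i = 0; i < max; i++)
			next_index[i] = (i + 1) & (max - 1);	/* last entry wraps to 0 */
	}
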
Added: trunk/sys/dev/mlx5/mlx5_ib/user.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ib/user.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ib/user.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,319 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ib/user.h 322810 2017-08-23 12:09:37Z hselasky $
+ */
+
+#ifndef MLX5_IB_USER_H
+#define MLX5_IB_USER_H
+
+#include <linux/types.h>
+
+enum {
+ MLX5_QP_FLAG_SIGNATURE = 1 << 0,
+};
+
+enum {
+ MLX5_SRQ_FLAG_SIGNATURE = 1 << 0,
+};
+
+enum {
+ MLX5_WQ_FLAG_SIGNATURE = 1 << 0,
+};
+
+
+/* Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define MLX5_IB_UVERBS_ABI_VERSION 1
+
+/* Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct mlx5_ib_alloc_ucontext_req {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+};
+
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
+struct mlx5_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 bf_reg_size;
+ __u32 tot_uuars;
+ __u32 cache_line_size;
+ __u16 max_sq_desc_sz;
+ __u16 max_rq_desc_sz;
+ __u32 max_send_wqebb;
+ __u32 max_recv_wr;
+ __u32 max_srq_recv_wr;
+ __u16 num_ports;
+ __u16 reserved;
+ __u32 max_desc_sz_sq_dc;
+ __u32 atomic_arg_sizes_dc;
+ __u32 reserved1;
+ __u32 flags;
+ __u32 reserved2[5];
+};
+
+enum mlx5_exp_ib_alloc_ucontext_data_resp_mask {
+ MLX5_EXP_ALLOC_CTX_RESP_MASK_CQE_COMP_MAX_NUM = 1 << 0,
+ MLX5_EXP_ALLOC_CTX_RESP_MASK_CQE_VERSION = 1 << 1,
+ MLX5_EXP_ALLOC_CTX_RESP_MASK_RROCE_UDP_SPORT_MIN = 1 << 2,
+ MLX5_EXP_ALLOC_CTX_RESP_MASK_RROCE_UDP_SPORT_MAX = 1 << 3,
+ MLX5_EXP_ALLOC_CTX_RESP_MASK_HCA_CORE_CLOCK_OFFSET = 1 << 4,
+};
+
+struct mlx5_exp_ib_alloc_ucontext_data_resp {
+	__u32	comp_mask; /* use mlx5_exp_ib_alloc_ucontext_data_resp_mask */
+ __u16 cqe_comp_max_num;
+ __u8 cqe_version;
+ __u8 reserved;
+ __u16 rroce_udp_sport_min;
+ __u16 rroce_udp_sport_max;
+ __u32 hca_core_clock_offset;
+};
+
+struct mlx5_exp_ib_alloc_ucontext_resp {
+ __u32 qp_tab_size;
+ __u32 bf_reg_size;
+ __u32 tot_uuars;
+ __u32 cache_line_size;
+ __u16 max_sq_desc_sz;
+ __u16 max_rq_desc_sz;
+ __u32 max_send_wqebb;
+ __u32 max_recv_wr;
+ __u32 max_srq_recv_wr;
+ __u16 num_ports;
+ __u16 reserved;
+ __u32 max_desc_sz_sq_dc;
+ __u32 atomic_arg_sizes_dc;
+ __u32 reserved1;
+ __u32 flags;
+ __u32 reserved2[5];
+ /* Some more reserved fields for
+ * future growth of mlx5_ib_alloc_ucontext_resp */
+ __u64 prefix_reserved[8];
+ struct mlx5_exp_ib_alloc_ucontext_data_resp exp_data;
+};
+
+struct mlx5_ib_alloc_pd_resp {
+ __u32 pdn;
+};
+
+struct mlx5_ib_create_cq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 cqe_size;
+ __u32 reserved; /* explicit padding (optional on i386) */
+};
+
+enum mlx5_exp_ib_create_cq_mask {
+ MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_EN = 1 << 0,
+ MLX5_EXP_CREATE_CQ_MASK_CQE_COMP_RECV_TYPE = 1 << 1,
+ MLX5_EXP_CREATE_CQ_MASK_RESERVED = 1 << 2,
+};
+
+enum mlx5_exp_cqe_comp_recv_type {
+ MLX5_IB_CQE_FORMAT_HASH,
+ MLX5_IB_CQE_FORMAT_CSUM,
+};
+
+struct mlx5_exp_ib_create_cq_data {
+	__u32	comp_mask; /* use mlx5_exp_ib_create_cq_mask */
+ __u8 cqe_comp_en;
+ __u8 cqe_comp_recv_type; /* use mlx5_exp_cqe_comp_recv_type */
+ __u16 reserved;
+};
+
+struct mlx5_exp_ib_create_cq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 cqe_size;
+ __u32 reserved; /* explicit padding (optional on i386) */
+
+ /* Some more reserved fields for future growth of mlx5_ib_create_cq */
+ __u64 prefix_reserved[8];
+
+ /* sizeof prefix aligned with mlx5_ib_create_cq */
+ __u64 size_of_prefix;
+ struct mlx5_exp_ib_create_cq_data exp_data;
+};
+
+struct mlx5_ib_create_cq_resp {
+ __u32 cqn;
+ __u32 reserved;
+};
+
+struct mlx5_ib_resize_cq {
+ __u64 buf_addr;
+ __u16 cqe_size;
+ __u16 reserved0;
+ __u32 reserved1;
+};
+
+struct mlx5_ib_create_srq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 flags;
+ __u32 reserved; /* explicit padding (optional on i386) */
+ __u32 uidx;
+ __u32 reserved1;
+};
+
+struct mlx5_ib_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
+struct mlx5_ib_create_qp {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 sq_wqe_count;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 flags;
+};
+
+enum mlx5_exp_ib_create_qp_mask {
+ MLX5_EXP_CREATE_QP_MASK_UIDX = 1 << 0,
+ MLX5_EXP_CREATE_QP_MASK_SQ_BUFF_ADD = 1 << 1,
+ MLX5_EXP_CREATE_QP_MASK_WC_UAR_IDX = 1 << 2,
+ MLX5_EXP_CREATE_QP_MASK_FLAGS_IDX = 1 << 3,
+ MLX5_EXP_CREATE_QP_MASK_RESERVED = 1 << 4,
+};
+
+enum mlx5_exp_create_qp_flags {
+ MLX5_EXP_CREATE_QP_MULTI_PACKET_WQE_REQ_FLAG = 1 << 0,
+};
+
+enum mlx5_exp_drv_create_qp_uar_idx {
+ MLX5_EXP_CREATE_QP_DB_ONLY_UUAR = -1
+};
+
+struct mlx5_exp_ib_create_qp_data {
+ __u32 comp_mask; /* use mlx5_exp_ib_create_qp_mask */
+ __u32 uidx;
+ __u64 sq_buf_addr;
+ __u32 wc_uar_index;
+ __u32 flags; /* use mlx5_exp_create_qp_flags */
+};
+
+struct mlx5_exp_ib_create_qp {
+ /* To allow casting to mlx5_ib_create_qp the prefix is the same as
+ * struct mlx5_ib_create_qp prefix
+ */
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 sq_wqe_count;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 flags;
+
+ /* Some more reserved fields for future growth of mlx5_ib_create_qp */
+ __u64 prefix_reserved[8];
+
+ /* sizeof prefix aligned with mlx5_ib_create_qp */
+ __u64 size_of_prefix;
+
+ /* Experimental data
+ * Add new experimental data only inside the exp struct
+ */
+ struct mlx5_exp_ib_create_qp_data exp;
+};
+
+enum {
+ MLX5_EXP_INVALID_UUAR = -1,
+};
+
+struct mlx5_ib_create_qp_resp {
+ __u32 uuar_index;
+ __u32 rsvd;
+};
+
+enum mlx5_exp_ib_create_qp_resp_mask {
+ MLX5_EXP_CREATE_QP_RESP_MASK_FLAGS_IDX = 1 << 0,
+ MLX5_EXP_CREATE_QP_RESP_MASK_RESERVED = 1 << 1,
+};
+
+enum mlx5_exp_create_qp_resp_flags {
+ MLX5_EXP_CREATE_QP_RESP_MULTI_PACKET_WQE_FLAG = 1 << 0,
+};
+
+struct mlx5_exp_ib_create_qp_resp_data {
+ __u32 comp_mask; /* use mlx5_exp_ib_create_qp_resp_mask */
+ __u32 flags; /* use mlx5_exp_create_qp_resp_flags */
+};
+
+struct mlx5_exp_ib_create_qp_resp {
+ __u32 uuar_index;
+ __u32 rsvd;
+
+ /* Some more reserved fields for future growth of mlx5_ib_create_qp_resp */
+ __u64 prefix_reserved[8];
+
+ /* sizeof prefix aligned with mlx5_ib_create_qp_resp */
+ __u64 size_of_prefix;
+
+ /* Experimental data
+ * Add new experimental data only inside the exp struct
+ */
+ struct mlx5_exp_ib_create_qp_resp_data exp;
+};
+
+struct mlx5_ib_create_dct {
+ __u32 uidx;
+ __u32 reserved;
+};
+
+struct mlx5_ib_arm_dct {
+ __u64 reserved0;
+ __u64 reserved1;
+};
+
+struct mlx5_ib_arm_dct_resp {
+ __u64 reserved0;
+ __u64 reserved1;
+};
+
+struct mlx5_ib_create_wq {
+ __u64 buf_addr;
+ __u64 db_addr;
+ __u32 rq_wqe_count;
+ __u32 rq_wqe_shift;
+ __u32 user_index;
+ __u32 flags;
+};
+
+#endif /* MLX5_IB_USER_H */
Property changes on: trunk/sys/dev/mlx5/mlx5_ib/user.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
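
Note (illustrative only, not part of the committed sources): the ABI structs in user.h above deliberately avoid pointer members so their layout is identical for 32-bit and 64-bit userspace; buffer addresses such as buf_addr and db_addr travel as __u64 and explicit reserved fields keep sizeof() stable. A hypothetical sketch of that convention, assuming <linux/types.h> for __u64/__u32 and <stdint.h> for uintptr_t (example_create_cmd and example_fill are invented names, and the cqe_size value is arbitrary):

	struct example_create_cmd {
		__u64	buf_addr;	/* userspace buffer address, never a pointer type */
		__u32	cqe_size;
		__u32	reserved;	/* explicit padding keeps the layout fixed */
	};

	static void
	example_fill(struct example_create_cmd *cmd, void *buf)
	{
		cmd->buf_addr = (__u64)(uintptr_t)buf;
		cmd->cqe_size = 64;
		cmd->reserved = 0;
	}
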
Added: trunk/sys/dev/mlx5/mlx5_ifc.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_ifc.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_ifc.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,9555 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_ifc.h 321993 2017-08-03 13:52:39Z hselasky $
+ */
+
+#ifndef MLX5_IFC_H
+#define MLX5_IFC_H
+
+enum {
+ MLX5_EVENT_TYPE_COMP = 0x0,
+ MLX5_EVENT_TYPE_PATH_MIG = 0x1,
+ MLX5_EVENT_TYPE_COMM_EST = 0x2,
+ MLX5_EVENT_TYPE_SQ_DRAINED = 0x3,
+ MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
+ MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,
+ MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
+ MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,
+ MLX5_EVENT_TYPE_CQ_ERROR = 0x4,
+ MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x5,
+ MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x7,
+ MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
+ MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
+ MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
+ MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
+ MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x8,
+ MLX5_EVENT_TYPE_PORT_CHANGE = 0x9,
+ MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
+ MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT = 0x16,
+ MLX5_EVENT_TYPE_CODING_TEMP_WARNING_EVENT = 0x17,
+ MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT = 0x1e,
+ MLX5_EVENT_TYPE_CODING_PPS_EVENT = 0x25,
+ MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT = 0x22,
+ MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
+ MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
+ MLX5_EVENT_TYPE_DROPPED_PACKET_LOGGED_EVENT = 0x1f,
+ MLX5_EVENT_TYPE_CMD = 0xa,
+ MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
+ MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd
+};
+
+enum {
+ MLX5_MODIFY_TIR_BITMASK_LRO = 0x0,
+ MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1,
+ MLX5_MODIFY_TIR_BITMASK_HASH = 0x2,
+ MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3,
+ MLX5_MODIFY_TIR_BITMASK_SELF_LB_EN = 0x4
+};
+
+enum {
+ MLX5_MODIFY_RQT_BITMASK_RQN_LIST = 0x1,
+};
+
+enum {
+ MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
+ MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
+ MLX5_CMD_OP_INIT_HCA = 0x102,
+ MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
+ MLX5_CMD_OP_ENABLE_HCA = 0x104,
+ MLX5_CMD_OP_DISABLE_HCA = 0x105,
+ MLX5_CMD_OP_QUERY_PAGES = 0x107,
+ MLX5_CMD_OP_MANAGE_PAGES = 0x108,
+ MLX5_CMD_OP_SET_HCA_CAP = 0x109,
+ MLX5_CMD_OP_QUERY_ISSI = 0x10a,
+ MLX5_CMD_OP_SET_ISSI = 0x10b,
+ MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d,
+ MLX5_CMD_OP_QUERY_OTHER_HCA_CAP = 0x10e,
+ MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP = 0x10f,
+ MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_QUERY_MKEY = 0x201,
+ MLX5_CMD_OP_DESTROY_MKEY = 0x202,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
+ MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
+ MLX5_CMD_OP_CREATE_EQ = 0x301,
+ MLX5_CMD_OP_DESTROY_EQ = 0x302,
+ MLX5_CMD_OP_QUERY_EQ = 0x303,
+ MLX5_CMD_OP_GEN_EQE = 0x304,
+ MLX5_CMD_OP_CREATE_CQ = 0x400,
+ MLX5_CMD_OP_DESTROY_CQ = 0x401,
+ MLX5_CMD_OP_QUERY_CQ = 0x402,
+ MLX5_CMD_OP_MODIFY_CQ = 0x403,
+ MLX5_CMD_OP_CREATE_QP = 0x500,
+ MLX5_CMD_OP_DESTROY_QP = 0x501,
+ MLX5_CMD_OP_RST2INIT_QP = 0x502,
+ MLX5_CMD_OP_INIT2RTR_QP = 0x503,
+ MLX5_CMD_OP_RTR2RTS_QP = 0x504,
+ MLX5_CMD_OP_RTS2RTS_QP = 0x505,
+ MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
+ MLX5_CMD_OP_2ERR_QP = 0x507,
+ MLX5_CMD_OP_2RST_QP = 0x50a,
+ MLX5_CMD_OP_QUERY_QP = 0x50b,
+ MLX5_CMD_OP_SQD_RTS_QP = 0x50c,
+ MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
+ MLX5_CMD_OP_CREATE_PSV = 0x600,
+ MLX5_CMD_OP_DESTROY_PSV = 0x601,
+ MLX5_CMD_OP_CREATE_SRQ = 0x700,
+ MLX5_CMD_OP_DESTROY_SRQ = 0x701,
+ MLX5_CMD_OP_QUERY_SRQ = 0x702,
+ MLX5_CMD_OP_ARM_RQ = 0x703,
+ MLX5_CMD_OP_CREATE_XRC_SRQ = 0x705,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ = 0x706,
+ MLX5_CMD_OP_QUERY_XRC_SRQ = 0x707,
+ MLX5_CMD_OP_ARM_XRC_SRQ = 0x708,
+ MLX5_CMD_OP_CREATE_DCT = 0x710,
+ MLX5_CMD_OP_DESTROY_DCT = 0x711,
+ MLX5_CMD_OP_DRAIN_DCT = 0x712,
+ MLX5_CMD_OP_QUERY_DCT = 0x713,
+ MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
+ MLX5_CMD_OP_SET_DC_CNAK_TRACE = 0x715,
+ MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
+ MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
+ MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
+ MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
+ MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
+ MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
+ MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT = 0x762,
+ MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT = 0x763,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_GID = 0x764,
+ MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY = 0x765,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
+ MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
+ MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
+ MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT = 0x783,
+ MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT = 0x784,
+ MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT = 0x785,
+ MLX5_CMD_OP_CREATE_QOS_PARA_VPORT = 0x786,
+ MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT = 0x787,
+ MLX5_CMD_OP_ALLOC_PD = 0x800,
+ MLX5_CMD_OP_DEALLOC_PD = 0x801,
+ MLX5_CMD_OP_ALLOC_UAR = 0x802,
+ MLX5_CMD_OP_DEALLOC_UAR = 0x803,
+ MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
+ MLX5_CMD_OP_ACCESS_REG = 0x805,
+ MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
+ MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
+ MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
+ MLX5_CMD_OP_MAD_IFC = 0x50d,
+ MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
+ MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c,
+ MLX5_CMD_OP_NOP = 0x80d,
+ MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
+ MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
+ MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
+ MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
+ MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
+ MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN = 0x817,
+ MLX5_CMD_OP_SET_DIAGNOSTICS = 0x820,
+ MLX5_CMD_OP_QUERY_DIAGNOSTICS = 0x821,
+ MLX5_CMD_OP_QUERY_CONG_STATUS = 0x822,
+ MLX5_CMD_OP_MODIFY_CONG_STATUS = 0x823,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x824,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x825,
+ MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x826,
+ MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT = 0x827,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT = 0x828,
+ MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829,
+ MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b,
+ MLX5_CMD_OP_SET_WOL_ROL = 0x830,
+ MLX5_CMD_OP_QUERY_WOL_ROL = 0x831,
+ MLX5_CMD_OP_CREATE_LAG = 0x840,
+ MLX5_CMD_OP_MODIFY_LAG = 0x841,
+ MLX5_CMD_OP_QUERY_LAG = 0x842,
+ MLX5_CMD_OP_DESTROY_LAG = 0x843,
+ MLX5_CMD_OP_CREATE_VPORT_LAG = 0x844,
+ MLX5_CMD_OP_DESTROY_VPORT_LAG = 0x845,
+ MLX5_CMD_OP_CREATE_TIR = 0x900,
+ MLX5_CMD_OP_MODIFY_TIR = 0x901,
+ MLX5_CMD_OP_DESTROY_TIR = 0x902,
+ MLX5_CMD_OP_QUERY_TIR = 0x903,
+ MLX5_CMD_OP_CREATE_SQ = 0x904,
+ MLX5_CMD_OP_MODIFY_SQ = 0x905,
+ MLX5_CMD_OP_DESTROY_SQ = 0x906,
+ MLX5_CMD_OP_QUERY_SQ = 0x907,
+ MLX5_CMD_OP_CREATE_RQ = 0x908,
+ MLX5_CMD_OP_MODIFY_RQ = 0x909,
+ MLX5_CMD_OP_DESTROY_RQ = 0x90a,
+ MLX5_CMD_OP_QUERY_RQ = 0x90b,
+ MLX5_CMD_OP_CREATE_RMP = 0x90c,
+ MLX5_CMD_OP_MODIFY_RMP = 0x90d,
+ MLX5_CMD_OP_DESTROY_RMP = 0x90e,
+ MLX5_CMD_OP_QUERY_RMP = 0x90f,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS = 0x910,
+ MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS = 0x911,
+ MLX5_CMD_OP_CREATE_TIS = 0x912,
+ MLX5_CMD_OP_MODIFY_TIS = 0x913,
+ MLX5_CMD_OP_DESTROY_TIS = 0x914,
+ MLX5_CMD_OP_QUERY_TIS = 0x915,
+ MLX5_CMD_OP_CREATE_RQT = 0x916,
+ MLX5_CMD_OP_MODIFY_RQT = 0x917,
+ MLX5_CMD_OP_DESTROY_RQT = 0x918,
+ MLX5_CMD_OP_QUERY_RQT = 0x919,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT = 0x92f,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE = 0x930,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE = 0x931,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE = 0x932,
+ MLX5_CMD_OP_CREATE_FLOW_GROUP = 0x933,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP = 0x934,
+ MLX5_CMD_OP_QUERY_FLOW_GROUP = 0x935,
+ MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x936,
+ MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x937,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY = 0x938,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b,
+ MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
+ MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
+ MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+};
+
+enum {
+ MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_QUERY_FW_INFO = 0x8007,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_CAPABILITY = 0x8400,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_ACCESS_REGISTER = 0x9001,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_VIRTUAL_MAC = 0x9003,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_SET_VIRTUAL_MAC = 0x9004,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_QUERY_WOL_ROL = 0x9005,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_SET_WOL_ROL = 0x9006,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_INIT = 0x9007,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_QUERY_HEADER_STATUS = 0x9008,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_QUERY_ETOC_STATUS = 0x9009,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_OCBB_SET_EVENT = 0x900a,
+ MLX5_ICMD_CMDS_OPCODE_ICMD_OPCODE_INIT_OCSD = 0xf004
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+ u8 outer_dmac[0x1];
+ u8 outer_smac[0x1];
+ u8 outer_ether_type[0x1];
+ u8 reserved_0[0x1];
+ u8 outer_first_prio[0x1];
+ u8 outer_first_cfi[0x1];
+ u8 outer_first_vid[0x1];
+ u8 reserved_1[0x1];
+ u8 outer_second_prio[0x1];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0x1];
+ u8 outer_ipv6_flow_label[0x1];
+ u8 outer_sip[0x1];
+ u8 outer_dip[0x1];
+ u8 outer_frag[0x1];
+ u8 outer_ip_protocol[0x1];
+ u8 outer_ip_ecn[0x1];
+ u8 outer_ip_dscp[0x1];
+ u8 outer_udp_sport[0x1];
+ u8 outer_udp_dport[0x1];
+ u8 outer_tcp_sport[0x1];
+ u8 outer_tcp_dport[0x1];
+ u8 outer_tcp_flags[0x1];
+ u8 outer_gre_protocol[0x1];
+ u8 outer_gre_key[0x1];
+ u8 outer_vxlan_vni[0x1];
+ u8 outer_geneve_vni[0x1];
+ u8 outer_geneve_oam[0x1];
+ u8 outer_geneve_protocol_type[0x1];
+ u8 outer_geneve_opt_len[0x1];
+ u8 reserved_2[0x1];
+ u8 source_eswitch_port[0x1];
+
+ u8 inner_dmac[0x1];
+ u8 inner_smac[0x1];
+ u8 inner_ether_type[0x1];
+ u8 reserved_3[0x1];
+ u8 inner_first_prio[0x1];
+ u8 inner_first_cfi[0x1];
+ u8 inner_first_vid[0x1];
+ u8 reserved_4[0x1];
+ u8 inner_second_prio[0x1];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0x1];
+ u8 inner_ipv6_flow_label[0x1];
+ u8 inner_sip[0x1];
+ u8 inner_dip[0x1];
+ u8 inner_frag[0x1];
+ u8 inner_ip_protocol[0x1];
+ u8 inner_ip_ecn[0x1];
+ u8 inner_ip_dscp[0x1];
+ u8 inner_udp_sport[0x1];
+ u8 inner_udp_dport[0x1];
+ u8 inner_tcp_sport[0x1];
+ u8 inner_tcp_dport[0x1];
+ u8 inner_tcp_flags[0x1];
+ u8 reserved_5[0x9];
+
+ u8 reserved_6[0x1a];
+ u8 bth_dst_qp[0x1];
+ u8 reserved_7[0x4];
+ u8 source_sqn[0x1];
+
+ u8 reserved_8[0x20];
+};
+
+struct mlx5_ifc_eth_discard_cntrs_grp_bits {
+ u8 ingress_general_high[0x20];
+
+ u8 ingress_general_low[0x20];
+
+ u8 ingress_policy_engine_high[0x20];
+
+ u8 ingress_policy_engine_low[0x20];
+
+ u8 ingress_vlan_membership_high[0x20];
+
+ u8 ingress_vlan_membership_low[0x20];
+
+ u8 ingress_tag_frame_type_high[0x20];
+
+ u8 ingress_tag_frame_type_low[0x20];
+
+ u8 egress_vlan_membership_high[0x20];
+
+ u8 egress_vlan_membership_low[0x20];
+
+ u8 loopback_filter_high[0x20];
+
+ u8 loopback_filter_low[0x20];
+
+ u8 egress_general_high[0x20];
+
+ u8 egress_general_low[0x20];
+
+ u8 reserved_at_1c0[0x40];
+
+ u8 egress_hoq_high[0x20];
+
+ u8 egress_hoq_low[0x20];
+
+ u8 port_isolation_high[0x20];
+
+ u8 port_isolation_low[0x20];
+
+ u8 egress_policy_engine_high[0x20];
+
+ u8 egress_policy_engine_low[0x20];
+
+ u8 ingress_tx_link_down_high[0x20];
+
+ u8 ingress_tx_link_down_low[0x20];
+
+ u8 egress_stp_filter_high[0x20];
+
+ u8 egress_stp_filter_low[0x20];
+
+ u8 egress_hoq_stall_high[0x20];
+
+ u8 egress_hoq_stall_low[0x20];
+
+ u8 reserved_at_340[0x440];
+};
+struct mlx5_ifc_flow_table_prop_layout_bits {
+ u8 ft_support[0x1];
+ u8 flow_tag[0x1];
+ u8 flow_counter[0x1];
+ u8 flow_modify_en[0x1];
+ u8 modify_root[0x1];
+ u8 reserved_0[0x1b];
+
+ u8 reserved_1[0x2];
+ u8 log_max_ft_size[0x6];
+ u8 reserved_2[0x10];
+ u8 max_ft_level[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x18];
+ u8 log_max_ft_num[0x8];
+
+ u8 reserved_5[0x10];
+ u8 log_max_flow_counter[0x8];
+ u8 log_max_destination[0x8];
+
+ u8 reserved_6[0x18];
+ u8 log_max_flow[0x8];
+
+ u8 reserved_7[0x40];
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+ struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+ u8 send[0x1];
+ u8 receive[0x1];
+ u8 write[0x1];
+ u8 read[0x1];
+ u8 atomic[0x1];
+ u8 srq_receive[0x1];
+ u8 reserved_0[0x1a];
+};
+
+struct mlx5_ifc_flow_counter_list_bits {
+ u8 reserved_0[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_1[0x20];
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 0x2,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_QP = 0x3,
+};
+
+struct mlx5_ifc_dest_format_struct_bits {
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
+
+ u8 reserved_0[0x20];
+};
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 ethertype[0x10];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 first_prio[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vid[0xc];
+
+ u8 ip_protocol[0x8];
+ u8 ip_dscp[0x6];
+ u8 ip_ecn[0x2];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
+ u8 frag[0x1];
+ u8 reserved_1[0x4];
+ u8 tcp_flags[0x9];
+
+ u8 tcp_sport[0x10];
+ u8 tcp_dport[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 udp_sport[0x10];
+ u8 udp_dport[0x10];
+
+ u8 src_ip[4][0x20];
+
+ u8 dst_ip[4][0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+ u8 reserved_0[0x8];
+ u8 source_sqn[0x18];
+
+ u8 reserved_1[0x10];
+ u8 source_port[0x10];
+
+ u8 outer_second_prio[0x3];
+ u8 outer_second_cfi[0x1];
+ u8 outer_second_vid[0xc];
+ u8 inner_second_prio[0x3];
+ u8 inner_second_cfi[0x1];
+ u8 inner_second_vid[0xc];
+
+ u8 outer_second_vlan_tag[0x1];
+ u8 inner_second_vlan_tag[0x1];
+ u8 reserved_2[0xe];
+ u8 gre_protocol[0x10];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 vxlan_vni[0x18];
+ u8 reserved_3[0x8];
+
+ u8 geneve_vni[0x18];
+ u8 reserved4[0x7];
+ u8 geneve_oam[0x1];
+
+ u8 reserved_5[0xc];
+ u8 outer_ipv6_flow_label[0x14];
+
+ u8 reserved_6[0xc];
+ u8 inner_ipv6_flow_label[0x14];
+
+ u8 reserved_7[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+
+ u8 reserved_8[0x8];
+ u8 bth_dst_qp[0x18];
+
+ u8 reserved_9[0xa0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+ u8 pa_h[0x20];
+
+ u8 pa_l[0x14];
+ u8 reserved_0[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+ u8 hi[0x20];
+
+ u8 lo[0x20];
+};
+
+struct mlx5_ifc_application_prio_entry_bits {
+ u8 reserved_0[0x8];
+ u8 priority[0x3];
+ u8 reserved_1[0x2];
+ u8 sel[0x3];
+ u8 protocol_id[0x10];
+};
+
+struct mlx5_ifc_nodnic_ring_doorbell_bits {
+ u8 reserved_0[0x8];
+ u8 ring_pi[0x10];
+ u8 reserved_1[0x8];
+};
+
+enum {
+ MLX5_ADS_STAT_RATE_NO_LIMIT = 0x0,
+ MLX5_ADS_STAT_RATE_2_5GBPS = 0x7,
+ MLX5_ADS_STAT_RATE_10GBPS = 0x8,
+ MLX5_ADS_STAT_RATE_30GBPS = 0x9,
+ MLX5_ADS_STAT_RATE_5GBPS = 0xa,
+ MLX5_ADS_STAT_RATE_20GBPS = 0xb,
+ MLX5_ADS_STAT_RATE_40GBPS = 0xc,
+ MLX5_ADS_STAT_RATE_60GBPS = 0xd,
+ MLX5_ADS_STAT_RATE_80GBPS = 0xe,
+ MLX5_ADS_STAT_RATE_120GBPS = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+ u8 fl[0x1];
+ u8 free_ar[0x1];
+ u8 reserved_0[0xe];
+ u8 pkey_index[0x10];
+
+ u8 reserved_1[0x8];
+ u8 grh[0x1];
+ u8 mlid[0x7];
+ u8 rlid[0x10];
+
+ u8 ack_timeout[0x5];
+ u8 reserved_2[0x3];
+ u8 src_addr_index[0x8];
+ u8 log_rtm[0x4];
+ u8 stat_rate[0x4];
+ u8 hop_limit[0x8];
+
+ u8 reserved_3[0x4];
+ u8 tclass[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+
+ u8 reserved_4[0x4];
+ u8 f_dscp[0x1];
+ u8 f_ecn[0x1];
+ u8 reserved_5[0x1];
+ u8 f_eth_prio[0x1];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+ u8 udp_sport[0x10];
+
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 sl[0x4];
+ u8 port[0x8];
+ u8 rmac_47_32[0x10];
+
+ u8 rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_diagnostic_counter_cap_bits {
+ u8 sync[0x1];
+ u8 reserved_0[0xf];
+ u8 counter_id[0x10];
+};
+
+struct mlx5_ifc_debug_cap_bits {
+ u8 reserved_0[0x18];
+ u8 log_max_samples[0x8];
+
+ u8 single[0x1];
+ u8 repetitive[0x1];
+ u8 health_mon_rx_activity[0x1];
+ u8 reserved_1[0x15];
+ u8 log_min_sample_period[0x8];
+
+ u8 reserved_2[0x1c0];
+
+ struct mlx5_ifc_diagnostic_counter_cap_bits diagnostic_counter[0x1f0];
+};
+
+struct mlx5_ifc_qos_cap_bits {
+ u8 packet_pacing[0x1];
+ u8 esw_scheduling[0x1];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 hll[0x1];
+ u8 packet_pacing_burst_bound[0x1];
+ u8 reserved_at_6[0x1a];
+
+ u8 reserved_at_20[0x20];
+
+ u8 packet_pacing_max_rate[0x20];
+
+ u8 packet_pacing_min_rate[0x20];
+
+ u8 reserved_at_80[0x10];
+ u8 packet_pacing_rate_table_size[0x10];
+
+ u8 esw_element_type[0x10];
+ u8 esw_tsar_type[0x10];
+
+ u8 reserved_at_c0[0x10];
+ u8 max_qos_para_vport[0x10];
+
+ u8 max_tsar_bw_share[0x20];
+
+ u8 reserved_at_100[0x700];
+};
+
+struct mlx5_ifc_snapshot_cap_bits {
+ u8 reserved_0[0x1d];
+ u8 suspend_qp_uc[0x1];
+ u8 suspend_qp_ud[0x1];
+ u8 suspend_qp_rc[0x1];
+
+ u8 reserved_1[0x1c];
+ u8 restore_pd[0x1];
+ u8 restore_uar[0x1];
+ u8 restore_mkey[0x1];
+ u8 restore_qp[0x1];
+
+ u8 reserved_2[0x1e];
+ u8 named_mkey[0x1];
+ u8 named_qp[0x1];
+
+ u8 reserved_3[0x7a0];
+};
+
+struct mlx5_ifc_e_switch_cap_bits {
+ u8 vport_svlan_strip[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_insert_if_not_exist[0x1];
+ u8 vport_cvlan_insert_overwrite[0x1];
+
+ u8 reserved_0[0x19];
+
+ u8 nic_vport_node_guid_modify[0x1];
+ u8 nic_vport_port_guid_modify[0x1];
+
+ u8 reserved_1[0x7e0];
+};
+
+struct mlx5_ifc_flow_table_eswitch_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
+
+ u8 reserved_1[0x7800];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+ u8 reserved_0[0x200];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_rdma;
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+ u8 reserved_1[0x7200];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 csum_cap[0x1];
+ u8 vlan_cap[0x1];
+ u8 lro_cap[0x1];
+ u8 lro_psh_flag[0x1];
+ u8 lro_time_stamp[0x1];
+ u8 lro_max_msg_sz_mode[0x2];
+ u8 wqe_vlan_insert[0x1];
+ u8 self_lb_en_modifiable[0x1];
+ u8 self_lb_mc[0x1];
+ u8 self_lb_uc[0x1];
+ u8 max_lso_cap[0x5];
+ u8 multi_pkt_send_wqe[0x2];
+ u8 wqe_inline_mode[0x2];
+ u8 rss_ind_tbl_cap[0x4];
+ u8 reserved_1[0x3];
+ u8 tunnel_lso_const_out_ip_id[0x1];
+ u8 tunnel_lro_gre[0x1];
+ u8 tunnel_lro_vxlan[0x1];
+ u8 tunnel_statless_gre[0x1];
+ u8 tunnel_stateless_vxlan[0x1];
+
+ u8 swp[0x1];
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+ u8 reserved_2[0x1b];
+ u8 max_geneve_opt_len[0x1];
+ u8 tunnel_stateless_geneve_rx[0x1];
+
+ u8 reserved_3[0x10];
+ u8 lro_min_mss_size[0x10];
+
+ u8 reserved_4[0x120];
+
+ u8 lro_timer_supported_periods[4][0x20];
+
+ u8 reserved_5[0x600];
+};
+
+enum {
+ MLX5_ROCE_CAP_L3_TYPE_GRH = 0x1,
+ MLX5_ROCE_CAP_L3_TYPE_IPV4 = 0x2,
+ MLX5_ROCE_CAP_L3_TYPE_IPV6 = 0x4,
+};
+
+struct mlx5_ifc_roce_cap_bits {
+ u8 roce_apm[0x1];
+ u8 rts2rts_primary_eth_prio[0x1];
+ u8 roce_rx_allow_untagged[0x1];
+ u8 rts2rts_src_addr_index_for_vlan_valid_vlan_id[0x1];
+
+ u8 reserved_0[0x1c];
+
+ u8 reserved_1[0x60];
+
+ u8 reserved_2[0xc];
+ u8 l3_type[0x4];
+ u8 reserved_3[0x8];
+ u8 roce_version[0x8];
+
+ u8 reserved_4[0x10];
+ u8 r_roce_dest_udp_port[0x10];
+
+ u8 r_roce_max_src_udp_port[0x10];
+ u8 r_roce_min_src_udp_port[0x10];
+
+ u8 reserved_5[0x10];
+ u8 roce_address_table_size[0x10];
+
+ u8 reserved_6[0x700];
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x1,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES = 0x100,
+};
+
+enum {
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE = 0x1,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES = 0x2,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES = 0x4,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES = 0x8,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES = 0x10,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES = 0x20,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES = 0x40,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES = 0x80,
+ MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+ u8 reserved_0[0x40];
+
+ u8 atomic_req_8B_endianess_mode[0x2];
+ u8 reserved_1[0x4];
+ u8 supported_atomic_req_8B_endianess_mode_1[0x1];
+
+ u8 reserved_2[0x19];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x10];
+ u8 atomic_operations[0x10];
+
+ u8 reserved_5[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_6[0x10];
+ u8 atomic_size_dc[0x10];
+
+ u8 reserved_7[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+ u8 reserved_0[0x40];
+
+ u8 sig[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
+
+ struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
+
+ u8 reserved_3[0x6e0];
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES = 0x4,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES = 0x0,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES = 0x1,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES = 0x2,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES = 0x3,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES = 0x4,
+ MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES = 0x5,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_PORT_TYPE_IB = 0x0,
+ MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET = 0x1,
+};
+
+enum {
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED = 0x0,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE = 0x1,
+ MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED = 0x3,
+};
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+ u8 reserved_0[0x80];
+
+ u8 log_max_srq_sz[0x8];
+ u8 log_max_qp_sz[0x8];
+ u8 reserved_1[0xb];
+ u8 log_max_qp[0x5];
+
+ u8 reserved_2[0xb];
+ u8 log_max_srq[0x5];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0x8];
+ u8 log_max_cq_sz[0x8];
+ u8 reserved_5[0xb];
+ u8 log_max_cq[0x5];
+
+ u8 log_max_eq_sz[0x8];
+ u8 reserved_6[0x2];
+ u8 log_max_mkey[0x6];
+ u8 reserved_7[0xc];
+ u8 log_max_eq[0x4];
+
+ u8 max_indirection[0x8];
+ u8 reserved_8[0x1];
+ u8 log_max_mrw_sz[0x7];
+ u8 reserved_9[0x2];
+ u8 log_max_bsf_list_size[0x6];
+ u8 reserved_10[0x2];
+ u8 log_max_klm_list_size[0x6];
+
+ u8 reserved_11[0xa];
+ u8 log_max_ra_req_dc[0x6];
+ u8 reserved_12[0xa];
+ u8 log_max_ra_res_dc[0x6];
+
+ u8 reserved_13[0xa];
+ u8 log_max_ra_req_qp[0x6];
+ u8 reserved_14[0xa];
+ u8 log_max_ra_res_qp[0x6];
+
+ u8 pad_cap[0x1];
+ u8 cc_query_allowed[0x1];
+ u8 cc_modify_allowed[0x1];
+ u8 start_pad[0x1];
+ u8 cache_line_128byte[0x1];
+ u8 reserved_15[0xb];
+ u8 gid_table_size[0x10];
+
+ u8 out_of_seq_cnt[0x1];
+ u8 vport_counters[0x1];
+ u8 retransmission_q_counters[0x1];
+ u8 debug[0x1];
+ u8 modify_rq_counters_set_id[0x1];
+ u8 rq_delay_drop[0x1];
+ u8 max_qp_cnt[0xa];
+ u8 pkey_table_size[0x10];
+
+ u8 vport_group_manager[0x1];
+ u8 vhca_group_manager[0x1];
+ u8 ib_virt[0x1];
+ u8 eth_virt[0x1];
+ u8 reserved_17[0x1];
+ u8 ets[0x1];
+ u8 nic_flow_table[0x1];
+ u8 eswitch_flow_table[0x1];
+ u8 reserved_18[0x3];
+ u8 local_ca_ack_delay[0x5];
+ u8 port_module_event[0x1];
+ u8 reserved_19[0x5];
+ u8 port_type[0x2];
+ u8 num_ports[0x8];
+
+ u8 snapshot[0x1];
+ u8 reserved_20[0x2];
+ u8 log_max_msg[0x5];
+ u8 reserved_21[0x4];
+ u8 max_tc[0x4];
+ u8 temp_warn_event[0x1];
+ u8 dcbx[0x1];
+ u8 reserved_22[0x4];
+ u8 rol_s[0x1];
+ u8 rol_g[0x1];
+ u8 reserved_23[0x1];
+ u8 wol_s[0x1];
+ u8 wol_g[0x1];
+ u8 wol_a[0x1];
+ u8 wol_b[0x1];
+ u8 wol_m[0x1];
+ u8 wol_u[0x1];
+ u8 wol_p[0x1];
+
+ u8 stat_rate_support[0x10];
+ u8 reserved_24[0xc];
+ u8 cqe_version[0x4];
+
+ u8 compact_address_vector[0x1];
+ u8 striding_rq[0x1];
+ u8 reserved_25[0x1];
+ u8 ipoib_enhanced_offloads[0x1];
+ u8 ipoib_ipoib_offloads[0x1];
+ u8 reserved_26[0x8];
+ u8 dc_connect_qp[0x1];
+ u8 dc_cnak_trace[0x1];
+ u8 drain_sigerr[0x1];
+ u8 cmdif_checksum[0x2];
+ u8 sigerr_cqe[0x1];
+ u8 reserved_27[0x1];
+ u8 wq_signature[0x1];
+ u8 sctr_data_cqe[0x1];
+ u8 reserved_28[0x1];
+ u8 sho[0x1];
+ u8 tph[0x1];
+ u8 rf[0x1];
+ u8 dct[0x1];
+ u8 qos[0x1];
+ u8 eth_net_offloads[0x1];
+ u8 roce[0x1];
+ u8 atomic[0x1];
+ u8 reserved_30[0x1];
+
+ u8 cq_oi[0x1];
+ u8 cq_resize[0x1];
+ u8 cq_moderation[0x1];
+ u8 cq_period_mode_modify[0x1];
+ u8 cq_invalidate[0x1];
+ u8 reserved_at_225[0x1];
+ u8 cq_eq_remap[0x1];
+ u8 pg[0x1];
+ u8 block_lb_mc[0x1];
+ u8 exponential_backoff[0x1];
+ u8 scqe_break_moderation[0x1];
+ u8 cq_period_start_from_cqe[0x1];
+ u8 cd[0x1];
+ u8 atm[0x1];
+ u8 apm[0x1];
+ u8 reserved_32[0x7];
+ u8 qkv[0x1];
+ u8 pkv[0x1];
+ u8 reserved_33[0x4];
+ u8 xrc[0x1];
+ u8 ud[0x1];
+ u8 uc[0x1];
+ u8 rc[0x1];
+
+ u8 reserved_34[0xa];
+ u8 uar_sz[0x6];
+ u8 reserved_35[0x8];
+ u8 log_pg_sz[0x8];
+
+ u8 bf[0x1];
+ u8 driver_version[0x1];
+ u8 pad_tx_eth_packet[0x1];
+ u8 reserved_36[0x8];
+ u8 log_bf_reg_size[0x5];
+ u8 reserved_37[0x10];
+
+ u8 num_of_diagnostic_counters[0x10];
+ u8 max_wqe_sz_sq[0x10];
+
+ u8 reserved_38[0x10];
+ u8 max_wqe_sz_rq[0x10];
+
+ u8 reserved_39[0x10];
+ u8 max_wqe_sz_sq_dc[0x10];
+
+ u8 reserved_40[0x7];
+ u8 max_qp_mcg[0x19];
+
+ u8 reserved_41[0x18];
+ u8 log_max_mcg[0x8];
+
+ u8 reserved_42[0x3];
+ u8 log_max_transport_domain[0x5];
+ u8 reserved_43[0x3];
+ u8 log_max_pd[0x5];
+ u8 reserved_44[0xb];
+ u8 log_max_xrcd[0x5];
+
+ u8 reserved_45[0x10];
+ u8 max_flow_counter[0x10];
+
+ u8 reserved_46[0x3];
+ u8 log_max_rq[0x5];
+ u8 reserved_47[0x3];
+ u8 log_max_sq[0x5];
+ u8 reserved_48[0x3];
+ u8 log_max_tir[0x5];
+ u8 reserved_49[0x3];
+ u8 log_max_tis[0x5];
+
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_50[0x2];
+ u8 log_max_rmp[0x5];
+ u8 reserved_51[0x3];
+ u8 log_max_rqt[0x5];
+ u8 reserved_52[0x3];
+ u8 log_max_rqt_size[0x5];
+ u8 reserved_53[0x3];
+ u8 log_max_tis_per_sq[0x5];
+
+ u8 reserved_54[0x3];
+ u8 log_max_stride_sz_rq[0x5];
+ u8 reserved_55[0x3];
+ u8 log_min_stride_sz_rq[0x5];
+ u8 reserved_56[0x3];
+ u8 log_max_stride_sz_sq[0x5];
+ u8 reserved_57[0x3];
+ u8 log_min_stride_sz_sq[0x5];
+
+ u8 reserved_58[0x1b];
+ u8 log_max_wq_sz[0x5];
+
+ u8 nic_vport_change_event[0x1];
+ u8 disable_local_lb[0x1];
+ u8 reserved_59[0x9];
+ u8 log_max_vlan_list[0x5];
+ u8 reserved_60[0x3];
+ u8 log_max_current_mc_list[0x5];
+ u8 reserved_61[0x3];
+ u8 log_max_current_uc_list[0x5];
+
+ u8 reserved_62[0x80];
+
+ u8 reserved_63[0x3];
+ u8 log_max_l2_table[0x5];
+ u8 reserved_64[0x8];
+ u8 log_uar_page_sz[0x10];
+
+ u8 reserved_65[0x20];
+
+ u8 device_frequency_mhz[0x20];
+
+ u8 device_frequency_khz[0x20];
+
+ u8 reserved_66[0x80];
+
+ u8 log_max_atomic_size_qp[0x8];
+ u8 reserved_67[0x10];
+ u8 log_max_atomic_size_dc[0x8];
+
+ u8 reserved_68[0x1f];
+ u8 cqe_compression[0x1];
+
+ u8 cqe_compression_timeout[0x10];
+ u8 cqe_compression_max_num[0x10];
+
+ u8 reserved_69[0x220];
+};
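+
+/*
+ * Note on these layouts: each "u8 name[width]" member describes a field
+ * that is "width" bits wide in the device's big-endian command format,
+ * not an array of bytes.  Fields are read and written through accessor
+ * macros rather than by dereferencing the structs directly.  A minimal
+ * sketch, assuming the MLX5_GET() accessor and the MLX5_ST_SZ_DW() size
+ * helper provided by the core driver headers:
+ *
+ *    u32 hca_cap[MLX5_ST_SZ_DW(cmd_hca_cap)];
+ *    int log_max_qp;
+ *
+ *    (capability area filled in by a QUERY_HCA_CAP command)
+ *    log_max_qp = MLX5_GET(cmd_hca_cap, hca_cap, log_max_qp);
+ */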
+
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
+};
+
+union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+ struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+ struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
+ u8 reserved_0[0x40];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+ u8 reserved_0[0xa00];
+};
+
+enum {
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP = 0x0,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP = 0x1,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT = 0x2,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT = 0x3,
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+ u8 l3_prot_type[0x1];
+ u8 l4_prot_type[0x1];
+ u8 selected_fields[0x1e];
+};
+
+enum {
+ MLX5_WQ_TYPE_LINKED_LIST = 0x0,
+ MLX5_WQ_TYPE_CYCLIC = 0x1,
+ MLX5_WQ_TYPE_STRQ_LINKED_LIST = 0x2,
+ MLX5_WQ_TYPE_STRQ_CYCLIC = 0x3,
+};
+
+enum rq_type {
+ RQ_TYPE_NONE,
+ RQ_TYPE_STRIDE,
+};
+
+enum {
+ MLX5_WQ_END_PAD_MODE_NONE = 0x0,
+ MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+ u8 wq_type[0x4];
+ u8 wq_signature[0x1];
+ u8 end_padding_mode[0x2];
+ u8 cd_slave[0x1];
+ u8 reserved_0[0x18];
+
+ u8 hds_skip_first_sge[0x1];
+ u8 log2_hds_buf_size[0x3];
+ u8 reserved_1[0x7];
+ u8 page_offset[0x5];
+ u8 lwm[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x8];
+ u8 uar_page[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 hw_counter[0x20];
+
+ u8 sw_counter[0x20];
+
+ u8 reserved_4[0xc];
+ u8 log_wq_stride[0x4];
+ u8 reserved_5[0x3];
+ u8 log_wq_pg_sz[0x5];
+ u8 reserved_6[0x3];
+ u8 log_wq_sz[0x5];
+
+ u8 reserved_7[0x15];
+ u8 single_wqe_log_num_of_strides[0x3];
+ u8 two_byte_shift_en[0x1];
+ u8 reserved_8[0x4];
+ u8 single_stride_log_num_of_bytes[0x3];
+
+ u8 reserved_9[0x4c0];
+
+ struct mlx5_ifc_cmd_pas_bits pas[0];
+};
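+
+/*
+ * This work queue block is not used on its own; it is embedded at the end
+ * of the RQ, SQ and RMP contexts below, and the trailing pas[] array
+ * carries the physical addresses of the queue buffer pages.
+ */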
+
+struct mlx5_ifc_rq_num_bits {
+ u8 reserved_0[0x8];
+ u8 rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+ u8 reserved_0[0x10];
+ u8 mac_addr_47_32[0x10];
+
+ u8 mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+ u8 reserved_0[0xa0];
+
+ u8 min_time_between_cnps[0x20];
+
+ u8 reserved_1[0x12];
+ u8 cnp_dscp[0x6];
+ u8 reserved_2[0x4];
+ u8 cnp_prio_mode[0x1];
+ u8 cnp_802p_prio[0x3];
+
+ u8 reserved_3[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+ u8 reserved_0[0x60];
+
+ u8 reserved_1[0x4];
+ u8 clamp_tgt_rate[0x1];
+ u8 reserved_2[0x3];
+ u8 clamp_tgt_rate_after_time_inc[0x1];
+ u8 reserved_3[0x17];
+
+ u8 reserved_4[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_5[0xe0];
+
+ u8 rate_to_set_on_first_cnp[0x20];
+
+ u8 dce_tcp_g[0x20];
+
+ u8 dce_tcp_rtt[0x20];
+
+ u8 rate_reduce_monitor_period[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 initial_alpha_value[0x20];
+
+ u8 reserved_7[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+ u8 reserved_0[0x80];
+
+ u8 rppp_max_rps[0x20];
+
+ u8 rpg_time_reset[0x20];
+
+ u8 rpg_byte_reset[0x20];
+
+ u8 rpg_threshold[0x20];
+
+ u8 rpg_max_rate[0x20];
+
+ u8 rpg_ai_rate[0x20];
+
+ u8 rpg_hai_rate[0x20];
+
+ u8 rpg_gd[0x20];
+
+ u8 rpg_min_dec_fac[0x20];
+
+ u8 rpg_min_rate[0x20];
+
+ u8 reserved_1[0x640];
+};
+
+enum {
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE = 0x1,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET = 0x2,
+ MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+ u8 resize_field_select[0x20];
+};
+
+enum {
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI = 0x4,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN = 0x8,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD_MODE = 0x10,
+ MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_STATUS = 0x20,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+ u8 modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+ u8 field_select_r_roce_np[0x20];
+};
+
+enum {
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_CLAMP_TGT_RATE = 0x2,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_CLAMP_TGT_RATE_AFTER_TIME_INC = 0x4,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_TIME_RESET = 0x8,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_BYTE_RESET = 0x10,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_THRESHOLD = 0x20,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MAX_RATE = 0x40,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_AI_RATE = 0x80,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_HAI_RATE = 0x100,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MIN_DEC_FAC = 0x200,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RPG_MIN_RATE = 0x400,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RATE_TO_SET_ON_FIRST_CNP = 0x800,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_DCE_TCP_G = 0x1000,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_DCE_TCP_RTT = 0x2000,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_RATE_REDUCE_MONITOR_PERIOD = 0x4000,
+ MLX5_FIELD_SELECT_R_ROCE_RP_FIELD_SELECT_R_ROCE_RP_INITIAL_ALPHA_VALUE = 0x8000,
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+ u8 field_select_r_roce_rp[0x20];
+};
+
+enum {
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS = 0x4,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET = 0x8,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET = 0x10,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD = 0x20,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE = 0x40,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE = 0x80,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE = 0x100,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD = 0x200,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC = 0x400,
+ MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+ u8 field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_pptb_reg_bits {
+ u8 reserved_0[0x2];
+ u8 mm[0x2];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 reserved_2[0x6];
+ u8 cm[0x1];
+ u8 um[0x1];
+ u8 pm[0x8];
+
+ u8 prio7buff[0x4];
+ u8 prio6buff[0x4];
+ u8 prio5buff[0x4];
+ u8 prio4buff[0x4];
+ u8 prio3buff[0x4];
+ u8 prio2buff[0x4];
+ u8 prio1buff[0x4];
+ u8 prio0buff[0x4];
+
+ u8 pm_msb[0x8];
+ u8 reserved_3[0x10];
+ u8 ctrl_buff[0x4];
+ u8 untagged_buff[0x4];
+};
+
+struct mlx5_ifc_dcbx_app_reg_bits {
+ u8 reserved_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1a];
+ u8 num_app_prio[0x6];
+
+ u8 reserved_3[0x40];
+
+ struct mlx5_ifc_application_prio_entry_bits app_prio[0];
+};
+
+struct mlx5_ifc_dcbx_param_reg_bits {
+ u8 dcbx_cee_cap[0x1];
+ u8 dcbx_ieee_cap[0x1];
+ u8 dcbx_standby_cap[0x1];
+ u8 reserved_0[0x5];
+ u8 port_number[0x8];
+ u8 reserved_1[0xa];
+ u8 max_application_table_size[0x6];
+
+ u8 reserved_2[0x15];
+ u8 version_oper[0x3];
+ u8 reserved_3[0x5];
+ u8 version_admin[0x3];
+
+ u8 willing_admin[0x1];
+ u8 reserved_4[0x3];
+ u8 pfc_cap_oper[0x4];
+ u8 reserved_5[0x4];
+ u8 pfc_cap_admin[0x4];
+ u8 reserved_6[0x4];
+ u8 num_of_tc_oper[0x4];
+ u8 reserved_7[0x4];
+ u8 num_of_tc_admin[0x4];
+
+ u8 remote_willing[0x1];
+ u8 reserved_8[0x3];
+ u8 remote_pfc_cap[0x4];
+ u8 reserved_9[0x14];
+ u8 remote_num_of_tc[0x4];
+
+ u8 reserved_10[0x18];
+ u8 error[0x8];
+
+ u8 reserved_11[0x160];
+};
+
+struct mlx5_ifc_qhll_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x1b];
+ u8 hll_time[0x5];
+
+ u8 stall_en[0x1];
+ u8 reserved_at_41[0x1c];
+ u8 stall_cnt[0x3];
+};
+
+struct mlx5_ifc_qetcr_reg_bits {
+ u8 operation_type[0x2];
+ u8 cap_local_admin[0x1];
+ u8 cap_remote_admin[0x1];
+ u8 reserved_0[0x4];
+ u8 port_number[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 tc[8][0x40];
+
+ u8 global_configuration[0x40];
+};
+
+struct mlx5_ifc_nodnic_ring_config_reg_bits {
+ u8 queue_address_63_32[0x20];
+
+ u8 queue_address_31_12[0x14];
+ u8 reserved_0[0x6];
+ u8 log_size[0x6];
+
+ struct mlx5_ifc_nodnic_ring_doorbell_bits doorbell;
+
+ u8 reserved_1[0x8];
+ u8 queue_number[0x18];
+
+ u8 q_key[0x20];
+
+ u8 reserved_2[0x10];
+ u8 pkey_index[0x10];
+
+ u8 reserved_3[0x40];
+};
+
+struct mlx5_ifc_nodnic_cq_arming_word_bits {
+ u8 reserved_0[0x8];
+ u8 cq_ci[0x10];
+ u8 reserved_1[0x8];
+};
+
+enum {
+ MLX5_NODNIC_EVENT_WORD_LINK_TYPE_INFINIBAND = 0x0,
+ MLX5_NODNIC_EVENT_WORD_LINK_TYPE_ETHERNET = 0x1,
+};
+
+enum {
+ MLX5_NODNIC_EVENT_WORD_PORT_STATE_DOWN = 0x0,
+ MLX5_NODNIC_EVENT_WORD_PORT_STATE_INITIALIZE = 0x1,
+ MLX5_NODNIC_EVENT_WORD_PORT_STATE_ARMED = 0x2,
+ MLX5_NODNIC_EVENT_WORD_PORT_STATE_ACTIVE = 0x3,
+};
+
+struct mlx5_ifc_nodnic_event_word_bits {
+ u8 driver_reset_needed[0x1];
+ u8 port_management_change_event[0x1];
+ u8 reserved_0[0x19];
+ u8 link_type[0x1];
+ u8 port_state[0x4];
+};
+
+struct mlx5_ifc_nic_vport_change_event_bits {
+ u8 reserved_0[0x10];
+ u8 vport_num[0x10];
+
+ u8 reserved_1[0xc0];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+ u8 reserved_0[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+
+ u8 reserved_1[0xa0];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+ u8 command_completion_vector[0x20];
+
+ u8 reserved_0[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+ u8 reserved_0[0x18];
+ u8 port_num[0x1];
+ u8 reserved_1[0x3];
+ u8 vl[0x4];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+ u8 event_subtype[0x8];
+ u8 reserved_0[0x8];
+ u8 congestion_level[0x8];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+ u8 reserved_0[0x60];
+
+ u8 gpio_event_hi[0x20];
+
+ u8 gpio_event_lo[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+ u8 reserved_0[0x40];
+
+ u8 port_num[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+ u8 reserved_0[0xe0];
+};
+
+enum {
+ MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN = 0x1,
+ MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+ u8 reserved_0[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 syndrome[0x8];
+
+ u8 reserved_3[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+ u8 bytes_commited[0x20];
+
+ u8 r_key[0x20];
+
+ u8 reserved_0[0x10];
+ u8 packet_len[0x10];
+
+ u8 rdma_op_len[0x20];
+
+ u8 rdma_va[0x40];
+
+ u8 reserved_1[0x5];
+ u8 rdma[0x1];
+ u8 write[0x1];
+ u8 requestor[0x1];
+ u8 qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+ u8 bytes_committed[0x20];
+
+ u8 reserved_0[0x10];
+ u8 wqe_index[0x10];
+
+ u8 reserved_1[0x10];
+ u8 len[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x5];
+ u8 rdma[0x1];
+ u8 write_read[0x1];
+ u8 requestor[0x1];
+ u8 qpn[0x18];
+};
+
+enum {
+ MLX5_QP_EVENTS_TYPE_QP = 0x0,
+ MLX5_QP_EVENTS_TYPE_RQ = 0x1,
+ MLX5_QP_EVENTS_TYPE_SQ = 0x2,
+};
+
+struct mlx5_ifc_qp_events_bits {
+ u8 reserved_0[0xa0];
+
+ u8 type[0x8];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 cq_number[0x18];
+};
+
+struct mlx5_ifc_fw_version_bits {
+ u8 major[0x10];
+ u8 reserved_0[0x10];
+
+ u8 minor[0x10];
+ u8 subminor[0x10];
+
+ u8 second[0x8];
+ u8 minute[0x8];
+ u8 hour[0x8];
+ u8 reserved_1[0x8];
+
+ u8 year[0x10];
+ u8 month[0x8];
+ u8 day[0x8];
+};
+
+enum {
+ MLX5_QPC_STATE_RST = 0x0,
+ MLX5_QPC_STATE_INIT = 0x1,
+ MLX5_QPC_STATE_RTR = 0x2,
+ MLX5_QPC_STATE_RTS = 0x3,
+ MLX5_QPC_STATE_SQER = 0x4,
+ MLX5_QPC_STATE_SQD = 0x5,
+ MLX5_QPC_STATE_ERR = 0x6,
+ MLX5_QPC_STATE_SUSPENDED = 0x9,
+};
+
+enum {
+ MLX5_QPC_ST_RC = 0x0,
+ MLX5_QPC_ST_UC = 0x1,
+ MLX5_QPC_ST_UD = 0x2,
+ MLX5_QPC_ST_XRC = 0x3,
+ MLX5_QPC_ST_DCI = 0x5,
+ MLX5_QPC_ST_QP0 = 0x7,
+ MLX5_QPC_ST_QP1 = 0x8,
+ MLX5_QPC_ST_RAW_DATAGRAM = 0x9,
+ MLX5_QPC_ST_REG_UMR = 0xc,
+};
+
+enum {
+ MLX5_QP_PM_ARMED = 0x0,
+ MLX5_QP_PM_REARM = 0x1,
+ MLX5_QPC_PM_STATE_RESERVED = 0x2,
+ MLX5_QP_PM_MIGRATED = 0x3,
+};
+
+enum {
+ MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS = 0x0,
+ MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT = 0x1,
+};
+
+enum {
+ MLX5_QPC_MTU_256_BYTES = 0x1,
+ MLX5_QPC_MTU_512_BYTES = 0x2,
+ MLX5_QPC_MTU_1K_BYTES = 0x3,
+ MLX5_QPC_MTU_2K_BYTES = 0x4,
+ MLX5_QPC_MTU_4K_BYTES = 0x5,
+ MLX5_QPC_MTU_RAW_ETHERNET_QP = 0x7,
+};
+
+enum {
+ MLX5_QPC_ATOMIC_MODE_IB_SPEC = 0x1,
+ MLX5_QPC_ATOMIC_MODE_ONLY_8B = 0x2,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_8B = 0x3,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_16B = 0x4,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_32B = 0x5,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_64B = 0x6,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_128B = 0x7,
+ MLX5_QPC_ATOMIC_MODE_UP_TO_256B = 0x8,
+};
+
+enum {
+ MLX5_QPC_CS_REQ_DISABLE = 0x0,
+ MLX5_QPC_CS_REQ_UP_TO_32B = 0x11,
+ MLX5_QPC_CS_REQ_UP_TO_64B = 0x22,
+};
+
+enum {
+ MLX5_QPC_CS_RES_DISABLE = 0x0,
+ MLX5_QPC_CS_RES_UP_TO_32B = 0x1,
+ MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+ u8 state[0x4];
+ u8 reserved_0[0x4];
+ u8 st[0x8];
+ u8 reserved_1[0x3];
+ u8 pm_state[0x2];
+ u8 reserved_2[0x7];
+ u8 end_padding_mode[0x2];
+ u8 reserved_3[0x2];
+
+ u8 wq_signature[0x1];
+ u8 block_lb_mc[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 reserved_4[0x1];
+ u8 drain_sigerr[0x1];
+ u8 reserved_5[0x2];
+ u8 pd[0x18];
+
+ u8 mtu[0x3];
+ u8 log_msg_max[0x5];
+ u8 reserved_6[0x1];
+ u8 log_rq_size[0x4];
+ u8 log_rq_stride[0x3];
+ u8 no_sq[0x1];
+ u8 log_sq_size[0x4];
+ u8 reserved_7[0x6];
+ u8 rlky[0x1];
+ u8 ulp_stateless_offload_mode[0x4];
+
+ u8 counter_set_id[0x8];
+ u8 uar_page[0x18];
+
+ u8 reserved_8[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 remote_qpn[0x18];
+
+ struct mlx5_ifc_ads_bits primary_address_path;
+
+ struct mlx5_ifc_ads_bits secondary_address_path;
+
+ u8 log_ack_req_freq[0x4];
+ u8 reserved_10[0x4];
+ u8 log_sra_max[0x3];
+ u8 reserved_11[0x2];
+ u8 retry_count[0x3];
+ u8 rnr_retry[0x3];
+ u8 reserved_12[0x1];
+ u8 fre[0x1];
+ u8 cur_rnr_retry[0x3];
+ u8 cur_retry_count[0x3];
+ u8 reserved_13[0x5];
+
+ u8 reserved_14[0x20];
+
+ u8 reserved_15[0x8];
+ u8 next_send_psn[0x18];
+
+ u8 reserved_16[0x8];
+ u8 cqn_snd[0x18];
+
+ u8 reserved_17[0x40];
+
+ u8 reserved_18[0x8];
+ u8 last_acked_psn[0x18];
+
+ u8 reserved_19[0x8];
+ u8 ssn[0x18];
+
+ u8 reserved_20[0x8];
+ u8 log_rra_max[0x3];
+ u8 reserved_21[0x1];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 reserved_22[0x1];
+ u8 page_offset[0x6];
+ u8 reserved_23[0x3];
+ u8 cd_slave_receive[0x1];
+ u8 cd_slave_send[0x1];
+ u8 cd_master[0x1];
+
+ u8 reserved_24[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 next_rcv_psn[0x18];
+
+ u8 reserved_25[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_26[0x8];
+ u8 cqn_rcv[0x18];
+
+ u8 dbr_addr[0x40];
+
+ u8 q_key[0x20];
+
+ u8 reserved_27[0x5];
+ u8 rq_type[0x3];
+ u8 srqn_rmpn[0x18];
+
+ u8 reserved_28[0x8];
+ u8 rmsn[0x18];
+
+ u8 hw_sq_wqebb_counter[0x10];
+ u8 sw_sq_wqebb_counter[0x10];
+
+ u8 hw_rq_counter[0x20];
+
+ u8 sw_rq_counter[0x20];
+
+ u8 reserved_29[0x20];
+
+ u8 reserved_30[0xf];
+ u8 cgs[0x1];
+ u8 cs_req[0x8];
+ u8 cs_res[0x8];
+
+ u8 dc_access_key[0x40];
+
+ u8 rdma_active[0x1];
+ u8 comm_est[0x1];
+ u8 suspended[0x1];
+ u8 reserved_31[0x5];
+ u8 send_msg_psn[0x18];
+
+ u8 reserved_32[0x8];
+ u8 rcv_msg_psn[0x18];
+
+ u8 rdma_va[0x40];
+
+ u8 rdma_key[0x20];
+
+ u8 reserved_33[0x20];
+};
+
+struct mlx5_ifc_roce_addr_layout_bits {
+ u8 source_l3_address[16][0x8];
+
+ u8 reserved_0[0x3];
+ u8 vlan_valid[0x1];
+ u8 vlan_id[0xc];
+ u8 source_mac_47_32[0x10];
+
+ u8 source_mac_31_0[0x20];
+
+ u8 reserved_1[0x14];
+ u8 roce_l3_type[0x4];
+ u8 roce_version[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_rdbc_bits {
+ u8 reserved_0[0x1c];
+ u8 type[0x4];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x8];
+ u8 psn[0x18];
+
+ u8 rkey[0x20];
+
+ u8 address[0x40];
+
+ u8 byte_count[0x20];
+
+ u8 reserved_3[0x20];
+
+ u8 atomic_resp[32][0x8];
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+ MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
+};
+
+struct mlx5_ifc_flow_context_bits {
+ u8 reserved_0[0x20];
+
+ u8 group_id[0x20];
+
+ u8 reserved_1[0x8];
+ u8 flow_tag[0x18];
+
+ u8 reserved_2[0x10];
+ u8 action[0x10];
+
+ u8 reserved_3[0x8];
+ u8 destination_list_size[0x18];
+
+ u8 reserved_4[0x8];
+ u8 flow_counter_list_size[0x18];
+
+ u8 reserved_5[0x140];
+
+ struct mlx5_ifc_fte_match_param_bits match_value;
+
+ u8 reserved_6[0x600];
+
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
+};
+
+enum {
+ MLX5_XRC_SRQC_STATE_GOOD = 0x0,
+ MLX5_XRC_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+ u8 state[0x4];
+ u8 log_xrc_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_2[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x2];
+ u8 log_page_size[0x6];
+ u8 user_index[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_7[0x40];
+
+ u8 db_record_addr_h[0x20];
+
+ u8 db_record_addr_l[0x1e];
+ u8 reserved_8[0x2];
+
+ u8 reserved_9[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+ u8 packets[0x40];
+
+ u8 octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+ u8 reserved_0[0xc];
+ u8 prio[0x4];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x100];
+
+ u8 reserved_3[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_4[0x8];
+ u8 underlay_qpn[0x18];
+
+ u8 reserved_5[0x3a0];
+};
+
+enum {
+ MLX5_TIRC_DISP_TYPE_DIRECT = 0x0,
+ MLX5_TIRC_DISP_TYPE_INDIRECT = 0x1,
+};
+
+enum {
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2,
+};
+
+enum {
+ MLX5_TIRC_RX_HASH_FN_HASH_NONE = 0x0,
+ MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8 = 0x1,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ = 0x2,
+};
+
+enum {
+ MLX5_TIRC_SELF_LB_EN_ENABLE_UNICAST = 0x1,
+ MLX5_TIRC_SELF_LB_EN_ENABLE_MULTICAST = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+ u8 reserved_0[0x20];
+
+ u8 disp_type[0x4];
+ u8 reserved_1[0x1c];
+
+ u8 reserved_2[0x40];
+
+ u8 reserved_3[0x4];
+ u8 lro_timeout_period_usecs[0x10];
+ u8 lro_enable_mask[0x4];
+ u8 lro_max_msg_sz[0x8];
+
+ u8 reserved_4[0x40];
+
+ u8 reserved_5[0x8];
+ u8 inline_rqn[0x18];
+
+ u8 rx_hash_symmetric[0x1];
+ u8 reserved_6[0x1];
+ u8 tunneled_offload_en[0x1];
+ u8 reserved_7[0x5];
+ u8 indirect_table[0x18];
+
+ u8 rx_hash_fn[0x4];
+ u8 reserved_8[0x2];
+ u8 self_lb_en[0x2];
+ u8 transport_domain[0x18];
+
+ u8 rx_hash_toeplitz_key[10][0x20];
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+ struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+ u8 reserved_9[0x4c0];
+};
+
+enum {
+ MLX5_SRQC_STATE_GOOD = 0x0,
+ MLX5_SRQC_STATE_ERROR = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+ u8 state[0x4];
+ u8 log_srq_size[0x4];
+ u8 reserved_0[0x18];
+
+ u8 wq_signature[0x1];
+ u8 cont_srq[0x1];
+ u8 reserved_1[0x1];
+ u8 rlky[0x1];
+ u8 reserved_2[0x1];
+ u8 log_rq_stride[0x3];
+ u8 xrcd[0x18];
+
+ u8 page_offset[0x6];
+ u8 reserved_3[0x2];
+ u8 cqn[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x2];
+ u8 log_page_size[0x6];
+ u8 reserved_6[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x8];
+ u8 pd[0x18];
+
+ u8 lwm[0x10];
+ u8 wqe_cnt[0x10];
+
+ u8 reserved_9[0x40];
+
+ u8 db_record_addr_h[0x20];
+
+ u8 db_record_addr_l[0x1e];
+ u8 reserved_10[0x2];
+
+ u8 reserved_11[0x80];
+};
+
+enum {
+ MLX5_SQC_STATE_RST = 0x0,
+ MLX5_SQC_STATE_RDY = 0x1,
+ MLX5_SQC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+ u8 rlkey[0x1];
+ u8 cd_master[0x1];
+ u8 fre[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 allow_multi_pkt_send_wqe[0x1];
+ u8 min_wqe_inline_mode[0x3];
+ u8 state[0x4];
+ u8 reg_umr[0x1];
+ u8 allow_swp[0x1];
+ u8 reserved_0[0x12];
+
+ u8 reserved_1[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x80];
+
+ u8 qos_para_vport_number[0x10];
+ u8 packet_pacing_rate_limit_index[0x10];
+
+ u8 tis_lst_sz[0x10];
+ u8 reserved_4[0x10];
+
+ u8 reserved_5[0x40];
+
+ u8 reserved_6[0x8];
+ u8 tis_num_0[0x18];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_TSAR_TYPE_DWRR = 0,
+ MLX5_TSAR_TYPE_ROUND_ROUBIN = 1,
+ MLX5_TSAR_TYPE_ETS = 2
+};
+
+struct mlx5_ifc_tsar_element_attributes_bits {
+ u8 reserved_0[0x8];
+ u8 tsar_type[0x8];
+ u8 reserved_1[0x10];
+};
+
+struct mlx5_ifc_vport_element_attributes_bits {
+ u8 reserved_0[0x10];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_tc_element_attributes_bits {
+ u8 traffic_class[0x10];
+ u8 vport_number[0x10];
+};
+
+struct mlx5_ifc_para_vport_tc_element_attributes_bits {
+ u8 reserved_0[0x0C];
+ u8 traffic_class[0x04];
+ u8 qos_para_vport_number[0x10];
+};
+
+enum {
+ MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
+ MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
+ MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
+ MLX5_SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+};
+
+struct mlx5_ifc_scheduling_context_bits {
+ u8 element_type[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 element_attributes[0x20];
+
+ u8 parent_element_id[0x20];
+
+ u8 reserved_at_60[0x40];
+
+ u8 bw_share[0x20];
+
+ u8 max_average_bw[0x20];
+
+ u8 reserved_at_e0[0x120];
+};
+
+struct mlx5_ifc_rqtc_bits {
+ u8 reserved_0[0xa0];
+
+ u8 reserved_1[0x10];
+ u8 rqt_max_size[0x10];
+
+ u8 reserved_2[0x10];
+ u8 rqt_actual_size[0x10];
+
+ u8 reserved_3[0x6a0];
+
+ struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+ MLX5_RQC_RQ_TYPE_MEMORY_RQ_RMP = 0x1,
+};
+
+enum {
+ MLX5_RQC_STATE_RST = 0x0,
+ MLX5_RQC_STATE_RDY = 0x1,
+ MLX5_RQC_STATE_ERR = 0x3,
+};
+
+enum {
+ MLX5_RQC_DROPLESS_MODE_DISABLE = 0x0,
+ MLX5_RQC_DROPLESS_MODE_ENABLE = 0x1,
+};
+
+struct mlx5_ifc_rqc_bits {
+ u8 rlkey[0x1];
+ u8 delay_drop_en[0x1];
+ u8 scatter_fcs[0x1];
+ u8 vlan_strip_disable[0x1];
+ u8 mem_rq_type[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x1];
+ u8 flush_in_error_en[0x1];
+ u8 reserved_2[0x12];
+
+ u8 reserved_3[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 reserved_5[0x18];
+
+ u8 reserved_6[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_7[0xe0];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_RMPC_STATE_RDY = 0x1,
+ MLX5_RMPC_STATE_ERR = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+ u8 reserved_0[0x8];
+ u8 state[0x4];
+ u8 reserved_1[0x14];
+
+ u8 basic_cyclic_rcv_wqe[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x140];
+
+ struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_UC_MAC_ADDRESS = 0x0,
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_CURRENT_MC_MAC_ADDRESS = 0x1,
+ MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST = 0x2,
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_0[0x5];
+ u8 min_wqe_inline_mode[0x3];
+ u8 reserved_1[0x15];
+ u8 disable_mc_local_lb[0x1];
+ u8 disable_uc_local_lb[0x1];
+ u8 roce_en[0x1];
+
+ u8 arm_change_event[0x1];
+ u8 reserved_2[0x1a];
+ u8 event_on_mtu[0x1];
+ u8 event_on_promisc_change[0x1];
+ u8 event_on_vlan_change[0x1];
+ u8 event_on_mc_address_change[0x1];
+ u8 event_on_uc_address_change[0x1];
+
+ u8 reserved_3[0xe0];
+
+ u8 reserved_4[0x10];
+ u8 mtu[0x10];
+
+ u8 system_image_guid[0x40];
+
+ u8 port_guid[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 reserved_5[0x140];
+
+ u8 qkey_violation_counter[0x10];
+ u8 reserved_6[0x10];
+
+ u8 reserved_7[0x420];
+
+ u8 promisc_uc[0x1];
+ u8 promisc_mc[0x1];
+ u8 promisc_all[0x1];
+ u8 reserved_8[0x2];
+ u8 allowed_list_type[0x3];
+ u8 reserved_9[0xc];
+ u8 allowed_list_size[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+ u8 reserved_10[0x20];
+
+ u8 current_uc_mac_address[0][0x40];
+};
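+
+/*
+ * The trailing current_uc_mac_address[] array holds allowed_list_size
+ * entries; their meaning (unicast MACs, multicast MACs or VLAN ids) is
+ * selected by allowed_list_type, see the
+ * MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_* enum above.
+ */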
+
+enum {
+ MLX5_ACCESS_MODE_PA = 0x0,
+ MLX5_ACCESS_MODE_MTT = 0x1,
+ MLX5_ACCESS_MODE_KLM = 0x2,
+};
+
+struct mlx5_ifc_mkc_bits {
+ u8 reserved_0[0x1];
+ u8 free[0x1];
+ u8 reserved_1[0xd];
+ u8 small_fence_on_rdma_read_response[0x1];
+ u8 umr_en[0x1];
+ u8 a[0x1];
+ u8 rw[0x1];
+ u8 rr[0x1];
+ u8 lw[0x1];
+ u8 lr[0x1];
+ u8 access_mode[0x2];
+ u8 reserved_2[0x8];
+
+ u8 qpn[0x18];
+ u8 mkey_7_0[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 length64[0x1];
+ u8 bsf_en[0x1];
+ u8 sync_umr[0x1];
+ u8 reserved_4[0x2];
+ u8 expected_sigerr_count[0x1];
+ u8 reserved_5[0x1];
+ u8 en_rinval[0x1];
+ u8 pd[0x18];
+
+ u8 start_addr[0x40];
+
+ u8 len[0x40];
+
+ u8 bsf_octword_size[0x20];
+
+ u8 reserved_6[0x80];
+
+ u8 translations_octword_size[0x20];
+
+ u8 reserved_7[0x1b];
+ u8 log_page_size[0x5];
+
+ u8 reserved_8[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+ u8 reserved_0[0x10];
+ u8 pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+ u8 array128_auto[16][0x8];
+};
+
+enum {
+ MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_PORT_GUID = 0x0,
+ MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_NODE_GUID = 0x1,
+ MLX5_HCA_VPORT_CONTEXT_FIELD_SELECT_VPORT_STATE_POLICY = 0x2,
+};
+
+enum {
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_SLEEP = 0x1,
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_POLLING = 0x2,
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_DISABLED = 0x3,
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_PORTCONFIGURATIONTRAINING = 0x4,
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_LINKUP = 0x5,
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_LINKERRORRECOVERY = 0x6,
+ MLX5_HCA_VPORT_CONTEXT_PORT_PHYSICAL_STATE_PHYTEST = 0x7,
+};
+
+enum {
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_DOWN = 0x0,
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_UP = 0x1,
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_POLICY_FOLLOW = 0x2,
+};
+
+enum {
+ MLX5_HCA_VPORT_CONTEXT_PORT_STATE_DOWN = 0x1,
+ MLX5_HCA_VPORT_CONTEXT_PORT_STATE_INIT = 0x2,
+ MLX5_HCA_VPORT_CONTEXT_PORT_STATE_ARM = 0x3,
+ MLX5_HCA_VPORT_CONTEXT_PORT_STATE_ACTIVE = 0x4,
+};
+
+enum {
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_DOWN = 0x1,
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_INIT = 0x2,
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_ARM = 0x3,
+ MLX5_HCA_VPORT_CONTEXT_VPORT_STATE_ACTIVE = 0x4,
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+ u8 field_select[0x20];
+
+ u8 reserved_0[0xe0];
+
+ u8 sm_virt_aware[0x1];
+ u8 has_smi[0x1];
+ u8 has_raw[0x1];
+ u8 grh_required[0x1];
+ u8 reserved_1[0x1];
+ u8 min_wqe_inline_mode[0x3];
+ u8 reserved_2[0x8];
+ u8 port_physical_state[0x4];
+ u8 vport_state_policy[0x4];
+ u8 port_state[0x4];
+ u8 vport_state[0x4];
+
+ u8 reserved_3[0x20];
+
+ u8 system_image_guid[0x40];
+
+ u8 port_guid[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 cap_mask1[0x20];
+
+ u8 cap_mask1_field_select[0x20];
+
+ u8 cap_mask2[0x20];
+
+ u8 cap_mask2_field_select[0x20];
+
+ u8 reserved_4[0x80];
+
+ u8 lid[0x10];
+ u8 reserved_5[0x4];
+ u8 init_type_reply[0x4];
+ u8 lmc[0x3];
+ u8 subnet_timeout[0x5];
+
+ u8 sm_lid[0x10];
+ u8 sm_sl[0x4];
+ u8 reserved_6[0xc];
+
+ u8 qkey_violation_counter[0x10];
+ u8 pkey_violation_counter[0x10];
+
+ u8 reserved_7[0xca0];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+ struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+ struct mlx5_ifc_odp_cap_bits odp_cap;
+ struct mlx5_ifc_atomic_caps_bits atomic_caps;
+ struct mlx5_ifc_roce_cap_bits roce_cap;
+ struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+ struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+ struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+ struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+ struct mlx5_ifc_snapshot_cap_bits snapshot_cap;
+ struct mlx5_ifc_debug_cap_bits diagnostic_counters_cap;
+ struct mlx5_ifc_qos_cap_bits qos_cap;
+ u8 reserved_0[0x8000];
+};
+
+struct mlx5_ifc_esw_vport_context_bits {
+ u8 reserved_0[0x3];
+ u8 vport_svlan_strip[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_insert[0x2];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x20];
+
+ u8 svlan_cfi[0x1];
+ u8 svlan_pcp[0x3];
+ u8 svlan_id[0xc];
+ u8 cvlan_cfi[0x1];
+ u8 cvlan_pcp[0x3];
+ u8 cvlan_id[0xc];
+
+ u8 reserved_3[0x7a0];
+};
+
+enum {
+ MLX5_EQC_STATUS_OK = 0x0,
+ MLX5_EQC_STATUS_EQ_WRITE_FAILURE = 0xa,
+};
+
+enum {
+ MLX5_EQ_STATE_ARMED = 0x9,
+ MLX5_EQ_STATE_FIRED = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x9];
+ u8 ec[0x1];
+ u8 oi[0x1];
+ u8 reserved_1[0x5];
+ u8 st[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_5[0x6];
+
+ u8 reserved_6[0x3];
+ u8 log_eq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x18];
+ u8 intr[0x8];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_10[0x18];
+
+ u8 reserved_11[0x60];
+
+ u8 reserved_12[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_13[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_14[0x80];
+};
+
+enum {
+ MLX5_DCTC_STATE_ACTIVE = 0x0,
+ MLX5_DCTC_STATE_DRAINING = 0x1,
+ MLX5_DCTC_STATE_DRAINED = 0x2,
+};
+
+enum {
+ MLX5_DCTC_CS_RES_DISABLE = 0x0,
+ MLX5_DCTC_CS_RES_NA = 0x1,
+ MLX5_DCTC_CS_RES_UP_TO_64B = 0x2,
+};
+
+enum {
+ MLX5_DCTC_MTU_256_BYTES = 0x1,
+ MLX5_DCTC_MTU_512_BYTES = 0x2,
+ MLX5_DCTC_MTU_1K_BYTES = 0x3,
+ MLX5_DCTC_MTU_2K_BYTES = 0x4,
+ MLX5_DCTC_MTU_4K_BYTES = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+ u8 reserved_0[0x4];
+ u8 state[0x4];
+ u8 reserved_1[0x18];
+
+ u8 reserved_2[0x8];
+ u8 user_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 cqn[0x18];
+
+ u8 counter_set_id[0x8];
+ u8 atomic_mode[0x4];
+ u8 rre[0x1];
+ u8 rwe[0x1];
+ u8 rae[0x1];
+ u8 atomic_like_write_en[0x1];
+ u8 latency_sensitive[0x1];
+ u8 rlky[0x1];
+ u8 reserved_4[0xe];
+
+ u8 reserved_5[0x8];
+ u8 cs_res[0x8];
+ u8 reserved_6[0x3];
+ u8 min_rnr_nak[0x5];
+ u8 reserved_7[0x8];
+
+ u8 reserved_8[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_9[0x8];
+ u8 pd[0x18];
+
+ u8 tclass[0x8];
+ u8 reserved_10[0x4];
+ u8 flow_label[0x14];
+
+ u8 dc_access_key[0x40];
+
+ u8 reserved_11[0x5];
+ u8 mtu[0x3];
+ u8 port[0x8];
+ u8 pkey_index[0x10];
+
+ u8 reserved_12[0x8];
+ u8 my_addr_index[0x8];
+ u8 reserved_13[0x8];
+ u8 hop_limit[0x8];
+
+ u8 dc_access_key_violation_count[0x20];
+
+ u8 reserved_14[0x14];
+ u8 dei_cfi[0x1];
+ u8 eth_prio[0x3];
+ u8 ecn[0x2];
+ u8 dscp[0x6];
+
+ u8 reserved_15[0x40];
+};
+
+enum {
+ MLX5_CQC_STATUS_OK = 0x0,
+ MLX5_CQC_STATUS_CQ_OVERFLOW = 0x9,
+ MLX5_CQC_STATUS_CQ_WRITE_FAIL = 0xa,
+};
+
+enum {
+ CQE_SIZE_64 = 0x0,
+ CQE_SIZE_128 = 0x1,
+};
+
+enum {
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+};
+
+enum {
+ MLX5_CQ_STATE_SOLICITED_ARMED = 0x6,
+ MLX5_CQ_STATE_ARMED = 0x9,
+ MLX5_CQ_STATE_FIRED = 0xa,
+};
+
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 reserved_0[0x4];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_1[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 cq_period_mode[0x2];
+ u8 cqe_compression_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 reserved_4[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_5[0x6];
+
+ u8 reserved_6[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+
+ u8 reserved_7[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+
+ u8 reserved_8[0x18];
+ u8 c_eqn[0x8];
+
+ u8 reserved_9[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_10[0x18];
+
+ u8 reserved_11[0x20];
+
+ u8 reserved_12[0x8];
+ u8 last_notified_index[0x18];
+
+ u8 reserved_13[0x8];
+ u8 last_solicit_index[0x18];
+
+ u8 reserved_14[0x8];
+ u8 consumer_counter[0x18];
+
+ u8 reserved_15[0x8];
+ u8 producer_counter[0x18];
+
+ u8 reserved_16[0x40];
+
+ u8 dbr_addr[0x40];
+};
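+
+/*
+ * cq_period and cq_max_count are the interrupt moderation knobs of the CQ
+ * context: an event is generated after cq_max_count completions or when
+ * the cq_period timer expires, whichever comes first, and cq_period_mode
+ * selects whether that timer restarts from the last EQE or the last CQE
+ * (see the MLX5_CQ_PERIOD_MODE_* enum above).  Run-time changes go
+ * through MODIFY_CQ with the matching MLX5_MODIFY_FIELD_SELECT_* bits
+ * set.
+ */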
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+ struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+ struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+ u8 reserved_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+ u8 reserved_0[0xc0];
+
+ u8 reserved_1[0x8];
+ u8 ieee_vendor_id[0x18];
+
+ u8 reserved_2[0x10];
+ u8 vsd_vendor_id[0x10];
+
+ u8 vsd[208][0x8];
+
+ u8 vsd_contd_psid[16][0x8];
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+ struct mlx5_ifc_modify_field_select_bits modify_field_select;
+ struct mlx5_ifc_resize_field_select_bits resize_field_select;
+ u8 reserved_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+ struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+ struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+ struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+ u8 reserved_0[0x20];
+};
+
+struct mlx5_ifc_bufferx_reg_bits {
+ u8 reserved_0[0x6];
+ u8 lossy[0x1];
+ u8 epsb[0x1];
+ u8 reserved_1[0xc];
+ u8 size[0xc];
+
+ u8 xoff_threshold[0x10];
+ u8 xon_threshold[0x10];
+};
+
+struct mlx5_ifc_config_item_bits {
+ u8 valid[0x2];
+ u8 reserved_0[0x2];
+ u8 header_type[0x2];
+ u8 reserved_1[0x2];
+ u8 default_location[0x1];
+ u8 reserved_2[0x7];
+ u8 version[0x4];
+ u8 reserved_3[0x3];
+ u8 length[0x9];
+
+ u8 type[0x20];
+
+ u8 reserved_4[0x10];
+ u8 crc16[0x10];
+};
+
+struct mlx5_ifc_nodnic_port_config_reg_bits {
+ struct mlx5_ifc_nodnic_event_word_bits event;
+
+ u8 network_en[0x1];
+ u8 dma_en[0x1];
+ u8 promisc_en[0x1];
+ u8 promisc_multicast_en[0x1];
+ u8 reserved_0[0x17];
+ u8 receive_filter_en[0x5];
+
+ u8 reserved_1[0x10];
+ u8 mac_47_32[0x10];
+
+ u8 mac_31_0[0x20];
+
+ u8 receive_filters_mgid_mac[64][0x8];
+
+ u8 gid[16][0x8];
+
+ u8 reserved_2[0x10];
+ u8 lid[0x10];
+
+ u8 reserved_3[0xc];
+ u8 sm_sl[0x4];
+ u8 sm_lid[0x10];
+
+ u8 completion_address_63_32[0x20];
+
+ u8 completion_address_31_12[0x14];
+ u8 reserved_4[0x6];
+ u8 log_cq_size[0x6];
+
+ u8 working_buffer_address_63_32[0x20];
+
+ u8 working_buffer_address_31_12[0x14];
+ u8 reserved_5[0xc];
+
+ struct mlx5_ifc_nodnic_cq_arming_word_bits arm_cq;
+
+ u8 pkey_index[0x10];
+ u8 pkey[0x10];
+
+ struct mlx5_ifc_nodnic_ring_config_reg_bits send_ring0;
+
+ struct mlx5_ifc_nodnic_ring_config_reg_bits send_ring1;
+
+ struct mlx5_ifc_nodnic_ring_config_reg_bits receive_ring0;
+
+ struct mlx5_ifc_nodnic_ring_config_reg_bits receive_ring1;
+
+ u8 reserved_6[0x400];
+};
+
+union mlx5_ifc_event_auto_bits {
+ struct mlx5_ifc_comp_event_bits comp_event;
+ struct mlx5_ifc_dct_events_bits dct_events;
+ struct mlx5_ifc_qp_events_bits qp_events;
+ struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+ struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+ struct mlx5_ifc_cq_error_bits cq_error;
+ struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+ struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+ struct mlx5_ifc_gpio_event_bits gpio_event;
+ struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+ struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+ struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+ struct mlx5_ifc_pages_req_event_bits pages_req_event;
+ struct mlx5_ifc_nic_vport_change_event_bits nic_vport_change_event;
+ u8 reserved_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+ u8 reserved_0[0x100];
+
+ u8 assert_existptr[0x20];
+
+ u8 assert_callra[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 fw_version[0x20];
+
+ u8 hw_id[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 irisc_index[0x8];
+ u8 synd[0x8];
+ u8 ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+ u8 no_lb[0x1];
+ u8 reserved_0[0x7];
+ u8 port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_lrh_bits {
+ u8 vl[4];
+ u8 lver[4];
+ u8 sl[4];
+ u8 reserved2[2];
+ u8 lnh[2];
+ u8 dlid[16];
+ u8 reserved5[5];
+ u8 pkt_len[11];
+ u8 slid[16];
+};
+
+struct mlx5_ifc_icmd_set_wol_rol_out_bits {
+ u8 reserved_0[0x40];
+
+ u8 reserved_1[0x10];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+};
+
+struct mlx5_ifc_icmd_set_wol_rol_in_bits {
+ u8 reserved_0[0x40];
+
+ u8 rol_mode_valid[0x1];
+ u8 wol_mode_valid[0x1];
+ u8 reserved_1[0xe];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_2[0x7a0];
+};
+
+struct mlx5_ifc_icmd_set_virtual_mac_in_bits {
+ u8 virtual_mac_en[0x1];
+ u8 mac_aux_v[0x1];
+ u8 reserved_0[0x1e];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_mac_address_layout_bits virtual_mac;
+
+ u8 reserved_2[0x760];
+};
+
+struct mlx5_ifc_icmd_query_virtual_mac_out_bits {
+ u8 virtual_mac_en[0x1];
+ u8 mac_aux_v[0x1];
+ u8 reserved_0[0x1e];
+
+ struct mlx5_ifc_mac_address_layout_bits permanent_mac;
+
+ struct mlx5_ifc_mac_address_layout_bits virtual_mac;
+
+ u8 reserved_1[0x760];
+};
+
+struct mlx5_ifc_icmd_query_fw_info_out_bits {
+ struct mlx5_ifc_fw_version_bits fw_version;
+
+ u8 reserved_0[0x10];
+ u8 hash_signature[0x10];
+
+ u8 psid[16][0x8];
+
+ u8 reserved_1[0x6e0];
+};
+
+struct mlx5_ifc_icmd_query_cap_in_bits {
+ u8 reserved_0[0x10];
+ u8 capability_group[0x10];
+};
+
+struct mlx5_ifc_icmd_query_cap_general_bits {
+ u8 nv_access[0x1];
+ u8 fw_info_psid[0x1];
+ u8 reserved_0[0x1e];
+
+ u8 reserved_1[0x16];
+ u8 rol_s[0x1];
+ u8 rol_g[0x1];
+ u8 reserved_2[0x1];
+ u8 wol_s[0x1];
+ u8 wol_g[0x1];
+ u8 wol_a[0x1];
+ u8 wol_b[0x1];
+ u8 wol_m[0x1];
+ u8 wol_u[0x1];
+ u8 wol_p[0x1];
+};
+
+struct mlx5_ifc_icmd_ocbb_query_header_stats_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 reserved_1[0x7e0];
+};
+
+struct mlx5_ifc_icmd_ocbb_query_etoc_stats_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 reserved_1[0x7e0];
+};
+
+struct mlx5_ifc_icmd_ocbb_init_in_bits {
+ u8 address_hi[0x20];
+
+ u8 address_lo[0x20];
+
+ u8 reserved_0[0x7c0];
+};
+
+struct mlx5_ifc_icmd_init_ocsd_in_bits {
+ u8 reserved_0[0x20];
+
+ u8 address_hi[0x20];
+
+ u8 address_lo[0x20];
+
+ u8 reserved_1[0x7a0];
+};
+
+struct mlx5_ifc_icmd_access_reg_out_bits {
+ u8 reserved_0[0x11];
+ u8 status[0x7];
+ u8 reserved_1[0x8];
+
+ u8 register_id[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x40];
+
+ u8 reserved_4[0x5];
+ u8 len[0xb];
+ u8 reserved_5[0x10];
+
+ u8 register_data[0][0x20];
+};
+
+enum {
+ MLX5_ICMD_ACCESS_REG_IN_METHOD_QUERY = 0x1,
+ MLX5_ICMD_ACCESS_REG_IN_METHOD_WRITE = 0x2,
+};
+
+struct mlx5_ifc_icmd_access_reg_in_bits {
+ u8 constant_1[0x5];
+ u8 constant_2[0xb];
+ u8 reserved_0[0x10];
+
+ u8 register_id[0x10];
+ u8 reserved_1[0x1];
+ u8 method[0x7];
+ u8 constant_3[0x8];
+
+ u8 reserved_2[0x40];
+
+ u8 constant_4[0x5];
+ u8 len[0xb];
+ u8 reserved_3[0x10];
+
+ u8 register_data[0][0x20];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE = 0x0,
+ MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 profile[0x10];
+
+ u8 reserved_3[0x20];
+};
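+
+/*
+ * The command layouts in this file share a common framing: every *_in
+ * block starts with a 16-bit opcode and op_mod, and every *_out block
+ * starts with an 8-bit status followed by a 32-bit syndrome that the
+ * firmware fills in on failure.  A minimal sketch of issuing one of
+ * these commands, assuming the MLX5_SET() accessor, the command opcode
+ * constants and an mlx5_cmd_exec()-style entry point from the core
+ * driver:
+ *
+ *    u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ *    u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ *    int err;
+ *
+ *    MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ *    MLX5_SET(teardown_hca_in, in, profile,
+ *        MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE);
+ *    err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ */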
+
+struct mlx5_ifc_set_delay_drop_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_delay_drop_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 delay_drop_timeout[0x10];
+};
+
+struct mlx5_ifc_query_delay_drop_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 delay_drop_timeout[0x10];
+};
+
+struct mlx5_ifc_query_delay_drop_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_suspend_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_suspend_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_set_wol_rol_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rol_mode_valid[0x1];
+ u8 wol_mode_valid[0x1];
+ u8 reserved_2[0xe];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_rdb_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_rdb_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x18];
+ u8 rdb_list_size[0x8];
+
+ struct mlx5_ifc_rdbc_bits rdb_context[0];
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL = 0x0,
+ MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x6];
+ u8 demux_mode[0x2];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x20];
+
+ u8 reserved_5[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_6[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+enum {
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2,
+ MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3
+};
+
+struct mlx5_ifc_set_flow_table_root_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_flow_table_root_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x8];
+ u8 underlay_qpn[0x18];
+
+ u8 reserved_7[0x120];
+};
+
+struct mlx5_ifc_set_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x18];
+ u8 modify_enable_mask[0x8];
+
+ u8 reserved_7[0x20];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_8[0xe0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
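+
+/*
+ * SET_FLOW_TABLE_ENTRY carries a full flow_context: the action field is a
+ * bitmask of the MLX5_FLOW_CONTEXT_ACTION_* values, and when FWD_DEST is
+ * set, destination_list_size destination entries follow the match value.
+ * A minimal sketch, assuming the MLX5_SET() and MLX5_ADDR_OF() accessors
+ * from the core driver headers:
+ *
+ *    void *fc = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ *
+ *    MLX5_SET(flow_context, fc, action,
+ *        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ *    MLX5_SET(flow_context, fc, destination_list_size, 1);
+ */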
+
+struct mlx5_ifc_set_driver_version_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_driver_version_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ u8 driver_version[64][0x8];
+};
+
+struct mlx5_ifc_set_dc_cnak_trace_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_dc_cnak_trace_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 enable[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x160];
+
+ struct mlx5_ifc_cmd_pas_bits pas;
+};
+
+struct mlx5_ifc_set_burst_size_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_set_burst_size_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x9];
+ u8 device_burst_size[0x17];
+};
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
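+
+/*
+ * The QP modify commands (RST2INIT, RTR2RTS, RTS2RTS, SQD2RTS, SQERR2RTS
+ * above) all share this shape: opcode and qpn select the transition,
+ * opt_param_mask flags which optional QPC fields are being changed, and
+ * the embedded qpc carries the new values.  A minimal sketch, assuming
+ * the MLX5_SET()/MLX5_ADDR_OF() accessors and the command opcode
+ * constants from the core driver headers:
+ *
+ *    u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {0};
+ *    void *qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
+ *
+ *    MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+ *    MLX5_SET(rst2init_qp_in, in, qpn, qpn);
+ *    MLX5_SET(qpc, qpc, pd, pdn);
+ */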
+
+struct mlx5_ifc_resume_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_resume_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_wol_rol_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 rol_mode[0x8];
+ u8 wol_mode[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_query_wol_rol_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN = 0x0,
+ MLX5_QUERY_VPORT_STATE_OUT_STATE_UP = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 admin_state[0x4];
+ u8 state[0x4];
+};
+
+enum {
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits received_errors;
+
+ struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+ struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+ struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+ u8 reserved_2[0xa00];
+};
+
+enum {
+ MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x60];
+
+ u8 clear[0x1];
+ u8 reserved_4[0x1f];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 resd_lkey[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0xc0];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
+enum {
+ MLX5_SCHEDULING_ELEMENT_IN_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_query_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 roce_address_index[0x10];
+ u8 reserved_2[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xc0];
+
+ struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_rdb_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x18];
+ u8 rdb_list_size[0x8];
+
+ struct mlx5_ifc_rdbc_bits rdb_context[0];
+};
+
+struct mlx5_ifc_query_rdb_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_2[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_3[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 rx_write_requests[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 rx_read_requests[0x20];
+
+ u8 reserved_3[0x20];
+
+ u8 rx_atomic_requests[0x20];
+
+ u8 reserved_4[0x20];
+
+ u8 rx_dct_connect[0x20];
+
+ u8 reserved_5[0x20];
+
+ u8 out_of_buffer[0x20];
+
+ u8 reserved_7[0x20];
+
+ u8 out_of_sequence[0x20];
+
+ u8 reserved_8[0x20];
+
+ u8 duplicate_request[0x20];
+
+ u8 reserved_9[0x20];
+
+ u8 rnr_nak_retry_err[0x20];
+
+ u8 reserved_10[0x20];
+
+ u8 packet_seq_err[0x20];
+
+ u8 reserved_11[0x20];
+
+ u8 implied_nak_seq_err[0x20];
+
+ u8 reserved_12[0x20];
+
+ u8 local_ack_timeout_err[0x20];
+
+ u8 reserved_13[0x20];
+
+ u8 resp_rnr_nak[0x20];
+
+ u8 reserved_14[0x20];
+
+ u8 req_rnr_retries_exceeded[0x20];
+
+ u8 reserved_15[0x460];
+};
+
+struct mlx5_ifc_query_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_3[0x1f];
+
+ u8 reserved_4[0x18];
+ u8 counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 function_id[0x10];
+
+ u8 num_pages[0x20];
+};
+
+enum {
+ MLX5_BOOT_PAGES = 0x1,
+ MLX5_INIT_PAGES = 0x2,
+ MLX5_POST_INIT_PAGES = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x5];
+ u8 allowed_list_type[0x3];
+ u8 reserved_4[0x18];
+};
+
+struct mlx5_ifc_query_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_2[0x600];
+
+ u8 bsf0_klm0_pas_mtt0_1[16][0x8];
+
+ u8 bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 mad_dumux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 reserved_2[0x13];
+ u8 vlan_valid[0x1];
+ u8 vlan[0xc];
+
+ struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+ u8 reserved_3[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 current_issi[0x10];
+
+ u8 reserved_2[0xa0];
+
+ u8 supported_issi_reserved[76][0x8];
+ u8 supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 pkey_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 gids_num[0x10];
+ u8 reserved_2[0x10];
+
+ struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x10];
+ u8 gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x80];
+
+ u8 reserved_2[0x8];
+ u8 level[0x8];
+ u8 reserved_3[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_4[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x1c0];
+
+ struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_7[0xe0];
+};
+
+enum {
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0xa0];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_2[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_3[0xa0];
+
+ u8 reserved_4[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_5[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_6[0x120];
+};
+
+struct mlx5_ifc_query_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_traffic_counter_bits flow_statistics;
+
+ u8 reserved_2[0x700];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x80];
+
+ u8 clear[0x1];
+ u8 reserved_3[0x1f];
+
+ u8 reserved_4[0x10];
+ u8 flow_counter_id[0x10];
+};
+
+struct mlx5_ifc_query_esw_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
+struct mlx5_ifc_query_esw_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_2[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_3[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_2[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_dc_cnak_trace_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 enable[0x1];
+ u8 reserved_1[0x1f];
+
+ u8 reserved_2[0x160];
+
+ struct mlx5_ifc_cmd_pas_bits pas;
+};
+
+struct mlx5_ifc_query_dc_cnak_trace_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_2[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_2[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 cur_flows[0x20];
+
+ u8 sum_flows[0x20];
+
+ u8 cnp_ignored_high[0x20];
+
+ u8 cnp_ignored_low[0x20];
+
+ u8 cnp_handled_high[0x20];
+
+ u8 cnp_handled_low[0x20];
+
+ u8 reserved_2[0x100];
+
+ u8 time_stamp_high[0x20];
+
+ u8 time_stamp_low[0x20];
+
+ u8 accumulators_period[0x20];
+
+ u8 ecn_marked_roce_packets_high[0x20];
+
+ u8 ecn_marked_roce_packets_low[0x20];
+
+ u8 cnps_sent_high[0x20];
+
+ u8 cnps_sent_low[0x20];
+
+ u8 reserved_3[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 clear[0x1];
+ u8 reserved_2[0x1f];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_query_burst_size_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x9];
+ u8 device_burst_size[0x17];
+};
+
+struct mlx5_ifc_query_burst_size_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_para_vport_element_bits {
+ u8 reserved_at_0[0xc];
+ u8 traffic_class[0x4];
+ u8 qos_para_vport_number[0x10];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 error[0x1];
+ u8 reserved_2[0x4];
+ u8 rdma[0x1];
+ u8 read_write[0x1];
+ u8 req_res[0x1];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_nop_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_NIC_VPORT = 0x0,
+ MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_MODIFY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2,
+};
+
+enum {
+ MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_DOWN = 0x0,
+ MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_UP = 0x1,
+ MLX5_MODIFY_VPORT_STATE_IN_ADMIN_STATE_FOLLOW = 0x2,
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x18];
+ u8 admin_state[0x4];
+ u8 reserved_4[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_SQ_BITMASK_PACKET_PACING_RATE_LIMIT_INDEX = 0x1 << 0,
+ MLX5_MODIFY_SQ_BITMASK_QOS_PARA_VPORT_NUMBER = 0x1 << 1
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 sq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+enum {
+ MLX5_MODIFY_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+enum {
+ MLX5_MODIFY_SCHEDULING_ELEMENT_BITMASK_BW_SHARE = 0x1,
+ MLX5_MODIFY_SCHEDULING_ELEMENT_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x20];
+
+ u8 modify_bitmask[0x20];
+
+ u8 reserved_at_c0[0x40];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 modify_bitmask[0x40];
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rq_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1e];
+ u8 vlan_strip_disable[0x1];
+ u8 reserved2[0x1];
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rq_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rq_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+ u8 reserved[0x20];
+
+ u8 reserved1[0x1f];
+ u8 lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 rmp_state[0x4];
+ u8 reserved_2[0x4];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+ u8 reserved_4[0x40];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+ u8 reserved_0[0x14];
+ u8 disable_uc_local_lb[0x1];
+ u8 disable_mc_local_lb[0x1];
+ u8 node_guid[0x1];
+ u8 port_guid[0x1];
+ u8 min_wqe_inline_mode[0x1];
+ u8 mtu[0x1];
+ u8 change_event[0x1];
+ u8 promisc[0x1];
+ u8 permanent_address[0x1];
+ u8 addresses_list[0x1];
+ u8 roce_en[0x1];
+ u8 reserved_1[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+ u8 reserved_3[0x780];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_grh_bits {
+ u8 ip_version[4];
+ u8 traffic_class[8];
+ u8 flow_label[20];
+ u8 payload_length[16];
+ u8 next_header[8];
+ u8 hop_limit[8];
+ u8 sgid[128];
+ u8 dgid[128];
+};
+
+struct mlx5_ifc_bth_bits {
+ u8 opcode[8];
+ u8 se[1];
+ u8 migreq[1];
+ u8 pad_count[2];
+ u8 tver[4];
+ u8 p_key[16];
+ u8 reserved8[8];
+ u8 dest_qp[24];
+ u8 ack_req[1];
+ u8 reserved7[7];
+ u8 psn[24];
+};
+
+struct mlx5_ifc_aeth_bits {
+ u8 syndrome[8];
+ u8 msn[24];
+};
+
+struct mlx5_ifc_dceth_bits {
+ u8 reserved0[8];
+ u8 session_id[24];
+ u8 reserved1[8];
+ u8 dci_dct[24];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xb];
+ u8 port_num[0x4];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_esw_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_esw_vport_context_fields_select_bits {
+ u8 reserved[0x1c];
+ u8 vport_cvlan_insert[0x1];
+ u8 vport_svlan_insert[0x1];
+ u8 vport_cvlan_strip[0x1];
+ u8 vport_svlan_strip[0x1];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
+
+ struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ = 0x0,
+ MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 priority[0x4];
+ u8 cong_protocol[0x4];
+
+ u8 enable[0x1];
+ u8 tag_enable[0x1];
+ u8 reserved_3[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 cong_protocol[0x4];
+
+ union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+ u8 reserved_3[0x80];
+
+ union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 output_num_entries[0x20];
+
+ u8 reserved_1[0x20];
+
+ u8 pas[0][0x40];
+};
+
+enum {
+ MLX5_PAGES_CANT_GIVE = 0x0,
+ MLX5_PAGES_GIVE = 0x1,
+ MLX5_PAGES_TAKE = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 input_num_entries[0x20];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_mad_ifc_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 remote_lid[0x10];
+ u8 reserved_2[0x8];
+ u8 port[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_INIT_HCA_IN_OP_MOD_INIT = 0x0,
+ MLX5_INIT_HCA_IN_OP_MOD_PRE_INIT = 0x1,
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_init2init_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 packet_headers_log[128][0x8];
+
+ u8 packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+
+ u8 eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 function_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+enum {
+ MLX5_DESTROY_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_qos_para_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_destroy_qos_para_vport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 qos_para_vport_number[0x10];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 psvn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 group_id[0x20];
+
+ u8 reserved_6[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x8];
+ u8 table_index[0x18];
+
+ u8 reserved_4[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x40];
+
+ u8 flow_index[0x20];
+
+ u8 reserved_7[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_counter_id_bits {
+ u8 reserved[0x10];
+ u8 counter_id[0x10];
+};
+
+struct mlx5_ifc_diagnostic_params_context_bits {
+ u8 num_of_counters[0x10];
+ u8 reserved_2[0x8];
+ u8 log_num_of_samples[0x8];
+
+ u8 single[0x1];
+ u8 repetitive[0x1];
+ u8 sync[0x1];
+ u8 clear[0x1];
+ u8 on_demand[0x1];
+ u8 enable[0x1];
+ u8 reserved_3[0x12];
+ u8 log_sample_period[0x8];
+
+ u8 reserved_4[0x80];
+
+ struct mlx5_ifc_counter_id_bits counter_id[0];
+};
+
+struct mlx5_ifc_set_diagnostic_params_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ struct mlx5_ifc_diagnostic_params_context_bits diagnostic_params_ctx;
+};
+
+struct mlx5_ifc_set_diagnostic_params_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_query_diagnostic_counters_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_of_samples[0x10];
+ u8 sample_index[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_diagnostic_counter_bits {
+ u8 counter_id[0x10];
+ u8 sample_id[0x10];
+
+ u8 time_stamp_31_0[0x20];
+
+ u8 counter_value_h[0x20];
+
+ u8 counter_value_l[0x20];
+};
+
+struct mlx5_ifc_query_diagnostic_counters_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ struct mlx5_ifc_diagnostic_counter_bits diag_counter[0];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_deactivate_tracer_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_deactivate_tracer_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 mkey[0x20];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tisn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 tirn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 srqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_srqc_bits srq_context_entry;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 sqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_scheduling_element_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ u8 scheduling_element_id[0x20];
+
+ u8 reserved_at_a0[0x160];
+};
+
+enum {
+ MLX5_CREATE_SCHEDULING_ELEMENT_IN_SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_create_scheduling_element_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 scheduling_hierarchy[0x8];
+ u8 reserved_at_48[0x18];
+
+ u8 reserved_at_60[0xa0];
+
+ struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+ u8 reserved_at_300[0x100];
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqtn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 rmpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0xc0];
+
+ struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 input_qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 opt_param_mask[0x20];
+
+ u8 reserved_4[0x20];
+
+ struct mlx5_ifc_qpc_bits qpc;
+
+ u8 reserved_5[0x80];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_qos_para_vport_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x10];
+ u8 qos_para_vport_number[0x10];
+
+ u8 reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_create_qos_para_vport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 reserved_2[0x8];
+ u8 psv0_index[0x18];
+
+ u8 reserved_3[0x8];
+ u8 psv1_index[0x18];
+
+ u8 reserved_4[0x8];
+ u8 psv2_index[0x18];
+
+ u8 reserved_5[0x8];
+ u8 psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 num_psv[0x4];
+ u8 reserved_2[0x4];
+ u8 pd[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 mkey_index[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 pg_access[0x1];
+ u8 reserved_3[0x1f];
+
+ struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+ u8 reserved_4[0x80];
+
+ u8 translations_octword_actual_size[0x20];
+
+ u8 reserved_5[0x560];
+
+ u8 klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x8];
+ u8 level[0x8];
+ u8 reserved_7[0x8];
+ u8 log_size[0x8];
+
+ u8 reserved_8[0x120];
+};
+
+struct mlx5_ifc_create_flow_group_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 group_id[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 other_vport[0x1];
+ u8 reserved_2[0xf];
+ u8 vport_number[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_4[0x18];
+
+ u8 reserved_5[0x8];
+ u8 table_id[0x18];
+
+ u8 reserved_6[0x20];
+
+ u8 start_flow_index[0x20];
+
+ u8 reserved_7[0x20];
+
+ u8 end_flow_index[0x20];
+
+ u8 reserved_8[0xa0];
+
+ u8 reserved_9[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+ u8 reserved_10[0xe00];
+};
+
+struct mlx5_ifc_create_eq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 eq_number[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_eqc_bits eq_context_entry;
+
+ u8 reserved_3[0x40];
+
+ u8 event_bitmask[0x40];
+
+ u8 reserved_4[0x580];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_dctc_bits dct_context_entry;
+
+ u8 reserved_3[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 cqn[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+
+ struct mlx5_ifc_cqc_bits cq_context;
+
+ u8 reserved_3[0x600];
+
+ u8 pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+enum {
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE = 0x0,
+ MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x4];
+ u8 min_delay[0xc];
+ u8 int_vector[0x10];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 qpn[0x18];
+
+ u8 reserved_3[0x20];
+
+ u8 multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 xrc_srqn[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+enum {
+ MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 srq_number[0x18];
+
+ u8 reserved_3[0x10];
+ u8 lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x8];
+ u8 dctn[0x18];
+
+ u8 reserved_3[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 xrcd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 uar[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 transport_domain[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x18];
+ u8 counter_set_id[0x8];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x8];
+ u8 pd[0x18];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x10];
+ u8 flow_counter_id[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x10];
+ u8 vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_activate_tracer_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+};
+
+struct mlx5_ifc_activate_tracer_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 mkey[0x20];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_set_rate_limit_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_rate_limit_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 rate_limit_index[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ u8 rate_limit[0x20];
+ u8 burst_upper_bound[0x20];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_1[0x40];
+
+ u8 register_data[0][0x20];
+};
+
+enum {
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE = 0x0,
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_READ = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_2[0x10];
+ u8 register_id[0x10];
+
+ u8 argument[0x20];
+
+ u8 register_data[0][0x20];
+};
+
+struct mlx5_ifc_sltp_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0x20];
+
+ u8 reserved_3[0x7];
+ u8 polarity[0x1];
+ u8 ob_tap0[0x8];
+ u8 ob_tap1[0x8];
+ u8 ob_tap2[0x8];
+
+ u8 reserved_4[0xc];
+ u8 ob_preemp_mode[0x4];
+ u8 ob_reg[0x8];
+ u8 ob_bias[0x8];
+
+ u8 reserved_5[0x20];
+};
+
+struct mlx5_ifc_slrp_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 ib_sel[0x2];
+ u8 reserved_2[0x11];
+ u8 dp_sel[0x1];
+ u8 dp90sel[0x4];
+ u8 mix90phase[0x8];
+
+ u8 ffe_tap0[0x8];
+ u8 ffe_tap1[0x8];
+ u8 ffe_tap2[0x8];
+ u8 ffe_tap3[0x8];
+
+ u8 ffe_tap4[0x8];
+ u8 ffe_tap5[0x8];
+ u8 ffe_tap6[0x8];
+ u8 ffe_tap7[0x8];
+
+ u8 ffe_tap8[0x8];
+ u8 mixerbias_tap_amp[0x8];
+ u8 reserved_3[0x7];
+ u8 ffe_tap_en[0x9];
+
+ u8 ffe_tap_offset0[0x8];
+ u8 ffe_tap_offset1[0x8];
+ u8 slicer_offset0[0x10];
+
+ u8 mixer_offset0[0x10];
+ u8 mixer_offset1[0x10];
+
+ u8 mixerbgn_inp[0x8];
+ u8 mixerbgn_inn[0x8];
+ u8 mixerbgn_refp[0x8];
+ u8 mixerbgn_refn[0x8];
+
+ u8 sel_slicer_lctrl_h[0x1];
+ u8 sel_slicer_lctrl_l[0x1];
+ u8 reserved_4[0x1];
+ u8 ref_mixer_vreg[0x5];
+ u8 slicer_gctrl[0x8];
+ u8 lctrl_input[0x8];
+ u8 mixer_offset_cm1[0x8];
+
+ u8 common_mode[0x6];
+ u8 reserved_5[0x1];
+ u8 mixer_offset_cm0[0x9];
+ u8 reserved_6[0x7];
+ u8 slicer_offset_cm[0x9];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+ u8 status[0x4];
+ u8 version[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x2];
+ u8 lane[0x4];
+ u8 reserved_1[0x8];
+
+ u8 time_to_link_up[0x10];
+ u8 reserved_2[0xc];
+ u8 grade_lane_speed[0x4];
+
+ u8 grade_version[0x8];
+ u8 grade[0x18];
+
+ u8 reserved_3[0x4];
+ u8 height_grade_type[0x4];
+ u8 height_grade[0x18];
+
+ u8 height_dz[0x10];
+ u8 height_dv[0x10];
+
+ u8 reserved_4[0x10];
+ u8 height_sigma[0x10];
+
+ u8 reserved_5[0x20];
+
+ u8 reserved_6[0x4];
+ u8 phase_grade_type[0x4];
+ u8 phase_grade[0x18];
+
+ u8 reserved_7[0x8];
+ u8 phase_eo_pos[0x8];
+ u8 reserved_8[0x8];
+ u8 phase_eo_neg[0x8];
+
+ u8 ffe_set_tested[0x10];
+ u8 test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1c];
+ u8 vl_hw_cap[0x4];
+
+ u8 reserved_3[0x1c];
+ u8 vl_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 reserved_2[0x60];
+};
+
+enum {
+ MLX5_PTYS_REG_PROTO_MASK_INFINIBAND = 0x1,
+ MLX5_PTYS_REG_PROTO_MASK_ETHERNET = 0x4,
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+ u8 reserved_0[0x1];
+ u8 an_disable_admin[0x1];
+ u8 an_disable_cap[0x1];
+ u8 reserved_1[0x4];
+ u8 force_tx_aba_param[0x1];
+ u8 local_port[0x8];
+ u8 reserved_2[0xd];
+ u8 proto_mask[0x3];
+
+ u8 an_status[0x4];
+ u8 reserved_3[0xc];
+ u8 data_rate_oper[0x10];
+
+ u8 fc_proto_capability[0x20];
+
+ u8 eth_proto_capability[0x20];
+
+ u8 ib_link_width_capability[0x10];
+ u8 ib_proto_capability[0x10];
+
+ u8 fc_proto_admin[0x20];
+
+ u8 eth_proto_admin[0x20];
+
+ u8 ib_link_width_admin[0x10];
+ u8 ib_proto_admin[0x10];
+
+ u8 fc_proto_oper[0x20];
+
+ u8 eth_proto_oper[0x20];
+
+ u8 ib_link_width_oper[0x10];
+ u8 ib_proto_oper[0x10];
+
+ u8 reserved_4[0x20];
+
+ u8 eth_proto_lp_advertise[0x20];
+
+ u8 reserved_5[0x60];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+ u8 reserved_0[0x20];
+
+ u8 algorithm_options[0x10];
+ u8 reserved_1[0x4];
+ u8 repetitions_mode[0x4];
+ u8 num_of_repetitions[0x8];
+
+ u8 grade_version[0x8];
+ u8 height_grade_type[0x4];
+ u8 phase_grade_type[0x4];
+ u8 height_grade_weight[0x8];
+ u8 phase_grade_weight[0x8];
+
+ u8 gisim_measure_bits[0x10];
+ u8 adaptive_tap_measure_bits[0x10];
+
+ u8 ber_bath_high_error_threshold[0x10];
+ u8 ber_bath_mid_error_threshold[0x10];
+
+ u8 ber_bath_low_error_threshold[0x10];
+ u8 one_ratio_high_threshold[0x10];
+
+ u8 one_ratio_high_mid_threshold[0x10];
+ u8 one_ratio_low_mid_threshold[0x10];
+
+ u8 one_ratio_low_threshold[0x10];
+ u8 ndeo_error_threshold[0x10];
+
+ u8 mixer_offset_step_size[0x10];
+ u8 reserved_2[0x8];
+ u8 mix90_phase_for_voltage_bath[0x8];
+
+ u8 mixer_offset_start[0x10];
+ u8 mixer_offset_end[0x10];
+
+ u8 reserved_3[0x15];
+ u8 ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 sub_port[0x8];
+ u8 reserved_0[0x8];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x60];
+
+ u8 reserved_3[0x1c];
+ u8 wrps_admin[0x4];
+
+ u8 reserved_4[0x1c];
+ u8 wrps_status[0x4];
+
+ u8 up_th_vld[0x1];
+ u8 down_th_vld[0x1];
+ u8 reserved_5[0x6];
+ u8 up_threshold[0x8];
+ u8 reserved_6[0x8];
+ u8 down_threshold[0x8];
+
+ u8 reserved_7[0x20];
+
+ u8 reserved_8[0x1c];
+ u8 srps_admin[0x4];
+
+ u8 reserved_9[0x60];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x8];
+ u8 lb_cap[0x8];
+ u8 reserved_3[0x8];
+ u8 lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x20];
+
+ u8 port_profile_mode[0x8];
+ u8 static_port_profile[0x8];
+ u8 active_port_profile[0x8];
+ u8 reserved_3[0x8];
+
+ u8 retransmission_active[0x8];
+ u8 fec_mode_active[0x18];
+
+ u8 reserved_4[0x10];
+ u8 v_100g_fec_override_cap[0x4];
+ u8 v_50g_fec_override_cap[0x4];
+ u8 v_25g_fec_override_cap[0x4];
+ u8 v_10g_40g_fec_override_cap[0x4];
+
+ u8 reserved_5[0x10];
+ u8 v_100g_fec_override_admin[0x4];
+ u8 v_50g_fec_override_admin[0x4];
+ u8 v_25g_fec_override_admin[0x4];
+ u8 v_10g_40g_fec_override_admin[0x4];
+};
+
+struct mlx5_ifc_ppll_reg_bits {
+ u8 num_pll_groups[0x8];
+ u8 pll_group[0x8];
+ u8 reserved_0[0x4];
+ u8 num_plls[0x4];
+ u8 reserved_1[0x8];
+
+ u8 reserved_2[0x1f];
+ u8 ae[0x1];
+
+ u8 pll_status[4][0x40];
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+ u8 reserved_0[0x3];
+ u8 single_mac[0x1];
+ u8 reserved_1[0x4];
+ u8 local_port[0x8];
+ u8 mac_47_32[0x10];
+
+ u8 mac_31_0[0x20];
+
+ u8 reserved_2[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 max_mtu[0x10];
+ u8 reserved_2[0x10];
+
+ u8 admin_mtu[0x10];
+ u8 reserved_3[0x10];
+
+ u8 oper_mtu[0x10];
+ u8 reserved_4[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x18];
+ u8 attenuation_5g[0x8];
+
+ u8 reserved_3[0x18];
+ u8 attenuation_7g[0x8];
+
+ u8 reserved_4[0x18];
+ u8 attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0xc];
+ u8 module_status[0x4];
+
+ u8 reserved_2[0x14];
+ u8 error_type[0x4];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0x40];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+ u8 module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+ u8 reserved_0[0x4];
+ u8 mlpn_status[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 e[0x1];
+ u8 reserved_2[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+ u8 rxtx[0x1];
+ u8 reserved_0[0x7];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 width[0x8];
+
+ u8 lane0_module_mapping[0x20];
+
+ u8 lane1_module_mapping[0x20];
+
+ u8 lane2_module_mapping[0x20];
+
+ u8 lane3_module_mapping[0x20];
+
+ u8 reserved_2[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_2[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_3[0x12];
+ u8 error_type[0x4];
+ u8 reserved_4[0x6];
+ u8 e[0x2];
+
+ u8 reserved_5[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+ u8 reserved_0[0x4];
+ u8 profile_id[0xc];
+ u8 reserved_1[0x4];
+ u8 proto_mask[0x4];
+ u8 reserved_2[0x8];
+
+ u8 reserved_3[0x10];
+ u8 lane_speed[0x10];
+
+ u8 reserved_4[0x17];
+ u8 lpbf[0x1];
+ u8 fec_mode_policy[0x8];
+
+ u8 retransmission_capability[0x8];
+ u8 fec_mode_capability[0x18];
+
+ u8 retransmission_support_admin[0x8];
+ u8 fec_mode_support_admin[0x18];
+
+ u8 retransmission_request_admin[0x8];
+ u8 fec_mode_request_admin[0x18];
+
+ u8 reserved_5[0x80];
+};
+
+struct mlx5_ifc_pll_status_data_bits {
+ u8 reserved_0[0x1];
+ u8 lock_cal[0x1];
+ u8 lock_status[0x2];
+ u8 reserved_1[0x2];
+ u8 algo_f_ctrl[0xa];
+ u8 analog_algo_num_var[0x6];
+ u8 f_ctrl_measure[0xa];
+
+ u8 reserved_2[0x2];
+ u8 analog_var[0x6];
+ u8 reserved_3[0x2];
+ u8 high_var[0x6];
+ u8 reserved_4[0x2];
+ u8 low_var[0x6];
+ u8 reserved_5[0x2];
+ u8 mid_val[0x6];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x8];
+ u8 ib_port[0x8];
+
+ u8 reserved_2[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0xd];
+ u8 lbf_mode[0x3];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 dic[0x1];
+ u8 reserved_2[0x19];
+ u8 ipg[0x4];
+ u8 reserved_3[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xe0];
+
+ u8 port_filter[8][0x20];
+
+ u8 port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 symbol_errors_high[0x20];
+
+ u8 symbol_errors_low[0x20];
+
+ u8 sync_headers_errors_high[0x20];
+
+ u8 sync_headers_errors_low[0x20];
+
+ u8 edpl_bip_errors_lane0_high[0x20];
+
+ u8 edpl_bip_errors_lane0_low[0x20];
+
+ u8 edpl_bip_errors_lane1_high[0x20];
+
+ u8 edpl_bip_errors_lane1_low[0x20];
+
+ u8 edpl_bip_errors_lane2_high[0x20];
+
+ u8 edpl_bip_errors_lane2_low[0x20];
+
+ u8 edpl_bip_errors_lane3_high[0x20];
+
+ u8 edpl_bip_errors_lane3_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane0_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane1_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane2_low[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_high[0x20];
+
+ u8 fc_fec_corrected_blocks_lane3_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+ u8 fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+ u8 rs_fec_corrected_blocks_high[0x20];
+
+ u8 rs_fec_corrected_blocks_low[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_high[0x20];
+
+ u8 rs_fec_uncorrectable_blocks_low[0x20];
+
+ u8 rs_fec_no_errors_blocks_high[0x20];
+
+ u8 rs_fec_no_errors_blocks_low[0x20];
+
+ u8 rs_fec_single_error_blocks_high[0x20];
+
+ u8 rs_fec_single_error_blocks_low[0x20];
+
+ u8 rs_fec_corrected_symbols_total_high[0x20];
+
+ u8 rs_fec_corrected_symbols_total_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane0_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane1_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane2_low[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_high[0x20];
+
+ u8 rs_fec_corrected_symbols_lane3_low[0x20];
+
+ u8 link_down_events[0x20];
+
+ u8 successful_recovery_events[0x20];
+
+ u8 reserved_0[0x180];
+};
+
+struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 phy_received_bits_high[0x20];
+
+ u8 phy_received_bits_low[0x20];
+
+ u8 phy_symbol_errors_high[0x20];
+
+ u8 phy_symbol_errors_low[0x20];
+
+ u8 phy_corrected_bits_high[0x20];
+
+ u8 phy_corrected_bits_low[0x20];
+
+ u8 phy_corrected_bits_lane0_high[0x20];
+
+ u8 phy_corrected_bits_lane0_low[0x20];
+
+ u8 phy_corrected_bits_lane1_high[0x20];
+
+ u8 phy_corrected_bits_lane1_low[0x20];
+
+ u8 phy_corrected_bits_lane2_high[0x20];
+
+ u8 phy_corrected_bits_lane2_low[0x20];
+
+ u8 phy_corrected_bits_lane3_high[0x20];
+
+ u8 phy_corrected_bits_lane3_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
+};
+
+struct mlx5_ifc_infiniband_port_cntrs_bits {
+ u8 symbol_error_counter[0x10];
+ u8 link_error_recovery_counter[0x8];
+ u8 link_downed_counter[0x8];
+
+ u8 port_rcv_errors[0x10];
+ u8 port_rcv_remote_physical_errors[0x10];
+
+ u8 port_rcv_switch_relay_errors[0x10];
+ u8 port_xmit_discards[0x10];
+
+ u8 port_xmit_constraint_errors[0x8];
+ u8 port_rcv_constraint_errors[0x8];
+ u8 reserved_0[0x8];
+ u8 local_link_integrity_errors[0x4];
+ u8 excessive_buffer_overrun_errors[0x4];
+
+ u8 reserved_1[0x10];
+ u8 vl_15_dropped[0x10];
+
+ u8 port_xmit_data[0x20];
+
+ u8 port_rcv_data[0x20];
+
+ u8 port_xmit_pkts[0x20];
+
+ u8 port_rcv_pkts[0x20];
+
+ u8 port_xmit_wait[0x20];
+
+ u8 reserved_2[0x680];
+};
+
+struct mlx5_ifc_phrr_reg_bits {
+ u8 clr[0x1];
+ u8 reserved_0[0x7];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 hist_group[0x8];
+ u8 reserved_2[0x10];
+ u8 hist_id[0x8];
+
+ u8 reserved_3[0x40];
+
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 bin[10][0x20];
+};
+
+struct mlx5_ifc_phbr_for_prio_reg_bits {
+ u8 reserved_0[0x18];
+ u8 prio[0x8];
+};
+
+struct mlx5_ifc_phbr_for_port_tclass_reg_bits {
+ u8 reserved_0[0x18];
+ u8 tclass[0x8];
+};
+
+struct mlx5_ifc_phbr_binding_reg_bits {
+ u8 opcode[0x4];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_1[0xe];
+
+ u8 hist_group[0x8];
+ u8 reserved_2[0x10];
+ u8 hist_id[0x8];
+
+ u8 reserved_3[0x10];
+ u8 hist_type[0x10];
+
+ u8 hist_parameters[0x20];
+
+ u8 hist_min_value[0x20];
+
+ u8 hist_max_value[0x20];
+
+ u8 sample_time[0x20];
+};
+
+enum {
+ MLX5_PFCC_REG_PPAN_DISABLED = 0x0,
+ MLX5_PFCC_REG_PPAN_ENABLED = 0x1,
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+ u8 dcbx_operation_type[0x2];
+ u8 cap_local_admin[0x1];
+ u8 cap_remote_admin[0x1];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_1[0xc];
+ u8 shl_cap[0x1];
+ u8 shl_opr[0x1];
+
+ u8 ppan[0x4];
+ u8 reserved_2[0x4];
+ u8 prio_mask_tx[0x8];
+ u8 reserved_3[0x8];
+ u8 prio_mask_rx[0x8];
+
+ u8 pptx[0x1];
+ u8 aptx[0x1];
+ u8 reserved_4[0x6];
+ u8 pfctx[0x8];
+ u8 reserved_5[0x8];
+ u8 cbftx[0x8];
+
+ u8 pprx[0x1];
+ u8 aprx[0x1];
+ u8 reserved_6[0x6];
+ u8 pfcrx[0x8];
+ u8 reserved_7[0x8];
+ u8 cbfrx[0x8];
+
+ u8 device_stall_minor_watermark[0x10];
+ u8 device_stall_critical_watermark[0x10];
+
+ u8 reserved_8[0x60];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+ u8 op[0x4];
+ u8 reserved_0[0x4];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 op_admin[0x8];
+ u8 op_capability[0x8];
+ u8 op_request[0x8];
+ u8 op_active[0x8];
+
+ u8 admin[0x40];
+
+ u8 capability[0x40];
+
+ u8 request[0x40];
+
+ u8 active[0x40];
+
+ u8 reserved_2[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0xc];
+ u8 error_count[0x4];
+ u8 reserved_3[0x10];
+
+ u8 reserved_4[0xc];
+ u8 lane[0x4];
+ u8 reserved_5[0x8];
+ u8 error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_pbmc_reg_bits {
+ u8 reserved_0[0x8];
+ u8 local_port[0x8];
+ u8 reserved_1[0x10];
+
+ u8 xoff_timer_value[0x10];
+ u8 xoff_refresh[0x10];
+
+ u8 reserved_2[0x10];
+ u8 port_buffer_size[0x10];
+
+ struct mlx5_ifc_bufferx_reg_bits buffer[10];
+
+ u8 reserved_3[0x40];
+
+ u8 port_shared_buffer[0x40];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 reserved_0[0x4];
+ u8 admin_status[0x4];
+ u8 reserved_1[0x4];
+ u8 oper_status[0x4];
+
+ u8 ase[0x1];
+ u8 ee[0x1];
+ u8 reserved_2[0x1c];
+ u8 e[0x2];
+
+ u8 reserved_3[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+ u8 reserved_0[0x8];
+ u8 opamp_group[0x8];
+ u8 reserved_1[0xc];
+ u8 opamp_group_type[0x4];
+
+ u8 start_index[0x10];
+ u8 reserved_2[0x4];
+ u8 num_of_indices[0xc];
+
+ u8 index_data[18][0x10];
+};
+
+struct mlx5_ifc_link_level_retrans_cntr_grp_date_bits {
+ u8 llr_rx_cells_high[0x20];
+
+ u8 llr_rx_cells_low[0x20];
+
+ u8 llr_rx_error_high[0x20];
+
+ u8 llr_rx_error_low[0x20];
+
+ u8 llr_rx_crc_error_high[0x20];
+
+ u8 llr_rx_crc_error_low[0x20];
+
+ u8 llr_tx_cells_high[0x20];
+
+ u8 llr_tx_cells_low[0x20];
+
+ u8 llr_tx_ret_cells_high[0x20];
+
+ u8 llr_tx_ret_cells_low[0x20];
+
+ u8 llr_tx_ret_events_high[0x20];
+
+ u8 llr_tx_ret_events_low[0x20];
+
+ u8 reserved_0[0x640];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+ u8 reserved_0[0x6];
+ u8 rx_lane[0x2];
+ u8 reserved_1[0x6];
+ u8 tx_lane[0x2];
+ u8 reserved_2[0x8];
+ u8 module[0x8];
+};
+
+struct mlx5_ifc_eth_per_traffic_class_layout_bits {
+ u8 transmit_queue_high[0x20];
+
+ u8 transmit_queue_low[0x20];
+
+ u8 reserved_0[0x780];
+};
+
+struct mlx5_ifc_eth_per_traffic_class_cong_layout_bits {
+ u8 no_buffer_discard_uc_high[0x20];
+
+ u8 no_buffer_discard_uc_low[0x20];
+
+ u8 wred_discard_high[0x20];
+
+ u8 wred_discard_low[0x20];
+
+ u8 reserved_0[0x740];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+ u8 rx_octets_high[0x20];
+
+ u8 rx_octets_low[0x20];
+
+ u8 reserved_0[0xc0];
+
+ u8 rx_frames_high[0x20];
+
+ u8 rx_frames_low[0x20];
+
+ u8 tx_octets_high[0x20];
+
+ u8 tx_octets_low[0x20];
+
+ u8 reserved_1[0xc0];
+
+ u8 tx_frames_high[0x20];
+
+ u8 tx_frames_low[0x20];
+
+ u8 rx_pause_high[0x20];
+
+ u8 rx_pause_low[0x20];
+
+ u8 rx_pause_duration_high[0x20];
+
+ u8 rx_pause_duration_low[0x20];
+
+ u8 tx_pause_high[0x20];
+
+ u8 tx_pause_low[0x20];
+
+ u8 tx_pause_duration_high[0x20];
+
+ u8 tx_pause_duration_low[0x20];
+
+ u8 rx_pause_transition_high[0x20];
+
+ u8 rx_pause_transition_low[0x20];
+
+ u8 rx_discards_high[0x20];
+
+ u8 rx_discards_low[0x20];
+
+ u8 device_stall_minor_watermark_cnt_high[0x20];
+
+ u8 device_stall_minor_watermark_cnt_low[0x20];
+
+ u8 device_stall_critical_watermark_cnt_high[0x20];
+
+ u8 device_stall_critical_watermark_cnt_low[0x20];
+
+ u8 reserved_2[0x340];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+ u8 port_transmit_wait_high[0x20];
+
+ u8 port_transmit_wait_low[0x20];
+
+ u8 ecn_marked_high[0x20];
+
+ u8 ecn_marked_low[0x20];
+
+ u8 no_buffer_discard_mc_high[0x20];
+
+ u8 no_buffer_discard_mc_low[0x20];
+
+ u8 reserved_0[0x700];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+ u8 a_frames_transmitted_ok_high[0x20];
+
+ u8 a_frames_transmitted_ok_low[0x20];
+
+ u8 a_frames_received_ok_high[0x20];
+
+ u8 a_frames_received_ok_low[0x20];
+
+ u8 a_frame_check_sequence_errors_high[0x20];
+
+ u8 a_frame_check_sequence_errors_low[0x20];
+
+ u8 a_alignment_errors_high[0x20];
+
+ u8 a_alignment_errors_low[0x20];
+
+ u8 a_octets_transmitted_ok_high[0x20];
+
+ u8 a_octets_transmitted_ok_low[0x20];
+
+ u8 a_octets_received_ok_high[0x20];
+
+ u8 a_octets_received_ok_low[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_high[0x20];
+
+ u8 a_multicast_frames_xmitted_ok_low[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_high[0x20];
+
+ u8 a_broadcast_frames_xmitted_ok_low[0x20];
+
+ u8 a_multicast_frames_received_ok_high[0x20];
+
+ u8 a_multicast_frames_received_ok_low[0x20];
+
+ u8 a_broadcast_frames_recieved_ok_high[0x20];
+
+ u8 a_broadcast_frames_recieved_ok_low[0x20];
+
+ u8 a_in_range_length_errors_high[0x20];
+
+ u8 a_in_range_length_errors_low[0x20];
+
+ u8 a_out_of_range_length_field_high[0x20];
+
+ u8 a_out_of_range_length_field_low[0x20];
+
+ u8 a_frame_too_long_errors_high[0x20];
+
+ u8 a_frame_too_long_errors_low[0x20];
+
+ u8 a_symbol_error_during_carrier_high[0x20];
+
+ u8 a_symbol_error_during_carrier_low[0x20];
+
+ u8 a_mac_control_frames_transmitted_high[0x20];
+
+ u8 a_mac_control_frames_transmitted_low[0x20];
+
+ u8 a_mac_control_frames_received_high[0x20];
+
+ u8 a_mac_control_frames_received_low[0x20];
+
+ u8 a_unsupported_opcodes_received_high[0x20];
+
+ u8 a_unsupported_opcodes_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_received_low[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+ u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+ u8 reserved_0[0x300];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+ u8 dot3stats_alignment_errors_high[0x20];
+
+ u8 dot3stats_alignment_errors_low[0x20];
+
+ u8 dot3stats_fcs_errors_high[0x20];
+
+ u8 dot3stats_fcs_errors_low[0x20];
+
+ u8 dot3stats_single_collision_frames_high[0x20];
+
+ u8 dot3stats_single_collision_frames_low[0x20];
+
+ u8 dot3stats_multiple_collision_frames_high[0x20];
+
+ u8 dot3stats_multiple_collision_frames_low[0x20];
+
+ u8 dot3stats_sqe_test_errors_high[0x20];
+
+ u8 dot3stats_sqe_test_errors_low[0x20];
+
+ u8 dot3stats_deferred_transmissions_high[0x20];
+
+ u8 dot3stats_deferred_transmissions_low[0x20];
+
+ u8 dot3stats_late_collisions_high[0x20];
+
+ u8 dot3stats_late_collisions_low[0x20];
+
+ u8 dot3stats_excessive_collisions_high[0x20];
+
+ u8 dot3stats_excessive_collisions_low[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_transmit_errors_low[0x20];
+
+ u8 dot3stats_carrier_sense_errors_high[0x20];
+
+ u8 dot3stats_carrier_sense_errors_low[0x20];
+
+ u8 dot3stats_frame_too_longs_high[0x20];
+
+ u8 dot3stats_frame_too_longs_low[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_high[0x20];
+
+ u8 dot3stats_internal_mac_receive_errors_low[0x20];
+
+ u8 dot3stats_symbol_errors_high[0x20];
+
+ u8 dot3stats_symbol_errors_low[0x20];
+
+ u8 dot3control_in_unknown_opcodes_high[0x20];
+
+ u8 dot3control_in_unknown_opcodes_low[0x20];
+
+ u8 dot3in_pause_frames_high[0x20];
+
+ u8 dot3in_pause_frames_low[0x20];
+
+ u8 dot3out_pause_frames_high[0x20];
+
+ u8 dot3out_pause_frames_low[0x20];
+
+ u8 reserved_0[0x3c0];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+ u8 if_in_octets_high[0x20];
+
+ u8 if_in_octets_low[0x20];
+
+ u8 if_in_ucast_pkts_high[0x20];
+
+ u8 if_in_ucast_pkts_low[0x20];
+
+ u8 if_in_discards_high[0x20];
+
+ u8 if_in_discards_low[0x20];
+
+ u8 if_in_errors_high[0x20];
+
+ u8 if_in_errors_low[0x20];
+
+ u8 if_in_unknown_protos_high[0x20];
+
+ u8 if_in_unknown_protos_low[0x20];
+
+ u8 if_out_octets_high[0x20];
+
+ u8 if_out_octets_low[0x20];
+
+ u8 if_out_ucast_pkts_high[0x20];
+
+ u8 if_out_ucast_pkts_low[0x20];
+
+ u8 if_out_discards_high[0x20];
+
+ u8 if_out_discards_low[0x20];
+
+ u8 if_out_errors_high[0x20];
+
+ u8 if_out_errors_low[0x20];
+
+ u8 if_in_multicast_pkts_high[0x20];
+
+ u8 if_in_multicast_pkts_low[0x20];
+
+ u8 if_in_broadcast_pkts_high[0x20];
+
+ u8 if_in_broadcast_pkts_low[0x20];
+
+ u8 if_out_multicast_pkts_high[0x20];
+
+ u8 if_out_multicast_pkts_low[0x20];
+
+ u8 if_out_broadcast_pkts_high[0x20];
+
+ u8 if_out_broadcast_pkts_low[0x20];
+
+ u8 reserved_0[0x480];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+ u8 ether_stats_drop_events_high[0x20];
+
+ u8 ether_stats_drop_events_low[0x20];
+
+ u8 ether_stats_octets_high[0x20];
+
+ u8 ether_stats_octets_low[0x20];
+
+ u8 ether_stats_pkts_high[0x20];
+
+ u8 ether_stats_pkts_low[0x20];
+
+ u8 ether_stats_broadcast_pkts_high[0x20];
+
+ u8 ether_stats_broadcast_pkts_low[0x20];
+
+ u8 ether_stats_multicast_pkts_high[0x20];
+
+ u8 ether_stats_multicast_pkts_low[0x20];
+
+ u8 ether_stats_crc_align_errors_high[0x20];
+
+ u8 ether_stats_crc_align_errors_low[0x20];
+
+ u8 ether_stats_undersize_pkts_high[0x20];
+
+ u8 ether_stats_undersize_pkts_low[0x20];
+
+ u8 ether_stats_oversize_pkts_high[0x20];
+
+ u8 ether_stats_oversize_pkts_low[0x20];
+
+ u8 ether_stats_fragments_high[0x20];
+
+ u8 ether_stats_fragments_low[0x20];
+
+ u8 ether_stats_jabbers_high[0x20];
+
+ u8 ether_stats_jabbers_low[0x20];
+
+ u8 ether_stats_collisions_high[0x20];
+
+ u8 ether_stats_collisions_low[0x20];
+
+ u8 ether_stats_pkts64octets_high[0x20];
+
+ u8 ether_stats_pkts64octets_low[0x20];
+
+ u8 ether_stats_pkts65to127octets_high[0x20];
+
+ u8 ether_stats_pkts65to127octets_low[0x20];
+
+ u8 ether_stats_pkts128to255octets_high[0x20];
+
+ u8 ether_stats_pkts128to255octets_low[0x20];
+
+ u8 ether_stats_pkts256to511octets_high[0x20];
+
+ u8 ether_stats_pkts256to511octets_low[0x20];
+
+ u8 ether_stats_pkts512to1023octets_high[0x20];
+
+ u8 ether_stats_pkts512to1023octets_low[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_high[0x20];
+
+ u8 ether_stats_pkts1024to1518octets_low[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_high[0x20];
+
+ u8 ether_stats_pkts1519to2047octets_low[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_high[0x20];
+
+ u8 ether_stats_pkts2048to4095octets_low[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_high[0x20];
+
+ u8 ether_stats_pkts4096to8191octets_low[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_high[0x20];
+
+ u8 ether_stats_pkts8192to10239octets_low[0x20];
+
+ u8 reserved_0[0x280];
+};
+
+struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits {
+ u8 symbol_error_counter[0x10];
+ u8 link_error_recovery_counter[0x8];
+ u8 link_downed_counter[0x8];
+
+ u8 port_rcv_errors[0x10];
+ u8 port_rcv_remote_physical_errors[0x10];
+
+ u8 port_rcv_switch_relay_errors[0x10];
+ u8 port_xmit_discards[0x10];
+
+ u8 port_xmit_constraint_errors[0x8];
+ u8 port_rcv_constraint_errors[0x8];
+ u8 reserved_0[0x8];
+ u8 local_link_integrity_errors[0x4];
+ u8 excessive_buffer_overrun_errors[0x4];
+
+ u8 reserved_1[0x10];
+ u8 vl_15_dropped[0x10];
+
+ u8 port_xmit_data[0x20];
+
+ u8 port_rcv_data[0x20];
+
+ u8 port_xmit_pkts[0x20];
+
+ u8 port_rcv_pkts[0x20];
+
+ u8 port_xmit_wait[0x20];
+
+ u8 reserved_2[0x680];
+};
+
+struct mlx5_ifc_trc_tlb_reg_bits {
+ u8 reserved_0[0x80];
+
+ u8 tlb_addr[0][0x40];
+};
+
+struct mlx5_ifc_trc_read_fifo_reg_bits {
+ u8 reserved_0[0x10];
+ u8 requested_event_num[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 reserved_2[0x10];
+ u8 acual_event_num[0x10];
+
+ u8 reserved_3[0x20];
+
+ u8 event[0][0x40];
+};
+
+struct mlx5_ifc_trc_lock_reg_bits {
+ u8 reserved_0[0x1f];
+ u8 lock[0x1];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_trc_filter_reg_bits {
+ u8 status[0x1];
+ u8 reserved_0[0xf];
+ u8 filter_index[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 filter_val[0x20];
+
+ u8 reserved_2[0x1a0];
+};
+
+struct mlx5_ifc_trc_event_reg_bits {
+ u8 status[0x1];
+ u8 reserved_0[0xf];
+ u8 event_index[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 event_id[0x20];
+
+ u8 event_selector_val[0x10];
+ u8 event_selector_size[0x10];
+
+ u8 reserved_2[0x180];
+};
+
+struct mlx5_ifc_trc_conf_reg_bits {
+ u8 limit_en[0x1];
+ u8 reserved_0[0x3];
+ u8 dump_mode[0x4];
+ u8 reserved_1[0x15];
+ u8 state[0x3];
+
+ u8 reserved_2[0x20];
+
+ u8 limit_event_index[0x20];
+
+ u8 mkey[0x20];
+
+ u8 fifo_ready_ev_num[0x20];
+
+ u8 reserved_3[0x160];
+};
+
+struct mlx5_ifc_trc_cap_reg_bits {
+ u8 reserved_0[0x18];
+ u8 dump_mode[0x8];
+
+ u8 reserved_1[0x20];
+
+ u8 num_of_events[0x10];
+ u8 num_of_filters[0x10];
+
+ u8 fifo_size[0x20];
+
+ u8 tlb_size[0x10];
+ u8 event_size[0x10];
+
+ u8 reserved_2[0x160];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+ u8 node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+ u8 reserved_0[0x18];
+ u8 power_settings_level[0x8];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_host_endianess_bits {
+ u8 he[0x1];
+ u8 reserved_0[0x1f];
+
+ u8 reserved_1[0x60];
+};
+
+struct mlx5_ifc_register_diag_buffer_ctrl_bits {
+ u8 physical_address[0x40];
+};
+
+struct mlx5_ifc_qtct_reg_bits {
+ u8 operation_type[0x2];
+ u8 cap_local_admin[0x1];
+ u8 cap_remote_admin[0x1];
+ u8 reserved_0[0x4];
+ u8 port_number[0x8];
+ u8 reserved_1[0xd];
+ u8 prio[0x3];
+
+ u8 reserved_2[0x1d];
+ u8 tclass[0x3];
+};
+
+struct mlx5_ifc_qpdp_reg_bits {
+ u8 reserved_0[0x8];
+ u8 port_number[0x8];
+ u8 reserved_1[0x10];
+
+ u8 reserved_2[0x1d];
+ u8 pprio[0x3];
+};
+
+struct mlx5_ifc_port_info_ro_fields_param_bits {
+ u8 reserved_0[0x8];
+ u8 port[0x8];
+ u8 max_gid[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 port_guid[0x40];
+};
+
+struct mlx5_ifc_nvqc_reg_bits {
+ u8 type[0x20];
+
+ u8 reserved_0[0x18];
+ u8 version[0x4];
+ u8 reserved_1[0x2];
+ u8 support_wr[0x1];
+ u8 support_rd[0x1];
+};
+
+struct mlx5_ifc_nvia_reg_bits {
+ u8 reserved_0[0x1d];
+ u8 target[0x3];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_nvdi_reg_bits {
+ struct mlx5_ifc_config_item_bits configuration_item_header;
+};
+
+struct mlx5_ifc_nvda_reg_bits {
+ struct mlx5_ifc_config_item_bits configuration_item_header;
+
+ u8 configuration_item_data[0x20];
+};
+
+struct mlx5_ifc_node_info_ro_fields_param_bits {
+ u8 system_image_guid[0x40];
+
+ u8 reserved_0[0x40];
+
+ u8 node_guid[0x40];
+
+ u8 reserved_1[0x10];
+ u8 max_pkey[0x10];
+
+ u8 reserved_2[0x20];
+};
+
+struct mlx5_ifc_ets_tcn_config_reg_bits {
+ u8 g[0x1];
+ u8 b[0x1];
+ u8 r[0x1];
+ u8 reserved_0[0x9];
+ u8 group[0x4];
+ u8 reserved_1[0x9];
+ u8 bw_allocation[0x7];
+
+ u8 reserved_2[0xc];
+ u8 max_bw_units[0x4];
+ u8 reserved_3[0x8];
+ u8 max_bw_value[0x8];
+};
+
+struct mlx5_ifc_ets_global_config_reg_bits {
+ u8 reserved_0[0x2];
+ u8 r[0x1];
+ u8 reserved_1[0x1d];
+
+ u8 reserved_2[0xc];
+ u8 max_bw_units[0x4];
+ u8 reserved_3[0x8];
+ u8 max_bw_value[0x8];
+};
+
+struct mlx5_ifc_nodnic_mac_filters_bits {
+ struct mlx5_ifc_mac_address_layout_bits mac_filter0;
+
+ struct mlx5_ifc_mac_address_layout_bits mac_filter1;
+
+ struct mlx5_ifc_mac_address_layout_bits mac_filter2;
+
+ struct mlx5_ifc_mac_address_layout_bits mac_filter3;
+
+ struct mlx5_ifc_mac_address_layout_bits mac_filter4;
+
+ u8 reserved_0[0xc0];
+};
+
+struct mlx5_ifc_nodnic_gid_filters_bits {
+ u8 mgid_filter0[16][0x8];
+
+ u8 mgid_filter1[16][0x8];
+
+ u8 mgid_filter2[16][0x8];
+
+ u8 mgid_filter3[16][0x8];
+};
+
+enum {
+ MLX5_NODNIC_CONFIG_REG_NUM_PORTS_SINGLE_PORT = 0x0,
+ MLX5_NODNIC_CONFIG_REG_NUM_PORTS_DUAL_PORT = 0x1,
+};
+
+enum {
+ MLX5_NODNIC_CONFIG_REG_CQE_FORMAT_LEGACY_CQE = 0x0,
+ MLX5_NODNIC_CONFIG_REG_CQE_FORMAT_NEW_CQE = 0x1,
+};
+
+struct mlx5_ifc_nodnic_config_reg_bits {
+ u8 no_dram_nic_revision[0x8];
+ u8 hardware_format[0x8];
+ u8 support_receive_filter[0x1];
+ u8 support_promisc_filter[0x1];
+ u8 support_promisc_multicast_filter[0x1];
+ u8 reserved_0[0x2];
+ u8 log_working_buffer_size[0x3];
+ u8 log_pkey_table_size[0x4];
+ u8 reserved_1[0x3];
+ u8 num_ports[0x1];
+
+ u8 reserved_2[0x2];
+ u8 log_max_ring_size[0x6];
+ u8 reserved_3[0x18];
+
+ u8 lkey[0x20];
+
+ u8 cqe_format[0x4];
+ u8 reserved_4[0x1c];
+
+ u8 node_guid[0x40];
+
+ u8 reserved_5[0x740];
+
+ struct mlx5_ifc_nodnic_port_config_reg_bits port1_settings;
+
+ struct mlx5_ifc_nodnic_port_config_reg_bits port2_settings;
+};
+
+struct mlx5_ifc_vlan_layout_bits {
+ u8 reserved_0[0x14];
+ u8 vlan[0xc];
+
+ u8 reserved_1[0x20];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+ u8 reserved_0[0x20];
+
+ u8 mkey[0x20];
+
+ u8 addressh_63_32[0x20];
+
+ u8 addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+ u8 dc_key[0x40];
+
+ u8 ext[0x1];
+ u8 reserved_0[0x7];
+ u8 destination_qp_dct[0x18];
+
+ u8 static_rate[0x4];
+ u8 sl_eth_prio[0x4];
+ u8 fl[0x1];
+ u8 mlid[0x7];
+ u8 rlid_udp_sport[0x10];
+
+ u8 reserved_1[0x20];
+
+ u8 rmac_47_16[0x20];
+
+ u8 rmac_15_0[0x10];
+ u8 tclass[0x8];
+ u8 hop_limit[0x8];
+
+ u8 reserved_2[0x1];
+ u8 grh[0x1];
+ u8 reserved_3[0x2];
+ u8 src_addr_index[0x8];
+ u8 flow_label[0x14];
+
+ u8 rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_port_module_event_bits {
+ u8 reserved_0[0x8];
+ u8 module[0x8];
+ u8 reserved_1[0xc];
+ u8 module_status[0x4];
+
+ u8 reserved_2[0x14];
+ u8 error_type[0x4];
+ u8 reserved_3[0x8];
+
+ u8 reserved_4[0xa0];
+};
+
+struct mlx5_ifc_icmd_control_bits {
+ u8 opcode[0x10];
+ u8 status[0x8];
+ u8 reserved_0[0x7];
+ u8 busy[0x1];
+};
+
+struct mlx5_ifc_eqe_bits {
+ u8 reserved_0[0x8];
+ u8 event_type[0x8];
+ u8 reserved_1[0x8];
+ u8 event_sub_type[0x8];
+
+ u8 reserved_2[0xe0];
+
+ union mlx5_ifc_event_auto_bits event_data;
+
+ u8 reserved_3[0x10];
+ u8 signature[0x8];
+ u8 reserved_4[0x7];
+ u8 owner[0x1];
+};
+
+enum {
+ MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT = 0x7,
+};
+
+struct mlx5_ifc_cmd_queue_entry_bits {
+ u8 type[0x8];
+ u8 reserved_0[0x18];
+
+ u8 input_length[0x20];
+
+ u8 input_mailbox_pointer_63_32[0x20];
+
+ u8 input_mailbox_pointer_31_9[0x17];
+ u8 reserved_1[0x9];
+
+ u8 command_input_inline_data[16][0x8];
+
+ u8 command_output_inline_data[16][0x8];
+
+ u8 output_mailbox_pointer_63_32[0x20];
+
+ u8 output_mailbox_pointer_31_9[0x17];
+ u8 reserved_2[0x9];
+
+ u8 output_length[0x20];
+
+ u8 token[0x8];
+ u8 signature[0x8];
+ u8 reserved_3[0x8];
+ u8 status[0x7];
+ u8 ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+ u8 status[0x8];
+ u8 reserved_0[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_0[0x10];
+
+ u8 reserved_1[0x10];
+ u8 op_mod[0x10];
+
+ u8 command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+ u8 mailbox_data[512][0x8];
+
+ u8 reserved_0[0x180];
+
+ u8 next_pointer_63_32[0x20];
+
+ u8 next_pointer_31_10[0x16];
+ u8 reserved_1[0xa];
+
+ u8 block_number[0x20];
+
+ u8 reserved_2[0x8];
+ u8 token[0x8];
+ u8 ctrl_signature[0x8];
+ u8 signature[0x8];
+};
+
+struct mlx5_ifc_mtt_bits {
+ u8 ptag_63_32[0x20];
+
+ u8 ptag_31_8[0x18];
+ u8 reserved_0[0x6];
+ u8 wr_en[0x1];
+ u8 rd_en[0x1];
+};
+
+struct mlx5_ifc_vendor_specific_cap_bits {
+ u8 type[0x8];
+ u8 length[0x8];
+ u8 next_pointer[0x8];
+ u8 capability_id[0x8];
+
+ u8 status[0x3];
+ u8 reserved_0[0xd];
+ u8 space[0x10];
+
+ u8 counter[0x20];
+
+ u8 semaphore[0x20];
+
+ u8 flag[0x1];
+ u8 reserved_1[0x1];
+ u8 address[0x1e];
+
+ u8 data[0x20];
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER = 0x0,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED = 0x1,
+ MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC = 0x2,
+};
+
+enum {
+ MLX5_HEALTH_SYNDR_FW_ERR = 0x1,
+ MLX5_HEALTH_SYNDR_IRISC_ERR = 0x7,
+ MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR = 0x8,
+ MLX5_HEALTH_SYNDR_CRC_ERR = 0x9,
+ MLX5_HEALTH_SYNDR_FETCH_PCI_ERR = 0xa,
+ MLX5_HEALTH_SYNDR_HW_FTL_ERR = 0xb,
+ MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR = 0xc,
+ MLX5_HEALTH_SYNDR_EQ_ERR = 0xd,
+ MLX5_HEALTH_SYNDR_EQ_INV = 0xe,
+ MLX5_HEALTH_SYNDR_FFSER_ERR = 0xf,
+ MLX5_HEALTH_SYNDR_HIGH_TEMP = 0x10,
+};
+
+struct mlx5_ifc_initial_seg_bits {
+ u8 fw_rev_minor[0x10];
+ u8 fw_rev_major[0x10];
+
+ u8 cmd_interface_rev[0x10];
+ u8 fw_rev_subminor[0x10];
+
+ u8 reserved_0[0x40];
+
+ u8 cmdq_phy_addr_63_32[0x20];
+
+ u8 cmdq_phy_addr_31_12[0x14];
+ u8 reserved_1[0x2];
+ u8 nic_interface[0x2];
+ u8 log_cmdq_size[0x4];
+ u8 log_cmdq_stride[0x4];
+
+ u8 command_doorbell_vector[0x20];
+
+ u8 reserved_2[0xf00];
+
+ u8 initializing[0x1];
+ u8 reserved_3[0x4];
+ u8 nic_interface_supported[0x3];
+ u8 reserved_4[0x18];
+
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+
+ u8 no_dram_nic_offset[0x20];
+
+ u8 reserved_5[0x6de0];
+
+ u8 internal_timer_h[0x20];
+
+ u8 internal_timer_l[0x20];
+
+ u8 reserved_6[0x20];
+
+ u8 reserved_7[0x1f];
+ u8 clear_int[0x1];
+
+ u8 health_syndrome[0x8];
+ u8 health_counter[0x18];
+
+ u8 reserved_8[0x17fc0];
+};
+
+union mlx5_ifc_icmd_interface_document_bits {
+ struct mlx5_ifc_fw_version_bits fw_version;
+ struct mlx5_ifc_icmd_access_reg_in_bits icmd_access_reg_in;
+ struct mlx5_ifc_icmd_access_reg_out_bits icmd_access_reg_out;
+ struct mlx5_ifc_icmd_init_ocsd_in_bits icmd_init_ocsd_in;
+ struct mlx5_ifc_icmd_ocbb_init_in_bits icmd_ocbb_init_in;
+ struct mlx5_ifc_icmd_ocbb_query_etoc_stats_out_bits icmd_ocbb_query_etoc_stats_out;
+ struct mlx5_ifc_icmd_ocbb_query_header_stats_out_bits icmd_ocbb_query_header_stats_out;
+ struct mlx5_ifc_icmd_query_cap_general_bits icmd_query_cap_general;
+ struct mlx5_ifc_icmd_query_cap_in_bits icmd_query_cap_in;
+ struct mlx5_ifc_icmd_query_fw_info_out_bits icmd_query_fw_info_out;
+ struct mlx5_ifc_icmd_query_virtual_mac_out_bits icmd_query_virtual_mac_out;
+ struct mlx5_ifc_icmd_set_virtual_mac_in_bits icmd_set_virtual_mac_in;
+ struct mlx5_ifc_icmd_set_wol_rol_in_bits icmd_set_wol_rol_in;
+ struct mlx5_ifc_icmd_set_wol_rol_out_bits icmd_set_wol_rol_out;
+ u8 reserved_0[0x42c0];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_discard_cntrs_grp_bits eth_discard_cntrs_grp;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ struct mlx5_ifc_infiniband_port_cntrs_bits infiniband_port_cntrs;
+ u8 reserved_0[0x7c0];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+ u8 swid[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 reserved_0[0x8];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_1[0x1c];
+ u8 prio_tc[0x3];
+
+ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_pcie_performance_counters_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 rx_errors[0x20];
+
+ u8 tx_errors[0x20];
+
+ u8 l0_to_recovery_eieos[0x20];
+
+ u8 l0_to_recovery_ts[0x20];
+
+ u8 l0_to_recovery_framing[0x20];
+
+ u8 l0_to_recovery_retrain[0x20];
+
+ u8 crc_error_dllp[0x20];
+
+ u8 crc_error_tlp[0x20];
+
+ u8 reserved_0[0x680];
+};
+
+struct mlx5_ifc_pcie_timers_and_states_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 time_to_boot_image_start[0x20];
+
+ u8 time_to_link_image[0x20];
+
+ u8 calibration_time[0x20];
+
+ u8 time_to_first_perst[0x20];
+
+ u8 time_to_detect_state[0x20];
+
+ u8 time_to_l0[0x20];
+
+ u8 time_to_crs_en[0x20];
+
+ u8 time_to_plastic_image_start[0x20];
+
+ u8 time_to_iron_image_start[0x20];
+
+ u8 perst_handler[0x20];
+
+ u8 times_in_l1[0x20];
+
+ u8 times_in_l23[0x20];
+
+ u8 dl_down[0x20];
+
+ u8 config_cycle1usec[0x20];
+
+ u8 config_cycle2to7usec[0x20];
+
+ u8 config_cycle8to15usec[0x20];
+
+ u8 config_cycle16to63usec[0x20];
+
+ u8 config_cycle64usec[0x20];
+
+ u8 correctable_err_msg_sent[0x20];
+
+ u8 non_fatal_err_msg_sent[0x20];
+
+ u8 fatal_err_msg_sent[0x20];
+
+ u8 reserved_0[0x4e0];
+};
+
+struct mlx5_ifc_pcie_lanes_counters_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 error_counter_lane0[0x20];
+
+ u8 error_counter_lane1[0x20];
+
+ u8 error_counter_lane2[0x20];
+
+ u8 error_counter_lane3[0x20];
+
+ u8 error_counter_lane4[0x20];
+
+ u8 error_counter_lane5[0x20];
+
+ u8 error_counter_lane6[0x20];
+
+ u8 error_counter_lane7[0x20];
+
+ u8 error_counter_lane8[0x20];
+
+ u8 error_counter_lane9[0x20];
+
+ u8 error_counter_lane10[0x20];
+
+ u8 error_counter_lane11[0x20];
+
+ u8 error_counter_lane12[0x20];
+
+ u8 error_counter_lane13[0x20];
+
+ u8 error_counter_lane14[0x20];
+
+ u8 error_counter_lane15[0x20];
+
+ u8 reserved_0[0x580];
+};
+
+union mlx5_ifc_mpcnt_cntrs_grp_data_layout_bits {
+ struct mlx5_ifc_pcie_performance_counters_data_layout_bits pcie_performance_counters_data_layout;
+ struct mlx5_ifc_pcie_timers_and_states_data_layout_bits pcie_timers_and_states_data_layout;
+ struct mlx5_ifc_pcie_lanes_counters_data_layout_bits pcie_lanes_counters_data_layout;
+ u8 reserved_0[0xf8];
+};
+
+struct mlx5_ifc_mpcnt_reg_bits {
+ u8 reserved_0[0x8];
+ u8 pcie_index[0x8];
+ u8 reserved_1[0xa];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_2[0x1f];
+
+ union mlx5_ifc_mpcnt_cntrs_grp_data_layout_bits counter_set;
+};
+
+union mlx5_ifc_ports_control_registers_document_bits {
+ struct mlx5_ifc_ib_portcntrs_attribute_grp_data_bits ib_portcntrs_attribute_grp_data;
+ struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+ struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_discard_cntrs_grp_bits eth_discard_cntrs_grp;
+ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+ struct mlx5_ifc_eth_per_traffic_class_cong_layout_bits eth_per_traffic_class_cong_layout;
+ struct mlx5_ifc_eth_per_traffic_class_layout_bits eth_per_traffic_class_layout;
+ struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+ struct mlx5_ifc_link_level_retrans_cntr_grp_date_bits link_level_retrans_cntr_grp_date;
+ struct mlx5_ifc_pamp_reg_bits pamp_reg;
+ struct mlx5_ifc_paos_reg_bits paos_reg;
+ struct mlx5_ifc_pbmc_reg_bits pbmc_reg;
+ struct mlx5_ifc_pcap_reg_bits pcap_reg;
+ struct mlx5_ifc_peir_reg_bits peir_reg;
+ struct mlx5_ifc_pelc_reg_bits pelc_reg;
+ struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+ struct mlx5_ifc_phbr_binding_reg_bits phbr_binding_reg;
+ struct mlx5_ifc_phbr_for_port_tclass_reg_bits phbr_for_port_tclass_reg;
+ struct mlx5_ifc_phbr_for_prio_reg_bits phbr_for_prio_reg;
+ struct mlx5_ifc_phrr_reg_bits phrr_reg;
+ struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_pifr_reg_bits pifr_reg;
+ struct mlx5_ifc_pipg_reg_bits pipg_reg;
+ struct mlx5_ifc_plbf_reg_bits plbf_reg;
+ struct mlx5_ifc_plib_reg_bits plib_reg;
+ struct mlx5_ifc_pll_status_data_bits pll_status_data;
+ struct mlx5_ifc_plpc_reg_bits plpc_reg;
+ struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+ struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+ struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+ struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+ struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+ struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+ struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+ struct mlx5_ifc_ppad_reg_bits ppad_reg;
+ struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_ppll_reg_bits ppll_reg;
+ struct mlx5_ifc_pplm_reg_bits pplm_reg;
+ struct mlx5_ifc_pplr_reg_bits pplr_reg;
+ struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+ struct mlx5_ifc_pspa_reg_bits pspa_reg;
+ struct mlx5_ifc_ptas_reg_bits ptas_reg;
+ struct mlx5_ifc_ptys_reg_bits ptys_reg;
+ struct mlx5_ifc_pude_reg_bits pude_reg;
+ struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+ struct mlx5_ifc_slrg_reg_bits slrg_reg;
+ struct mlx5_ifc_slrp_reg_bits slrp_reg;
+ struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ u8 reserved_0[0x7880];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+ struct mlx5_ifc_health_buffer_bits health_buffer;
+ u8 reserved_0[0x200];
+};
+
+union mlx5_ifc_no_dram_nic_document_bits {
+ struct mlx5_ifc_nodnic_config_reg_bits nodnic_config_reg;
+ struct mlx5_ifc_nodnic_cq_arming_word_bits nodnic_cq_arming_word;
+ struct mlx5_ifc_nodnic_event_word_bits nodnic_event_word;
+ struct mlx5_ifc_nodnic_gid_filters_bits nodnic_gid_filters;
+ struct mlx5_ifc_nodnic_mac_filters_bits nodnic_mac_filters;
+ struct mlx5_ifc_nodnic_port_config_reg_bits nodnic_port_config_reg;
+ struct mlx5_ifc_nodnic_ring_config_reg_bits nodnic_ring_config_reg;
+ struct mlx5_ifc_nodnic_ring_doorbell_bits nodnic_ring_doorbell;
+ u8 reserved_0[0x3160];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+ struct mlx5_ifc_initial_seg_bits initial_seg;
+ struct mlx5_ifc_vendor_specific_cap_bits vendor_specific_cap;
+ u8 reserved_0[0x20120];
+};
+
+
+#endif /* MLX5_IFC_H */
Property changes on: trunk/sys/dev/mlx5/mlx5_ifc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
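The *_bits structures in mlx5_ifc.h describe firmware command and register layouts as sequences of bit-width fields; drivers do not touch the byte arrays directly but go through the MLX5_SET()/MLX5_GET()/MLX5_ST_SZ_DW() accessors defined in device.h. A minimal sketch of composing the ADD_VXLAN_UDP_DPORT command against the layout above follows; the opcode constant and the mlx5_cmd_exec() call are assumptions for illustration, and the helper function is hypothetical, not part of this commit.

/* Sketch only: build and execute an ADD_VXLAN_UDP_DPORT command. */
static int
example_add_vxlan_port(struct mlx5_core_dev *mdev, u16 udp_port)
{
	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};

	/* Field names map onto mlx5_ifc_add_vxlan_udp_dport_in_bits. */
	MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
	    MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
	MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, udp_port);

	return (mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)));
}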
Added: trunk/sys/dev/mlx5/mlx5_rdma_if.h
===================================================================
--- trunk/sys/dev/mlx5/mlx5_rdma_if.h (rev 0)
+++ trunk/sys/dev/mlx5/mlx5_rdma_if.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,32 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_rdma_if.h 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#ifndef MLX5_RDMA_IF_H
+#define MLX5_RDMA_IF_H
+
+#endif /* MLX5_RDMA_IF_H */
Property changes on: trunk/sys/dev/mlx5/mlx5_rdma_if.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/mlx5/qp.h
===================================================================
--- trunk/sys/dev/mlx5/qp.h (rev 0)
+++ trunk/sys/dev/mlx5/qp.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,737 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/qp.h 308684 2016-11-15 08:58:51Z hselasky $
+ */
+
+#ifndef MLX5_QP_H
+#define MLX5_QP_H
+
+#include <dev/mlx5/device.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_ifc.h>
+
+#define MLX5_INVALID_LKEY 0x100
+#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
+#define MLX5_DIF_SIZE 8
+#define MLX5_STRIDE_BLOCK_OP 0x400
+#define MLX5_CPY_GRD_MASK 0xc0
+#define MLX5_CPY_APP_MASK 0x30
+#define MLX5_CPY_REF_MASK 0x0f
+#define MLX5_BSF_INC_REFTAG (1 << 6)
+#define MLX5_BSF_INL_VALID (1 << 15)
+#define MLX5_BSF_REFRESH_DIF (1 << 14)
+#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
+#define MLX5_BSF_APPTAG_ESCAPE 0x1
+#define MLX5_BSF_APPREF_ESCAPE 0x2
+
+enum mlx5_qp_optpar {
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
+ MLX5_QP_OPTPAR_RRE = 1 << 1,
+ MLX5_QP_OPTPAR_RAE = 1 << 2,
+ MLX5_QP_OPTPAR_RWE = 1 << 3,
+ MLX5_QP_OPTPAR_PKEY_INDEX = 1 << 4,
+ MLX5_QP_OPTPAR_Q_KEY = 1 << 5,
+ MLX5_QP_OPTPAR_RNR_TIMEOUT = 1 << 6,
+ MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
+ MLX5_QP_OPTPAR_SRA_MAX = 1 << 8,
+ MLX5_QP_OPTPAR_RRA_MAX = 1 << 9,
+ MLX5_QP_OPTPAR_PM_STATE = 1 << 10,
+ MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12,
+ MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13,
+ MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
+ MLX5_QP_OPTPAR_PRI_PORT = 1 << 16,
+ MLX5_QP_OPTPAR_SRQN = 1 << 18,
+ MLX5_QP_OPTPAR_CQN_RCV = 1 << 19,
+ MLX5_QP_OPTPAR_DC_HS = 1 << 20,
+ MLX5_QP_OPTPAR_DC_KEY = 1 << 21,
+};
+
+enum mlx5_qp_state {
+ MLX5_QP_STATE_RST = 0,
+ MLX5_QP_STATE_INIT = 1,
+ MLX5_QP_STATE_RTR = 2,
+ MLX5_QP_STATE_RTS = 3,
+ MLX5_QP_STATE_SQER = 4,
+ MLX5_QP_STATE_SQD = 5,
+ MLX5_QP_STATE_ERR = 6,
+ MLX5_QP_STATE_SQ_DRAINING = 7,
+ MLX5_QP_STATE_SUSPENDED = 9,
+ MLX5_QP_NUM_STATE
+};
+
+enum {
+ MLX5_QP_ST_RC = 0x0,
+ MLX5_QP_ST_UC = 0x1,
+ MLX5_QP_ST_UD = 0x2,
+ MLX5_QP_ST_XRC = 0x3,
+ MLX5_QP_ST_MLX = 0x4,
+ MLX5_QP_ST_DCI = 0x5,
+ MLX5_QP_ST_DCT = 0x6,
+ MLX5_QP_ST_QP0 = 0x7,
+ MLX5_QP_ST_QP1 = 0x8,
+ MLX5_QP_ST_RAW_ETHERTYPE = 0x9,
+ MLX5_QP_ST_RAW_IPV6 = 0xa,
+ MLX5_QP_ST_SNIFFER = 0xb,
+ MLX5_QP_ST_SYNC_UMR = 0xe,
+ MLX5_QP_ST_PTP_1588 = 0xd,
+ MLX5_QP_ST_REG_UMR = 0xc,
+ MLX5_QP_ST_SW_CNAK = 0x10,
+ MLX5_QP_ST_MAX
+};
+
+enum {
+ MLX5_NON_ZERO_RQ = 0 << 24,
+ MLX5_SRQ_RQ = 1 << 24,
+ MLX5_CRQ_RQ = 2 << 24,
+ MLX5_ZERO_LEN_RQ = 3 << 24
+};
+
+enum {
+ /* params1 */
+ MLX5_QP_BIT_SRE = 1 << 15,
+ MLX5_QP_BIT_SWE = 1 << 14,
+ MLX5_QP_BIT_SAE = 1 << 13,
+ /* params2 */
+ MLX5_QP_BIT_RRE = 1 << 15,
+ MLX5_QP_BIT_RWE = 1 << 14,
+ MLX5_QP_BIT_RAE = 1 << 13,
+ MLX5_QP_BIT_RIC = 1 << 4,
+ MLX5_QP_BIT_COLL_SYNC_RQ = 1 << 2,
+ MLX5_QP_BIT_COLL_SYNC_SQ = 1 << 1,
+ MLX5_QP_BIT_COLL_MASTER = 1 << 0
+};
+
+enum {
+ MLX5_DCT_BIT_RRE = 1 << 19,
+ MLX5_DCT_BIT_RWE = 1 << 18,
+ MLX5_DCT_BIT_RAE = 1 << 17,
+};
+
+enum {
+ MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2,
+ MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2,
+ MLX5_WQE_CTRL_SOLICITED = 1 << 1,
+};
+
+enum {
+ MLX5_SEND_WQE_DS = 16,
+ MLX5_SEND_WQE_BB = 64,
+};
+
+#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+ MLX5_SEND_WQE_MAX_WQEBBS = 16,
+};
+
+enum {
+ MLX5_WQE_FMR_PERM_LOCAL_READ = 1 << 27,
+ MLX5_WQE_FMR_PERM_LOCAL_WRITE = 1 << 28,
+ MLX5_WQE_FMR_PERM_REMOTE_READ = 1 << 29,
+ MLX5_WQE_FMR_PERM_REMOTE_WRITE = 1 << 30,
+ MLX5_WQE_FMR_PERM_ATOMIC = 1 << 31
+};
+
+enum {
+ MLX5_FENCE_MODE_NONE = 0 << 5,
+ MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5,
+ MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5,
+ MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5,
+};
+
+enum {
+ MLX5_QP_DRAIN_SIGERR = 1 << 26,
+ MLX5_QP_LAT_SENSITIVE = 1 << 28,
+ MLX5_QP_BLOCK_MCAST = 1 << 30,
+ MLX5_QP_ENABLE_SIG = 1 << 31,
+};
+
+enum {
+ MLX5_RCV_DBR = 0,
+ MLX5_SND_DBR = 1,
+};
+
+enum {
+ MLX5_FLAGS_INLINE = 1<<7,
+ MLX5_FLAGS_CHECK_FREE = 1<<5,
+};
+
+struct mlx5_wqe_fmr_seg {
+ __be32 flags;
+ __be32 mem_key;
+ __be64 buf_list;
+ __be64 start_addr;
+ __be64 reg_len;
+ __be32 offset;
+ __be32 page_size;
+ u32 reserved[2];
+};
+
+struct mlx5_wqe_ctrl_seg {
+ __be32 opmod_idx_opcode;
+ __be32 qpn_ds;
+ u8 signature;
+ u8 rsvd[2];
+ u8 fm_ce_se;
+ __be32 imm;
+};
+
+enum {
+ MLX5_MLX_FLAG_MASK_VL15 = 0x40,
+ MLX5_MLX_FLAG_MASK_SLR = 0x20,
+ MLX5_MLX_FLAG_MASK_ICRC = 0x8,
+ MLX5_MLX_FLAG_MASK_FL = 4
+};
+
+struct mlx5_mlx_seg {
+ __be32 rsvd0;
+ u8 flags;
+ u8 stat_rate_sl;
+ u8 rsvd1[8];
+ __be16 dlid;
+};
+
+enum {
+ MLX5_ETH_WQE_L3_INNER_CSUM = 1 << 4,
+ MLX5_ETH_WQE_L4_INNER_CSUM = 1 << 5,
+ MLX5_ETH_WQE_L3_CSUM = 1 << 6,
+ MLX5_ETH_WQE_L4_CSUM = 1 << 7,
+};
+
+enum {
+ MLX5_ETH_WQE_SWP_OUTER_L3_TYPE = 1 << 0,
+ MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 1,
+ MLX5_ETH_WQE_SWP_INNER_L3_TYPE = 1 << 4,
+ MLX5_ETH_WQE_SWP_INNER_L4_TYPE = 1 << 5,
+};
+
+struct mlx5_wqe_eth_seg {
+ u8 swp_outer_l4_offset;
+ u8 swp_outer_l3_offset;
+ u8 swp_inner_l4_offset;
+ u8 swp_inner_l3_offset;
+ u8 cs_flags;
+ u8 swp_flags;
+ __be16 mss;
+ __be32 rsvd2;
+ __be16 inline_hdr_sz;
+ u8 inline_hdr_start[2];
+};
+
+struct mlx5_wqe_xrc_seg {
+ __be32 xrc_srqn;
+ u8 rsvd[12];
+};
+
+struct mlx5_wqe_masked_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+ __be64 swap_add_mask;
+ __be64 compare_mask;
+};
+
+struct mlx5_av {
+ union {
+ struct {
+ __be32 qkey;
+ __be32 reserved;
+ } qkey;
+ __be64 dc_key;
+ } key;
+ __be32 dqp_dct;
+ u8 stat_rate_sl;
+ u8 fl_mlid;
+ union {
+ __be16 rlid;
+ __be16 udp_sport;
+ };
+ u8 reserved0[4];
+ u8 rmac[6];
+ u8 tclass;
+ u8 hop_limit;
+ __be32 grh_gid_fl;
+ u8 rgid[16];
+};
+
+struct mlx5_wqe_datagram_seg {
+ struct mlx5_av av;
+};
+
+struct mlx5_wqe_raddr_seg {
+ __be64 raddr;
+ __be32 rkey;
+ u32 reserved;
+};
+
+struct mlx5_wqe_atomic_seg {
+ __be64 swap_add;
+ __be64 compare;
+};
+
+struct mlx5_wqe_data_seg {
+ __be32 byte_count;
+ __be32 lkey;
+ __be64 addr;
+};
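These segment structures are laid out back to back in the send queue. As a sketch (the wrapper struct name is hypothetical and not part of this header), an Ethernet SEND WQE as built by the mlx5_en transmit path is a control segment followed by an Ethernet segment and a gather list of data segments:

/* Sketch: composition of an Ethernet SEND WQE from the segments above. */
struct example_eth_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;		/* opcode, WQE index, QPN, DS count */
	struct mlx5_wqe_eth_seg  eth;		/* csum flags, MSS, inline headers */
	struct mlx5_wqe_data_seg data[0];	/* lkey/addr/byte_count gather entries */
};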
+
+struct mlx5_wqe_umr_ctrl_seg {
+ u8 flags;
+ u8 rsvd0[3];
+ __be16 klm_octowords;
+ __be16 bsf_octowords;
+ __be64 mkey_mask;
+ u8 rsvd1[32];
+};
+
+struct mlx5_seg_set_psv {
+ __be32 psv_num;
+ __be16 syndrome;
+ __be16 status;
+ __be32 transient_sig;
+ __be32 ref_tag;
+};
+
+struct mlx5_seg_get_psv {
+ u8 rsvd[19];
+ u8 num_psv;
+ __be32 l_key;
+ __be64 va;
+ __be32 psv_index[4];
+};
+
+struct mlx5_seg_check_psv {
+ u8 rsvd0[2];
+ __be16 err_coalescing_op;
+ u8 rsvd1[2];
+ __be16 xport_err_op;
+ u8 rsvd2[2];
+ __be16 xport_err_mask;
+ u8 rsvd3[7];
+ u8 num_psv;
+ __be32 l_key;
+ __be64 va;
+ __be32 psv_index[4];
+};
+
+struct mlx5_rwqe_sig {
+ u8 rsvd0[4];
+ u8 signature;
+ u8 rsvd1[11];
+};
+
+struct mlx5_wqe_signature_seg {
+ u8 rsvd0[4];
+ u8 signature;
+ u8 rsvd1[11];
+};
+
+struct mlx5_wqe_inline_seg {
+ __be32 byte_count;
+};
+
+enum mlx5_sig_type {
+ MLX5_DIF_CRC = 0x1,
+ MLX5_DIF_IPCS = 0x2,
+};
+
+struct mlx5_bsf_inl {
+ __be16 vld_refresh;
+ __be16 dif_apptag;
+ __be32 dif_reftag;
+ u8 sig_type;
+ u8 rp_inv_seed;
+ u8 rsvd[3];
+ u8 dif_inc_ref_guard_check;
+ __be16 dif_app_bitmask_check;
+};
+
+struct mlx5_bsf {
+ struct mlx5_bsf_basic {
+ u8 bsf_size_sbs;
+ u8 check_byte_mask;
+ union {
+ u8 copy_byte_mask;
+ u8 bs_selector;
+ u8 rsvd_wflags;
+ } wire;
+ union {
+ u8 bs_selector;
+ u8 rsvd_mflags;
+ } mem;
+ __be32 raw_data_size;
+ __be32 w_bfs_psv;
+ __be32 m_bfs_psv;
+ } basic;
+ struct mlx5_bsf_ext {
+ __be32 t_init_gen_pro_size;
+ __be32 rsvd_epi_size;
+ __be32 w_tfs_psv;
+ __be32 m_tfs_psv;
+ } ext;
+ struct mlx5_bsf_inl w_inl;
+ struct mlx5_bsf_inl m_inl;
+};
+
+struct mlx5_klm {
+ __be32 bcount;
+ __be32 key;
+ __be64 va;
+};
+
+struct mlx5_stride_block_entry {
+ __be16 stride;
+ __be16 bcount;
+ __be32 key;
+ __be64 va;
+};
+
+struct mlx5_stride_block_ctrl_seg {
+ __be32 bcount_per_cycle;
+ __be32 op;
+ __be32 repeat_count;
+ u16 rsvd;
+ __be16 num_entries;
+};
+
+struct mlx5_core_qp {
+ struct mlx5_core_rsc_common common; /* must be first */
+ void (*event) (struct mlx5_core_qp *, int);
+ int qpn;
+ struct mlx5_rsc_debug *dbg;
+ int pid;
+};
+
+struct mlx5_qp_path {
+ u8 fl_free_ar;
+ u8 rsvd3;
+ __be16 pkey_index;
+ u8 rsvd0;
+ u8 grh_mlid;
+ __be16 rlid;
+ u8 ackto_lt;
+ u8 mgid_index;
+ u8 static_rate;
+ u8 hop_limit;
+ __be32 tclass_flowlabel;
+ union {
+ u8 rgid[16];
+ u8 rip[16];
+ };
+ u8 f_dscp_ecn_prio;
+ u8 ecn_dscp;
+ __be16 udp_sport;
+ u8 dci_cfi_prio_sl;
+ u8 port;
+ u8 rmac[6];
+};
+
+struct mlx5_qp_context {
+ __be32 flags;
+ __be32 flags_pd;
+ u8 mtu_msgmax;
+ u8 rq_size_stride;
+ __be16 sq_crq_size;
+ __be32 qp_counter_set_usr_page;
+ __be32 wire_qpn;
+ __be32 log_pg_sz_remote_qpn;
+ struct mlx5_qp_path pri_path;
+ struct mlx5_qp_path alt_path;
+ __be32 params1;
+ u8 reserved2[4];
+ __be32 next_send_psn;
+ __be32 cqn_send;
+ u8 reserved3[8];
+ __be32 last_acked_psn;
+ __be32 ssn;
+ __be32 params2;
+ __be32 rnr_nextrecvpsn;
+ __be32 xrcd;
+ __be32 cqn_recv;
+ __be64 db_rec_addr;
+ __be32 qkey;
+ __be32 rq_type_srqn;
+ __be32 rmsn;
+ __be16 hw_sq_wqe_counter;
+ __be16 sw_sq_wqe_counter;
+ __be16 hw_rcyclic_byte_counter;
+ __be16 hw_rq_counter;
+ __be16 sw_rcyclic_byte_counter;
+ __be16 sw_rq_counter;
+ u8 rsvd0[5];
+ u8 cgs;
+ u8 cs_req;
+ u8 cs_res;
+ __be64 dc_access_key;
+ u8 rsvd1[24];
+};
+
+struct mlx5_create_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 input_qpn;
+ u8 rsvd0[4];
+ __be32 opt_param_mask;
+ u8 rsvd1[4];
+ struct mlx5_qp_context ctx;
+ u8 rsvd3[16];
+ __be64 pas[0];
+};
+
+struct mlx5_dct_context {
+ u8 state;
+ u8 rsvd0[7];
+ __be32 cqn;
+ __be32 flags;
+ u8 rsvd1;
+ u8 cs_res;
+ u8 min_rnr;
+ u8 rsvd2;
+ __be32 srqn;
+ __be32 pdn;
+ __be32 tclass_flow_label;
+ __be64 access_key;
+ u8 mtu;
+ u8 port;
+ __be16 pkey_index;
+ u8 rsvd4;
+ u8 mgid_index;
+ u8 rsvd5;
+ u8 hop_limit;
+ __be32 access_violations;
+ u8 rsvd[12];
+};
+
+struct mlx5_create_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_dct_context context;
+ u8 rsvd[48];
+};
+
+struct mlx5_create_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_drain_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_drain_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_create_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd0[4];
+};
+
+struct mlx5_destroy_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_modify_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd1[4];
+ __be32 optparam;
+ u8 rsvd0[4];
+ struct mlx5_qp_context ctx;
+ u8 rsvd2[16];
+};
+
+struct mlx5_modify_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_query_qp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd[4];
+};
+
+struct mlx5_query_qp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd1[8];
+ __be32 optparam;
+ u8 rsvd0[4];
+ struct mlx5_qp_context ctx;
+ u8 rsvd2[16];
+ __be64 pas[0];
+};
+
+struct mlx5_query_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd[4];
+};
+
+struct mlx5_query_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+ struct mlx5_dct_context ctx;
+ u8 rsvd1[48];
+};
+
+struct mlx5_arm_dct_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 dctn;
+ u8 rsvd[4];
+};
+
+struct mlx5_arm_dct_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd0[8];
+};
+
+struct mlx5_conf_sqp_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ __be32 qpn;
+ u8 rsvd[3];
+ u8 type;
+};
+
+struct mlx5_conf_sqp_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
+{
+ return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
+}
+
+static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+{
+ return radix_tree_lookup(&dev->priv.mr_table.tree, key);
+}
+
+int mlx5_core_create_qp(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp,
+ struct mlx5_create_qp_mbox_in *in,
+ int inlen);
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
+ struct mlx5_modify_qp_mbox_in *in, int sqd_event,
+ struct mlx5_core_qp *qp);
+int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *qp);
+int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+ struct mlx5_query_qp_mbox_out *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+ struct mlx5_query_dct_mbox_out *out);
+int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);
+
+int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
+int mlx5_core_create_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct,
+ struct mlx5_create_dct_mbox_in *in);
+int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+ struct mlx5_core_dct *dct);
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *sq);
+void mlx5_init_qp_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
+int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+
+static inline const char *mlx5_qp_type_str(int type)
+{
+ switch (type) {
+ case MLX5_QP_ST_RC: return "RC";
+	case MLX5_QP_ST_UC: return "UC";
+ case MLX5_QP_ST_UD: return "UD";
+ case MLX5_QP_ST_XRC: return "XRC";
+ case MLX5_QP_ST_MLX: return "MLX";
+ case MLX5_QP_ST_DCI: return "DCI";
+ case MLX5_QP_ST_QP0: return "QP0";
+ case MLX5_QP_ST_QP1: return "QP1";
+ case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
+ case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
+ case MLX5_QP_ST_SNIFFER: return "SNIFFER";
+ case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
+ case MLX5_QP_ST_PTP_1588: return "PTP_1588";
+ case MLX5_QP_ST_REG_UMR: return "REG_UMR";
+ case MLX5_QP_ST_SW_CNAK: return "DC_CNAK";
+ default: return "Invalid transport type";
+ }
+}
+
+static inline const char *mlx5_qp_state_str(int state)
+{
+ switch (state) {
+ case MLX5_QP_STATE_RST:
+ return "RST";
+ case MLX5_QP_STATE_INIT:
+ return "INIT";
+ case MLX5_QP_STATE_RTR:
+ return "RTR";
+ case MLX5_QP_STATE_RTS:
+ return "RTS";
+ case MLX5_QP_STATE_SQER:
+ return "SQER";
+ case MLX5_QP_STATE_SQD:
+ return "SQD";
+ case MLX5_QP_STATE_ERR:
+ return "ERR";
+ case MLX5_QP_STATE_SQ_DRAINING:
+ return "SQ_DRAINING";
+ case MLX5_QP_STATE_SUSPENDED:
+ return "SUSPENDED";
+ default: return "Invalid QP state";
+ }
+}
+
+#endif /* MLX5_QP_H */
Property changes on: trunk/sys/dev/mlx5/qp.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
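
As a rough orientation to the QP API declared in qp.h above, here is a minimal caller-side sketch. It is illustrative only and not part of this commit: the function name is hypothetical, MLX5_QP_ST_RC is used purely as an example type, and it assumes the caller has already built the create-QP mailbox (the mlx5_qp_context plus the trailing pas[] page list).

#include <dev/mlx5/qp.h>

/*
 * Illustrative sketch, not part of this commit: create a QP from a
 * caller-prepared mailbox, report its type and number, then destroy it.
 */
static int
example_qp_roundtrip(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
    struct mlx5_create_qp_mbox_in *in, int inlen)
{
	int err;

	err = mlx5_core_create_qp(dev, qp, in, inlen);
	if (err != 0)
		return (err);
	printf("created %s QP 0x%x\n",
	    mlx5_qp_type_str(MLX5_QP_ST_RC), qp->qpn);
	return (mlx5_core_destroy_qp(dev, qp));
}
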
Added: trunk/sys/dev/mlx5/srq.h
===================================================================
--- trunk/sys/dev/mlx5/srq.h (rev 0)
+++ trunk/sys/dev/mlx5/srq.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,37 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/srq.h 290650 2015-11-10 12:20:22Z hselasky $
+ */
+
+#ifndef MLX5_SRQ_H
+#define MLX5_SRQ_H
+
+#include <dev/mlx5/driver.h>
+
+void mlx5_init_srq_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
+
+#endif /* MLX5_SRQ_H */
Property changes on: trunk/sys/dev/mlx5/srq.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
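
srq.h only exposes table setup and teardown. A sketch of how a driver would pair them over the device lifetime follows; it is illustrative only, not part of this commit, and the helper name is hypothetical.

#include <dev/mlx5/srq.h>

/* Illustrative sketch, not part of this commit. */
static void
example_srq_table_lifecycle(struct mlx5_core_dev *dev)
{
	mlx5_init_srq_table(dev);	/* typically called once during device init */
	/* ... SRQs are then created and used via the core command interface ... */
	mlx5_cleanup_srq_table(dev);	/* and torn down once on device removal */
}
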
Added: trunk/sys/dev/mlx5/vport.h
===================================================================
--- trunk/sys/dev/mlx5/vport.h (rev 0)
+++ trunk/sys/dev/mlx5/vport.h 2018-05-27 23:36:47 UTC (rev 10097)
@@ -0,0 +1,134 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: stable/10/sys/dev/mlx5/vport.h 306244 2016-09-23 08:28:44Z hselasky $
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <dev/mlx5/driver.h>
+int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int client_id,
+ u16 *counter_set_id);
+int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev, int client_id,
+ u16 counter_set_id);
+int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
+ u16 counter_set_id,
+ int reset,
+ void *out,
+ int out_size);
+int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
+ u16 counter_set_id,
+ u32 *out_of_rx_buffer);
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport);
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+ u16 vport, u8 state);
+
+int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu);
+int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu);
+int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header);
+int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev, u8 vport,
+ int min_header);
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ u16 vport,
+ int *promisc_uc,
+ int *promisc_mc,
+ int *promisc_all);
+
+int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
+ int promisc_uc,
+ int promisc_mc,
+ int promisc_all);
+int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+ u16 vport, u8 *addr);
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+ u16 vport, u8 mac[ETH_ALEN]);
+int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
+ bool other_vport, u8 *addr);
+int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 port_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid);
+int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
+ u16 *vlan_list, int list_len);
+int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
+ u64 *addr_list, size_t addr_list_len);
+int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
+ bool promisc_mc, bool promisc_uc,
+ bool promisc_all);
+int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ u16 vport,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int *list_size);
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vport,
+ u16 vlans[],
+ int *size);
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+ u16 vlans[],
+ int list_size);
+int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable);
+int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
+ enum mlx5_list_type list_type,
+ u8 addr_list[][ETH_ALEN],
+ int list_size);
+int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
+ u8 *addr);
+int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
+int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ u64 *system_image_guid);
+int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
+ u64 *sys_image_guid);
+int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid);
+int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid);
+int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+ u16 *qkey_viol_cntr);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ u64 *system_image_guid);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
+ u8 port_num, u8 vport_num, u32 *out,
+ int outlen);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+ u8 port_num, u16 vf_num, u16 pkey_index,
+ u16 *pkey);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
+ u16 vport_num, u16 gid_index, union ib_gid *gid);
+int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
+ u8 insert_mode, u8 strip_mode,
+ u16 vlan, u8 cfi, u8 pcp);
+int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
+ u8 port_num, u16 vport_num,
+ void *out, int out_size);
+int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
+ struct mlx5_vport_counters *vc);
+#endif /* __MLX5_VPORT_H__ */
Property changes on: trunk/sys/dev/mlx5/vport.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
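
A small caller-side sketch of the vport query API above. Again illustrative only and not part of this commit: the function name is hypothetical, vport 0 is used as an example, and op_mod 0 is assumed to select the NIC vport.

#include <dev/mlx5/vport.h>

/* Illustrative sketch, not part of this commit. */
static void
example_query_vport0(struct mlx5_core_dev *mdev)
{
	u8 addr[ETH_ALEN];
	u8 state;

	if (mlx5_query_nic_vport_mac_address(mdev, 0, addr) == 0)
		printf("vport 0 MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
		    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

	state = mlx5_query_vport_state(mdev, 0, 0);	/* op_mod 0: NIC vport (assumed) */
	printf("vport 0 state %u\n", state);
}
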
Modified: trunk/sys/dev/mn/if_mn.c
===================================================================
--- trunk/sys/dev/mn/if_mn.c 2018-05-27 23:36:35 UTC (rev 10096)
+++ trunk/sys/dev/mn/if_mn.c 2018-05-27 23:36:47 UTC (rev 10097)
@@ -1,3 +1,4 @@
+/* $MidnightBSD$ */
/*-
* ----------------------------------------------------------------------------
* "THE BEER-WARE LICENSE" (Revision 42):
@@ -23,7 +24,7 @@
*/
#include <sys/cdefs.h>
-__MBSDID("$MidnightBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/mn/if_mn.c 254263 2013-08-12 23:30:01Z scottl $");
/*
* Stuff to describe the MUNIC32X and FALC54 chips.
@@ -1254,24 +1255,6 @@
sc->m32x->stat = stat;
}
-static void
-mn_timeout(void *xsc)
-{
- static int round = 0;
- struct mn_softc *sc;
-
- mn_intr(xsc);
- sc = xsc;
- timeout(mn_timeout, xsc, 10 * hz);
- round++;
- if (round == 2) {
- sc->m32_mem.ccb = 0x00008004;
- sc->m32x->cmd = 0x1;
- } else if (round > 2) {
- printf("%s: timeout\n", sc->name);
- }
-}
-
/*
* PCI initialization stuff
*/
@@ -1364,9 +1347,9 @@
return(ENXIO);
}
- u = pci_read_config(self, PCIR_COMMAND, 1);
+ u = pci_read_config(self, PCIR_COMMAND, 2);
printf("%x\n", u);
- pci_write_config(self, PCIR_COMMAND, u | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN, 1);
+ pci_write_config(self, PCIR_COMMAND, u | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN, 2);
#if 0
pci_write_config(self, PCIR_COMMAND, 0x02800046, 4);
#endif
@@ -1436,7 +1419,7 @@
DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
- {0, 0}
+ DEVMETHOD_END
};
static driver_t mn_driver = {
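
One note on the PCIR_COMMAND hunk in the if_mn.c diff above: the PCI command register is 16 bits wide, which is why the read-modify-write now uses a width of 2. The pattern, as a standalone sketch with a hypothetical function name (not part of the diff):

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/* Standalone sketch, not part of the diff: 16-bit RMW of the PCI command register. */
static void
example_enable_busmaster(device_t self)
{
	uint16_t cmd;

	cmd = pci_read_config(self, PCIR_COMMAND, 2);	/* command register is 16 bits */
	cmd |= PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN;
	pci_write_config(self, PCIR_COMMAND, cmd, 2);
}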