[Midnightbsd-cvs] src [10086] trunk/sys/dev/nvme: sync nvme
laffer1 at midnightbsd.org
Sun May 27 19:27:49 EDT 2018
Revision: 10086
http://svnweb.midnightbsd.org/src/?rev=10086
Author: laffer1
Date: 2018-05-27 19:27:48 -0400 (Sun, 27 May 2018)
Log Message:
-----------
sync nvme
Modified Paths:
--------------
trunk/sys/dev/nvme/nvme.c
trunk/sys/dev/nvme/nvme.h
trunk/sys/dev/nvme/nvme_ctrlr.c
trunk/sys/dev/nvme/nvme_ctrlr_cmd.c
trunk/sys/dev/nvme/nvme_ns.c
trunk/sys/dev/nvme/nvme_ns_cmd.c
trunk/sys/dev/nvme/nvme_private.h
trunk/sys/dev/nvme/nvme_qpair.c
trunk/sys/dev/nvme/nvme_sysctl.c
trunk/sys/dev/nvme/nvme_test.c
trunk/sys/dev/nvme/nvme_util.c
Modified: trunk/sys/dev/nvme/nvme.c
===================================================================
--- trunk/sys/dev/nvme/nvme.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme.c 265565 2014-05-07 16:47:58Z jimharris $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme.c 293671 2016-01-11 17:31:18Z jimharris $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -82,27 +82,54 @@
static struct _pcsid
{
- u_int32_t type;
- const char *desc;
+ uint32_t devid;
+ int match_subdevice;
+ uint16_t subdevice;
+ const char *desc;
} pci_ids[] = {
- { 0x01118086, "NVMe Controller" },
- { CHATHAM_PCI_ID, "Chatham Prototype NVMe Controller" },
- { IDT32_PCI_ID, "IDT NVMe Controller (32 channel)" },
- { IDT8_PCI_ID, "IDT NVMe Controller (8 channel)" },
- { 0x00000000, NULL }
+ { 0x01118086, 0, 0, "NVMe Controller" },
+ { IDT32_PCI_ID, 0, 0, "IDT NVMe Controller (32 channel)" },
+ { IDT8_PCI_ID, 0, 0, "IDT NVMe Controller (8 channel)" },
+ { 0x09538086, 1, 0x3702, "DC P3700 SSD" },
+ { 0x09538086, 1, 0x3703, "DC P3700 SSD [2.5\" SFF]" },
+ { 0x09538086, 1, 0x3704, "DC P3500 SSD [Add-in Card]" },
+ { 0x09538086, 1, 0x3705, "DC P3500 SSD [2.5\" SFF]" },
+ { 0x09538086, 1, 0x3709, "DC P3600 SSD [Add-in Card]" },
+ { 0x09538086, 1, 0x370a, "DC P3600 SSD [2.5\" SFF]" },
+ { 0x00000000, 0, 0, NULL }
};
static int
+nvme_match(uint32_t devid, uint16_t subdevice, struct _pcsid *ep)
+{
+ if (devid != ep->devid)
+ return 0;
+
+ if (!ep->match_subdevice)
+ return 1;
+
+ if (subdevice == ep->subdevice)
+ return 1;
+ else
+ return 0;
+}
+
+static int
nvme_probe (device_t device)
{
struct _pcsid *ep;
- u_int32_t type;
+ uint32_t devid;
+ uint16_t subdevice;
- type = pci_get_devid(device);
+ devid = pci_get_devid(device);
+ subdevice = pci_get_subdevice(device);
ep = pci_ids;
- while (ep->type && ep->type != type)
+ while (ep->devid) {
+ if (nvme_match(devid, subdevice, ep))
+ break;
++ep;
+ }
if (ep->desc) {
device_set_desc(device, ep->desc);
@@ -244,8 +271,6 @@
return (status);
}
- nvme_sysctl_initialize_ctrlr(ctrlr);
-
pci_enable_busmaster(dev);
ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
@@ -364,6 +389,15 @@
struct nvme_consumer *cons;
uint32_t i;
+ /*
+ * This controller failed during initialization (i.e. IDENTIFY
+ * command failed or timed out). Do not notify any nvme
+ * consumers of the failure here, since the consumer does not
+ * even know about the controller yet.
+ */
+ if (!ctrlr->is_initialized)
+ return;
+
for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
cons = &nvme_consumer[i];
if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
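A note on the nvme.c change above: probe now matches on PCI subdevice ID as well as device ID, so the Intel DC P3x00 boards that all share device ID 0x09538086 can be given distinct descriptions. Below is a minimal standalone sketch of the same table walk; the main() driver and the sample IDs fed to it are hypothetical, for illustration only.

#include <stdint.h>
#include <stdio.h>

struct pcsid {
	uint32_t devid;
	int match_subdevice;
	uint16_t subdevice;
	const char *desc;
};

static struct pcsid pci_ids[] = {
	{ 0x09538086, 1, 0x3702, "DC P3700 SSD" },
	{ 0x09538086, 1, 0x3709, "DC P3600 SSD [Add-in Card]" },
	{ 0x01118086, 0, 0, "NVMe Controller" },
	{ 0x00000000, 0, 0, NULL }
};

static int
match(uint32_t devid, uint16_t subdevice, struct pcsid *ep)
{
	if (devid != ep->devid)
		return (0);
	if (!ep->match_subdevice)	/* entry matches on device ID alone */
		return (1);
	return (subdevice == ep->subdevice);
}

int
main(void)
{
	struct pcsid *ep = pci_ids;
	uint32_t devid = 0x09538086;	/* hypothetical config-space reads */
	uint16_t subdevice = 0x3709;

	while (ep->devid) {
		if (match(devid, subdevice, ep))
			break;
		++ep;
	}
	printf("%s\n", ep->desc != NULL ? ep->desc : "no match");
	return (0);
}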
Modified: trunk/sys/dev/nvme/nvme.h
===================================================================
--- trunk/sys/dev/nvme/nvme.h 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme.h 2018-05-27 23:27:48 UTC (rev 10086)
@@ -24,7 +24,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/9/sys/dev/nvme/nvme.h 263273 2014-03-17 21:42:31Z jimharris $
+ * $FreeBSD: stable/10/sys/dev/nvme/nvme.h 291214 2015-11-23 17:27:29Z jimharris $
*/
#ifndef __NVME_H__
@@ -871,6 +871,7 @@
const char * nvme_ns_get_model_number(struct nvme_namespace *ns);
const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns);
+uint32_t nvme_ns_get_stripesize(struct nvme_namespace *ns);
int nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
nvme_cb_fn_t cb_fn);
Modified: trunk/sys/dev/nvme/nvme_ctrlr.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ctrlr.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_ctrlr.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -1,6 +1,6 @@
/* $MidnightBSD$ */
/*-
- * Copyright (C) 2012-2014 Intel Corporation
+ * Copyright (C) 2012-2016 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_ctrlr.c 265566 2014-05-07 16:48:43Z jimharris $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ctrlr.c 296191 2016-02-29 15:45:43Z jimharris $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -45,16 +45,13 @@
static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
struct nvme_async_event_request *aer);
+static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
- /* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
- if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
- ctrlr->resource_id = PCIR_BAR(2);
- else
- ctrlr->resource_id = PCIR_BAR(0);
+ ctrlr->resource_id = PCIR_BAR(0);
ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
&ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);
@@ -82,118 +79,7 @@
return (0);
}
-#ifdef CHATHAM2
-static int
-nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
-{
-
- ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
- ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
- SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
- RF_ACTIVE);
-
- if(ctrlr->chatham_resource == NULL) {
- nvme_printf(ctrlr, "unable to alloc pci resource\n");
- return (ENOMEM);
- }
-
- ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
- ctrlr->chatham_bus_handle =
- rman_get_bushandle(ctrlr->chatham_resource);
-
- return (0);
-}
-
static void
-nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
-{
- uint64_t reg1, reg2, reg3;
- uint64_t temp1, temp2;
- uint32_t temp3;
- uint32_t use_flash_timings = 0;
-
- DELAY(10000);
-
- temp3 = chatham_read_4(ctrlr, 0x8080);
-
- device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);
-
- ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
- ctrlr->chatham_size = ctrlr->chatham_lbas * 512;
-
- device_printf(ctrlr->dev, "Chatham size: %jd\n",
- (intmax_t)ctrlr->chatham_size);
-
- reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;
-
- TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
- if (use_flash_timings) {
- device_printf(ctrlr->dev, "Chatham: using flash timings\n");
- temp1 = 0x00001b58000007d0LL;
- temp2 = 0x000000cb00000131LL;
- } else {
- device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
- temp1 = temp2 = 0x0LL;
- }
-
- chatham_write_8(ctrlr, 0x8000, reg1);
- chatham_write_8(ctrlr, 0x8008, reg2);
- chatham_write_8(ctrlr, 0x8010, reg3);
-
- chatham_write_8(ctrlr, 0x8020, temp1);
- temp3 = chatham_read_4(ctrlr, 0x8020);
-
- chatham_write_8(ctrlr, 0x8028, temp2);
- temp3 = chatham_read_4(ctrlr, 0x8028);
-
- chatham_write_8(ctrlr, 0x8030, temp1);
- chatham_write_8(ctrlr, 0x8038, temp2);
- chatham_write_8(ctrlr, 0x8040, temp1);
- chatham_write_8(ctrlr, 0x8048, temp2);
- chatham_write_8(ctrlr, 0x8050, temp1);
- chatham_write_8(ctrlr, 0x8058, temp2);
-
- DELAY(10000);
-}
-
-static void
-nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
-{
- struct nvme_controller_data *cdata;
-
- cdata = &ctrlr->cdata;
-
- cdata->vid = 0x8086;
- cdata->ssvid = 0x2011;
-
- /*
- * Chatham2 puts garbage data in these fields when we
- * invoke IDENTIFY_CONTROLLER, so we need to re-zero
- * the fields before calling bcopy().
- */
- memset(cdata->sn, 0, sizeof(cdata->sn));
- memcpy(cdata->sn, "2012", strlen("2012"));
- memset(cdata->mn, 0, sizeof(cdata->mn));
- memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
- memset(cdata->fr, 0, sizeof(cdata->fr));
- memcpy(cdata->fr, "0", strlen("0"));
- cdata->rab = 8;
- cdata->aerl = 3;
- cdata->lpa.ns_smart = 1;
- cdata->sqes.min = 6;
- cdata->sqes.max = 6;
- cdata->cqes.min = 4;
- cdata->cqes.max = 4;
- cdata->nn = 1;
-
- /* Chatham2 doesn't support DSM command */
- cdata->oncs.dsm = 0;
-
- cdata->vwc.present = 1;
-}
-#endif /* CHATHAM2 */
-
-static void
nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
{
struct nvme_qpair *qpair;
@@ -256,6 +142,13 @@
*/
num_trackers = min(num_trackers, (num_entries-1));
+ /*
+ * This was calculated previously when setting up interrupts, but
+ * a controller could theoretically support fewer I/O queues than
+ * MSI-X vectors. So calculate again here just to be safe.
+ */
+ ctrlr->num_cpus_per_ioq = howmany(mp_ncpus, ctrlr->num_io_queues);
+
ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
M_NVME, M_ZERO | M_WAITOK);
@@ -276,8 +169,13 @@
num_trackers,
ctrlr);
- if (ctrlr->per_cpu_io_queues)
- bus_bind_intr(ctrlr->dev, qpair->res, i);
+ /*
+ * Do not bother binding interrupts if we only have one I/O
+ * interrupt thread for this controller.
+ */
+ if (ctrlr->num_io_queues > 1)
+ bus_bind_intr(ctrlr->dev, qpair->res,
+ i * ctrlr->num_cpus_per_ioq);
}
return (0);
@@ -323,7 +221,7 @@
}
static int
-nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
+nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr, int desired_val)
{
int ms_waited;
union cc_register cc;
@@ -332,18 +230,19 @@
cc.raw = nvme_mmio_read_4(ctrlr, cc);
csts.raw = nvme_mmio_read_4(ctrlr, csts);
- if (!cc.bits.en) {
- nvme_printf(ctrlr, "%s called with cc.en = 0\n", __func__);
+ if (cc.bits.en != desired_val) {
+ nvme_printf(ctrlr, "%s called with desired_val = %d "
+ "but cc.en = %d\n", __func__, desired_val, cc.bits.en);
return (ENXIO);
}
ms_waited = 0;
- while (!csts.bits.rdy) {
+ while (csts.bits.rdy != desired_val) {
DELAY(1000);
if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
- nvme_printf(ctrlr, "controller did not become ready "
- "within %d ms\n", ctrlr->ready_timeout_in_ms);
+ nvme_printf(ctrlr, "controller ready did not become %d "
+ "within %d ms\n", desired_val, ctrlr->ready_timeout_in_ms);
return (ENXIO);
}
csts.raw = nvme_mmio_read_4(ctrlr, csts);
@@ -362,11 +261,12 @@
csts.raw = nvme_mmio_read_4(ctrlr, csts);
if (cc.bits.en == 1 && csts.bits.rdy == 0)
- nvme_ctrlr_wait_for_ready(ctrlr);
+ nvme_ctrlr_wait_for_ready(ctrlr, 1);
cc.bits.en = 0;
nvme_mmio_write_4(ctrlr, cc, cc.raw);
DELAY(5000);
+ nvme_ctrlr_wait_for_ready(ctrlr, 0);
}
static int
@@ -383,7 +283,7 @@
if (csts.bits.rdy == 1)
return (0);
else
- return (nvme_ctrlr_wait_for_ready(ctrlr));
+ return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}
nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
@@ -411,7 +311,7 @@
nvme_mmio_write_4(ctrlr, cc, cc.raw);
DELAY(5000);
- return (nvme_ctrlr_wait_for_ready(ctrlr));
+ return (nvme_ctrlr_wait_for_ready(ctrlr, 1));
}
int
@@ -420,8 +320,15 @@
int i;
nvme_admin_qpair_disable(&ctrlr->adminq);
- for (i = 0; i < ctrlr->num_io_queues; i++)
- nvme_io_qpair_disable(&ctrlr->ioq[i]);
+ /*
+ * I/O queues are not allocated before the initial HW
+ * reset, so do not try to disable them. Use is_initialized
+ * to determine if this is the initial HW reset.
+ */
+ if (ctrlr->is_initialized) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_disable(&ctrlr->ioq[i]);
+ }
DELAY(100*1000);
@@ -462,11 +369,6 @@
return (ENXIO);
}
-#ifdef CHATHAM2
- if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
- nvme_chatham_populate_cdata(ctrlr);
-#endif
-
/*
* Use MDTS to ensure our default max_xfer_size doesn't exceed what the
* controller supports.
@@ -482,7 +384,7 @@
nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
{
struct nvme_completion_poll_status status;
- int cq_allocated, i, sq_allocated;
+ int cq_allocated, sq_allocated;
status.done = FALSE;
nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
@@ -503,27 +405,13 @@
cq_allocated = (status.cpl.cdw0 >> 16) + 1;
/*
- * Check that the controller was able to allocate the number of
- * queues we requested. If not, revert to one IO queue pair.
+ * Controller may allocate more queues than we requested,
+ * so use the minimum of the number requested and what was
+ * actually allocated.
*/
- if (sq_allocated < ctrlr->num_io_queues ||
- cq_allocated < ctrlr->num_io_queues) {
+ ctrlr->num_io_queues = min(ctrlr->num_io_queues, sq_allocated);
+ ctrlr->num_io_queues = min(ctrlr->num_io_queues, cq_allocated);
- /*
- * Destroy extra IO queue pairs that were created at
- * controller construction time but are no longer
- * needed. This will only happen when a controller
- * supports fewer queues than MSI-X vectors. This
- * is not the normal case, but does occur with the
- * Chatham prototype board.
- */
- for (i = 1; i < ctrlr->num_io_queues; i++)
- nvme_io_qpair_destroy(&ctrlr->ioq[i]);
-
- ctrlr->num_io_queues = 1;
- ctrlr->per_cpu_io_queues = 0;
- }
-
return (0);
}
@@ -780,10 +668,6 @@
/* aerl is a zero-based value, so we need to add 1 here. */
ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
- /* Chatham doesn't support AERs. */
- if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
- ctrlr->num_aers = 0;
-
for (i = 0; i < ctrlr->num_aers; i++) {
aer = &ctrlr->aer[i];
nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
@@ -810,9 +694,20 @@
nvme_ctrlr_start(void *ctrlr_arg)
{
struct nvme_controller *ctrlr = ctrlr_arg;
+ uint32_t old_num_io_queues;
int i;
- nvme_qpair_reset(&ctrlr->adminq);
+ /*
+ * Only reset adminq here when we are restarting the
+ * controller after a reset. During initialization,
+ * we have already submitted admin commands to get
+ * the number of I/O queues supported, so we cannot reset
+ * the adminq again here.
+ */
+ if (ctrlr->is_resetting) {
+ nvme_qpair_reset(&ctrlr->adminq);
+ }
+
for (i = 0; i < ctrlr->num_io_queues; i++)
nvme_qpair_reset(&ctrlr->ioq[i]);
@@ -823,9 +718,25 @@
return;
}
- if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
- nvme_ctrlr_fail(ctrlr);
- return;
+ /*
+ * The number of qpairs is determined during controller initialization,
+ * including using NVMe SET_FEATURES/NUMBER_OF_QUEUES to determine the
+ * HW limit. We call SET_FEATURES again here so that it gets called
+ * after any reset for controllers that depend on the driver to
+ * explicitly specify how many queues it will use. This value should
+ * never change between resets, so panic if somehow that does happen.
+ */
+ if (ctrlr->is_resetting) {
+ old_num_io_queues = ctrlr->num_io_queues;
+ if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (old_num_io_queues != ctrlr->num_io_queues) {
+ panic("num_io_queues changed from %u to %u",
+ old_num_io_queues, ctrlr->num_io_queues);
+ }
}
if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
@@ -850,7 +761,16 @@
{
struct nvme_controller *ctrlr = arg;
- nvme_ctrlr_start(ctrlr);
+ nvme_qpair_reset(&ctrlr->adminq);
+ nvme_admin_qpair_enable(&ctrlr->adminq);
+
+ if (nvme_ctrlr_set_num_qpairs(ctrlr) == 0 &&
+ nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
+ nvme_ctrlr_start(ctrlr);
+ else
+ nvme_ctrlr_fail(ctrlr);
+
+ nvme_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
ctrlr->is_initialized = 1;
@@ -891,7 +811,7 @@
nvme_qpair_process_completions(&ctrlr->adminq);
- if (ctrlr->ioq[0].cpl)
+ if (ctrlr->ioq && ctrlr->ioq[0].cpl)
nvme_qpair_process_completions(&ctrlr->ioq[0]);
nvme_mmio_write_4(ctrlr, intmc, 1);
@@ -901,8 +821,9 @@
nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
{
+ ctrlr->msix_enabled = 0;
ctrlr->num_io_queues = 1;
- ctrlr->per_cpu_io_queues = 0;
+ ctrlr->num_cpus_per_ioq = mp_ncpus;
ctrlr->rid = 0;
ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
&ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
@@ -1035,27 +956,6 @@
break;
case NVME_PASSTHROUGH_CMD:
pt = (struct nvme_pt_command *)arg;
-#ifdef CHATHAM2
- /*
- * Chatham IDENTIFY data is spoofed, so copy the spoofed data
- * rather than issuing the command to the Chatham controller.
- */
- if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID &&
- pt->cmd.opc == NVME_OPC_IDENTIFY) {
- if (pt->cmd.cdw10 == 1) {
- if (pt->len != sizeof(ctrlr->cdata))
- return (EINVAL);
- return (copyout(&ctrlr->cdata, pt->buf,
- pt->len));
- } else {
- if (pt->len != sizeof(ctrlr->ns[0].data) ||
- pt->cmd.nsid != 1)
- return (EINVAL);
- return (copyout(&ctrlr->ns[0].data, pt->buf,
- pt->len));
- }
- }
-#endif
return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
1 /* is_user_buffer */, 1 /* is_admin_cmd */));
default:
@@ -1071,12 +971,93 @@
.d_ioctl = nvme_ctrlr_ioctl
};
+static void
+nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr)
+{
+ device_t dev;
+ int per_cpu_io_queues;
+ int min_cpus_per_ioq;
+ int num_vectors_requested, num_vectors_allocated;
+ int num_vectors_available;
+
+ dev = ctrlr->dev;
+ min_cpus_per_ioq = 1;
+ TUNABLE_INT_FETCH("hw.nvme.min_cpus_per_ioq", &min_cpus_per_ioq);
+
+ if (min_cpus_per_ioq < 1) {
+ min_cpus_per_ioq = 1;
+ } else if (min_cpus_per_ioq > mp_ncpus) {
+ min_cpus_per_ioq = mp_ncpus;
+ }
+
+ per_cpu_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
+
+ if (per_cpu_io_queues == 0) {
+ min_cpus_per_ioq = mp_ncpus;
+ }
+
+ ctrlr->force_intx = 0;
+ TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
+
+ /*
+ * FreeBSD currently cannot allocate more than about 190 vectors at
+ * boot, meaning that systems with high core count and many devices
+ * requesting per-CPU interrupt vectors will not get their full
+ * allotment. So first, try to allocate as many as we may need to
+ * understand what is available, then immediately release them.
+ * Then figure out how many of those we will actually use, based on
+ * assigning an equal number of cores to each I/O queue.
+ */
+
+ /* One vector per core for I/O queues, plus one for the admin queue. */
+ num_vectors_available = min(pci_msix_count(dev), mp_ncpus + 1);
+ if (pci_alloc_msix(dev, &num_vectors_available) != 0) {
+ num_vectors_available = 0;
+ }
+ pci_release_msi(dev);
+
+ if (ctrlr->force_intx || num_vectors_available < 2) {
+ nvme_ctrlr_configure_intx(ctrlr);
+ return;
+ }
+
+ /*
+ * Do not use all vectors for I/O queues - one must be saved for the
+ * admin queue.
+ */
+ ctrlr->num_cpus_per_ioq = max(min_cpus_per_ioq,
+ howmany(mp_ncpus, num_vectors_available - 1));
+
+ ctrlr->num_io_queues = howmany(mp_ncpus, ctrlr->num_cpus_per_ioq);
+ num_vectors_requested = ctrlr->num_io_queues + 1;
+ num_vectors_allocated = num_vectors_requested;
+
+ /*
+ * Now just allocate the number of vectors we need. This should
+ * succeed, since we previously called pci_alloc_msix()
+ * successfully returning at least this many vectors, but just to
+ * be safe, if something goes wrong just revert to INTx.
+ */
+ if (pci_alloc_msix(dev, &num_vectors_allocated) != 0) {
+ nvme_ctrlr_configure_intx(ctrlr);
+ return;
+ }
+
+ if (num_vectors_allocated < num_vectors_requested) {
+ pci_release_msi(dev);
+ nvme_ctrlr_configure_intx(ctrlr);
+ return;
+ }
+
+ ctrlr->msix_enabled = 1;
+}
+
int
nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
{
union cap_lo_register cap_lo;
union cap_hi_register cap_hi;
- int i, num_vectors, per_cpu_io_queues, rid;
int status, timeout_period;
ctrlr->dev = dev;
@@ -1088,15 +1069,6 @@
if (status != 0)
return (status);
-#ifdef CHATHAM2
- if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
- status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
- if (status != 0)
- return (status);
- nvme_ctrlr_setup_chatham(ctrlr);
- }
-#endif
-
/*
* Software emulators may set the doorbell stride to something
* other than zero, but this driver is not set up to handle that.
@@ -1120,88 +1092,14 @@
nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
- per_cpu_io_queues = 1;
- TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
- ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;
-
- if (ctrlr->per_cpu_io_queues)
- ctrlr->num_io_queues = mp_ncpus;
- else
- ctrlr->num_io_queues = 1;
-
- ctrlr->force_intx = 0;
- TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
-
ctrlr->enable_aborts = 0;
TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
- ctrlr->msix_enabled = 1;
+ nvme_ctrlr_setup_interrupts(ctrlr);
- if (ctrlr->force_intx) {
- ctrlr->msix_enabled = 0;
- goto intx;
- }
-
- /* One vector per IO queue, plus one vector for admin queue. */
- num_vectors = ctrlr->num_io_queues + 1;
-
- if (pci_msix_count(dev) < num_vectors) {
- ctrlr->msix_enabled = 0;
- goto intx;
- }
-
- if (pci_alloc_msix(dev, &num_vectors) != 0) {
- ctrlr->msix_enabled = 0;
- goto intx;
- }
-
- /*
- * On earlier FreeBSD releases, there are reports that
- * pci_alloc_msix() can return successfully with all vectors
- * requested, but a subsequent bus_alloc_resource_any()
- * for one of those vectors fails. This issue occurs more
- * readily with multiple devices using per-CPU vectors.
- * To workaround this issue, try to allocate the resources now,
- * and fall back to INTx if we cannot allocate all of them.
- * This issue cannot be reproduced on more recent versions of
- * FreeBSD which have increased the maximum number of MSI-X
- * vectors, but adding the workaround makes it easier for
- * vendors wishing to import this driver into kernels based on
- * older versions of FreeBSD.
- */
- for (i = 0; i < num_vectors; i++) {
- rid = i + 1;
- ctrlr->msi_res[i] = bus_alloc_resource_any(ctrlr->dev,
- SYS_RES_IRQ, &rid, RF_ACTIVE);
-
- if (ctrlr->msi_res[i] == NULL) {
- ctrlr->msix_enabled = 0;
- while (i > 0) {
- i--;
- bus_release_resource(ctrlr->dev,
- SYS_RES_IRQ,
- rman_get_rid(ctrlr->msi_res[i]),
- ctrlr->msi_res[i]);
- }
- pci_release_msi(dev);
- nvme_printf(ctrlr, "could not obtain all MSI-X "
- "resources, reverting to intx\n");
- break;
- }
- }
-
-intx:
-
- if (!ctrlr->msix_enabled)
- nvme_ctrlr_configure_intx(ctrlr);
-
ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
nvme_ctrlr_construct_admin_qpair(ctrlr);
- status = nvme_ctrlr_construct_io_qpairs(ctrlr);
- if (status != 0)
- return (status);
-
ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, device_get_unit(dev),
UID_ROOT, GID_WHEEL, 0600, "nvme%d", device_get_unit(dev));
@@ -1237,14 +1135,8 @@
* during shutdown). This ensures the controller receives a
* shutdown notification in case the system is shutdown before
* reloading the driver.
- *
- * Chatham does not let you re-enable the controller after shutdown
- * notification has been received, so do not send it in this case.
- * This is OK because Chatham does not depend on the shutdown
- * notification anyways.
*/
- if (pci_get_devid(ctrlr->dev) != CHATHAM_PCI_ID)
- nvme_ctrlr_shutdown(ctrlr);
+ nvme_ctrlr_shutdown(ctrlr);
nvme_ctrlr_disable(ctrlr);
taskqueue_free(ctrlr->taskqueue);
@@ -1273,13 +1165,6 @@
ctrlr->bar4_resource_id, ctrlr->bar4_resource);
}
-#ifdef CHATHAM2
- if (ctrlr->chatham_resource != NULL) {
- bus_release_resource(dev, SYS_RES_MEMORY,
- ctrlr->chatham_resource_id, ctrlr->chatham_resource);
- }
-#endif
-
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
@@ -1325,11 +1210,7 @@
{
struct nvme_qpair *qpair;
- if (ctrlr->per_cpu_io_queues)
- qpair = &ctrlr->ioq[curcpu];
- else
- qpair = &ctrlr->ioq[0];
-
+ qpair = &ctrlr->ioq[curcpu / ctrlr->num_cpus_per_ioq];
nvme_qpair_submit_request(qpair, req);
}
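The bulk of the nvme_ctrlr.c diff replaces the old per-CPU-queue model with negotiated interrupt vectors: nvme_ctrlr_setup_interrupts() probes how many MSI-X vectors can actually be allocated, reserves one for the admin queue, and spreads the CPUs evenly over the remaining I/O queues; submission then picks ioq[curcpu / num_cpus_per_ioq]. A standalone sketch of that sizing arithmetic follows; mp_ncpus and the available vector count are made-up inputs here, not kernel state.

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define max(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	int mp_ncpus = 16;		/* assumed core count */
	int num_vectors_available = 9;	/* assumed MSI-X vectors granted */
	int min_cpus_per_ioq = 1;	/* hw.nvme.min_cpus_per_ioq default */

	/* One vector is reserved for the admin queue. */
	int num_cpus_per_ioq = max(min_cpus_per_ioq,
	    howmany(mp_ncpus, num_vectors_available - 1));
	int num_io_queues = howmany(mp_ncpus, num_cpus_per_ioq);

	/* A CPU submits I/O to ioq[curcpu / num_cpus_per_ioq]. */
	printf("%d I/O queues, %d CPUs each, %d vectors requested\n",
	    num_io_queues, num_cpus_per_ioq, num_io_queues + 1);
	return (0);
}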
Modified: trunk/sys/dev/nvme/nvme_ctrlr_cmd.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ctrlr_cmd.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_ctrlr_cmd.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_ctrlr_cmd.c 267619 2014-06-18 19:28:55Z jimharris $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ctrlr_cmd.c 267620 2014-06-18 19:32:38Z jimharris $");
#include "nvme_private.h"
Modified: trunk/sys/dev/nvme/nvme_ns.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ns.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_ns.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_ns.c 257721 2013-11-05 22:33:45Z pluknet $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ns.c 291214 2015-11-23 17:27:29Z jimharris $");
#include <sys/param.h>
#include <sys/bio.h>
@@ -211,6 +211,13 @@
return (&ns->data);
}
+uint32_t
+nvme_ns_get_stripesize(struct nvme_namespace *ns)
+{
+
+ return (ns->stripesize);
+}
+
static void
nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
{
@@ -240,7 +247,7 @@
nvme_bio_child_inbed(struct bio *parent, int bio_error)
{
struct nvme_completion parent_cpl;
- int inbed;
+ int children, inbed;
if (bio_error != 0) {
parent->bio_flags |= BIO_ERROR;
@@ -249,10 +256,13 @@
/*
* atomic_fetchadd will return the value before adding 1, so we still
- * must add 1 to get the updated inbed number.
+ * must add 1 to get the updated inbed number. Save bio_children
+ * before incrementing to guard against race conditions when
+ * two children bios complete on different queues.
*/
+ children = atomic_load_acq_int(&parent->bio_children);
inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1;
- if (inbed == parent->bio_children) {
+ if (inbed == children) {
bzero(&parent_cpl, sizeof(parent_cpl));
if (parent->bio_flags & BIO_ERROR)
parent_cpl.status.sc = NVME_SC_DATA_TRANSFER_ERROR;
@@ -466,28 +476,6 @@
return (err);
}
-#ifdef CHATHAM2
-static void
-nvme_ns_populate_chatham_data(struct nvme_namespace *ns)
-{
- struct nvme_controller *ctrlr;
- struct nvme_namespace_data *nsdata;
-
- ctrlr = ns->ctrlr;
- nsdata = &ns->data;
-
- nsdata->nsze = ctrlr->chatham_lbas;
- nsdata->ncap = ctrlr->chatham_lbas;
- nsdata->nuse = ctrlr->chatham_lbas;
-
- /* Chatham2 doesn't support thin provisioning. */
- nsdata->nsfeat.thin_prov = 0;
-
- /* Set LBA size to 512 bytes. */
- nsdata->lbaf[0].lbads = 9;
-}
-#endif /* CHATHAM2 */
-
int
nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
struct nvme_controller *ctrlr)
@@ -514,23 +502,15 @@
if (!mtx_initialized(&ns->lock))
mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);
-#ifdef CHATHAM2
- if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
- nvme_ns_populate_chatham_data(ns);
- else {
-#endif
- status.done = FALSE;
- nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
- nvme_completion_poll_cb, &status);
- while (status.done == FALSE)
- DELAY(5);
- if (nvme_completion_is_error(&status.cpl)) {
- nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
- return (ENXIO);
- }
-#ifdef CHATHAM2
+ status.done = FALSE;
+ nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
+ nvme_completion_poll_cb, &status);
+ while (status.done == FALSE)
+ DELAY(5);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
+ return (ENXIO);
}
-#endif
/*
* Note: format is a 0-based value, so > is appropriate here,
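One subtle fix in the nvme_ns.c hunk above: nvme_bio_child_inbed() now snapshots bio_children before incrementing bio_inbed, so two children completing concurrently on different queues agree on the terminal count. A rough userland illustration using C11 atomics in place of the kernel's atomic(9) routines; bio_stub is a stand-in, not the real struct bio.

#include <stdatomic.h>
#include <stdio.h>

struct bio_stub {
	atomic_int bio_children;
	atomic_int bio_inbed;
};

static void
child_inbed(struct bio_stub *parent)
{
	/* Snapshot the child count first, as the driver now does. */
	int children = atomic_load(&parent->bio_children);
	int inbed = atomic_fetch_add(&parent->bio_inbed, 1) + 1;

	if (inbed == children)
		printf("last child in: complete the parent bio\n");
}

int
main(void)
{
	struct bio_stub parent = { 2, 0 };

	child_inbed(&parent);
	child_inbed(&parent);	/* second completion finishes the parent */
	return (0);
}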
Modified: trunk/sys/dev/nvme/nvme_ns_cmd.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ns_cmd.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_ns_cmd.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_ns_cmd.c 253474 2013-07-19 21:33:24Z jimharris $");
#include "nvme_private.h"
Modified: trunk/sys/dev/nvme/nvme_private.h
===================================================================
--- trunk/sys/dev/nvme/nvme_private.h 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_private.h 2018-05-27 23:27:48 UTC (rev 10086)
@@ -24,7 +24,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: stable/9/sys/dev/nvme/nvme_private.h 265566 2014-05-07 16:48:43Z jimharris $
+ * $FreeBSD: stable/10/sys/dev/nvme/nvme_private.h 293671 2016-01-11 17:31:18Z jimharris $
*/
#ifndef __NVME_PRIVATE_H__
@@ -51,13 +51,6 @@
MALLOC_DECLARE(M_NVME);
-#define CHATHAM2
-
-#ifdef CHATHAM2
-#define CHATHAM_PCI_ID 0x20118086
-#define CHATHAM_CONTROL_BAR 0
-#endif
-
#define IDT32_PCI_ID 0x80d0111d /* 32 channel board */
#define IDT8_PCI_ID 0x80d2111d /* 8 channel board */
@@ -212,6 +205,7 @@
struct nvme_completion *cpl;
bus_dma_tag_t dma_tag;
+ bus_dma_tag_t dma_tag_payload;
bus_dmamap_t cmd_dma_map;
uint64_t cmd_bus_addr;
@@ -267,19 +261,12 @@
int bar4_resource_id;
struct resource *bar4_resource;
-#ifdef CHATHAM2
- bus_space_tag_t chatham_bus_tag;
- bus_space_handle_t chatham_bus_handle;
- int chatham_resource_id;
- struct resource *chatham_resource;
-#endif
-
uint32_t msix_enabled;
uint32_t force_intx;
uint32_t enable_aborts;
uint32_t num_io_queues;
- boolean_t per_cpu_io_queues;
+ uint32_t num_cpus_per_ioq;
/* Fields for tracking progress during controller initialization. */
struct intr_config_hook config_hook;
@@ -290,8 +277,6 @@
struct task fail_req_task;
struct taskqueue *taskqueue;
- struct resource *msi_res[MAXCPU + 1];
-
/* For shared legacy interrupt. */
int rid;
struct resource *res;
@@ -339,11 +324,6 @@
boolean_t is_failed;
STAILQ_HEAD(, nvme_request) fail_req;
-
-#ifdef CHATHAM2
- uint64_t chatham_size;
- uint64_t chatham_lbas;
-#endif
};
#define nvme_mmio_offsetof(reg) \
@@ -366,22 +346,6 @@
(val & 0xFFFFFFFF00000000UL) >> 32); \
} while (0);
-#ifdef CHATHAM2
-#define chatham_read_4(softc, reg) \
- bus_space_read_4((softc)->chatham_bus_tag, \
- (softc)->chatham_bus_handle, reg)
-
-#define chatham_write_8(sc, reg, val) \
- do { \
- bus_space_write_4((sc)->chatham_bus_tag, \
- (sc)->chatham_bus_handle, reg, val & 0xffffffff); \
- bus_space_write_4((sc)->chatham_bus_tag, \
- (sc)->chatham_bus_handle, reg+4, \
- (val & 0xFFFFFFFF00000000UL) >> 32); \
- } while (0);
-
-#endif /* CHATHAM2 */
-
#if __FreeBSD_version < 800054
#define wmb() __asm volatile("sfence" ::: "memory")
#define mb() __asm volatile("mfence" ::: "memory")
@@ -492,6 +456,8 @@
{
uint64_t *bus_addr = (uint64_t *)arg;
+ if (error != 0)
+ printf("nvme_single_map err %d\n", error);
*bus_addr = seg[0].ds_addr;
}
Modified: trunk/sys/dev/nvme/nvme_qpair.c
===================================================================
--- trunk/sys/dev/nvme/nvme_qpair.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_qpair.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_qpair.c 265566 2014-05-07 16:48:43Z jimharris $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_qpair.c 293669 2016-01-11 17:28:47Z jimharris $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -295,7 +295,7 @@
uint16_t cid)
{
- bus_dmamap_create(qpair->dma_tag, 0, &tr->payload_dma_map);
+ bus_dmamap_create(qpair->dma_tag_payload, 0, &tr->payload_dma_map);
bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);
bus_dmamap_load(qpair->dma_tag, tr->prp_dma_map, tr->prp,
@@ -338,7 +338,7 @@
nvme_qpair_submit_tracker(qpair, tr);
} else {
if (req->type != NVME_REQUEST_NULL)
- bus_dmamap_unload(qpair->dma_tag,
+ bus_dmamap_unload(qpair->dma_tag_payload,
tr->payload_dma_map);
nvme_free_request(req);
@@ -465,19 +465,11 @@
{
struct nvme_tracker *tr;
uint32_t i;
+ int err;
qpair->id = id;
qpair->vector = vector;
qpair->num_entries = num_entries;
-#ifdef CHATHAM2
- /*
- * Chatham prototype board starts having issues at higher queue
- * depths. So use a conservative estimate here of no more than 64
- * outstanding I/O per queue at any one point.
- */
- if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
- num_trackers = min(num_trackers, 64);
-#endif
qpair->num_trackers = num_trackers;
qpair->ctrlr = ctrlr;
@@ -488,8 +480,9 @@
* the queue's vector to get the corresponding rid to use.
*/
qpair->rid = vector + 1;
- qpair->res = ctrlr->msi_res[vector];
+ qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &qpair->rid, RF_ACTIVE);
bus_setup_intr(ctrlr->dev, qpair->res,
INTR_TYPE_MISC | INTR_MPSAFE, NULL,
nvme_qpair_msix_handler, qpair, &qpair->tag);
@@ -498,11 +491,20 @@
mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
/* Note: NVMe PRP format is restricted to 4-byte alignment. */
- bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
+ err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
4, PAGE_SIZE, BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
(NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
+ NULL, NULL, &qpair->dma_tag_payload);
+ if (err != 0)
+ nvme_printf(ctrlr, "payload tag create failed %d\n", err);
+
+ err = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
+ 4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ BUS_SPACE_MAXSIZE, 1, BUS_SPACE_MAXSIZE, 0,
NULL, NULL, &qpair->dma_tag);
+ if (err != 0)
+ nvme_printf(ctrlr, "tag create failed %d\n", err);
qpair->num_cmds = 0;
qpair->num_intr_handler_calls = 0;
@@ -514,9 +516,14 @@
sizeof(struct nvme_completion), M_NVME, M_ZERO,
0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
- bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
- bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);
+ err = bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
+ if (err != 0)
+ nvme_printf(ctrlr, "cmd_dma_map create failed %d\n", err);
+ err = bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);
+ if (err != 0)
+ nvme_printf(ctrlr, "cpl_dma_map create failed %d\n", err);
+
bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map,
qpair->cmd, qpair->num_entries * sizeof(struct nvme_command),
nvme_single_map, &qpair->cmd_bus_addr, 0);
@@ -571,6 +578,9 @@
if (qpair->dma_tag)
bus_dma_tag_destroy(qpair->dma_tag);
+ if (qpair->dma_tag_payload)
+ bus_dma_tag_destroy(qpair->dma_tag_payload);
+
if (qpair->act_tr)
free(qpair->act_tr, M_NVME);
@@ -708,8 +718,11 @@
* is responsible for detecting the error status and failing the
* tracker manually.
*/
- if (error != 0)
+ if (error != 0) {
+ nvme_printf(tr->qpair->ctrlr,
+ "nvme_payload_map err %d\n", error);
return;
+ }
/*
* Note that we specified PAGE_SIZE for alignment and max
@@ -729,6 +742,13 @@
(uint64_t)seg[cur_nseg].ds_addr;
cur_nseg++;
}
+ } else {
+ /*
+ * prp2 should not be used by the controller
+ * since there is only one segment, but set
+ * to 0 just to be safe.
+ */
+ tr->req->cmd.prp2 = 0;
}
nvme_qpair_submit_tracker(tr->qpair, tr);
@@ -781,8 +801,9 @@
KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
("payload_size (%d) exceeds max_xfer_size (%d)\n",
req->payload_size, qpair->ctrlr->max_xfer_size));
- err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
- req->u.payload, req->payload_size, nvme_payload_map, tr, 0);
+ err = bus_dmamap_load(tr->qpair->dma_tag_payload,
+ tr->payload_dma_map, req->u.payload, req->payload_size,
+ nvme_payload_map, tr, 0);
if (err != 0)
nvme_printf(qpair->ctrlr,
"bus_dmamap_load returned 0x%x!\n", err);
@@ -796,7 +817,7 @@
("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
(intmax_t)req->u.bio->bio_bcount,
qpair->ctrlr->max_xfer_size));
- err = bus_dmamap_load_bio(tr->qpair->dma_tag,
+ err = bus_dmamap_load_bio(tr->qpair->dma_tag_payload,
tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
if (err != 0)
nvme_printf(qpair->ctrlr,
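For context on the nvme_qpair.c payload changes: NVMe PRP entries name physical pages, with prp1 covering the first page and prp2 either the second page of a two-page transfer or a pointer to a PRP list for longer ones; the diff now zeroes prp2 when only one segment is mapped. A toy sketch of that convention with made-up addresses (not driver code):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int
main(void)
{
	uint64_t prp1, prp2;
	uint64_t prp_list[8];		/* stand-in for tr->prp */
	uint64_t addr = 0x12340000;	/* hypothetical bus address */
	uint32_t len = 3 * PAGE_SIZE;	/* three-page transfer */
	uint32_t nseg = len / PAGE_SIZE;

	prp1 = addr;
	if (nseg == 1) {
		prp2 = 0;	/* unused; zeroed "just to be safe" */
	} else if (nseg == 2) {
		prp2 = addr + PAGE_SIZE;
	} else {
		/* prp2 points at a list of the remaining page addresses. */
		for (uint32_t i = 1; i < nseg; i++)
			prp_list[i - 1] = addr + (uint64_t)i * PAGE_SIZE;
		prp2 = (uint64_t)(uintptr_t)prp_list;
	}
	printf("prp1=%#jx prp2=%#jx (%u segs)\n",
	    (uintmax_t)prp1, (uintmax_t)prp2, nseg);
	return (0);
}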
Modified: trunk/sys/dev/nvme/nvme_sysctl.c
===================================================================
--- trunk/sys/dev/nvme/nvme_sysctl.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_sysctl.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -1,6 +1,6 @@
/* $MidnightBSD$ */
/*-
- * Copyright (C) 2012-2013 Intel Corporation
+ * Copyright (C) 2012-2016 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_sysctl.c 297126 2016-03-21 00:34:22Z mav $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -252,6 +252,10 @@
ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cpus_per_ioq",
+ CTLFLAG_RD, &ctrlr->num_cpus_per_ioq, 0,
+ "Number of CPUs assigned per I/O queue pair");
+
SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
"int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
nvme_sysctl_int_coal_time, "IU",
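The new read-only OID hangs off the controller's device sysctl tree, so it should surface as dev.nvme.<unit>.num_cpus_per_ioq. A hypothetical userland check via sysctlbyname(3), assuming a controller probed as nvme0:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	unsigned int val;
	size_t len = sizeof(val);

	/* OID name assumed from the SYSCTL_ADD_UINT() call above. */
	if (sysctlbyname("dev.nvme.0.num_cpus_per_ioq", &val, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("nvme0: %u CPUs per I/O queue pair\n", val);
	return (0);
}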
Modified: trunk/sys/dev/nvme/nvme_test.c
===================================================================
--- trunk/sys/dev/nvme/nvme_test.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_test.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: stable/9/sys/dev/nvme/nvme_test.c 257588 2013-11-03 20:52:13Z jimharris $");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_test.c 256152 2013-10-08 15:47:22Z jimharris $");
#include <sys/param.h>
#include <sys/bio.h>
Modified: trunk/sys/dev/nvme/nvme_util.c
===================================================================
--- trunk/sys/dev/nvme/nvme_util.c 2018-05-27 23:27:34 UTC (rev 10085)
+++ trunk/sys/dev/nvme/nvme_util.c 2018-05-27 23:27:48 UTC (rev 10086)
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
+__FBSDID("$FreeBSD: stable/10/sys/dev/nvme/nvme_util.c 253476 2013-07-19 21:40:57Z jimharris $");
#include <sys/param.h>
#include <dev/nvme/nvme.h>