[Midnightbsd-cvs] src [9565] trunk/sys/dev: add nvd, nvme from FreeBSD 9.2
laffer1 at midnightbsd.org
Mon Sep 18 21:19:27 EDT 2017
Revision: 9565
http://svnweb.midnightbsd.org/src/?rev=9565
Author: laffer1
Date: 2017-09-18 21:19:26 -0400 (Mon, 18 Sep 2017)
Log Message:
-----------
add nvd, nvme from FreeBSD 9.2
Added Paths:
-----------
trunk/sys/dev/nvd/
trunk/sys/dev/nvd/nvd.c
trunk/sys/dev/nvme/
trunk/sys/dev/nvme/nvme.c
trunk/sys/dev/nvme/nvme.h
trunk/sys/dev/nvme/nvme_ctrlr.c
trunk/sys/dev/nvme/nvme_ctrlr_cmd.c
trunk/sys/dev/nvme/nvme_ns.c
trunk/sys/dev/nvme/nvme_ns_cmd.c
trunk/sys/dev/nvme/nvme_private.h
trunk/sys/dev/nvme/nvme_qpair.c
trunk/sys/dev/nvme/nvme_sysctl.c
trunk/sys/dev/nvme/nvme_test.c
trunk/sys/dev/nvme/nvme_util.c
Added: trunk/sys/dev/nvd/nvd.c
===================================================================
--- trunk/sys/dev/nvd/nvd.c (rev 0)
+++ trunk/sys/dev/nvd/nvd.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,403 @@
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__MBSDID("$MidnightBSD$");
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+
+#include <geom/geom.h>
+#include <geom/geom_disk.h>
+
+#include <dev/nvme/nvme.h>
+
+#define NVD_STR "nvd"
+
+struct nvd_disk;
+
+static disk_ioctl_t nvd_ioctl;
+static disk_strategy_t nvd_strategy;
+
+static void *nvd_new_disk(struct nvme_namespace *ns, void *ctrlr);
+static void destroy_geom_disk(struct nvd_disk *ndisk);
+
+static void *nvd_new_controller(struct nvme_controller *ctrlr);
+static void nvd_controller_fail(void *ctrlr);
+
+static int nvd_load(void);
+static void nvd_unload(void);
+
+MALLOC_DEFINE(M_NVD, "nvd", "nvd(4) allocations");
+
+struct nvme_consumer *consumer_handle;
+
+struct nvd_disk {
+
+ struct bio_queue_head bioq;
+ struct task bioqtask;
+ struct mtx bioqlock;
+
+ struct disk *disk;
+ struct taskqueue *tq;
+ struct nvme_namespace *ns;
+
+ uint32_t cur_depth;
+
+ TAILQ_ENTRY(nvd_disk) global_tailq;
+ TAILQ_ENTRY(nvd_disk) ctrlr_tailq;
+};
+
+struct nvd_controller {
+
+ TAILQ_ENTRY(nvd_controller) tailq;
+ TAILQ_HEAD(, nvd_disk) disk_head;
+};
+
+static TAILQ_HEAD(, nvd_controller) ctrlr_head;
+static TAILQ_HEAD(disk_list, nvd_disk) disk_head;
+
+static int nvd_modevent(module_t mod, int type, void *arg)
+{
+ int error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = nvd_load();
+ break;
+ case MOD_UNLOAD:
+ nvd_unload();
+ break;
+ default:
+ break;
+ }
+
+ return (error);
+}
+
+moduledata_t nvd_mod = {
+ NVD_STR,
+ (modeventhand_t)nvd_modevent,
+ 0
+};
+
+DECLARE_MODULE(nvd, nvd_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);
+MODULE_VERSION(nvd, 1);
+MODULE_DEPEND(nvd, nvme, 1, 1, 1);
+
+static int
+nvd_load()
+{
+
+ TAILQ_INIT(&ctrlr_head);
+ TAILQ_INIT(&disk_head);
+
+ consumer_handle = nvme_register_consumer(nvd_new_disk,
+ nvd_new_controller, NULL, nvd_controller_fail);
+
+ return (consumer_handle != NULL ? 0 : -1);
+}
+
+static void
+nvd_unload()
+{
+ struct nvd_controller *ctrlr;
+ struct nvd_disk *disk;
+
+ while (!TAILQ_EMPTY(&ctrlr_head)) {
+ ctrlr = TAILQ_FIRST(&ctrlr_head);
+ TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
+ free(ctrlr, M_NVD);
+ }
+
+ while (!TAILQ_EMPTY(&disk_head)) {
+ disk = TAILQ_FIRST(&disk_head);
+ TAILQ_REMOVE(&disk_head, disk, global_tailq);
+ destroy_geom_disk(disk);
+ free(disk, M_NVD);
+ }
+
+ nvme_unregister_consumer(consumer_handle);
+}
+
+static void
+nvd_strategy(struct bio *bp)
+{
+ struct nvd_disk *ndisk;
+
+ ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;
+
+ mtx_lock(&ndisk->bioqlock);
+ bioq_insert_tail(&ndisk->bioq, bp);
+ mtx_unlock(&ndisk->bioqlock);
+ taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
+}
+
+static int
+nvd_ioctl(struct disk *ndisk, u_long cmd, void *data, int fflag,
+ struct thread *td)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ default:
+ ret = EIO;
+ }
+
+ return (ret);
+}
+
+static void
+nvd_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct bio *bp;
+ struct nvd_disk *ndisk;
+
+ bp = (struct bio *)arg;
+
+ ndisk = bp->bio_disk->d_drv1;
+
+ atomic_add_int(&ndisk->cur_depth, -1);
+
+ /*
+ * TODO: add more extensive translation of NVMe status codes
+ * to different bio error codes (e.g. EIO, EINVAL, etc.)
+ */
+ if (nvme_completion_is_error(cpl)) {
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ } else
+ bp->bio_resid = 0;
+
+ biodone(bp);
+}
+
+static void
+nvd_bioq_process(void *arg, int pending)
+{
+ struct nvd_disk *ndisk = arg;
+ struct bio *bp;
+ int err;
+
+ for (;;) {
+ mtx_lock(&ndisk->bioqlock);
+ bp = bioq_takefirst(&ndisk->bioq);
+ mtx_unlock(&ndisk->bioqlock);
+ if (bp == NULL)
+ break;
+
+#ifdef BIO_ORDERED
+ /*
+ * BIO_ORDERED flag dictates that all outstanding bios
+ * must be completed before processing the bio with
+ * BIO_ORDERED flag set.
+ */
+ if (bp->bio_flags & BIO_ORDERED) {
+ while (ndisk->cur_depth > 0) {
+ pause("nvd flush", 1);
+ }
+ }
+#endif
+
+ bp->bio_driver1 = NULL;
+ atomic_add_int(&ndisk->cur_depth, 1);
+
+ err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);
+
+ if (err) {
+ atomic_add_int(&ndisk->cur_depth, -1);
+ bp->bio_error = err;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ biodone(bp);
+ }
+
+#ifdef BIO_ORDERED
+ /*
+ * BIO_ORDERED flag dictates that the bio with BIO_ORDERED
+ * flag set must be completed before proceeding with
+ * additional bios.
+ */
+ if (bp->bio_flags & BIO_ORDERED) {
+ while (ndisk->cur_depth > 0) {
+ pause("nvd flush", 1);
+ }
+ }
+#endif
+ }
+}
+
+static void *
+nvd_new_controller(struct nvme_controller *ctrlr)
+{
+ struct nvd_controller *nvd_ctrlr;
+
+ nvd_ctrlr = malloc(sizeof(struct nvd_controller), M_NVD,
+ M_ZERO | M_WAITOK);
+
+ TAILQ_INIT(&nvd_ctrlr->disk_head);
+ TAILQ_INSERT_TAIL(&ctrlr_head, nvd_ctrlr, tailq);
+
+ return (nvd_ctrlr);
+}
+
+static void *
+nvd_new_disk(struct nvme_namespace *ns, void *ctrlr_arg)
+{
+ uint8_t descr[NVME_MODEL_NUMBER_LENGTH+1];
+ struct nvd_disk *ndisk;
+ struct disk *disk;
+ struct nvd_controller *ctrlr = ctrlr_arg;
+
+ ndisk = malloc(sizeof(struct nvd_disk), M_NVD, M_ZERO | M_WAITOK);
+
+ disk = disk_alloc();
+ disk->d_strategy = nvd_strategy;
+ disk->d_ioctl = nvd_ioctl;
+ disk->d_name = NVD_STR;
+ disk->d_drv1 = ndisk;
+
+ disk->d_maxsize = nvme_ns_get_max_io_xfer_size(ns);
+ disk->d_sectorsize = nvme_ns_get_sector_size(ns);
+ disk->d_mediasize = (off_t)nvme_ns_get_size(ns);
+
+ if (TAILQ_EMPTY(&disk_head))
+ disk->d_unit = 0;
+ else
+ disk->d_unit =
+ TAILQ_LAST(&disk_head, disk_list)->disk->d_unit + 1;
+
+ disk->d_flags = 0;
+
+ if (nvme_ns_get_flags(ns) & NVME_NS_DEALLOCATE_SUPPORTED)
+ disk->d_flags |= DISKFLAG_CANDELETE;
+
+ if (nvme_ns_get_flags(ns) & NVME_NS_FLUSH_SUPPORTED)
+ disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
+
+/* ifdef used here to ease porting to stable branches at a later point. */
+#ifdef DISKFLAG_UNMAPPED_BIO
+ disk->d_flags |= DISKFLAG_UNMAPPED_BIO;
+#endif
+
+ /*
+ * d_ident and d_descr are both far bigger than the length of either
+ * the serial or model number strings.
+ */
+ nvme_strvis(disk->d_ident, nvme_ns_get_serial_number(ns),
+ sizeof(disk->d_ident), NVME_SERIAL_NUMBER_LENGTH);
+
+ nvme_strvis(descr, nvme_ns_get_model_number(ns), sizeof(descr),
+ NVME_MODEL_NUMBER_LENGTH);
+
+#if __FreeBSD_version >= 900034
+ strlcpy(disk->d_descr, descr, sizeof(descr));
+#endif
+
+ ndisk->ns = ns;
+ ndisk->disk = disk;
+ ndisk->cur_depth = 0;
+
+ mtx_init(&ndisk->bioqlock, "NVD bioq lock", NULL, MTX_DEF);
+ bioq_init(&ndisk->bioq);
+
+ TASK_INIT(&ndisk->bioqtask, 0, nvd_bioq_process, ndisk);
+ ndisk->tq = taskqueue_create("nvd_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ndisk->tq);
+ taskqueue_start_threads(&ndisk->tq, 1, PI_DISK, "nvd taskq");
+
+ TAILQ_INSERT_TAIL(&disk_head, ndisk, global_tailq);
+ TAILQ_INSERT_TAIL(&ctrlr->disk_head, ndisk, ctrlr_tailq);
+
+ disk_create(disk, DISK_VERSION);
+
+ printf(NVD_STR"%u: <%s> NVMe namespace\n", disk->d_unit, descr);
+ printf(NVD_STR"%u: %juMB (%ju %u byte sectors)\n", disk->d_unit,
+ (uintmax_t)disk->d_mediasize / (1024*1024),
+ (uintmax_t)disk->d_mediasize / disk->d_sectorsize,
+ disk->d_sectorsize);
+
+ return (NULL);
+}
+
+static void
+destroy_geom_disk(struct nvd_disk *ndisk)
+{
+ struct bio *bp;
+ struct disk *disk;
+ uint32_t unit;
+ int cnt = 0;
+
+ disk = ndisk->disk;
+ unit = disk->d_unit;
+ taskqueue_free(ndisk->tq);
+
+ disk_destroy(ndisk->disk);
+
+ mtx_lock(&ndisk->bioqlock);
+ for (;;) {
+ bp = bioq_takefirst(&ndisk->bioq);
+ if (bp == NULL)
+ break;
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ cnt++;
+ biodone(bp);
+ }
+
+ printf(NVD_STR"%u: lost device - %d outstanding\n", unit, cnt);
+ printf(NVD_STR"%u: removing device entry\n", unit);
+
+ mtx_unlock(&ndisk->bioqlock);
+
+ mtx_destroy(&ndisk->bioqlock);
+}
+
+static void
+nvd_controller_fail(void *ctrlr_arg)
+{
+ struct nvd_controller *ctrlr = ctrlr_arg;
+ struct nvd_disk *disk;
+
+ while (!TAILQ_EMPTY(&ctrlr->disk_head)) {
+ disk = TAILQ_FIRST(&ctrlr->disk_head);
+ TAILQ_REMOVE(&disk_head, disk, global_tailq);
+ TAILQ_REMOVE(&ctrlr->disk_head, disk, ctrlr_tailq);
+ destroy_geom_disk(disk);
+ free(disk, M_NVD);
+ }
+
+ TAILQ_REMOVE(&ctrlr_head, ctrlr, tailq);
+ free(ctrlr, M_NVD);
+}
+
Property changes on: trunk/sys/dev/nvd/nvd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme.c
===================================================================
--- trunk/sys/dev/nvme/nvme.c (rev 0)
+++ trunk/sys/dev/nvme/nvme.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,386 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme.c 253631 2013-07-24 22:48:29Z jimharris $");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+
+#include <vm/uma.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "nvme_private.h"
+
+struct nvme_consumer {
+ uint32_t id;
+ nvme_cons_ns_fn_t ns_fn;
+ nvme_cons_ctrlr_fn_t ctrlr_fn;
+ nvme_cons_async_fn_t async_fn;
+ nvme_cons_fail_fn_t fail_fn;
+};
+
+struct nvme_consumer nvme_consumer[NVME_MAX_CONSUMERS];
+#define INVALID_CONSUMER_ID 0xFFFF
+
+uma_zone_t nvme_request_zone;
+int32_t nvme_retry_count;
+
+MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");
+
+static int nvme_probe(device_t);
+static int nvme_attach(device_t);
+static int nvme_detach(device_t);
+static int nvme_modevent(module_t mod, int type, void *arg);
+
+static devclass_t nvme_devclass;
+
+static device_method_t nvme_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nvme_probe),
+ DEVMETHOD(device_attach, nvme_attach),
+ DEVMETHOD(device_detach, nvme_detach),
+ { 0, 0 }
+};
+
+static driver_t nvme_pci_driver = {
+ "nvme",
+ nvme_pci_methods,
+ sizeof(struct nvme_controller),
+};
+
+DRIVER_MODULE(nvme, pci, nvme_pci_driver, nvme_devclass, nvme_modevent, 0);
+MODULE_VERSION(nvme, 1);
+
+static struct _pcsid
+{
+ u_int32_t type;
+ const char *desc;
+} pci_ids[] = {
+ { 0x01118086, "NVMe Controller" },
+ { CHATHAM_PCI_ID, "Chatham Prototype NVMe Controller" },
+ { IDT32_PCI_ID, "IDT NVMe Controller (32 channel)" },
+ { IDT8_PCI_ID, "IDT NVMe Controller (8 channel)" },
+ { 0x00000000, NULL }
+};
+
+static int
+nvme_probe (device_t device)
+{
+ struct _pcsid *ep;
+ u_int32_t type;
+
+ type = pci_get_devid(device);
+ ep = pci_ids;
+
+ while (ep->type && ep->type != type)
+ ++ep;
+
+ if (ep->desc) {
+ device_set_desc(device, ep->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+#if defined(PCIS_STORAGE_NVM)
+ if (pci_get_class(device) == PCIC_STORAGE &&
+ pci_get_subclass(device) == PCIS_STORAGE_NVM &&
+ pci_get_progif(device) == PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0) {
+ device_set_desc(device, "Generic NVMe Device");
+ return (BUS_PROBE_GENERIC);
+ }
+#endif
+
+ return (ENXIO);
+}
+
+static void
+nvme_init(void)
+{
+ uint32_t i;
+
+ nvme_request_zone = uma_zcreate("nvme_request",
+ sizeof(struct nvme_request), NULL, NULL, NULL, NULL, 0, 0);
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++)
+ nvme_consumer[i].id = INVALID_CONSUMER_ID;
+}
+
+SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);
+
+static void
+nvme_uninit(void)
+{
+ uma_zdestroy(nvme_request_zone);
+}
+
+SYSUNINIT(nvme_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_uninit, NULL);
+
+static void
+nvme_load(void)
+{
+}
+
+static void
+nvme_unload(void)
+{
+}
+
+static void
+nvme_shutdown(void)
+{
+ device_t *devlist;
+ struct nvme_controller *ctrlr;
+ union cc_register cc;
+ union csts_register csts;
+ int dev, devcount;
+
+ if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
+ return;
+
+ for (dev = 0; dev < devcount; dev++) {
+ /*
+ * Only notify controller of shutdown when a real shutdown is
+ * in process, not when a module unload occurs. It seems at
+ * least some controllers (Chatham at least) don't let you
+ * re-enable the controller after shutdown notification has
+ * been received.
+ */
+ ctrlr = DEVICE2SOFTC(devlist[dev]);
+ cc.raw = nvme_mmio_read_4(ctrlr, cc);
+ cc.bits.shn = NVME_SHN_NORMAL;
+ nvme_mmio_write_4(ctrlr, cc, cc.raw);
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+ while (csts.bits.shst != NVME_SHST_COMPLETE) {
+ DELAY(5);
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+ }
+ }
+
+ free(devlist, M_TEMP);
+}
+
+static int
+nvme_modevent(module_t mod, int type, void *arg)
+{
+
+ switch (type) {
+ case MOD_LOAD:
+ nvme_load();
+ break;
+ case MOD_UNLOAD:
+ nvme_unload();
+ break;
+ case MOD_SHUTDOWN:
+ nvme_shutdown();
+ break;
+ default:
+ break;
+ }
+
+ return (0);
+}
+
+void
+nvme_dump_command(struct nvme_command *cmd)
+{
+ printf(
+"opc:%x f:%x r1:%x cid:%x nsid:%x r2:%x r3:%x mptr:%jx prp1:%jx prp2:%jx cdw:%x %x %x %x %x %x\n",
+ cmd->opc, cmd->fuse, cmd->rsvd1, cmd->cid, cmd->nsid,
+ cmd->rsvd2, cmd->rsvd3,
+ (uintmax_t)cmd->mptr, (uintmax_t)cmd->prp1, (uintmax_t)cmd->prp2,
+ cmd->cdw10, cmd->cdw11, cmd->cdw12, cmd->cdw13, cmd->cdw14,
+ cmd->cdw15);
+}
+
+void
+nvme_dump_completion(struct nvme_completion *cpl)
+{
+ printf("cdw0:%08x sqhd:%04x sqid:%04x "
+ "cid:%04x p:%x sc:%02x sct:%x m:%x dnr:%x\n",
+ cpl->cdw0, cpl->sqhd, cpl->sqid,
+ cpl->cid, cpl->status.p, cpl->status.sc, cpl->status.sct,
+ cpl->status.m, cpl->status.dnr);
+}
+
+static int
+nvme_attach(device_t dev)
+{
+ struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
+ int status;
+
+ status = nvme_ctrlr_construct(ctrlr, dev);
+
+ if (status != 0)
+ return (status);
+
+ /*
+ * Reset controller twice to ensure we do a transition from cc.en==1
+ * to cc.en==0. This is because we don't really know what state
+ * the controller was left in when the boot loader handed off to the OS.
+ */
+ status = nvme_ctrlr_hw_reset(ctrlr);
+ if (status != 0)
+ return (status);
+
+ status = nvme_ctrlr_hw_reset(ctrlr);
+ if (status != 0)
+ return (status);
+
+ nvme_sysctl_initialize_ctrlr(ctrlr);
+
+ pci_enable_busmaster(dev);
+
+ ctrlr->config_hook.ich_func = nvme_ctrlr_start_config_hook;
+ ctrlr->config_hook.ich_arg = ctrlr;
+
+ config_intrhook_establish(&ctrlr->config_hook);
+
+ return (0);
+}
+
+static int
+nvme_detach (device_t dev)
+{
+ struct nvme_controller *ctrlr = DEVICE2SOFTC(dev);
+
+ nvme_ctrlr_destruct(ctrlr, dev);
+ pci_disable_busmaster(dev);
+ return (0);
+}
+
+static void
+nvme_notify_consumer(struct nvme_consumer *cons)
+{
+ device_t *devlist;
+ struct nvme_controller *ctrlr;
+ struct nvme_namespace *ns;
+ void *ctrlr_cookie;
+ int dev_idx, ns_idx, devcount;
+
+ if (devclass_get_devices(nvme_devclass, &devlist, &devcount))
+ return;
+
+ for (dev_idx = 0; dev_idx < devcount; dev_idx++) {
+ ctrlr = DEVICE2SOFTC(devlist[dev_idx]);
+ if (cons->ctrlr_fn != NULL)
+ ctrlr_cookie = (*cons->ctrlr_fn)(ctrlr);
+ else
+ ctrlr_cookie = NULL;
+ ctrlr->cons_cookie[cons->id] = ctrlr_cookie;
+ for (ns_idx = 0; ns_idx < ctrlr->cdata.nn; ns_idx++) {
+ ns = &ctrlr->ns[ns_idx];
+ if (cons->ns_fn != NULL)
+ ns->cons_cookie[cons->id] =
+ (*cons->ns_fn)(ns, ctrlr_cookie);
+ }
+ }
+
+ free(devlist, M_TEMP);
+}
+
+void
+nvme_notify_async_consumers(struct nvme_controller *ctrlr,
+ const struct nvme_completion *async_cpl,
+ uint32_t log_page_id, void *log_page_buffer,
+ uint32_t log_page_size)
+{
+ struct nvme_consumer *cons;
+ uint32_t i;
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
+ cons = &nvme_consumer[i];
+ if (cons->id != INVALID_CONSUMER_ID && cons->async_fn != NULL)
+ (*cons->async_fn)(ctrlr->cons_cookie[i], async_cpl,
+ log_page_id, log_page_buffer, log_page_size);
+ }
+}
+
+void
+nvme_notify_fail_consumers(struct nvme_controller *ctrlr)
+{
+ struct nvme_consumer *cons;
+ uint32_t i;
+
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++) {
+ cons = &nvme_consumer[i];
+ if (cons->id != INVALID_CONSUMER_ID && cons->fail_fn != NULL)
+ cons->fail_fn(ctrlr->cons_cookie[i]);
+ }
+}
+
+struct nvme_consumer *
+nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
+ nvme_cons_async_fn_t async_fn,
+ nvme_cons_fail_fn_t fail_fn)
+{
+ int i;
+
+ /*
+ * TODO: add locking around consumer registration. Not an issue
+ * right now since we only have one nvme consumer - nvd(4).
+ */
+ for (i = 0; i < NVME_MAX_CONSUMERS; i++)
+ if (nvme_consumer[i].id == INVALID_CONSUMER_ID) {
+ nvme_consumer[i].id = i;
+ nvme_consumer[i].ns_fn = ns_fn;
+ nvme_consumer[i].ctrlr_fn = ctrlr_fn;
+ nvme_consumer[i].async_fn = async_fn;
+ nvme_consumer[i].fail_fn = fail_fn;
+
+ nvme_notify_consumer(&nvme_consumer[i]);
+ return (&nvme_consumer[i]);
+ }
+
+ printf("nvme(4): consumer not registered - no slots available\n");
+ return (NULL);
+}
+
+void
+nvme_unregister_consumer(struct nvme_consumer *consumer)
+{
+
+ consumer->id = INVALID_CONSUMER_ID;
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_completion_poll_status *status = arg;
+
+ /*
+ * Copy status into the argument passed by the caller, so that
+ * the caller can check the status to determine if the
+ * request passed or failed.
+ */
+ memcpy(&status->cpl, cpl, sizeof(*cpl));
+ wmb();
+ status->done = TRUE;
+}
Property changes on: trunk/sys/dev/nvme/nvme.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme.h
===================================================================
--- trunk/sys/dev/nvme/nvme.h (rev 0)
+++ trunk/sys/dev/nvme/nvme.h 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,877 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/9.2.0/sys/dev/nvme/nvme.h 253631 2013-07-24 22:48:29Z jimharris $
+ */
+
+#ifndef __NVME_H__
+#define __NVME_H__
+
+#ifdef _KERNEL
+#include <sys/types.h>
+#endif
+
+#include <sys/param.h>
+
+#define NVME_PASSTHROUGH_CMD _IOWR('n', 0, struct nvme_pt_command)
+#define NVME_RESET_CONTROLLER _IO('n', 1)
+
+#define NVME_IO_TEST _IOWR('n', 100, struct nvme_io_test)
+#define NVME_BIO_TEST _IOWR('n', 101, struct nvme_io_test)
+
+/*
+ * Used to mark a command as applying to all namespaces, or to retrieve global
+ * log pages.
+ */
+#define NVME_GLOBAL_NAMESPACE_TAG ((uint32_t)0xFFFFFFFF)
+
+#define NVME_MAX_XFER_SIZE MAXPHYS
+
+union cap_lo_register {
+ uint32_t raw;
+ struct {
+ /** maximum queue entries supported */
+ uint32_t mqes : 16;
+
+ /** contiguous queues required */
+ uint32_t cqr : 1;
+
+ /** arbitration mechanism supported */
+ uint32_t ams : 2;
+
+ uint32_t reserved1 : 5;
+
+ /** timeout */
+ uint32_t to : 8;
+ } bits __packed;
+} __packed;
+
+union cap_hi_register {
+ uint32_t raw;
+ struct {
+ /** doorbell stride */
+ uint32_t dstrd : 4;
+
+ uint32_t reserved3 : 1;
+
+ /** command sets supported */
+ uint32_t css_nvm : 1;
+
+ uint32_t css_reserved : 3;
+ uint32_t reserved2 : 7;
+
+ /** memory page size minimum */
+ uint32_t mpsmin : 4;
+
+ /** memory page size maximum */
+ uint32_t mpsmax : 4;
+
+ uint32_t reserved1 : 8;
+ } bits __packed;
+} __packed;
+
+union cc_register {
+ uint32_t raw;
+ struct {
+ /** enable */
+ uint32_t en : 1;
+
+ uint32_t reserved1 : 3;
+
+ /** i/o command set selected */
+ uint32_t css : 3;
+
+ /** memory page size */
+ uint32_t mps : 4;
+
+ /** arbitration mechanism selected */
+ uint32_t ams : 3;
+
+ /** shutdown notification */
+ uint32_t shn : 2;
+
+ /** i/o submission queue entry size */
+ uint32_t iosqes : 4;
+
+ /** i/o completion queue entry size */
+ uint32_t iocqes : 4;
+
+ uint32_t reserved2 : 8;
+ } bits __packed;
+} __packed;
+
+enum shn_value {
+ NVME_SHN_NORMAL = 0x1,
+ NVME_SHN_ABRUPT = 0x2,
+};
+
+union csts_register {
+ uint32_t raw;
+ struct {
+ /** ready */
+ uint32_t rdy : 1;
+
+ /** controller fatal status */
+ uint32_t cfs : 1;
+
+ /** shutdown status */
+ uint32_t shst : 2;
+
+ uint32_t reserved1 : 28;
+ } bits __packed;
+} __packed;
+
+enum shst_value {
+ NVME_SHST_NORMAL = 0x0,
+ NVME_SHST_OCCURRING = 0x1,
+ NVME_SHST_COMPLETE = 0x2,
+};
+
+union aqa_register {
+ uint32_t raw;
+ struct {
+ /** admin submission queue size */
+ uint32_t asqs : 12;
+
+ uint32_t reserved1 : 4;
+
+ /** admin completion queue size */
+ uint32_t acqs : 12;
+
+ uint32_t reserved2 : 4;
+ } bits __packed;
+} __packed;
+
+struct nvme_registers
+{
+ /** controller capabilities */
+ union cap_lo_register cap_lo;
+ union cap_hi_register cap_hi;
+
+ uint32_t vs; /* version */
+ uint32_t intms; /* interrupt mask set */
+ uint32_t intmc; /* interrupt mask clear */
+
+ /** controller configuration */
+ union cc_register cc;
+
+ uint32_t reserved1;
+ uint32_t csts; /* controller status */
+ uint32_t reserved2;
+
+ /** admin queue attributes */
+ union aqa_register aqa;
+
+ uint64_t asq; /* admin submission queue base addr */
+ uint64_t acq; /* admin completion queue base addr */
+ uint32_t reserved3[0x3f2];
+
+ struct {
+ uint32_t sq_tdbl; /* submission queue tail doorbell */
+ uint32_t cq_hdbl; /* completion queue head doorbell */
+ } doorbell[1] __packed;
+} __packed;
+
+struct nvme_command
+{
+ /* dword 0 */
+ uint16_t opc : 8; /* opcode */
+ uint16_t fuse : 2; /* fused operation */
+ uint16_t rsvd1 : 6;
+ uint16_t cid; /* command identifier */
+
+ /* dword 1 */
+ uint32_t nsid; /* namespace identifier */
+
+ /* dword 2-3 */
+ uint32_t rsvd2;
+ uint32_t rsvd3;
+
+ /* dword 4-5 */
+ uint64_t mptr; /* metadata pointer */
+
+ /* dword 6-7 */
+ uint64_t prp1; /* prp entry 1 */
+
+ /* dword 8-9 */
+ uint64_t prp2; /* prp entry 2 */
+
+ /* dword 10-15 */
+ uint32_t cdw10; /* command-specific */
+ uint32_t cdw11; /* command-specific */
+ uint32_t cdw12; /* command-specific */
+ uint32_t cdw13; /* command-specific */
+ uint32_t cdw14; /* command-specific */
+ uint32_t cdw15; /* command-specific */
+} __packed;
+
+struct nvme_status {
+
+ uint16_t p : 1; /* phase tag */
+ uint16_t sc : 8; /* status code */
+ uint16_t sct : 3; /* status code type */
+ uint16_t rsvd2 : 2;
+ uint16_t m : 1; /* more */
+ uint16_t dnr : 1; /* do not retry */
+} __packed;
+
+struct nvme_completion {
+
+ /* dword 0 */
+ uint32_t cdw0; /* command-specific */
+
+ /* dword 1 */
+ uint32_t rsvd1;
+
+ /* dword 2 */
+ uint16_t sqhd; /* submission queue head pointer */
+ uint16_t sqid; /* submission queue identifier */
+
+ /* dword 3 */
+ uint16_t cid; /* command identifier */
+ struct nvme_status status;
+} __packed;
+
+struct nvme_dsm_range {
+
+ uint32_t attributes;
+ uint32_t length;
+ uint64_t starting_lba;
+} __packed;
+
+/* status code types */
+enum nvme_status_code_type {
+ NVME_SCT_GENERIC = 0x0,
+ NVME_SCT_COMMAND_SPECIFIC = 0x1,
+ NVME_SCT_MEDIA_ERROR = 0x2,
+ /* 0x3-0x6 - reserved */
+ NVME_SCT_VENDOR_SPECIFIC = 0x7,
+};
+
+/* generic command status codes */
+enum nvme_generic_command_status_code {
+ NVME_SC_SUCCESS = 0x00,
+ NVME_SC_INVALID_OPCODE = 0x01,
+ NVME_SC_INVALID_FIELD = 0x02,
+ NVME_SC_COMMAND_ID_CONFLICT = 0x03,
+ NVME_SC_DATA_TRANSFER_ERROR = 0x04,
+ NVME_SC_ABORTED_POWER_LOSS = 0x05,
+ NVME_SC_INTERNAL_DEVICE_ERROR = 0x06,
+ NVME_SC_ABORTED_BY_REQUEST = 0x07,
+ NVME_SC_ABORTED_SQ_DELETION = 0x08,
+ NVME_SC_ABORTED_FAILED_FUSED = 0x09,
+ NVME_SC_ABORTED_MISSING_FUSED = 0x0a,
+ NVME_SC_INVALID_NAMESPACE_OR_FORMAT = 0x0b,
+ NVME_SC_COMMAND_SEQUENCE_ERROR = 0x0c,
+
+ NVME_SC_LBA_OUT_OF_RANGE = 0x80,
+ NVME_SC_CAPACITY_EXCEEDED = 0x81,
+ NVME_SC_NAMESPACE_NOT_READY = 0x82,
+};
+
+/* command specific status codes */
+enum nvme_command_specific_status_code {
+ NVME_SC_COMPLETION_QUEUE_INVALID = 0x00,
+ NVME_SC_INVALID_QUEUE_IDENTIFIER = 0x01,
+ NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED = 0x02,
+ NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED = 0x03,
+ /* 0x04 - reserved */
+ NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED = 0x05,
+ NVME_SC_INVALID_FIRMWARE_SLOT = 0x06,
+ NVME_SC_INVALID_FIRMWARE_IMAGE = 0x07,
+ NVME_SC_INVALID_INTERRUPT_VECTOR = 0x08,
+ NVME_SC_INVALID_LOG_PAGE = 0x09,
+ NVME_SC_INVALID_FORMAT = 0x0a,
+ NVME_SC_FIRMWARE_REQUIRES_RESET = 0x0b,
+
+ NVME_SC_CONFLICTING_ATTRIBUTES = 0x80,
+ NVME_SC_INVALID_PROTECTION_INFO = 0x81,
+ NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE = 0x82,
+};
+
+/* media error status codes */
+enum nvme_media_error_status_code {
+ NVME_SC_WRITE_FAULTS = 0x80,
+ NVME_SC_UNRECOVERED_READ_ERROR = 0x81,
+ NVME_SC_GUARD_CHECK_ERROR = 0x82,
+ NVME_SC_APPLICATION_TAG_CHECK_ERROR = 0x83,
+ NVME_SC_REFERENCE_TAG_CHECK_ERROR = 0x84,
+ NVME_SC_COMPARE_FAILURE = 0x85,
+ NVME_SC_ACCESS_DENIED = 0x86,
+};
+
+/* admin opcodes */
+enum nvme_admin_opcode {
+ NVME_OPC_DELETE_IO_SQ = 0x00,
+ NVME_OPC_CREATE_IO_SQ = 0x01,
+ NVME_OPC_GET_LOG_PAGE = 0x02,
+ /* 0x03 - reserved */
+ NVME_OPC_DELETE_IO_CQ = 0x04,
+ NVME_OPC_CREATE_IO_CQ = 0x05,
+ NVME_OPC_IDENTIFY = 0x06,
+ /* 0x07 - reserved */
+ NVME_OPC_ABORT = 0x08,
+ NVME_OPC_SET_FEATURES = 0x09,
+ NVME_OPC_GET_FEATURES = 0x0a,
+ /* 0x0b - reserved */
+ NVME_OPC_ASYNC_EVENT_REQUEST = 0x0c,
+ /* 0x0d-0x0f - reserved */
+ NVME_OPC_FIRMWARE_ACTIVATE = 0x10,
+ NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD = 0x11,
+
+ NVME_OPC_FORMAT_NVM = 0x80,
+ NVME_OPC_SECURITY_SEND = 0x81,
+ NVME_OPC_SECURITY_RECEIVE = 0x82,
+};
+
+/* nvme nvm opcodes */
+enum nvme_nvm_opcode {
+ NVME_OPC_FLUSH = 0x00,
+ NVME_OPC_WRITE = 0x01,
+ NVME_OPC_READ = 0x02,
+ /* 0x03 - reserved */
+ NVME_OPC_WRITE_UNCORRECTABLE = 0x04,
+ NVME_OPC_COMPARE = 0x05,
+ /* 0x06-0x07 - reserved */
+ NVME_OPC_DATASET_MANAGEMENT = 0x09,
+};
+
+enum nvme_feature {
+ /* 0x00 - reserved */
+ NVME_FEAT_ARBITRATION = 0x01,
+ NVME_FEAT_POWER_MANAGEMENT = 0x02,
+ NVME_FEAT_LBA_RANGE_TYPE = 0x03,
+ NVME_FEAT_TEMPERATURE_THRESHOLD = 0x04,
+ NVME_FEAT_ERROR_RECOVERY = 0x05,
+ NVME_FEAT_VOLATILE_WRITE_CACHE = 0x06,
+ NVME_FEAT_NUMBER_OF_QUEUES = 0x07,
+ NVME_FEAT_INTERRUPT_COALESCING = 0x08,
+ NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
+ NVME_FEAT_WRITE_ATOMICITY = 0x0A,
+ NVME_FEAT_ASYNC_EVENT_CONFIGURATION = 0x0B,
+ /* 0x0C-0x7F - reserved */
+ NVME_FEAT_SOFTWARE_PROGRESS_MARKER = 0x80,
+ /* 0x81-0xBF - command set specific (reserved) */
+ /* 0xC0-0xFF - vendor specific */
+};
+
+enum nvme_dsm_attribute {
+ NVME_DSM_ATTR_INTEGRAL_READ = 0x1,
+ NVME_DSM_ATTR_INTEGRAL_WRITE = 0x2,
+ NVME_DSM_ATTR_DEALLOCATE = 0x4,
+};
+
+enum nvme_activate_action {
+ NVME_AA_REPLACE_NO_ACTIVATE = 0x0,
+ NVME_AA_REPLACE_ACTIVATE = 0x1,
+ NVME_AA_ACTIVATE = 0x2,
+};
+
+#define NVME_SERIAL_NUMBER_LENGTH 20
+#define NVME_MODEL_NUMBER_LENGTH 40
+#define NVME_FIRMWARE_REVISION_LENGTH 8
+
+struct nvme_controller_data {
+
+ /* bytes 0-255: controller capabilities and features */
+
+ /** pci vendor id */
+ uint16_t vid;
+
+ /** pci subsystem vendor id */
+ uint16_t ssvid;
+
+ /** serial number */
+ uint8_t sn[NVME_SERIAL_NUMBER_LENGTH];
+
+ /** model number */
+ uint8_t mn[NVME_MODEL_NUMBER_LENGTH];
+
+ /** firmware revision */
+ uint8_t fr[NVME_FIRMWARE_REVISION_LENGTH];
+
+ /** recommended arbitration burst */
+ uint8_t rab;
+
+ /** ieee oui identifier */
+ uint8_t ieee[3];
+
+ /** multi-interface capabilities */
+ uint8_t mic;
+
+ /** maximum data transfer size */
+ uint8_t mdts;
+
+ uint8_t reserved1[178];
+
+ /* bytes 256-511: admin command set attributes */
+
+ /** optional admin command support */
+ struct {
+ /* supports security send/receive commands */
+ uint16_t security : 1;
+
+ /* supports format nvm command */
+ uint16_t format : 1;
+
+ /* supports firmware activate/download commands */
+ uint16_t firmware : 1;
+
+ uint16_t oacs_rsvd : 13;
+ } __packed oacs;
+
+ /** abort command limit */
+ uint8_t acl;
+
+ /** asynchronous event request limit */
+ uint8_t aerl;
+
+ /** firmware updates */
+ struct {
+ /* first slot is read-only */
+ uint8_t slot1_ro : 1;
+
+ /* number of firmware slots */
+ uint8_t num_slots : 3;
+
+ uint8_t frmw_rsvd : 4;
+ } __packed frmw;
+
+ /** log page attributes */
+ struct {
+ /* per namespace smart/health log page */
+ uint8_t ns_smart : 1;
+
+ uint8_t lpa_rsvd : 7;
+ } __packed lpa;
+
+ /** error log page entries */
+ uint8_t elpe;
+
+ /** number of power states supported */
+ uint8_t npss;
+
+ /** admin vendor specific command configuration */
+ struct {
+ /* admin vendor specific commands use spec format */
+ uint8_t spec_format : 1;
+
+ uint8_t avscc_rsvd : 7;
+ } __packed avscc;
+
+ uint8_t reserved2[247];
+
+ /* bytes 512-703: nvm command set attributes */
+
+ /** submission queue entry size */
+ struct {
+ uint8_t min : 4;
+ uint8_t max : 4;
+ } __packed sqes;
+
+ /** completion queue entry size */
+ struct {
+ uint8_t min : 4;
+ uint8_t max : 4;
+ } __packed cqes;
+
+ uint8_t reserved3[2];
+
+ /** number of namespaces */
+ uint32_t nn;
+
+ /** optional nvm command support */
+ struct {
+ uint16_t compare : 1;
+ uint16_t write_unc : 1;
+ uint16_t dsm: 1;
+ uint16_t reserved: 13;
+ } __packed oncs;
+
+ /** fused operation support */
+ uint16_t fuses;
+
+ /** format nvm attributes */
+ uint8_t fna;
+
+ /** volatile write cache */
+ struct {
+ uint8_t present : 1;
+ uint8_t reserved : 7;
+ } __packed vwc;
+
+ /* TODO: flesh out remaining nvm command set attributes */
+ uint8_t reserved4[178];
+
+ /* bytes 704-2047: i/o command set attributes */
+ uint8_t reserved5[1344];
+
+ /* bytes 2048-3071: power state descriptors */
+ uint8_t reserved6[1024];
+
+ /* bytes 3072-4095: vendor specific */
+ uint8_t reserved7[1024];
+} __packed __aligned(4);
+
+struct nvme_namespace_data {
+
+ /** namespace size */
+ uint64_t nsze;
+
+ /** namespace capacity */
+ uint64_t ncap;
+
+ /** namespace utilization */
+ uint64_t nuse;
+
+ /** namespace features */
+ struct {
+ /** thin provisioning */
+ uint8_t thin_prov : 1;
+ uint8_t reserved1 : 7;
+ } __packed nsfeat;
+
+ /** number of lba formats */
+ uint8_t nlbaf;
+
+ /** formatted lba size */
+ struct {
+ uint8_t format : 4;
+ uint8_t extended : 1;
+ uint8_t reserved2 : 3;
+ } __packed flbas;
+
+ /** metadata capabilities */
+ struct {
+ /* metadata can be transferred as part of data prp list */
+ uint8_t extended : 1;
+
+ /* metadata can be transferred with separate metadata pointer */
+ uint8_t pointer : 1;
+
+ uint8_t reserved3 : 6;
+ } __packed mc;
+
+ /** end-to-end data protection capabilities */
+ struct {
+ /* protection information type 1 */
+ uint8_t pit1 : 1;
+
+ /* protection information type 2 */
+ uint8_t pit2 : 1;
+
+ /* protection information type 3 */
+ uint8_t pit3 : 1;
+
+ /* first eight bytes of metadata */
+ uint8_t md_start : 1;
+
+ /* last eight bytes of metadata */
+ uint8_t md_end : 1;
+ } __packed dpc;
+
+ /** end-to-end data protection type settings */
+ struct {
+ /* protection information type */
+ uint8_t pit : 3;
+
+ /* 1 == protection info transferred at start of metadata */
+ /* 0 == protection info transferred at end of metadata */
+ uint8_t md_start : 1;
+
+ uint8_t reserved4 : 4;
+ } __packed dps;
+
+ uint8_t reserved5[98];
+
+ /** lba format support */
+ struct {
+ /** metadata size */
+ uint32_t ms : 16;
+
+ /** lba data size */
+ uint32_t lbads : 8;
+
+ /** relative performance */
+ uint32_t rp : 2;
+
+ uint32_t reserved6 : 6;
+ } __packed lbaf[16];
+
+ uint8_t reserved6[192];
+
+ uint8_t vendor_specific[3712];
+} __packed __aligned(4);
+
+enum nvme_log_page {
+
+ /* 0x00 - reserved */
+ NVME_LOG_ERROR = 0x01,
+ NVME_LOG_HEALTH_INFORMATION = 0x02,
+ NVME_LOG_FIRMWARE_SLOT = 0x03,
+ /* 0x04-0x7F - reserved */
+ /* 0x80-0xBF - I/O command set specific */
+ /* 0xC0-0xFF - vendor specific */
+};
+
+struct nvme_error_information_entry {
+
+ uint64_t error_count;
+ uint16_t sqid;
+ uint16_t cid;
+ struct nvme_status status;
+ uint16_t error_location;
+ uint64_t lba;
+ uint32_t nsid;
+ uint8_t vendor_specific;
+ uint8_t reserved[35];
+} __packed __aligned(4);
+
+union nvme_critical_warning_state {
+
+ uint8_t raw;
+
+ struct {
+ uint8_t available_spare : 1;
+ uint8_t temperature : 1;
+ uint8_t device_reliability : 1;
+ uint8_t read_only : 1;
+ uint8_t volatile_memory_backup : 1;
+ uint8_t reserved : 3;
+ } __packed bits;
+} __packed;
+
+struct nvme_health_information_page {
+
+ union nvme_critical_warning_state critical_warning;
+
+ uint16_t temperature;
+ uint8_t available_spare;
+ uint8_t available_spare_threshold;
+ uint8_t percentage_used;
+
+ uint8_t reserved[26];
+
+ /*
+ * Note that the following are 128-bit values, but are
+ * defined as an array of 2 64-bit values.
+ */
+ /* Data Units Read is always in 512-byte units. */
+ uint64_t data_units_read[2];
+ /* Data Units Written is always in 512-byte units. */
+ uint64_t data_units_written[2];
+ /* For NVM command set, this includes Compare commands. */
+ uint64_t host_read_commands[2];
+ uint64_t host_write_commands[2];
+ /* Controller Busy Time is reported in minutes. */
+ uint64_t controller_busy_time[2];
+ uint64_t power_cycles[2];
+ uint64_t power_on_hours[2];
+ uint64_t unsafe_shutdowns[2];
+ uint64_t media_errors[2];
+ uint64_t num_error_info_log_entries[2];
+
+ uint8_t reserved2[320];
+} __packed __aligned(4);
+
+struct nvme_firmware_page {
+
+ struct {
+ uint8_t slot : 3; /* slot for current FW */
+ uint8_t reserved : 5;
+ } __packed afi;
+
+ uint8_t reserved[7];
+ uint64_t revision[7]; /* revisions for 7 slots */
+ uint8_t reserved2[448];
+} __packed __aligned(4);
+
+#define NVME_TEST_MAX_THREADS 128
+
+struct nvme_io_test {
+
+ enum nvme_nvm_opcode opc;
+ uint32_t size;
+ uint32_t time; /* in seconds */
+ uint32_t num_threads;
+ uint32_t flags;
+ uint32_t io_completed[NVME_TEST_MAX_THREADS];
+};
+
+enum nvme_io_test_flags {
+
+ /*
+ * Specifies whether dev_refthread/dev_relthread should be
+ * called during NVME_BIO_TEST. Ignored for other test
+ * types.
+ */
+ NVME_TEST_FLAG_REFTHREAD = 0x1,
+};
+
+struct nvme_pt_command {
+
+ /*
+ * cmd is used to specify a passthrough command to a controller or
+ * namespace.
+ *
+ * The following fields from cmd may be specified by the caller:
+ * * opc (opcode)
+ * * nsid (namespace id) - for admin commands only
+ * * cdw10-cdw15
+ *
+ * Remaining fields must be set to 0 by the caller.
+ */
+ struct nvme_command cmd;
+
+ /*
+ * cpl returns completion status for the passthrough command
+ * specified by cmd.
+ *
+ * The following fields will be filled out by the driver, for
+ * consumption by the caller:
+ * * cdw0
+ * * status (except for phase)
+ *
+ * Remaining fields will be set to 0 by the driver.
+ */
+ struct nvme_completion cpl;
+
+ /* buf is the data buffer associated with this passthrough command. */
+ void * buf;
+
+ /*
+ * len is the length of the data buffer associated with this
+ * passthrough command.
+ */
+ uint32_t len;
+
+ /*
+ * is_read = 1 if the passthrough command will read data into the
+ * supplied buffer from the controller.
+ *
+ * is_read = 0 if the passthrough command will write data from the
+ * supplied buffer to the controller.
+ */
+ uint32_t is_read;
+
+ /*
+ * driver_lock is used by the driver only. It must be set to 0
+ * by the caller.
+ */
+ struct mtx * driver_lock;
+};
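+
+/*
+ * Usage sketch (illustrative only; the device path, includes and error
+ * handling are assumptions, not part of this header): a userland tool
+ * could fetch the SMART/health log via an admin passthrough command
+ * roughly as follows:
+ *
+ *     struct nvme_pt_command pt;
+ *     struct nvme_health_information_page health;
+ *     int fd;
+ *
+ *     fd = open("/dev/nvme0", O_RDWR);
+ *     memset(&pt, 0, sizeof(pt));
+ *     pt.cmd.opc = NVME_OPC_GET_LOG_PAGE;
+ *     pt.cmd.nsid = NVME_GLOBAL_NAMESPACE_TAG;
+ *     pt.cmd.cdw10 = NVME_LOG_HEALTH_INFORMATION |
+ *         ((sizeof(health) / sizeof(uint32_t) - 1) << 16);
+ *     pt.buf = &health;
+ *     pt.len = sizeof(health);
+ *     pt.is_read = 1;
+ *     ioctl(fd, NVME_PASSTHROUGH_CMD, &pt);
+ */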
+
+#define nvme_completion_is_error(cpl) \
+ ((cpl)->status.sc != 0 || (cpl)->status.sct != 0)
+
+void nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);
+
+#ifdef _KERNEL
+
+struct bio;
+
+struct nvme_namespace;
+struct nvme_controller;
+struct nvme_consumer;
+
+typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);
+
+typedef void *(*nvme_cons_ns_fn_t)(struct nvme_namespace *, void *);
+typedef void *(*nvme_cons_ctrlr_fn_t)(struct nvme_controller *);
+typedef void (*nvme_cons_async_fn_t)(void *, const struct nvme_completion *,
+ uint32_t, void *, uint32_t);
+typedef void (*nvme_cons_fail_fn_t)(void *);
+
+enum nvme_namespace_flags {
+ NVME_NS_DEALLOCATE_SUPPORTED = 0x1,
+ NVME_NS_FLUSH_SUPPORTED = 0x2,
+};
+
+int nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
+ struct nvme_pt_command *pt,
+ uint32_t nsid, int is_user_buffer,
+ int is_admin_cmd);
+
+/* Admin functions */
+void nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
+ uint8_t feature, uint32_t cdw11,
+ void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr,
+ uint8_t feature, uint32_t cdw11,
+ void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr,
+ uint8_t log_page, uint32_t nsid,
+ void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+
+/* NVM I/O functions */
+int nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload,
+ uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+int nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+int nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload,
+ uint64_t lba, uint32_t lba_count, nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+int nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+int nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
+ uint8_t num_ranges, nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+int nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn,
+ void *cb_arg);
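+
+/*
+ * Usage sketch (illustrative only; the malloc type, offset/length
+ * variables and completion callback are consumer-supplied placeholders):
+ * deallocating (TRIM'ing) a single LBA range means passing an array of
+ * one nvme_dsm_range to nvme_ns_cmd_deallocate(), e.g.:
+ *
+ *     struct nvme_dsm_range *r;
+ *
+ *     r = malloc(sizeof(*r), M_CONSUMER, M_ZERO | M_WAITOK);
+ *     r->starting_lba = offset / nvme_ns_get_sector_size(ns);
+ *     r->length = bytes / nvme_ns_get_sector_size(ns);
+ *     nvme_ns_cmd_deallocate(ns, r, 1, consumer_done_cb, arg);
+ */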
+
+/* Registration functions */
+struct nvme_consumer * nvme_register_consumer(nvme_cons_ns_fn_t ns_fn,
+ nvme_cons_ctrlr_fn_t ctrlr_fn,
+ nvme_cons_async_fn_t async_fn,
+ nvme_cons_fail_fn_t fail_fn);
+void nvme_unregister_consumer(struct nvme_consumer *consumer);
+
+/* Controller helper functions */
+device_t nvme_ctrlr_get_device(struct nvme_controller *ctrlr);
+const struct nvme_controller_data *
+ nvme_ctrlr_get_data(struct nvme_controller *ctrlr);
+
+/* Namespace helper functions */
+uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns);
+uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns);
+uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns);
+uint64_t nvme_ns_get_size(struct nvme_namespace *ns);
+uint32_t nvme_ns_get_flags(struct nvme_namespace *ns);
+const char * nvme_ns_get_serial_number(struct nvme_namespace *ns);
+const char * nvme_ns_get_model_number(struct nvme_namespace *ns);
+const struct nvme_namespace_data *
+ nvme_ns_get_data(struct nvme_namespace *ns);
+
+int nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn);
+
+#endif /* _KERNEL */
+
+#endif /* __NVME_H__ */
Property changes on: trunk/sys/dev/nvme/nvme.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme_ctrlr.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ctrlr.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_ctrlr.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,1200 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_ctrlr.c 253627 2013-07-24 22:42:00Z jimharris $");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/ioccom.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+#include <sys/uio.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "nvme_private.h"
+
+static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
+ struct nvme_async_event_request *aer);
+
+static int
+nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
+{
+
+ /* Chatham puts the NVMe MMRs behind BAR 2/3, not BAR 0/1. */
+ if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
+ ctrlr->resource_id = PCIR_BAR(2);
+ else
+ ctrlr->resource_id = PCIR_BAR(0);
+
+ ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE);
+
+ if(ctrlr->resource == NULL) {
+ nvme_printf(ctrlr, "unable to allocate pci resource\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
+ ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
+ ctrlr->regs = (struct nvme_registers *)ctrlr->bus_handle;
+
+ /*
+ * The NVMe spec allows for the MSI-X table to be placed behind
+ * BAR 4/5, separate from the control/doorbell registers. Always
+ * try to map this bar, because it must be mapped prior to calling
+ * pci_alloc_msix(). If the table isn't behind BAR 4/5,
+ * bus_alloc_resource() will just return NULL which is OK.
+ */
+ ctrlr->bar4_resource_id = PCIR_BAR(4);
+ ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE);
+
+ return (0);
+}
+
+#ifdef CHATHAM2
+static int
+nvme_ctrlr_allocate_chatham_bar(struct nvme_controller *ctrlr)
+{
+
+ ctrlr->chatham_resource_id = PCIR_BAR(CHATHAM_CONTROL_BAR);
+ ctrlr->chatham_resource = bus_alloc_resource(ctrlr->dev,
+ SYS_RES_MEMORY, &ctrlr->chatham_resource_id, 0, ~0, 1,
+ RF_ACTIVE);
+
+ if(ctrlr->chatham_resource == NULL) {
+ nvme_printf(ctrlr, "unable to alloc pci resource\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->chatham_bus_tag = rman_get_bustag(ctrlr->chatham_resource);
+ ctrlr->chatham_bus_handle =
+ rman_get_bushandle(ctrlr->chatham_resource);
+
+ return (0);
+}
+
+static void
+nvme_ctrlr_setup_chatham(struct nvme_controller *ctrlr)
+{
+ uint64_t reg1, reg2, reg3;
+ uint64_t temp1, temp2;
+ uint32_t temp3;
+ uint32_t use_flash_timings = 0;
+
+ DELAY(10000);
+
+ temp3 = chatham_read_4(ctrlr, 0x8080);
+
+ device_printf(ctrlr->dev, "Chatham version: 0x%x\n", temp3);
+
+ ctrlr->chatham_lbas = chatham_read_4(ctrlr, 0x8068) - 0x110;
+ ctrlr->chatham_size = ctrlr->chatham_lbas * 512;
+
+ device_printf(ctrlr->dev, "Chatham size: %jd\n",
+ (intmax_t)ctrlr->chatham_size);
+
+ reg1 = reg2 = reg3 = ctrlr->chatham_size - 1;
+
+ TUNABLE_INT_FETCH("hw.nvme.use_flash_timings", &use_flash_timings);
+ if (use_flash_timings) {
+ device_printf(ctrlr->dev, "Chatham: using flash timings\n");
+ temp1 = 0x00001b58000007d0LL;
+ temp2 = 0x000000cb00000131LL;
+ } else {
+ device_printf(ctrlr->dev, "Chatham: using DDR timings\n");
+ temp1 = temp2 = 0x0LL;
+ }
+
+ chatham_write_8(ctrlr, 0x8000, reg1);
+ chatham_write_8(ctrlr, 0x8008, reg2);
+ chatham_write_8(ctrlr, 0x8010, reg3);
+
+ chatham_write_8(ctrlr, 0x8020, temp1);
+ temp3 = chatham_read_4(ctrlr, 0x8020);
+
+ chatham_write_8(ctrlr, 0x8028, temp2);
+ temp3 = chatham_read_4(ctrlr, 0x8028);
+
+ chatham_write_8(ctrlr, 0x8030, temp1);
+ chatham_write_8(ctrlr, 0x8038, temp2);
+ chatham_write_8(ctrlr, 0x8040, temp1);
+ chatham_write_8(ctrlr, 0x8048, temp2);
+ chatham_write_8(ctrlr, 0x8050, temp1);
+ chatham_write_8(ctrlr, 0x8058, temp2);
+
+ DELAY(10000);
+}
+
+static void
+nvme_chatham_populate_cdata(struct nvme_controller *ctrlr)
+{
+ struct nvme_controller_data *cdata;
+
+ cdata = &ctrlr->cdata;
+
+ cdata->vid = 0x8086;
+ cdata->ssvid = 0x2011;
+
+ /*
+ * Chatham2 puts garbage data in these fields when we
+ * invoke IDENTIFY_CONTROLLER, so we need to re-zero
+ * the fields before copying in the replacement strings.
+ */
+ memset(cdata->sn, 0, sizeof(cdata->sn));
+ memcpy(cdata->sn, "2012", strlen("2012"));
+ memset(cdata->mn, 0, sizeof(cdata->mn));
+ memcpy(cdata->mn, "CHATHAM2", strlen("CHATHAM2"));
+ memset(cdata->fr, 0, sizeof(cdata->fr));
+ memcpy(cdata->fr, "0", strlen("0"));
+ cdata->rab = 8;
+ cdata->aerl = 3;
+ cdata->lpa.ns_smart = 1;
+ cdata->sqes.min = 6;
+ cdata->sqes.max = 6;
+ cdata->cqes.min = 4;
+ cdata->cqes.max = 4;
+ cdata->nn = 1;
+
+ /* Chatham2 doesn't support DSM command */
+ cdata->oncs.dsm = 0;
+
+ cdata->vwc.present = 1;
+}
+#endif /* CHATHAM2 */
+
+static void
+nvme_ctrlr_construct_admin_qpair(struct nvme_controller *ctrlr)
+{
+ struct nvme_qpair *qpair;
+ uint32_t num_entries;
+
+ qpair = &ctrlr->adminq;
+
+ num_entries = NVME_ADMIN_ENTRIES;
+ TUNABLE_INT_FETCH("hw.nvme.admin_entries", &num_entries);
+ /*
+ * If admin_entries was overridden to an invalid value, revert it
+ * back to our default value.
+ */
+ if (num_entries < NVME_MIN_ADMIN_ENTRIES ||
+ num_entries > NVME_MAX_ADMIN_ENTRIES) {
+ nvme_printf(ctrlr, "invalid hw.nvme.admin_entries=%d "
+ "specified\n", num_entries);
+ num_entries = NVME_ADMIN_ENTRIES;
+ }
+
+ /*
+ * The admin queue's max xfer size is treated differently than the
+ * max I/O xfer size. 16KB is sufficient here - maybe even less?
+ */
+ nvme_qpair_construct(qpair,
+ 0, /* qpair ID */
+ 0, /* vector */
+ num_entries,
+ NVME_ADMIN_TRACKERS,
+ ctrlr);
+}
+
+static int
+nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_qpair *qpair;
+ union cap_lo_register cap_lo;
+ int i, num_entries, num_trackers;
+
+ num_entries = NVME_IO_ENTRIES;
+ TUNABLE_INT_FETCH("hw.nvme.io_entries", &num_entries);
+
+ /*
+ * NVMe spec sets a hard limit of 64K max entries, but
+ * devices may specify a smaller limit, so we need to check
+ * the MQES field in the capabilities register.
+ */
+ cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
+ num_entries = min(num_entries, cap_lo.bits.mqes+1);
+
+ num_trackers = NVME_IO_TRACKERS;
+ TUNABLE_INT_FETCH("hw.nvme.io_trackers", &num_trackers);
+
+ num_trackers = max(num_trackers, NVME_MIN_IO_TRACKERS);
+ num_trackers = min(num_trackers, NVME_MAX_IO_TRACKERS);
+ /*
+ * No need to have more trackers than entries in the submit queue.
+ * Note also that for a queue size of N, we can only have (N-1)
+ * commands outstanding, hence the "-1" here.
+ */
+ num_trackers = min(num_trackers, (num_entries-1));
+
+ ctrlr->ioq = malloc(ctrlr->num_io_queues * sizeof(struct nvme_qpair),
+ M_NVME, M_ZERO | M_WAITOK);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ qpair = &ctrlr->ioq[i];
+
+ /*
+ * Admin queue has ID=0. IO queues start at ID=1 -
+ * hence the 'i+1' here.
+ *
+ * For I/O queues, use the controller-wide max_xfer_size
+ * calculated in nvme_attach().
+ */
+ nvme_qpair_construct(qpair,
+ i+1, /* qpair ID */
+ ctrlr->msix_enabled ? i+1 : 0, /* vector */
+ num_entries,
+ num_trackers,
+ ctrlr);
+
+ if (ctrlr->per_cpu_io_queues)
+ bus_bind_intr(ctrlr->dev, qpair->res, i);
+ }
+
+ return (0);
+}
+
+static void
+nvme_ctrlr_fail(struct nvme_controller *ctrlr)
+{
+ int i;
+
+ ctrlr->is_failed = TRUE;
+ nvme_qpair_fail(&ctrlr->adminq);
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_qpair_fail(&ctrlr->ioq[i]);
+ nvme_notify_fail_consumers(ctrlr);
+}
+
+void
+nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req)
+{
+
+ mtx_lock(&ctrlr->lock);
+ STAILQ_INSERT_TAIL(&ctrlr->fail_req, req, stailq);
+ mtx_unlock(&ctrlr->lock);
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->fail_req_task);
+}
+
+static void
+nvme_ctrlr_fail_req_task(void *arg, int pending)
+{
+ struct nvme_controller *ctrlr = arg;
+ struct nvme_request *req;
+
+ mtx_lock(&ctrlr->lock);
+ while (!STAILQ_EMPTY(&ctrlr->fail_req)) {
+ req = STAILQ_FIRST(&ctrlr->fail_req);
+ STAILQ_REMOVE_HEAD(&ctrlr->fail_req, stailq);
+ nvme_qpair_manual_complete_request(req->qpair, req,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
+ }
+ mtx_unlock(&ctrlr->lock);
+}
+
+static int
+nvme_ctrlr_wait_for_ready(struct nvme_controller *ctrlr)
+{
+ int ms_waited;
+ union cc_register cc;
+ union csts_register csts;
+
+ cc.raw = nvme_mmio_read_4(ctrlr, cc);
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+
+ if (!cc.bits.en) {
+ nvme_printf(ctrlr, "%s called with cc.en = 0\n", __func__);
+ return (ENXIO);
+ }
+
+ ms_waited = 0;
+
+ while (!csts.bits.rdy) {
+ DELAY(1000);
+ if (ms_waited++ > ctrlr->ready_timeout_in_ms) {
+ nvme_printf(ctrlr, "controller did not become ready "
+ "within %d ms\n", ctrlr->ready_timeout_in_ms);
+ return (ENXIO);
+ }
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+ }
+
+ return (0);
+}
+
+static void
+nvme_ctrlr_disable(struct nvme_controller *ctrlr)
+{
+ union cc_register cc;
+ union csts_register csts;
+
+ cc.raw = nvme_mmio_read_4(ctrlr, cc);
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+
+ if (cc.bits.en == 1 && csts.bits.rdy == 0)
+ nvme_ctrlr_wait_for_ready(ctrlr);
+
+ cc.bits.en = 0;
+ nvme_mmio_write_4(ctrlr, cc, cc.raw);
+ DELAY(5000);
+}
+
+static int
+nvme_ctrlr_enable(struct nvme_controller *ctrlr)
+{
+ union cc_register cc;
+ union csts_register csts;
+ union aqa_register aqa;
+
+ cc.raw = nvme_mmio_read_4(ctrlr, cc);
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+
+ if (cc.bits.en == 1) {
+ if (csts.bits.rdy == 1)
+ return (0);
+ else
+ return (nvme_ctrlr_wait_for_ready(ctrlr));
+ }
+
+ nvme_mmio_write_8(ctrlr, asq, ctrlr->adminq.cmd_bus_addr);
+ DELAY(5000);
+ nvme_mmio_write_8(ctrlr, acq, ctrlr->adminq.cpl_bus_addr);
+ DELAY(5000);
+
+ aqa.raw = 0;
+ /* acqs and asqs are 0-based. */
+ aqa.bits.acqs = ctrlr->adminq.num_entries-1;
+ aqa.bits.asqs = ctrlr->adminq.num_entries-1;
+ nvme_mmio_write_4(ctrlr, aqa, aqa.raw);
+ DELAY(5000);
+
+ cc.bits.en = 1;
+ cc.bits.css = 0;
+ cc.bits.ams = 0;
+ cc.bits.shn = 0;
+ cc.bits.iosqes = 6; /* SQ entry size == 64 == 2^6 */
+ cc.bits.iocqes = 4; /* CQ entry size == 16 == 2^4 */
+
+ /* This evaluates to 0, which is according to spec. */
+ cc.bits.mps = (PAGE_SIZE >> 13);
+
+ nvme_mmio_write_4(ctrlr, cc, cc.raw);
+ DELAY(5000);
+
+ return (nvme_ctrlr_wait_for_ready(ctrlr));
+}
+
+int
+nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
+{
+ int i;
+
+ nvme_admin_qpair_disable(&ctrlr->adminq);
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_disable(&ctrlr->ioq[i]);
+
+ DELAY(100*1000);
+
+ nvme_ctrlr_disable(ctrlr);
+ return (nvme_ctrlr_enable(ctrlr));
+}
+
+void
+nvme_ctrlr_reset(struct nvme_controller *ctrlr)
+{
+ int cmpset;
+
+ cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
+
+ if (cmpset == 0 || ctrlr->is_failed)
+ /*
+ * Controller is already resetting or has failed. Return
+ * immediately since there is no need to kick off another
+ * reset in these cases.
+ */
+ return;
+
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
+}
+
+static int
+nvme_ctrlr_identify(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+
+ status.done = FALSE;
+ nvme_ctrlr_cmd_identify_controller(ctrlr, &ctrlr->cdata,
+ nvme_completion_poll_cb, &status);
+ while (status.done == FALSE)
+ pause("nvme", 1);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_identify_controller failed!\n");
+ return (ENXIO);
+ }
+
+#ifdef CHATHAM2
+ if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
+ nvme_chatham_populate_cdata(ctrlr);
+#endif
+
+ /*
+ * Use MDTS to ensure our default max_xfer_size doesn't exceed what the
+ * controller supports.
+ */
+ if (ctrlr->cdata.mdts > 0)
+ ctrlr->max_xfer_size = min(ctrlr->max_xfer_size,
+ ctrlr->min_page_size * (1 << (ctrlr->cdata.mdts)));
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_set_num_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ int cq_allocated, i, sq_allocated;
+
+ status.done = FALSE;
+ nvme_ctrlr_cmd_set_num_queues(ctrlr, ctrlr->num_io_queues,
+ nvme_completion_poll_cb, &status);
+ while (status.done == FALSE)
+ pause("nvme", 1);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_set_num_queues failed!\n");
+ return (ENXIO);
+ }
+
+ /*
+ * Data in cdw0 is 0-based.
+ * Lower 16-bits indicate number of submission queues allocated.
+ * Upper 16-bits indicate number of completion queues allocated.
+ */
+ sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
+ cq_allocated = (status.cpl.cdw0 >> 16) + 1;
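+ /* e.g. cdw0 == 0x00030003 means the controller granted 4 submission and 4 completion queues. */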
+
+ /*
+ * Check that the controller was able to allocate the number of
+ * queues we requested. If not, revert to one IO queue pair.
+ */
+ if (sq_allocated < ctrlr->num_io_queues ||
+ cq_allocated < ctrlr->num_io_queues) {
+
+ /*
+ * Destroy extra IO queue pairs that were created at
+ * controller construction time but are no longer
+ * needed. This will only happen when a controller
+ * supports fewer queues than MSI-X vectors. This
+ * is not the normal case, but does occur with the
+ * Chatham prototype board.
+ */
+ for (i = 1; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_destroy(&ctrlr->ioq[i]);
+
+ ctrlr->num_io_queues = 1;
+ ctrlr->per_cpu_io_queues = 0;
+ }
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_create_qpairs(struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+ struct nvme_qpair *qpair;
+ int i;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ qpair = &ctrlr->ioq[i];
+
+ status.done = FALSE;
+ nvme_ctrlr_cmd_create_io_cq(ctrlr, qpair, qpair->vector,
+ nvme_completion_poll_cb, &status);
+ while (status.done == FALSE)
+ pause("nvme", 1);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_create_io_cq failed!\n");
+ return (ENXIO);
+ }
+
+ status.done = FALSE;
+ nvme_ctrlr_cmd_create_io_sq(qpair->ctrlr, qpair,
+ nvme_completion_poll_cb, &status);
+ while (status.done == FALSE)
+ pause("nvme", 1);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_create_io_sq failed!\n");
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+static int
+nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
+{
+ struct nvme_namespace *ns;
+ int i, status;
+
+ for (i = 0; i < ctrlr->cdata.nn; i++) {
+ ns = &ctrlr->ns[i];
+ status = nvme_ns_construct(ns, i+1, ctrlr);
+ if (status != 0)
+ return (status);
+ }
+
+ return (0);
+}
+
+static boolean_t
+is_log_page_id_valid(uint8_t page_id)
+{
+
+ switch (page_id) {
+ case NVME_LOG_ERROR:
+ case NVME_LOG_HEALTH_INFORMATION:
+ case NVME_LOG_FIRMWARE_SLOT:
+ return (TRUE);
+ }
+
+ return (FALSE);
+}
+
+static uint32_t
+nvme_ctrlr_get_log_page_size(struct nvme_controller *ctrlr, uint8_t page_id)
+{
+ uint32_t log_page_size;
+
+ switch (page_id) {
+ case NVME_LOG_ERROR:
+ log_page_size = min(
+ sizeof(struct nvme_error_information_entry) *
+ ctrlr->cdata.elpe,
+ NVME_MAX_AER_LOG_SIZE);
+ break;
+ case NVME_LOG_HEALTH_INFORMATION:
+ log_page_size = sizeof(struct nvme_health_information_page);
+ break;
+ case NVME_LOG_FIRMWARE_SLOT:
+ log_page_size = sizeof(struct nvme_firmware_page);
+ break;
+ default:
+ log_page_size = 0;
+ break;
+ }
+
+ return (log_page_size);
+}
+
+static void
+nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_async_event_request *aer = arg;
+
+ /*
+ * If the log page fetch for some reason completed with an error,
+ * don't pass log page data to the consumers. In practice, this case
+ * should never happen.
+ */
+ if (nvme_completion_is_error(cpl))
+ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+ aer->log_page_id, NULL, 0);
+ else
+ /*
+ * Pass the cpl data from the original async event completion,
+ * not the log page fetch.
+ */
+ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+ aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
+
+ /*
+ * Repost another asynchronous event request to replace the one
+ * that just completed.
+ */
+ nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
+}
+
+static void
+nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_async_event_request *aer = arg;
+
+ if (nvme_completion_is_error(cpl)) {
+ /*
+ * Do not retry failed async event requests. This avoids
+ * infinite loops where a new async event request is submitted
+ * to replace the one just failed, only to fail again and
+ * perpetuate the loop.
+ */
+ return;
+ }
+
+ /* Associated log page is in bits 23:16 of completion entry dw0. */
+ aer->log_page_id = (cpl->cdw0 & 0xFF0000) >> 16;
+
+ nvme_printf(aer->ctrlr, "async event occurred (log page id=0x%x)\n",
+ aer->log_page_id);
+
+ if (is_log_page_id_valid(aer->log_page_id)) {
+ aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
+ aer->log_page_id);
+ memcpy(&aer->cpl, cpl, sizeof(*cpl));
+ nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
+ NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
+ aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
+ aer);
+ /* Wait to notify consumers until after log page is fetched. */
+ } else {
+ nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
+ NULL, 0);
+
+ /*
+ * Repost another asynchronous event request to replace the one
+ * that just completed.
+ */
+ nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
+ }
+}
+
+static void
+nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
+ struct nvme_async_event_request *aer)
+{
+ struct nvme_request *req;
+
+ aer->ctrlr = ctrlr;
+ req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
+ aer->req = req;
+
+ /*
+ * Disable timeout here, since asynchronous event requests should by
+ * nature never be timed out.
+ */
+ req->timeout = FALSE;
+ req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+static void
+nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
+{
+ union nvme_critical_warning_state state;
+ struct nvme_async_event_request *aer;
+ uint32_t i;
+
+ state.raw = 0xFF;
+ state.bits.reserved = 0;
+ nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, NULL, NULL);
+
+ /* aerl is a zero-based value, so we need to add 1 here. */
+ ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
+
+ /* Chatham doesn't support AERs. */
+ if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
+ ctrlr->num_aers = 0;
+
+ for (i = 0; i < ctrlr->num_aers; i++) {
+ aer = &ctrlr->aer[i];
+ nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
+ }
+}
+
+static void
+nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
+{
+
+ ctrlr->int_coal_time = 0;
+ TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
+ &ctrlr->int_coal_time);
+
+ ctrlr->int_coal_threshold = 0;
+ TUNABLE_INT_FETCH("hw.nvme.int_coal_threshold",
+ &ctrlr->int_coal_threshold);
+
+ nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr, ctrlr->int_coal_time,
+ ctrlr->int_coal_threshold, NULL, NULL);
+}
+
+static void
+nvme_ctrlr_start(void *ctrlr_arg)
+{
+ struct nvme_controller *ctrlr = ctrlr_arg;
+ int i;
+
+ nvme_qpair_reset(&ctrlr->adminq);
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_qpair_reset(&ctrlr->ioq[i]);
+
+ nvme_admin_qpair_enable(&ctrlr->adminq);
+
+ if (nvme_ctrlr_identify(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
+ nvme_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ nvme_ctrlr_configure_aer(ctrlr);
+ nvme_ctrlr_configure_int_coalescing(ctrlr);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_io_qpair_enable(&ctrlr->ioq[i]);
+
+ /*
+ * Clear software progress marker to 0, to indicate to pre-boot
+ * software that OS driver load was successful.
+ *
+ * Chatham does not support this feature.
+ */
+ if (pci_get_devid(ctrlr->dev) != CHATHAM_PCI_ID)
+ nvme_ctrlr_cmd_set_feature(ctrlr,
+ NVME_FEAT_SOFTWARE_PROGRESS_MARKER, 0, NULL, 0, NULL, NULL);
+}
+
+void
+nvme_ctrlr_start_config_hook(void *arg)
+{
+ struct nvme_controller *ctrlr = arg;
+
+ nvme_ctrlr_start(ctrlr);
+ config_intrhook_disestablish(&ctrlr->config_hook);
+}
+
+static void
+nvme_ctrlr_reset_task(void *arg, int pending)
+{
+ struct nvme_controller *ctrlr = arg;
+ int status;
+
+ nvme_printf(ctrlr, "resetting controller\n");
+ status = nvme_ctrlr_hw_reset(ctrlr);
+ /*
+ * Use pause instead of DELAY, so that we yield to any nvme interrupt
+ * handlers on this CPU that were blocked on a qpair lock. We want
+ * all nvme interrupts completed before proceeding with restarting the
+ * controller.
+ *
+ * XXX - any way to guarantee the interrupt handlers have quiesced?
+ */
+ pause("nvmereset", hz / 10);
+ if (status == 0)
+ nvme_ctrlr_start(ctrlr);
+ else
+ nvme_ctrlr_fail(ctrlr);
+
+ atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
+}
+
+static void
+nvme_ctrlr_intx_handler(void *arg)
+{
+ struct nvme_controller *ctrlr = arg;
+
+ nvme_mmio_write_4(ctrlr, intms, 1);
+
+ nvme_qpair_process_completions(&ctrlr->adminq);
+
+ if (ctrlr->ioq[0].cpl)
+ nvme_qpair_process_completions(&ctrlr->ioq[0]);
+
+ nvme_mmio_write_4(ctrlr, intmc, 1);
+}
+
+static int
+nvme_ctrlr_configure_intx(struct nvme_controller *ctrlr)
+{
+
+ ctrlr->num_io_queues = 1;
+ ctrlr->per_cpu_io_queues = 0;
+ ctrlr->rid = 0;
+ ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
+
+ if (ctrlr->res == NULL) {
+ nvme_printf(ctrlr, "unable to allocate shared IRQ\n");
+ return (ENOMEM);
+ }
+
+ bus_setup_intr(ctrlr->dev, ctrlr->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, nvme_ctrlr_intx_handler,
+ ctrlr, &ctrlr->tag);
+
+ if (ctrlr->tag == NULL) {
+ nvme_printf(ctrlr, "unable to setup intx handler\n");
+ return (ENOMEM);
+ }
+
+ return (0);
+}
+
+static void
+nvme_pt_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_pt_command *pt = arg;
+
+ bzero(&pt->cpl, sizeof(pt->cpl));
+ pt->cpl.cdw0 = cpl->cdw0;
+ pt->cpl.status = cpl->status;
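+ /* The phase bit only reflects internal completion queue state, so clear it in the copy returned to the caller. */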
+ pt->cpl.status.p = 0;
+
+ mtx_lock(pt->driver_lock);
+ wakeup(pt);
+ mtx_unlock(pt->driver_lock);
+}
+
+int
+nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
+ struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
+ int is_admin_cmd)
+{
+ struct nvme_request *req;
+ struct mtx *mtx;
+ struct buf *buf = NULL;
+ int ret = 0;
+
+ if (pt->len > 0) {
+ if (pt->len > ctrlr->max_xfer_size) {
+ nvme_printf(ctrlr, "pt->len (%d) "
+ "exceeds max_xfer_size (%d)\n", pt->len,
+ ctrlr->max_xfer_size);
+ return EIO;
+ }
+ if (is_user_buffer) {
+ /*
+ * Ensure the user buffer is wired for the duration of
+ * this passthrough command.
+ */
+ PHOLD(curproc);
+ buf = getpbuf(NULL);
+ buf->b_saveaddr = buf->b_data;
+ buf->b_data = pt->buf;
+ buf->b_bufsize = pt->len;
+ buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
+#ifdef NVME_UNMAPPED_BIO_SUPPORT
+ if (vmapbuf(buf, 1) < 0) {
+#else
+ if (vmapbuf(buf) < 0) {
+#endif
+ ret = EFAULT;
+ goto err;
+ }
+ req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
+ nvme_pt_done, pt);
+ } else
+ req = nvme_allocate_request_vaddr(pt->buf, pt->len,
+ nvme_pt_done, pt);
+ } else
+ req = nvme_allocate_request_null(nvme_pt_done, pt);
+
+ req->cmd.opc = pt->cmd.opc;
+ req->cmd.cdw10 = pt->cmd.cdw10;
+ req->cmd.cdw11 = pt->cmd.cdw11;
+ req->cmd.cdw12 = pt->cmd.cdw12;
+ req->cmd.cdw13 = pt->cmd.cdw13;
+ req->cmd.cdw14 = pt->cmd.cdw14;
+ req->cmd.cdw15 = pt->cmd.cdw15;
+
+ req->cmd.nsid = nsid;
+
+ if (is_admin_cmd)
+ mtx = &ctrlr->lock;
+ else
+ mtx = &ctrlr->ns[nsid-1].lock;
+
+ mtx_lock(mtx);
+ pt->driver_lock = mtx;
+
+ if (is_admin_cmd)
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+ else
+ nvme_ctrlr_submit_io_request(ctrlr, req);
+
+ mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
+ mtx_unlock(mtx);
+
+ pt->driver_lock = NULL;
+
+err:
+ if (buf != NULL) {
+ relpbuf(buf, NULL);
+ PRELE(curproc);
+ }
+
+ return (ret);
+}
+
+static int
+nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
+ struct thread *td)
+{
+ struct nvme_controller *ctrlr;
+ struct nvme_pt_command *pt;
+
+ ctrlr = cdev->si_drv1;
+
+ switch (cmd) {
+ case NVME_RESET_CONTROLLER:
+ nvme_ctrlr_reset(ctrlr);
+ break;
+ case NVME_PASSTHROUGH_CMD:
+ pt = (struct nvme_pt_command *)arg;
+ return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, pt->cmd.nsid,
+ 1 /* is_user_buffer */, 1 /* is_admin_cmd */));
+ default:
+ return (ENOTTY);
+ }
+
+ return (0);
+}
+
+static struct cdevsw nvme_ctrlr_cdevsw = {
+ .d_version = D_VERSION,
+ .d_flags = 0,
+ .d_ioctl = nvme_ctrlr_ioctl
+};
+
+int
+nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
+{
+ union cap_lo_register cap_lo;
+ union cap_hi_register cap_hi;
+ int num_vectors, per_cpu_io_queues, status = 0;
+ int timeout_period;
+
+ ctrlr->dev = dev;
+
+ mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
+
+ status = nvme_ctrlr_allocate_bar(ctrlr);
+
+ if (status != 0)
+ return (status);
+
+#ifdef CHATHAM2
+ if (pci_get_devid(dev) == CHATHAM_PCI_ID) {
+ status = nvme_ctrlr_allocate_chatham_bar(ctrlr);
+ if (status != 0)
+ return (status);
+ nvme_ctrlr_setup_chatham(ctrlr);
+ }
+#endif
+
+ /*
+ * Software emulators may set the doorbell stride to something
+ * other than zero, but this driver is not set up to handle that.
+ */
+ cap_hi.raw = nvme_mmio_read_4(ctrlr, cap_hi);
+ if (cap_hi.bits.dstrd != 0)
+ return (ENXIO);
+
+ ctrlr->min_page_size = 1 << (12 + cap_hi.bits.mpsmin);
+
+ /* Get ready timeout value from controller, in units of 500ms. */
+ cap_lo.raw = nvme_mmio_read_4(ctrlr, cap_lo);
+ ctrlr->ready_timeout_in_ms = cap_lo.bits.to * 500;
+
+ timeout_period = NVME_DEFAULT_TIMEOUT_PERIOD;
+ TUNABLE_INT_FETCH("hw.nvme.timeout_period", &timeout_period);
+ timeout_period = min(timeout_period, NVME_MAX_TIMEOUT_PERIOD);
+ timeout_period = max(timeout_period, NVME_MIN_TIMEOUT_PERIOD);
+ ctrlr->timeout_period = timeout_period;
+
+ nvme_retry_count = NVME_DEFAULT_RETRY_COUNT;
+ TUNABLE_INT_FETCH("hw.nvme.retry_count", &nvme_retry_count);
+
+ per_cpu_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.nvme.per_cpu_io_queues", &per_cpu_io_queues);
+ ctrlr->per_cpu_io_queues = per_cpu_io_queues ? TRUE : FALSE;
+
+ if (ctrlr->per_cpu_io_queues)
+ ctrlr->num_io_queues = mp_ncpus;
+ else
+ ctrlr->num_io_queues = 1;
+
+ ctrlr->force_intx = 0;
+ TUNABLE_INT_FETCH("hw.nvme.force_intx", &ctrlr->force_intx);
+
+ ctrlr->enable_aborts = 0;
+ TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
+
+ ctrlr->msix_enabled = 1;
+
+ if (ctrlr->force_intx) {
+ ctrlr->msix_enabled = 0;
+ goto intx;
+ }
+
+ /* One vector per IO queue, plus one vector for admin queue. */
+ num_vectors = ctrlr->num_io_queues + 1;
+
+ if (pci_msix_count(dev) < num_vectors) {
+ ctrlr->msix_enabled = 0;
+ goto intx;
+ }
+
+ if (pci_alloc_msix(dev, &num_vectors) != 0)
+ ctrlr->msix_enabled = 0;
+
+intx:
+
+ if (!ctrlr->msix_enabled)
+ nvme_ctrlr_configure_intx(ctrlr);
+
+ ctrlr->max_xfer_size = NVME_MAX_XFER_SIZE;
+ nvme_ctrlr_construct_admin_qpair(ctrlr);
+ status = nvme_ctrlr_construct_io_qpairs(ctrlr);
+
+ if (status != 0)
+ return (status);
+
+ ctrlr->cdev = make_dev(&nvme_ctrlr_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
+ "nvme%d", device_get_unit(dev));
+
+ if (ctrlr->cdev == NULL)
+ return (ENXIO);
+
+ ctrlr->cdev->si_drv1 = (void *)ctrlr;
+
+ ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "nvme taskq");
+
+ ctrlr->is_resetting = 0;
+ TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
+
+ TASK_INIT(&ctrlr->fail_req_task, 0, nvme_ctrlr_fail_req_task, ctrlr);
+ STAILQ_INIT(&ctrlr->fail_req);
+ ctrlr->is_failed = FALSE;
+
+ return (0);
+}
+
+void
+nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
+{
+ int i;
+
+ nvme_ctrlr_disable(ctrlr);
+ taskqueue_free(ctrlr->taskqueue);
+
+ for (i = 0; i < NVME_MAX_NAMESPACES; i++)
+ nvme_ns_destruct(&ctrlr->ns[i]);
+
+ if (ctrlr->cdev)
+ destroy_dev(ctrlr->cdev);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ nvme_io_qpair_destroy(&ctrlr->ioq[i]);
+ }
+
+ free(ctrlr->ioq, M_NVME);
+
+ nvme_admin_qpair_destroy(&ctrlr->adminq);
+
+ if (ctrlr->resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->resource_id, ctrlr->resource);
+ }
+
+ if (ctrlr->bar4_resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->bar4_resource_id, ctrlr->bar4_resource);
+ }
+
+#ifdef CHATHAM2
+ if (ctrlr->chatham_resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ ctrlr->chatham_resource_id, ctrlr->chatham_resource);
+ }
+#endif
+
+ if (ctrlr->tag)
+ bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
+ rman_get_rid(ctrlr->res), ctrlr->res);
+
+ if (ctrlr->msix_enabled)
+ pci_release_msi(dev);
+}
+
+void
+nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req)
+{
+
+ nvme_qpair_submit_request(&ctrlr->adminq, req);
+}
+
+void
+nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req)
+{
+ struct nvme_qpair *qpair;
+
+ if (ctrlr->per_cpu_io_queues)
+ qpair = &ctrlr->ioq[curcpu];
+ else
+ qpair = &ctrlr->ioq[0];
+
+ nvme_qpair_submit_request(qpair, req);
+}
+
+device_t
+nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
+{
+
+ return (ctrlr->dev);
+}
+
+const struct nvme_controller_data *
+nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
+{
+
+ return (&ctrlr->cdata);
+}
Property changes on: trunk/sys/dev/nvme/nvme_ctrlr.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
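
For reference, the nvme%d character device registered above in nvme_ctrlr_construct() accepts the NVME_RESET_CONTROLLER and NVME_PASSTHROUGH_CMD ioctls handled by nvme_ctrlr_ioctl(). A minimal userland sketch of scheduling a controller reset could look like the following (not part of this commit; the <dev/nvme/nvme.h> include path is an assumption, and the 0600 root-owned node means it must run as root):

#include <sys/types.h>
#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

#include <dev/nvme/nvme.h>	/* NVME_RESET_CONTROLLER */

int
main(void)
{
	int fd;

	/* Controller node created by make_dev() in nvme_ctrlr_construct(). */
	fd = open("/dev/nvme0", O_RDWR);
	if (fd < 0)
		err(1, "open(/dev/nvme0)");

	/*
	 * Kicks off nvme_ctrlr_reset(); the reset itself runs asynchronously
	 * on the controller's taskqueue, so the ioctl returns immediately.
	 */
	if (ioctl(fd, NVME_RESET_CONTROLLER) < 0)
		err(1, "NVME_RESET_CONTROLLER");

	close(fd);
	return (0);
}
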
Added: trunk/sys/dev/nvme/nvme_ctrlr_cmd.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ctrlr_cmd.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_ctrlr_cmd.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,326 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_ctrlr_cmd.c 253296 2013-07-12 22:07:33Z jimharris $");
+
+#include "nvme_private.h"
+
+void
+nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ sizeof(struct nvme_controller_data), cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_IDENTIFY;
+
+ /*
+ * TODO: create an identify command data structure, which
+ * includes this CNS bit in cdw10.
+ */
+ cmd->cdw10 = 1;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint16_t nsid,
+ void *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_IDENTIFY;
+
+ /*
+ * TODO: create an identify command data structure
+ */
+ cmd->nsid = nsid;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, uint16_t vector, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_CREATE_IO_CQ;
+
+ /*
+ * TODO: create a create io completion queue command data
+ * structure.
+ */
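+ /* cdw10: queue size (0-based) in bits 31:16, queue identifier in bits 15:0. */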
+ cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
+ /* 0x3 = interrupts enabled | physically contiguous */
+ cmd->cdw11 = (vector << 16) | 0x3;
+ cmd->prp1 = io_que->cpl_bus_addr;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_CREATE_IO_SQ;
+
+ /*
+ * TODO: create a create io submission queue command data
+ * structure.
+ */
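+ /* cdw10: queue size (0-based) in bits 31:16, queue id in bits 15:0; cdw11 bits 31:16 name the paired completion queue. */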
+ cmd->cdw10 = ((io_que->num_entries-1) << 16) | io_que->id;
+ /* 0x1 = physically contiguous */
+ cmd->cdw11 = (io_que->id << 16) | 0x1;
+ cmd->prp1 = io_que->cmd_bus_addr;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_DELETE_IO_CQ;
+
+ /*
+ * TODO: create a delete io completion queue command data
+ * structure.
+ */
+ cmd->cdw10 = io_que->id;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_DELETE_IO_SQ;
+
+ /*
+ * TODO: create a delete io submission queue command data
+ * structure.
+ */
+ cmd->cdw10 = io_que->id;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_SET_FEATURES;
+ cmd->cdw10 = feature;
+ cmd->cdw11 = cdw11;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_GET_FEATURES;
+ cmd->cdw10 = feature;
+ cmd->cdw11 = cdw11;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
+ uint32_t num_queues, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ uint32_t cdw11;
+
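+ /* cdw11: number of completion queues requested in bits 31:16, submission queues in bits 15:0, both 0-based. */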
+ cdw11 = ((num_queues - 1) << 16) | (num_queues - 1);
+ nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_NUMBER_OF_QUEUES, cdw11,
+ NULL, 0, cb_fn, cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
+ union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ uint32_t cdw11;
+
+ cdw11 = state.raw;
+ nvme_ctrlr_cmd_set_feature(ctrlr,
+ NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
+ cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
+ uint32_t microseconds, uint32_t threshold, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ uint32_t cdw11;
+
+ if ((microseconds/100) >= 0x100) {
+ nvme_printf(ctrlr, "invalid coal time %d, disabling\n",
+ microseconds);
+ microseconds = 0;
+ threshold = 0;
+ }
+
+ if (threshold >= 0x100) {
+ nvme_printf(ctrlr, "invalid threshold %d, disabling\n",
+ threshold);
+ threshold = 0;
+ microseconds = 0;
+ }
+
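+ /* cdw11: aggregation time (in 100-microsecond increments) in bits 15:8, aggregation threshold in bits 7:0. */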
+ cdw11 = ((microseconds/100) << 8) | threshold;
+ nvme_ctrlr_cmd_set_feature(ctrlr, NVME_FEAT_INTERRUPT_COALESCING, cdw11,
+ NULL, 0, cb_fn, cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
+ uint32_t nsid, void *payload, uint32_t payload_size, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_GET_LOG_PAGE;
+ cmd->nsid = nsid;
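+ /* cdw10: number of dwords to transfer (0-based) in bits 27:16, log page identifier in bits 7:0. */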
+ cmd->cdw10 = ((payload_size/sizeof(uint32_t)) - 1) << 16;
+ cmd->cdw10 |= log_page;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+void
+nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
+ struct nvme_error_information_entry *payload, uint32_t num_entries,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+
+ KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
+
+ /* The controller's error log page entries (elpe) field is 0-based. */
+ KASSERT(num_entries <= (ctrlr->cdata.elpe + 1),
+ ("%s called with num_entries=%d but (elpe+1)=%d\n", __func__,
+ num_entries, ctrlr->cdata.elpe + 1));
+
+ if (num_entries > (ctrlr->cdata.elpe + 1))
+ num_entries = ctrlr->cdata.elpe + 1;
+
+ nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_ERROR,
+ NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload) * num_entries,
+ cb_fn, cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
+ uint32_t nsid, struct nvme_health_information_page *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+
+ nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
+ nsid, payload, sizeof(*payload), cb_fn, cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
+ struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+
+ nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
+ NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
+ cb_arg);
+}
+
+void
+nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
+ uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_ABORT;
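+ /* cdw10: command identifier to abort in bits 31:16, submission queue identifier in bits 15:0. */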
+ cmd->cdw10 = (cid << 16) | sqid;
+
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
Property changes on: trunk/sys/dev/nvme/nvme_ctrlr_cmd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme_ns.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ns.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_ns.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,364 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_ns.c 253630 2013-07-24 22:46:27Z jimharris $");
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/disk.h>
+#include <sys/fcntl.h>
+#include <sys/ioccom.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "nvme_private.h"
+
+static int
+nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
+ struct thread *td)
+{
+ struct nvme_namespace *ns;
+ struct nvme_controller *ctrlr;
+ struct nvme_pt_command *pt;
+
+ ns = cdev->si_drv1;
+ ctrlr = ns->ctrlr;
+
+ switch (cmd) {
+ case NVME_IO_TEST:
+ case NVME_BIO_TEST:
+ nvme_ns_test(ns, cmd, arg);
+ break;
+ case NVME_PASSTHROUGH_CMD:
+ pt = (struct nvme_pt_command *)arg;
+ return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id,
+ 1 /* is_user_buffer */, 0 /* is_admin_cmd */));
+ case DIOCGMEDIASIZE:
+ *(off_t *)arg = (off_t)nvme_ns_get_size(ns);
+ break;
+ case DIOCGSECTORSIZE:
+ *(u_int *)arg = nvme_ns_get_sector_size(ns);
+ break;
+ default:
+ return (ENOTTY);
+ }
+
+ return (0);
+}
+
+static int
+nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused,
+ struct thread *td)
+{
+ int error = 0;
+
+ if (flags & FWRITE)
+ error = securelevel_gt(td->td_ucred, 0);
+
+ return (error);
+}
+
+static int
+nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
+ struct thread *td)
+{
+
+ return (0);
+}
+
+static void
+nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct bio *bp = arg;
+
+ /*
+ * TODO: add more extensive translation of NVMe status codes
+ * to different bio error codes (i.e. EIO, EINVAL, etc.)
+ */
+ if (nvme_completion_is_error(cpl)) {
+ bp->bio_error = EIO;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ } else
+ bp->bio_resid = 0;
+
+ biodone(bp);
+}
+
+static void
+nvme_ns_strategy(struct bio *bp)
+{
+ struct nvme_namespace *ns;
+ int err;
+
+ ns = bp->bio_dev->si_drv1;
+ err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done);
+
+ if (err) {
+ bp->bio_error = err;
+ bp->bio_flags |= BIO_ERROR;
+ bp->bio_resid = bp->bio_bcount;
+ biodone(bp);
+ }
+
+}
+
+static struct cdevsw nvme_ns_cdevsw = {
+ .d_version = D_VERSION,
+#ifdef NVME_UNMAPPED_BIO_SUPPORT
+ .d_flags = D_DISK | D_UNMAPPED_IO,
+#else
+ .d_flags = D_DISK,
+#endif
+ .d_read = physread,
+ .d_write = physwrite,
+ .d_open = nvme_ns_open,
+ .d_close = nvme_ns_close,
+ .d_strategy = nvme_ns_strategy,
+ .d_ioctl = nvme_ns_ioctl
+};
+
+uint32_t
+nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+uint32_t
+nvme_ns_get_sector_size(struct nvme_namespace *ns)
+{
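+ /* lbads is the log2 of the LBA data size, e.g. lbads == 9 -> 512-byte sectors. */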
+ return (1 << ns->data.lbaf[ns->data.flbas.format].lbads);
+}
+
+uint64_t
+nvme_ns_get_num_sectors(struct nvme_namespace *ns)
+{
+ return (ns->data.nsze);
+}
+
+uint64_t
+nvme_ns_get_size(struct nvme_namespace *ns)
+{
+ return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns));
+}
+
+uint32_t
+nvme_ns_get_flags(struct nvme_namespace *ns)
+{
+ return (ns->flags);
+}
+
+const char *
+nvme_ns_get_serial_number(struct nvme_namespace *ns)
+{
+ return ((const char *)ns->ctrlr->cdata.sn);
+}
+
+const char *
+nvme_ns_get_model_number(struct nvme_namespace *ns)
+{
+ return ((const char *)ns->ctrlr->cdata.mn);
+}
+
+const struct nvme_namespace_data *
+nvme_ns_get_data(struct nvme_namespace *ns)
+{
+
+ return (&ns->data);
+}
+
+static void
+nvme_ns_bio_done(void *arg, const struct nvme_completion *status)
+{
+ struct bio *bp = arg;
+ nvme_cb_fn_t bp_cb_fn;
+
+ bp_cb_fn = bp->bio_driver1;
+
+ if (bp->bio_driver2)
+ free(bp->bio_driver2, M_NVME);
+
+ bp_cb_fn(bp, status);
+}
+
+int
+nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn)
+{
+ struct nvme_dsm_range *dsm_range;
+ int err;
+
+ bp->bio_driver1 = cb_fn;
+
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ err = nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp);
+ break;
+ case BIO_WRITE:
+ err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp);
+ break;
+ case BIO_FLUSH:
+ err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp);
+ break;
+ case BIO_DELETE:
+ dsm_range =
+ malloc(sizeof(struct nvme_dsm_range), M_NVME,
+ M_ZERO | M_WAITOK);
+ dsm_range->length =
+ bp->bio_bcount/nvme_ns_get_sector_size(ns);
+ dsm_range->starting_lba =
+ bp->bio_offset/nvme_ns_get_sector_size(ns);
+ bp->bio_driver2 = dsm_range;
+ err = nvme_ns_cmd_deallocate(ns, dsm_range, 1,
+ nvme_ns_bio_done, bp);
+ if (err != 0)
+ free(dsm_range, M_NVME);
+ break;
+ default:
+ err = EIO;
+ break;
+ }
+
+ return (err);
+}
+
+#ifdef CHATHAM2
+static void
+nvme_ns_populate_chatham_data(struct nvme_namespace *ns)
+{
+ struct nvme_controller *ctrlr;
+ struct nvme_namespace_data *nsdata;
+
+ ctrlr = ns->ctrlr;
+ nsdata = &ns->data;
+
+ nsdata->nsze = ctrlr->chatham_lbas;
+ nsdata->ncap = ctrlr->chatham_lbas;
+ nsdata->nuse = ctrlr->chatham_lbas;
+
+ /* Chatham2 doesn't support thin provisioning. */
+ nsdata->nsfeat.thin_prov = 0;
+
+ /* Set LBA size to 512 bytes. */
+ nsdata->lbaf[0].lbads = 9;
+}
+#endif /* CHATHAM2 */
+
+int
+nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
+ struct nvme_controller *ctrlr)
+{
+ struct nvme_completion_poll_status status;
+
+ ns->ctrlr = ctrlr;
+ ns->id = id;
+
+ /*
+ * Namespaces are reconstructed after a controller reset, so check
+ * to make sure we only call mtx_init once on each mtx.
+ *
+ * TODO: Move this somewhere where it gets called at controller
+ * construction time, which is not invoked as part of each
+ * controller reset.
+ */
+ if (!mtx_initialized(&ns->lock))
+ mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF);
+
+#ifdef CHATHAM2
+ if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
+ nvme_ns_populate_chatham_data(ns);
+ else {
+#endif
+ status.done = FALSE;
+ nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data,
+ nvme_completion_poll_cb, &status);
+ while (status.done == FALSE)
+ DELAY(5);
+ if (nvme_completion_is_error(&status.cpl)) {
+ nvme_printf(ctrlr, "nvme_identify_namespace failed\n");
+ return (ENXIO);
+ }
+#ifdef CHATHAM2
+ }
+#endif
+
+ /*
+ * Note: nlbaf is a 0-based count, so > is appropriate here,
+ * not >=.
+ */
+ if (ns->data.flbas.format > ns->data.nlbaf) {
+ printf("lba format %d exceeds number supported (%d)\n",
+ ns->data.flbas.format, ns->data.nlbaf+1);
+ return (1);
+ }
+
+ if (ctrlr->cdata.oncs.dsm)
+ ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED;
+
+ if (ctrlr->cdata.vwc.present)
+ ns->flags |= NVME_NS_FLUSH_SUPPORTED;
+
+ /*
+ * cdev may have already been created, if we are reconstructing the
+ * namespace after a controller-level reset.
+ */
+ if (ns->cdev != NULL)
+ return (0);
+
+/*
+ * MAKEDEV_ETERNAL was added in r210923, for cdevs that will never
+ * be destroyed. This avoids refcounting on the cdev object.
+ * That should be OK in this case, as long as we're not supporting PCIe
+ * surprise removal nor namespace deletion.
+ */
+#ifdef MAKEDEV_ETERNAL_KLD
+ ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, 0,
+ NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
+ device_get_unit(ctrlr->dev), ns->id);
+#else
+ ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, 0,
+ NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d",
+ device_get_unit(ctrlr->dev), ns->id);
+#endif
+
+ if (ns->cdev != NULL)
+ ns->cdev->si_drv1 = ns;
+
+ return (0);
+}
+
+void
+nvme_ns_destruct(struct nvme_namespace *ns)
+{
+
+ if (ns->cdev != NULL)
+ destroy_dev(ns->cdev);
+}
Property changes on: trunk/sys/dev/nvme/nvme_ns.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme_ns_cmd.c
===================================================================
--- trunk/sys/dev/nvme/nvme_ns_cmd.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_ns_cmd.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,186 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_ns_cmd.c 253630 2013-07-24 22:46:27Z jimharris $");
+
+#include "nvme_private.h"
+
+int
+nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
+ uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_READ;
+ cmd->nsid = ns->id;
+
+ /* TODO: create a read command data structure */
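+ /* cdw10/cdw11 hold the 64-bit starting LBA; cdw12 holds the number of blocks, 0-based. */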
+ *(uint64_t *)&cmd->cdw10 = lba;
+ cmd->cdw12 = lba_count-1;
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+ uint64_t lba;
+ uint64_t lba_count;
+
+ req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_READ;
+ cmd->nsid = ns->id;
+
+ lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
+ lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
+
+ /* TODO: create a read command data structure */
+ *(uint64_t *)&cmd->cdw10 = lba;
+ cmd->cdw12 = lba_count-1;
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
+ uint32_t lba_count, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_WRITE;
+ cmd->nsid = ns->id;
+
+ /* TODO: create a write command data structure */
+ *(uint64_t *)&cmd->cdw10 = lba;
+ cmd->cdw12 = lba_count-1;
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+ uint64_t lba;
+ uint64_t lba_count;
+
+ req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_WRITE;
+ cmd->nsid = ns->id;
+
+ lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
+ lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
+
+ /* TODO: create a write command data structure */
+ *(uint64_t *)&cmd->cdw10 = lba;
+ cmd->cdw12 = lba_count-1;
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
+ uint8_t num_ranges, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_vaddr(payload,
+ num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_DATASET_MANAGEMENT;
+ cmd->nsid = ns->id;
+
+ /* TODO: create a delete command data structure */
+ cmd->cdw10 = num_ranges - 1;
+ cmd->cdw11 = NVME_DSM_ATTR_DEALLOCATE;
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
+
+int
+nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+ struct nvme_command *cmd;
+
+ req = nvme_allocate_request_null(cb_fn, cb_arg);
+
+ if (req == NULL)
+ return (ENOMEM);
+
+ cmd = &req->cmd;
+ cmd->opc = NVME_OPC_FLUSH;
+ cmd->nsid = ns->id;
+
+ nvme_ctrlr_submit_io_request(ns->ctrlr, req);
+
+ return (0);
+}
Property changes on: trunk/sys/dev/nvme/nvme_ns_cmd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme_private.h
===================================================================
--- trunk/sys/dev/nvme/nvme_private.h (rev 0)
+++ trunk/sys/dev/nvme/nvme_private.h 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,556 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: release/9.2.0/sys/dev/nvme/nvme_private.h 253297 2013-07-12 22:08:24Z jimharris $
+ */
+
+#ifndef __NVME_PRIVATE_H__
+#define __NVME_PRIVATE_H__
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+
+#include <vm/uma.h>
+
+#include <machine/bus.h>
+
+#include "nvme.h"
+
+#define DEVICE2SOFTC(dev) ((struct nvme_controller *) device_get_softc(dev))
+
+MALLOC_DECLARE(M_NVME);
+
+#define CHATHAM2
+
+#ifdef CHATHAM2
+#define CHATHAM_PCI_ID 0x20118086
+#define CHATHAM_CONTROL_BAR 0
+#endif
+
+#define IDT32_PCI_ID 0x80d0111d /* 32 channel board */
+#define IDT8_PCI_ID 0x80d2111d /* 8 channel board */
+
+/*
+ * For commands requiring more than 2 PRP entries, one PRP will be
+ * embedded in the command (prp1), and the rest of the PRP entries
+ * will be in a list pointed to by the command (prp2). This means
+ * that the real maximum number of PRP entries we support is 32+1, which
+ * results in a max xfer size of 32*PAGE_SIZE.
+ */
+#define NVME_MAX_PRP_LIST_ENTRIES (NVME_MAX_XFER_SIZE / PAGE_SIZE)
+
+#define NVME_ADMIN_TRACKERS (16)
+#define NVME_ADMIN_ENTRIES (128)
+/* min and max are defined in admin queue attributes section of spec */
+#define NVME_MIN_ADMIN_ENTRIES (2)
+#define NVME_MAX_ADMIN_ENTRIES (4096)
+
+/*
+ * NVME_IO_ENTRIES defines the size of an I/O qpair's submission and completion
+ * queues, while NVME_IO_TRACKERS defines the maximum number of I/O that we
+ * will allow outstanding on an I/O qpair at any time. The only advantage in
+ * having IO_ENTRIES > IO_TRACKERS is for debugging purposes - when dumping
+ * the contents of the submission and completion queues, it will show a longer
+ * history of data.
+ */
+#define NVME_IO_ENTRIES (256)
+#define NVME_IO_TRACKERS (128)
+#define NVME_MIN_IO_TRACKERS (4)
+#define NVME_MAX_IO_TRACKERS (1024)
+
+/*
+ * NVME_MAX_IO_ENTRIES is not defined, since it is specified in CAP.MQES
+ * for each controller.
+ */
+
+#define NVME_INT_COAL_TIME (0) /* disabled */
+#define NVME_INT_COAL_THRESHOLD (0) /* 0-based */
+
+#define NVME_MAX_NAMESPACES (16)
+#define NVME_MAX_CONSUMERS (2)
+#define NVME_MAX_ASYNC_EVENTS (8)
+
+#define NVME_DEFAULT_TIMEOUT_PERIOD (30) /* in seconds */
+#define NVME_MIN_TIMEOUT_PERIOD (5)
+#define NVME_MAX_TIMEOUT_PERIOD (120)
+
+#define NVME_DEFAULT_RETRY_COUNT (4)
+
+/* Maximum log page size to fetch for AERs. */
+#define NVME_MAX_AER_LOG_SIZE (4096)
+
+/*
+ * Define CACHE_LINE_SIZE here for older FreeBSD versions that do not define
+ * it.
+ */
+#ifndef CACHE_LINE_SIZE
+#define CACHE_LINE_SIZE (64)
+#endif
+
+/*
+ * Use presence of the BIO_UNMAPPED flag to determine whether unmapped I/O
+ * support and the bus_dmamap_load_bio API are available on the target
+ * kernel. This will ease porting back to earlier stable branches at a
+ * later point.
+ */
+#ifdef BIO_UNMAPPED
+#define NVME_UNMAPPED_BIO_SUPPORT
+#endif
+
+extern uma_zone_t nvme_request_zone;
+extern int32_t nvme_retry_count;
+
+struct nvme_completion_poll_status {
+
+ struct nvme_completion cpl;
+ boolean_t done;
+};
+
+#define NVME_REQUEST_VADDR 1
+#define NVME_REQUEST_NULL 2 /* For requests with no payload. */
+#define NVME_REQUEST_UIO 3
+#ifdef NVME_UNMAPPED_BIO_SUPPORT
+#define NVME_REQUEST_BIO 4
+#endif
+
+struct nvme_request {
+
+ struct nvme_command cmd;
+ struct nvme_qpair *qpair;
+ union {
+ void *payload;
+ struct bio *bio;
+ } u;
+ uint32_t type;
+ uint32_t payload_size;
+ boolean_t timeout;
+ nvme_cb_fn_t cb_fn;
+ void *cb_arg;
+ int32_t retries;
+ STAILQ_ENTRY(nvme_request) stailq;
+};
+
+struct nvme_async_event_request {
+
+ struct nvme_controller *ctrlr;
+ struct nvme_request *req;
+ struct nvme_completion cpl;
+ uint32_t log_page_id;
+ uint32_t log_page_size;
+ uint8_t log_page_buffer[NVME_MAX_AER_LOG_SIZE];
+};
+
+struct nvme_tracker {
+
+ TAILQ_ENTRY(nvme_tracker) tailq;
+ struct nvme_request *req;
+ struct nvme_qpair *qpair;
+ struct callout timer;
+ bus_dmamap_t payload_dma_map;
+ uint16_t cid;
+
+ uint64_t prp[NVME_MAX_PRP_LIST_ENTRIES];
+ bus_addr_t prp_bus_addr;
+ bus_dmamap_t prp_dma_map;
+};
+
+struct nvme_qpair {
+
+ struct nvme_controller *ctrlr;
+ uint32_t id;
+ uint32_t phase;
+
+ uint16_t vector;
+ int rid;
+ struct resource *res;
+ void *tag;
+
+ uint32_t num_entries;
+ uint32_t num_trackers;
+ uint32_t sq_tdbl_off;
+ uint32_t cq_hdbl_off;
+
+ uint32_t sq_head;
+ uint32_t sq_tail;
+ uint32_t cq_head;
+
+ int64_t num_cmds;
+ int64_t num_intr_handler_calls;
+
+ struct nvme_command *cmd;
+ struct nvme_completion *cpl;
+
+ bus_dma_tag_t dma_tag;
+
+ bus_dmamap_t cmd_dma_map;
+ uint64_t cmd_bus_addr;
+
+ bus_dmamap_t cpl_dma_map;
+ uint64_t cpl_bus_addr;
+
+ TAILQ_HEAD(, nvme_tracker) free_tr;
+ TAILQ_HEAD(, nvme_tracker) outstanding_tr;
+ STAILQ_HEAD(, nvme_request) queued_req;
+
+ struct nvme_tracker **act_tr;
+
+ boolean_t is_enabled;
+
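+ /* Cache-line align the per-qpair lock (and, below, the qpair itself) to avoid false sharing. */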
+ struct mtx lock __aligned(CACHE_LINE_SIZE);
+
+} __aligned(CACHE_LINE_SIZE);
+
+struct nvme_namespace {
+
+ struct nvme_controller *ctrlr;
+ struct nvme_namespace_data data;
+ uint16_t id;
+ uint16_t flags;
+ struct cdev *cdev;
+ void *cons_cookie[NVME_MAX_CONSUMERS];
+ struct mtx lock;
+};
+
+/*
+ * One of these per allocated PCI device.
+ */
+struct nvme_controller {
+
+ device_t dev;
+
+ struct mtx lock;
+
+ uint32_t ready_timeout_in_ms;
+
+ bus_space_tag_t bus_tag;
+ bus_space_handle_t bus_handle;
+ int resource_id;
+ struct resource *resource;
+
+ /*
+ * The NVMe spec allows for the MSI-X table to be placed in BAR 4/5,
+ * separate from the control registers which are in BAR 0/1. These
+ * members track the mapping of BAR 4/5 for that reason.
+ */
+ int bar4_resource_id;
+ struct resource *bar4_resource;
+
+#ifdef CHATHAM2
+ bus_space_tag_t chatham_bus_tag;
+ bus_space_handle_t chatham_bus_handle;
+ int chatham_resource_id;
+ struct resource *chatham_resource;
+#endif
+
+ uint32_t msix_enabled;
+ uint32_t force_intx;
+ uint32_t enable_aborts;
+
+ uint32_t num_io_queues;
+ boolean_t per_cpu_io_queues;
+
+ /* Fields for tracking progress during controller initialization. */
+ struct intr_config_hook config_hook;
+ uint32_t ns_identified;
+ uint32_t queues_created;
+
+ struct task reset_task;
+ struct task fail_req_task;
+ struct taskqueue *taskqueue;
+
+ /* For shared legacy interrupt. */
+ int rid;
+ struct resource *res;
+ void *tag;
+
+ bus_dma_tag_t hw_desc_tag;
+ bus_dmamap_t hw_desc_map;
+
+ /** maximum i/o size in bytes */
+ uint32_t max_xfer_size;
+
+ /** minimum page size supported by this controller in bytes */
+ uint32_t min_page_size;
+
+ /** interrupt coalescing time period (in microseconds) */
+ uint32_t int_coal_time;
+
+ /** interrupt coalescing threshold */
+ uint32_t int_coal_threshold;
+
+ /** timeout period in seconds */
+ uint32_t timeout_period;
+
+ struct nvme_qpair adminq;
+ struct nvme_qpair *ioq;
+
+ struct nvme_registers *regs;
+
+ struct nvme_controller_data cdata;
+ struct nvme_namespace ns[NVME_MAX_NAMESPACES];
+
+ struct cdev *cdev;
+
+ uint32_t num_aers;
+ struct nvme_async_event_request aer[NVME_MAX_ASYNC_EVENTS];
+
+ void *cons_cookie[NVME_MAX_CONSUMERS];
+
+ uint32_t is_resetting;
+
+ boolean_t is_failed;
+ STAILQ_HEAD(, nvme_request) fail_req;
+
+#ifdef CHATHAM2
+ uint64_t chatham_size;
+ uint64_t chatham_lbas;
+#endif
+};
+
+#define nvme_mmio_offsetof(reg) \
+ offsetof(struct nvme_registers, reg)
+
+#define nvme_mmio_read_4(sc, reg) \
+ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg))
+
+#define nvme_mmio_write_4(sc, reg, val) \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg), val)
+
+#define nvme_mmio_write_8(sc, reg, val) \
+ do { \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg), val & 0xFFFFFFFF); \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ nvme_mmio_offsetof(reg)+4, \
+ (val & 0xFFFFFFFF00000000UL) >> 32); \
+ } while (0);
+
+#ifdef CHATHAM2
+#define chatham_read_4(softc, reg) \
+ bus_space_read_4((softc)->chatham_bus_tag, \
+ (softc)->chatham_bus_handle, reg)
+
+#define chatham_write_8(sc, reg, val) \
+ do { \
+ bus_space_write_4((sc)->chatham_bus_tag, \
+ (sc)->chatham_bus_handle, reg, val & 0xffffffff); \
+ bus_space_write_4((sc)->chatham_bus_tag, \
+ (sc)->chatham_bus_handle, reg+4, \
+ (val & 0xFFFFFFFF00000000UL) >> 32); \
+ } while (0);
+
+#endif /* CHATHAM2 */
+
+#if __FreeBSD_version < 800054
+#define wmb() __asm volatile("sfence" ::: "memory")
+#define mb() __asm volatile("mfence" ::: "memory")
+#endif
+
+#define nvme_printf(ctrlr, fmt, args...) \
+ device_printf(ctrlr->dev, fmt, ##args)
+
+void nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg);
+
+void nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr,
+ void *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr,
+ uint16_t nsid, void *payload,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_set_interrupt_coalescing(struct nvme_controller *ctrlr,
+ uint32_t microseconds,
+ uint32_t threshold,
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
+ struct nvme_error_information_entry *payload,
+ uint32_t num_entries, /* 0 = max */
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
+ uint32_t nsid,
+ struct nvme_health_information_page *payload,
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
+ struct nvme_firmware_page *payload,
+ nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que, uint16_t vector,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
+ struct nvme_qpair *io_que,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
+ uint32_t num_queues, nvme_cb_fn_t cb_fn,
+ void *cb_arg);
+void nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
+ union nvme_critical_warning_state state,
+ nvme_cb_fn_t cb_fn, void *cb_arg);
+void nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
+ uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
+
+void nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl);
+
+int nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev);
+void nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev);
+int nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr);
+void nvme_ctrlr_reset(struct nvme_controller *ctrlr);
+/* ctrlr defined as void * to allow use with config_intrhook. */
+void nvme_ctrlr_start_config_hook(void *ctrlr_arg);
+void nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req);
+void nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req);
+void nvme_ctrlr_post_failed_request(struct nvme_controller *ctrlr,
+ struct nvme_request *req);
+
+void nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
+ uint16_t vector, uint32_t num_entries,
+ uint32_t num_trackers,
+ struct nvme_controller *ctrlr);
+void nvme_qpair_submit_tracker(struct nvme_qpair *qpair,
+ struct nvme_tracker *tr);
+void nvme_qpair_process_completions(struct nvme_qpair *qpair);
+void nvme_qpair_submit_request(struct nvme_qpair *qpair,
+ struct nvme_request *req);
+void nvme_qpair_reset(struct nvme_qpair *qpair);
+void nvme_qpair_fail(struct nvme_qpair *qpair);
+void nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
+ struct nvme_request *req,
+ uint32_t sct, uint32_t sc,
+ boolean_t print_on_error);
+
+void nvme_admin_qpair_enable(struct nvme_qpair *qpair);
+void nvme_admin_qpair_disable(struct nvme_qpair *qpair);
+void nvme_admin_qpair_destroy(struct nvme_qpair *qpair);
+
+void nvme_io_qpair_enable(struct nvme_qpair *qpair);
+void nvme_io_qpair_disable(struct nvme_qpair *qpair);
+void nvme_io_qpair_destroy(struct nvme_qpair *qpair);
+
+int nvme_ns_construct(struct nvme_namespace *ns, uint16_t id,
+ struct nvme_controller *ctrlr);
+void nvme_ns_destruct(struct nvme_namespace *ns);
+
+void nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);
+
+void nvme_dump_command(struct nvme_command *cmd);
+void nvme_dump_completion(struct nvme_completion *cpl);
+
+static __inline void
+nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ uint64_t *bus_addr = (uint64_t *)arg;
+
+ *bus_addr = seg[0].ds_addr;
+}
+
+static __inline struct nvme_request *
+_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = uma_zalloc(nvme_request_zone, M_NOWAIT | M_ZERO);
+ if (req != NULL) {
+ req->cb_fn = cb_fn;
+ req->cb_arg = cb_arg;
+ req->timeout = TRUE;
+ }
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL) {
+ req->type = NVME_REQUEST_VADDR;
+ req->u.payload = payload;
+ req->payload_size = payload_size;
+ }
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL)
+ req->type = NVME_REQUEST_NULL;
+ return (req);
+}
+
+static __inline struct nvme_request *
+nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct nvme_request *req;
+
+ req = _nvme_allocate_request(cb_fn, cb_arg);
+ if (req != NULL) {
+#ifdef NVME_UNMAPPED_BIO_SUPPORT
+ req->type = NVME_REQUEST_BIO;
+ req->u.bio = bio;
+#else
+ req->type = NVME_REQUEST_VADDR;
+ req->u.payload = bio->bio_data;
+ req->payload_size = bio->bio_bcount;
+#endif
+ }
+ return (req);
+}
+
+#define nvme_free_request(req) uma_zfree(nvme_request_zone, req)
+
+void nvme_notify_async_consumers(struct nvme_controller *ctrlr,
+ const struct nvme_completion *async_cpl,
+ uint32_t log_page_id, void *log_page_buffer,
+ uint32_t log_page_size);
+void nvme_notify_fail_consumers(struct nvme_controller *ctrlr);
+
+#endif /* __NVME_PRIVATE_H__ */
Property changes on: trunk/sys/dev/nvme/nvme_private.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: trunk/sys/dev/nvme/nvme_qpair.c
===================================================================
--- trunk/sys/dev/nvme/nvme_qpair.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_qpair.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,984 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_qpair.c 253296 2013-07-12 22:07:33Z jimharris $");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+#include <dev/pci/pcivar.h>
+
+#include "nvme_private.h"
+
+static void _nvme_qpair_submit_request(struct nvme_qpair *qpair,
+ struct nvme_request *req);
+
+struct nvme_opcode_string {
+
+ uint16_t opc;
+ const char * str;
+};
+
+static struct nvme_opcode_string admin_opcode[] = {
+ { NVME_OPC_DELETE_IO_SQ, "DELETE IO SQ" },
+ { NVME_OPC_CREATE_IO_SQ, "CREATE IO SQ" },
+ { NVME_OPC_GET_LOG_PAGE, "GET LOG PAGE" },
+ { NVME_OPC_DELETE_IO_CQ, "DELETE IO CQ" },
+ { NVME_OPC_CREATE_IO_CQ, "CREATE IO CQ" },
+ { NVME_OPC_IDENTIFY, "IDENTIFY" },
+ { NVME_OPC_ABORT, "ABORT" },
+ { NVME_OPC_SET_FEATURES, "SET FEATURES" },
+ { NVME_OPC_GET_FEATURES, "GET FEATURES" },
+ { NVME_OPC_ASYNC_EVENT_REQUEST, "ASYNC EVENT REQUEST" },
+ { NVME_OPC_FIRMWARE_ACTIVATE, "FIRMWARE ACTIVATE" },
+ { NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD, "FIRMWARE IMAGE DOWNLOAD" },
+ { NVME_OPC_FORMAT_NVM, "FORMAT NVM" },
+ { NVME_OPC_SECURITY_SEND, "SECURITY SEND" },
+ { NVME_OPC_SECURITY_RECEIVE, "SECURITY RECEIVE" },
+ { 0xFFFF, "ADMIN COMMAND" }
+};
+
+static struct nvme_opcode_string io_opcode[] = {
+ { NVME_OPC_FLUSH, "FLUSH" },
+ { NVME_OPC_WRITE, "WRITE" },
+ { NVME_OPC_READ, "READ" },
+ { NVME_OPC_WRITE_UNCORRECTABLE, "WRITE UNCORRECTABLE" },
+ { NVME_OPC_COMPARE, "COMPARE" },
+ { NVME_OPC_DATASET_MANAGEMENT, "DATASET MANAGEMENT" },
+ { 0xFFFF, "IO COMMAND" }
+};
+
+static const char *
+get_admin_opcode_string(uint16_t opc)
+{
+ struct nvme_opcode_string *entry;
+
+ entry = admin_opcode;
+
+ while (entry->opc != 0xFFFF) {
+ if (entry->opc == opc)
+ return (entry->str);
+ entry++;
+ }
+ return (entry->str);
+}
+
+static const char *
+get_io_opcode_string(uint16_t opc)
+{
+ struct nvme_opcode_string *entry;
+
+ entry = io_opcode;
+
+ while (entry->opc != 0xFFFF) {
+ if (entry->opc == opc)
+ return (entry->str);
+ entry++;
+ }
+ return (entry->str);
+}
+
+
+static void
+nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
+ struct nvme_command *cmd)
+{
+
+ nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
+ "cdw10:%08x cdw11:%08x\n",
+ get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
+ cmd->nsid, cmd->cdw10, cmd->cdw11);
+}
+
+static void
+nvme_io_qpair_print_command(struct nvme_qpair *qpair,
+ struct nvme_command *cmd)
+{
+
+ switch (cmd->opc) {
+ case NVME_OPC_WRITE:
+ case NVME_OPC_READ:
+ case NVME_OPC_WRITE_UNCORRECTABLE:
+ case NVME_OPC_COMPARE:
+ nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
+ "lba:%llu len:%d\n",
+ get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
+ cmd->nsid,
+ ((unsigned long long)cmd->cdw11 << 32) + cmd->cdw10,
+ (cmd->cdw12 & 0xFFFF) + 1);
+ break;
+ case NVME_OPC_FLUSH:
+ case NVME_OPC_DATASET_MANAGEMENT:
+ nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
+ get_io_opcode_string(cmd->opc), qpair->id, cmd->cid,
+ cmd->nsid);
+ break;
+ default:
+ nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
+ get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
+ cmd->cid, cmd->nsid);
+ break;
+ }
+}
+
+static void
+nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
+{
+ if (qpair->id == 0)
+ nvme_admin_qpair_print_command(qpair, cmd);
+ else
+ nvme_io_qpair_print_command(qpair, cmd);
+}
+
+struct nvme_status_string {
+
+ uint16_t sc;
+ const char * str;
+};
+
+static struct nvme_status_string generic_status[] = {
+ { NVME_SC_SUCCESS, "SUCCESS" },
+ { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
+	{ NVME_SC_INVALID_FIELD,		"INVALID FIELD" },
+ { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
+ { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
+ { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
+ { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
+ { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
+ { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
+ { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
+ { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
+ { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
+ { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
+ { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
+ { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
+ { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
+ { 0xFFFF, "GENERIC" }
+};
+
+static struct nvme_status_string command_specific_status[] = {
+ { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
+ { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
+ { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
+ { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
+ { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
+ { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
+ { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
+ { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
+ { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
+ { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
+ { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
+ { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
+ { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
+ { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
+ { 0xFFFF, "COMMAND SPECIFIC" }
+};
+
+static struct nvme_status_string media_error_status[] = {
+ { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
+ { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
+ { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
+ { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
+ { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
+ { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
+ { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
+ { 0xFFFF, "MEDIA ERROR" }
+};
+
+static const char *
+get_status_string(uint16_t sct, uint16_t sc)
+{
+ struct nvme_status_string *entry;
+
+ switch (sct) {
+ case NVME_SCT_GENERIC:
+ entry = generic_status;
+ break;
+ case NVME_SCT_COMMAND_SPECIFIC:
+ entry = command_specific_status;
+ break;
+ case NVME_SCT_MEDIA_ERROR:
+ entry = media_error_status;
+ break;
+ case NVME_SCT_VENDOR_SPECIFIC:
+ return ("VENDOR SPECIFIC");
+ default:
+ return ("RESERVED");
+ }
+
+ while (entry->sc != 0xFFFF) {
+ if (entry->sc == sc)
+ return (entry->str);
+ entry++;
+ }
+ return (entry->str);
+}
+
+static void
+nvme_qpair_print_completion(struct nvme_qpair *qpair,
+ struct nvme_completion *cpl)
+{
+ nvme_printf(qpair->ctrlr, "%s (%02x/%02x) sqid:%d cid:%d cdw0:%x\n",
+ get_status_string(cpl->status.sct, cpl->status.sc),
+ cpl->status.sct, cpl->status.sc, cpl->sqid, cpl->cid, cpl->cdw0);
+}
+
+static boolean_t
+nvme_completion_is_retry(const struct nvme_completion *cpl)
+{
+ /*
+	 * TODO: the spec is not clear on how commands that are aborted due
+ * to TLER will be marked. So for now, it seems
+ * NAMESPACE_NOT_READY is the only case where we should
+ * look at the DNR bit.
+ */
+ switch (cpl->status.sct) {
+ case NVME_SCT_GENERIC:
+ switch (cpl->status.sc) {
+ case NVME_SC_ABORTED_BY_REQUEST:
+ case NVME_SC_NAMESPACE_NOT_READY:
+ if (cpl->status.dnr)
+ return (0);
+ else
+ return (1);
+ case NVME_SC_INVALID_OPCODE:
+ case NVME_SC_INVALID_FIELD:
+ case NVME_SC_COMMAND_ID_CONFLICT:
+ case NVME_SC_DATA_TRANSFER_ERROR:
+ case NVME_SC_ABORTED_POWER_LOSS:
+ case NVME_SC_INTERNAL_DEVICE_ERROR:
+ case NVME_SC_ABORTED_SQ_DELETION:
+ case NVME_SC_ABORTED_FAILED_FUSED:
+ case NVME_SC_ABORTED_MISSING_FUSED:
+ case NVME_SC_INVALID_NAMESPACE_OR_FORMAT:
+ case NVME_SC_COMMAND_SEQUENCE_ERROR:
+ case NVME_SC_LBA_OUT_OF_RANGE:
+ case NVME_SC_CAPACITY_EXCEEDED:
+ default:
+ return (0);
+ }
+ case NVME_SCT_COMMAND_SPECIFIC:
+ case NVME_SCT_MEDIA_ERROR:
+ case NVME_SCT_VENDOR_SPECIFIC:
+ default:
+ return (0);
+ }
+}
+
+static void
+nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
+ uint16_t cid)
+{
+
+ bus_dmamap_create(qpair->dma_tag, 0, &tr->payload_dma_map);
+ bus_dmamap_create(qpair->dma_tag, 0, &tr->prp_dma_map);
+
+ bus_dmamap_load(qpair->dma_tag, tr->prp_dma_map, tr->prp,
+ sizeof(tr->prp), nvme_single_map, &tr->prp_bus_addr, 0);
+
+ callout_init(&tr->timer, 1);
+ tr->cid = cid;
+ tr->qpair = qpair;
+}
+
+static void
+nvme_qpair_complete_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
+ struct nvme_completion *cpl, boolean_t print_on_error)
+{
+ struct nvme_request *req;
+ boolean_t retry, error;
+
+ req = tr->req;
+ error = nvme_completion_is_error(cpl);
+ retry = error && nvme_completion_is_retry(cpl) &&
+ req->retries < nvme_retry_count;
+
+ if (error && print_on_error) {
+ nvme_qpair_print_command(qpair, &req->cmd);
+ nvme_qpair_print_completion(qpair, cpl);
+ }
+
+ qpair->act_tr[cpl->cid] = NULL;
+
+ KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));
+
+ if (req->cb_fn && !retry)
+ req->cb_fn(req->cb_arg, cpl);
+
+ mtx_lock(&qpair->lock);
+ callout_stop(&tr->timer);
+
+ if (retry) {
+ req->retries++;
+ nvme_qpair_submit_tracker(qpair, tr);
+ } else {
+ if (req->type != NVME_REQUEST_NULL)
+ bus_dmamap_unload(qpair->dma_tag,
+ tr->payload_dma_map);
+
+ nvme_free_request(req);
+ tr->req = NULL;
+
+ TAILQ_REMOVE(&qpair->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
+
+ /*
+ * If the controller is in the middle of resetting, don't
+ * try to submit queued requests here - let the reset logic
+ * handle that instead.
+ */
+ if (!STAILQ_EMPTY(&qpair->queued_req) &&
+ !qpair->ctrlr->is_resetting) {
+ req = STAILQ_FIRST(&qpair->queued_req);
+ STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
+ _nvme_qpair_submit_request(qpair, req);
+ }
+ }
+
+ mtx_unlock(&qpair->lock);
+}
+
+static void
+nvme_qpair_manual_complete_tracker(struct nvme_qpair *qpair,
+ struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
+ boolean_t print_on_error)
+{
+ struct nvme_completion cpl;
+
+ memset(&cpl, 0, sizeof(cpl));
+ cpl.sqid = qpair->id;
+ cpl.cid = tr->cid;
+ cpl.status.sct = sct;
+ cpl.status.sc = sc;
+ cpl.status.dnr = dnr;
+ nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
+}
+
+void
+nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
+ struct nvme_request *req, uint32_t sct, uint32_t sc,
+ boolean_t print_on_error)
+{
+ struct nvme_completion cpl;
+ boolean_t error;
+
+ memset(&cpl, 0, sizeof(cpl));
+ cpl.sqid = qpair->id;
+ cpl.status.sct = sct;
+ cpl.status.sc = sc;
+
+ error = nvme_completion_is_error(&cpl);
+
+ if (error && print_on_error) {
+ nvme_qpair_print_command(qpair, &req->cmd);
+ nvme_qpair_print_completion(qpair, &cpl);
+ }
+
+ if (req->cb_fn)
+ req->cb_fn(req->cb_arg, &cpl);
+
+ nvme_free_request(req);
+}
+
+void
+nvme_qpair_process_completions(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+ struct nvme_completion *cpl;
+
+ qpair->num_intr_handler_calls++;
+
+ if (!qpair->is_enabled)
+ /*
+ * qpair is not enabled, likely because a controller reset is
+		 * in progress.  Ignore the interrupt - any I/O that was
+ * associated with this interrupt will get retried when the
+ * reset is complete.
+ */
+ return;
+
+ while (1) {
+ cpl = &qpair->cpl[qpair->cq_head];
+
+ if (cpl->status.p != qpair->phase)
+ break;
+
+ tr = qpair->act_tr[cpl->cid];
+
+ if (tr != NULL) {
+ nvme_qpair_complete_tracker(qpair, tr, cpl, TRUE);
+ qpair->sq_head = cpl->sqhd;
+ } else {
+ nvme_printf(qpair->ctrlr,
+ "cpl does not map to outstanding cmd\n");
+ nvme_dump_completion(cpl);
+ KASSERT(0, ("received completion for unknown cmd\n"));
+ }
+
+ if (++qpair->cq_head == qpair->num_entries) {
+ qpair->cq_head = 0;
+ qpair->phase = !qpair->phase;
+ }
+
+ nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].cq_hdbl,
+ qpair->cq_head);
+ }
+}
+
+static void
+nvme_qpair_msix_handler(void *arg)
+{
+ struct nvme_qpair *qpair = arg;
+
+ nvme_qpair_process_completions(qpair);
+}
+
+void
+nvme_qpair_construct(struct nvme_qpair *qpair, uint32_t id,
+ uint16_t vector, uint32_t num_entries, uint32_t num_trackers,
+ struct nvme_controller *ctrlr)
+{
+ struct nvme_tracker *tr;
+ uint32_t i;
+
+ qpair->id = id;
+ qpair->vector = vector;
+ qpair->num_entries = num_entries;
+#ifdef CHATHAM2
+ /*
+ * Chatham prototype board starts having issues at higher queue
+ * depths. So use a conservative estimate here of no more than 64
+ * outstanding I/O per queue at any one point.
+ */
+ if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
+ num_trackers = min(num_trackers, 64);
+#endif
+ qpair->num_trackers = num_trackers;
+ qpair->ctrlr = ctrlr;
+
+ if (ctrlr->msix_enabled) {
+
+ /*
+ * MSI-X vector resource IDs start at 1, so we add one to
+ * the queue's vector to get the corresponding rid to use.
+ */
+ qpair->rid = vector + 1;
+
+ qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &qpair->rid, RF_ACTIVE);
+
+ bus_setup_intr(ctrlr->dev, qpair->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL,
+ nvme_qpair_msix_handler, qpair, &qpair->tag);
+ }
+
+ mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
+
+ bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev),
+ sizeof(uint64_t), PAGE_SIZE, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, NVME_MAX_XFER_SIZE,
+ (NVME_MAX_XFER_SIZE/PAGE_SIZE)+1, PAGE_SIZE, 0,
+ NULL, NULL, &qpair->dma_tag);
+
+ qpair->num_cmds = 0;
+ qpair->num_intr_handler_calls = 0;
+
+ qpair->cmd = contigmalloc(qpair->num_entries *
+ sizeof(struct nvme_command), M_NVME, M_ZERO,
+ 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
+ qpair->cpl = contigmalloc(qpair->num_entries *
+ sizeof(struct nvme_completion), M_NVME, M_ZERO,
+ 0, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
+
+ bus_dmamap_create(qpair->dma_tag, 0, &qpair->cmd_dma_map);
+ bus_dmamap_create(qpair->dma_tag, 0, &qpair->cpl_dma_map);
+
+ bus_dmamap_load(qpair->dma_tag, qpair->cmd_dma_map,
+ qpair->cmd, qpair->num_entries * sizeof(struct nvme_command),
+ nvme_single_map, &qpair->cmd_bus_addr, 0);
+ bus_dmamap_load(qpair->dma_tag, qpair->cpl_dma_map,
+ qpair->cpl, qpair->num_entries * sizeof(struct nvme_completion),
+ nvme_single_map, &qpair->cpl_bus_addr, 0);
+
+ qpair->sq_tdbl_off = nvme_mmio_offsetof(doorbell[id].sq_tdbl);
+ qpair->cq_hdbl_off = nvme_mmio_offsetof(doorbell[id].cq_hdbl);
+
+ TAILQ_INIT(&qpair->free_tr);
+ TAILQ_INIT(&qpair->outstanding_tr);
+ STAILQ_INIT(&qpair->queued_req);
+
+ for (i = 0; i < qpair->num_trackers; i++) {
+ tr = malloc(sizeof(*tr), M_NVME, M_ZERO | M_WAITOK);
+ nvme_qpair_construct_tracker(qpair, tr, i);
+ TAILQ_INSERT_HEAD(&qpair->free_tr, tr, tailq);
+ }
+
+ qpair->act_tr = malloc(sizeof(struct nvme_tracker *) * qpair->num_entries,
+ M_NVME, M_ZERO | M_WAITOK);
+}
+
+static void
+nvme_qpair_destroy(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+
+ if (qpair->tag)
+ bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
+
+ if (qpair->res)
+ bus_release_resource(qpair->ctrlr->dev, SYS_RES_IRQ,
+ rman_get_rid(qpair->res), qpair->res);
+
+ if (qpair->cmd) {
+ bus_dmamap_unload(qpair->dma_tag, qpair->cmd_dma_map);
+ bus_dmamap_destroy(qpair->dma_tag, qpair->cmd_dma_map);
+ contigfree(qpair->cmd,
+ qpair->num_entries * sizeof(struct nvme_command), M_NVME);
+ }
+
+ if (qpair->cpl) {
+ bus_dmamap_unload(qpair->dma_tag, qpair->cpl_dma_map);
+ bus_dmamap_destroy(qpair->dma_tag, qpair->cpl_dma_map);
+ contigfree(qpair->cpl,
+ qpair->num_entries * sizeof(struct nvme_completion),
+ M_NVME);
+ }
+
+ if (qpair->dma_tag)
+ bus_dma_tag_destroy(qpair->dma_tag);
+
+ if (qpair->act_tr)
+ free(qpair->act_tr, M_NVME);
+
+ while (!TAILQ_EMPTY(&qpair->free_tr)) {
+ tr = TAILQ_FIRST(&qpair->free_tr);
+ TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
+ bus_dmamap_destroy(qpair->dma_tag, tr->payload_dma_map);
+ bus_dmamap_destroy(qpair->dma_tag, tr->prp_dma_map);
+ free(tr, M_NVME);
+ }
+}
+
+static void
+nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
+ while (tr != NULL) {
+ if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
+ nvme_qpair_manual_complete_tracker(qpair, tr,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
+ FALSE);
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
+ } else {
+ tr = TAILQ_NEXT(tr, tailq);
+ }
+ }
+}
+
+void
+nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
+{
+
+ nvme_admin_qpair_abort_aers(qpair);
+ nvme_qpair_destroy(qpair);
+}
+
+void
+nvme_io_qpair_destroy(struct nvme_qpair *qpair)
+{
+
+ nvme_qpair_destroy(qpair);
+}
+
+static void
+nvme_abort_complete(void *arg, const struct nvme_completion *status)
+{
+ struct nvme_tracker *tr = arg;
+
+ /*
+ * If cdw0 == 1, the controller was not able to abort the command
+ * we requested. We still need to check the active tracker array,
+	 * to cover the race where the I/O timed out at the same time the
+	 * controller was completing it.
+ */
+ if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
+ /*
+ * An I/O has timed out, and the controller was unable to
+ * abort it for some reason. Construct a fake completion
+ * status, and then complete the I/O's tracker manually.
+ */
+ nvme_printf(tr->qpair->ctrlr,
+ "abort command failed, aborting command manually\n");
+ nvme_qpair_manual_complete_tracker(tr->qpair, tr,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
+ }
+}
+
+static void
+nvme_timeout(void *arg)
+{
+ struct nvme_tracker *tr = arg;
+ struct nvme_qpair *qpair = tr->qpair;
+ struct nvme_controller *ctrlr = qpair->ctrlr;
+ union csts_register csts;
+
+ /* Read csts to get value of cfs - controller fatal status. */
+ csts.raw = nvme_mmio_read_4(ctrlr, csts);
+
+ if (ctrlr->enable_aborts && csts.bits.cfs == 0) {
+ /*
+ * If aborts are enabled, only use them if the controller is
+ * not reporting fatal status.
+ */
+ nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
+ nvme_abort_complete, tr);
+ } else
+ nvme_ctrlr_reset(ctrlr);
+}
+
+void
+nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
+{
+ struct nvme_request *req;
+ struct nvme_controller *ctrlr;
+
+ mtx_assert(&qpair->lock, MA_OWNED);
+
+ req = tr->req;
+ req->cmd.cid = tr->cid;
+ qpair->act_tr[tr->cid] = tr;
+ ctrlr = qpair->ctrlr;
+
+ if (req->timeout)
+#if __FreeBSD_version >= 800030
+ callout_reset_curcpu(&tr->timer, ctrlr->timeout_period * hz,
+ nvme_timeout, tr);
+#else
+ callout_reset(&tr->timer, ctrlr->timeout_period * hz,
+ nvme_timeout, tr);
+#endif
+
+ /* Copy the command from the tracker to the submission queue. */
+ memcpy(&qpair->cmd[qpair->sq_tail], &req->cmd, sizeof(req->cmd));
+
+ if (++qpair->sq_tail == qpair->num_entries)
+ qpair->sq_tail = 0;
+
+ wmb();
+ nvme_mmio_write_4(qpair->ctrlr, doorbell[qpair->id].sq_tdbl,
+ qpair->sq_tail);
+
+ qpair->num_cmds++;
+}
+
+static void
+nvme_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ struct nvme_tracker *tr = arg;
+ uint32_t cur_nseg;
+
+ /*
+ * If the mapping operation failed, return immediately. The caller
+ * is responsible for detecting the error status and failing the
+ * tracker manually.
+ */
+ if (error != 0)
+ return;
+
+ /*
+ * Note that we specified PAGE_SIZE for alignment and max
+ * segment size when creating the bus dma tags. So here
+ * we can safely just transfer each segment to its
+ * associated PRP entry.
+ */
+ tr->req->cmd.prp1 = seg[0].ds_addr;
+
+ if (nseg == 2) {
+ tr->req->cmd.prp2 = seg[1].ds_addr;
+ } else if (nseg > 2) {
+ cur_nseg = 1;
+ tr->req->cmd.prp2 = (uint64_t)tr->prp_bus_addr;
+ while (cur_nseg < nseg) {
+ tr->prp[cur_nseg-1] =
+ (uint64_t)seg[cur_nseg].ds_addr;
+ cur_nseg++;
+ }
+ }
+
+ nvme_qpair_submit_tracker(tr->qpair, tr);
+}
+
+static void
+_nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
+{
+ struct nvme_tracker *tr;
+ int err = 0;
+
+ mtx_assert(&qpair->lock, MA_OWNED);
+
+ tr = TAILQ_FIRST(&qpair->free_tr);
+ req->qpair = qpair;
+
+ if (tr == NULL || !qpair->is_enabled) {
+ /*
+ * No tracker is available, or the qpair is disabled due to
+ * an in-progress controller-level reset or controller
+ * failure.
+ */
+
+ if (qpair->ctrlr->is_failed) {
+ /*
+ * The controller has failed. Post the request to a
+ * task where it will be aborted, so that we do not
+ * invoke the request's callback in the context
+ * of the submission.
+ */
+ nvme_ctrlr_post_failed_request(qpair->ctrlr, req);
+ } else {
+ /*
+ * Put the request on the qpair's request queue to be
+ * processed when a tracker frees up via a command
+ * completion or when the controller reset is
+ * completed.
+ */
+ STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
+ }
+ return;
+ }
+
+ TAILQ_REMOVE(&qpair->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&qpair->outstanding_tr, tr, tailq);
+ tr->req = req;
+
+ switch (req->type) {
+ case NVME_REQUEST_VADDR:
+ KASSERT(req->payload_size <= qpair->ctrlr->max_xfer_size,
+ ("payload_size (%d) exceeds max_xfer_size (%d)\n",
+ req->payload_size, qpair->ctrlr->max_xfer_size));
+ err = bus_dmamap_load(tr->qpair->dma_tag, tr->payload_dma_map,
+ req->u.payload, req->payload_size, nvme_payload_map, tr, 0);
+ if (err != 0)
+ nvme_printf(qpair->ctrlr,
+ "bus_dmamap_load returned 0x%x!\n", err);
+ break;
+ case NVME_REQUEST_NULL:
+ nvme_qpair_submit_tracker(tr->qpair, tr);
+ break;
+#ifdef NVME_UNMAPPED_BIO_SUPPORT
+ case NVME_REQUEST_BIO:
+ KASSERT(req->u.bio->bio_bcount <= qpair->ctrlr->max_xfer_size,
+ ("bio->bio_bcount (%jd) exceeds max_xfer_size (%d)\n",
+ (intmax_t)req->u.bio->bio_bcount,
+ qpair->ctrlr->max_xfer_size));
+ err = bus_dmamap_load_bio(tr->qpair->dma_tag,
+ tr->payload_dma_map, req->u.bio, nvme_payload_map, tr, 0);
+ if (err != 0)
+ nvme_printf(qpair->ctrlr,
+ "bus_dmamap_load_bio returned 0x%x!\n", err);
+ break;
+#endif
+ default:
+ panic("unknown nvme request type 0x%x\n", req->type);
+ break;
+ }
+
+ if (err != 0) {
+ /*
+ * The dmamap operation failed, so we manually fail the
+ * tracker here with DATA_TRANSFER_ERROR status.
+ *
+ * nvme_qpair_manual_complete_tracker must not be called
+ * with the qpair lock held.
+ */
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_DATA_TRANSFER_ERROR, 1 /* do not retry */, TRUE);
+ mtx_lock(&qpair->lock);
+ }
+}
+
+void
+nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
+{
+
+ mtx_lock(&qpair->lock);
+ _nvme_qpair_submit_request(qpair, req);
+ mtx_unlock(&qpair->lock);
+}
+
+static void
+nvme_qpair_enable(struct nvme_qpair *qpair)
+{
+
+ qpair->is_enabled = TRUE;
+}
+
+void
+nvme_qpair_reset(struct nvme_qpair *qpair)
+{
+
+ qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
+
+ /*
+ * First time through the completion queue, HW will set phase
+ * bit on completions to 1. So set this to 1 here, indicating
+ * we're looking for a 1 to know which entries have completed.
+	 * We'll toggle the bit each time the completion queue
+ * rolls over.
+ */
+ qpair->phase = 1;
+
+ memset(qpair->cmd, 0,
+ qpair->num_entries * sizeof(struct nvme_command));
+ memset(qpair->cpl, 0,
+ qpair->num_entries * sizeof(struct nvme_completion));
+}
+
+void
+nvme_admin_qpair_enable(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+ struct nvme_tracker *tr_temp;
+
+ /*
+ * Manually abort each outstanding admin command. Do not retry
+ * admin commands found here, since they will be left over from
+	 * a controller reset and it's likely the context in which the
+ * command was issued no longer applies.
+ */
+ TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
+ nvme_printf(qpair->ctrlr,
+ "aborting outstanding admin command\n");
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
+ }
+
+ nvme_qpair_enable(qpair);
+}
+
+void
+nvme_io_qpair_enable(struct nvme_qpair *qpair)
+{
+ STAILQ_HEAD(, nvme_request) temp;
+ struct nvme_tracker *tr;
+ struct nvme_tracker *tr_temp;
+ struct nvme_request *req;
+
+ /*
+ * Manually abort each outstanding I/O. This normally results in a
+ * retry, unless the retry count on the associated request has
+ * reached its limit.
+ */
+ TAILQ_FOREACH_SAFE(tr, &qpair->outstanding_tr, tailq, tr_temp) {
+ nvme_printf(qpair->ctrlr, "aborting outstanding i/o\n");
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, 0, TRUE);
+ }
+
+ mtx_lock(&qpair->lock);
+
+ nvme_qpair_enable(qpair);
+
+ STAILQ_INIT(&temp);
+ STAILQ_SWAP(&qpair->queued_req, &temp, nvme_request);
+
+ while (!STAILQ_EMPTY(&temp)) {
+ req = STAILQ_FIRST(&temp);
+ STAILQ_REMOVE_HEAD(&temp, stailq);
+ nvme_printf(qpair->ctrlr, "resubmitting queued i/o\n");
+ nvme_qpair_print_command(qpair, &req->cmd);
+ _nvme_qpair_submit_request(qpair, req);
+ }
+
+ mtx_unlock(&qpair->lock);
+}
+
+static void
+nvme_qpair_disable(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+
+ qpair->is_enabled = FALSE;
+ mtx_lock(&qpair->lock);
+ TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq)
+ callout_stop(&tr->timer);
+ mtx_unlock(&qpair->lock);
+}
+
+void
+nvme_admin_qpair_disable(struct nvme_qpair *qpair)
+{
+
+ nvme_qpair_disable(qpair);
+ nvme_admin_qpair_abort_aers(qpair);
+}
+
+void
+nvme_io_qpair_disable(struct nvme_qpair *qpair)
+{
+
+ nvme_qpair_disable(qpair);
+}
+
+void
+nvme_qpair_fail(struct nvme_qpair *qpair)
+{
+ struct nvme_tracker *tr;
+ struct nvme_request *req;
+
+ mtx_lock(&qpair->lock);
+
+ while (!STAILQ_EMPTY(&qpair->queued_req)) {
+ req = STAILQ_FIRST(&qpair->queued_req);
+ STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
+ nvme_printf(qpair->ctrlr, "failing queued i/o\n");
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, TRUE);
+ mtx_lock(&qpair->lock);
+ }
+
+ /* Manually abort each outstanding I/O. */
+ while (!TAILQ_EMPTY(&qpair->outstanding_tr)) {
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
+ /*
+ * Do not remove the tracker. The abort_tracker path will
+ * do that for us.
+ */
+ nvme_printf(qpair->ctrlr, "failing outstanding i/o\n");
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_tracker(qpair, tr, NVME_SCT_GENERIC,
+ NVME_SC_ABORTED_BY_REQUEST, 1 /* do not retry */, TRUE);
+ mtx_lock(&qpair->lock);
+ }
+
+ mtx_unlock(&qpair->lock);
+}
+
Property changes on: trunk/sys/dev/nvme/nvme_qpair.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
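
The completion loop in nvme_qpair_process_completions() above relies on the
NVMe phase bit: the driver only consumes completion slots whose phase matches
its current expectation, and it flips that expectation each time cq_head wraps
past num_entries, which is why nvme_qpair_reset() starts with phase = 1 on a
zeroed completion queue.  A minimal userspace sketch of that producer/consumer
logic, using simplified stand-in types and not part of the files added above:

#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES	4

struct fake_cpl {			/* stand-in for struct nvme_completion */
	uint16_t	cid;
	uint8_t		phase;		/* written by the "controller" */
};

int
main(void)
{
	struct fake_cpl cq[NUM_ENTRIES] = { { 0, 0 } };
	uint32_t cq_head = 0, cq_tail = 0;
	uint8_t phase = 1;		/* driver expectation, as in nvme_qpair_reset() */
	uint8_t post_phase = 1;		/* phase value the "controller" writes */
	int i;

	for (i = 0; i < 10; i++) {
		/* "Controller" posts one completion at the tail. */
		cq[cq_tail].cid = (uint16_t)i;
		cq[cq_tail].phase = post_phase;
		if (++cq_tail == NUM_ENTRIES) {
			cq_tail = 0;
			post_phase = !post_phase;
		}

		/* Driver-side consumer, as in nvme_qpair_process_completions(). */
		while (cq[cq_head].phase == phase) {
			printf("completed cid %u at slot %u\n",
			    (unsigned)cq[cq_head].cid, (unsigned)cq_head);
			if (++cq_head == NUM_ENTRIES) {
				cq_head = 0;
				phase = !phase;	/* expectation flips on wrap */
			}
		}
	}
	return (0);
}

The same file's nvme_payload_map() applies a related convention on the data
path: one- or two-segment transfers are placed directly in prp1/prp2, while
longer transfers point prp2 at the tracker's pre-mapped PRP list.
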
Added: trunk/sys/dev/nvme/nvme_sysctl.c
===================================================================
--- trunk/sys/dev/nvme/nvme_sysctl.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_sysctl.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,298 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_sysctl.c 253296 2013-07-12 22:07:33Z jimharris $");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+
+#include "nvme_private.h"
+
+/*
+ * CTLTYPE_S64 and sysctl_handle_64 were added in r217616. Define these
+ * explicitly here for older kernels that don't include the r217616
+ * changeset.
+ */
+#ifndef CTLTYPE_S64
+#define CTLTYPE_S64 CTLTYPE_QUAD
+#define sysctl_handle_64 sysctl_handle_quad
+#endif
+
+static void
+nvme_dump_queue(struct nvme_qpair *qpair)
+{
+ struct nvme_completion *cpl;
+ struct nvme_command *cmd;
+ int i;
+
+ printf("id:%04Xh phase:%d\n", qpair->id, qpair->phase);
+
+ printf("Completion queue:\n");
+ for (i = 0; i < qpair->num_entries; i++) {
+ cpl = &qpair->cpl[i];
+ printf("%05d: ", i);
+ nvme_dump_completion(cpl);
+ }
+
+ printf("Submission queue:\n");
+ for (i = 0; i < qpair->num_entries; i++) {
+ cmd = &qpair->cmd[i];
+ printf("%05d: ", i);
+ nvme_dump_command(cmd);
+ }
+}
+
+
+static int
+nvme_sysctl_dump_debug(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_qpair *qpair = arg1;
+ uint32_t val = 0;
+
+ int error = sysctl_handle_int(oidp, &val, 0, req);
+
+ if (error)
+ return (error);
+
+ if (val != 0)
+ nvme_dump_queue(qpair);
+
+ return (0);
+}
+
+static int
+nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t oldval = ctrlr->int_coal_time;
+ int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
+ req);
+
+ if (error)
+ return (error);
+
+ if (oldval != ctrlr->int_coal_time)
+ nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
+ ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
+ NULL);
+
+ return (0);
+}
+
+static int
+nvme_sysctl_int_coal_threshold(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t oldval = ctrlr->int_coal_threshold;
+ int error = sysctl_handle_int(oidp, &ctrlr->int_coal_threshold, 0,
+ req);
+
+ if (error)
+ return (error);
+
+ if (oldval != ctrlr->int_coal_threshold)
+ nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
+ ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
+ NULL);
+
+ return (0);
+}
+
+static int
+nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t oldval = ctrlr->timeout_period;
+ int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);
+
+ if (error)
+ return (error);
+
+ if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
+ ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
+ ctrlr->timeout_period = oldval;
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static void
+nvme_qpair_reset_stats(struct nvme_qpair *qpair)
+{
+
+ qpair->num_cmds = 0;
+ qpair->num_intr_handler_calls = 0;
+}
+
+static int
+nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ int64_t num_cmds = 0;
+ int i;
+
+ num_cmds = ctrlr->adminq.num_cmds;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_cmds += ctrlr->ioq[i].num_cmds;
+
+ return (sysctl_handle_64(oidp, &num_cmds, 0, req));
+}
+
+static int
+nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ int64_t num_intr_handler_calls = 0;
+ int i;
+
+ num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
+
+ return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
+}
+
+static int
+nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct nvme_controller *ctrlr = arg1;
+ uint32_t i, val = 0;
+
+ int error = sysctl_handle_int(oidp, &val, 0, req);
+
+ if (error)
+ return (error);
+
+ if (val != 0) {
+ nvme_qpair_reset_stats(&ctrlr->adminq);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_qpair_reset_stats(&ctrlr->ioq[i]);
+ }
+
+ return (0);
+}
+
+
+static void
+nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
+ struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
+{
+ struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
+ CTLFLAG_RD, &qpair->num_entries, 0,
+ "Number of entries in hardware queue");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
+ CTLFLAG_RD, &qpair->num_trackers, 0,
+ "Number of trackers pre-allocated for this queue pair");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head",
+ CTLFLAG_RD, &qpair->sq_head, 0,
+ "Current head of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail",
+ CTLFLAG_RD, &qpair->sq_tail, 0,
+ "Current tail of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head",
+ CTLFLAG_RD, &qpair->cq_head, 0,
+ "Current head of completion queue (as observed by driver)");
+
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds",
+ CTLFLAG_RD, &qpair->num_cmds, "Number of commands submitted");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
+ CTLFLAG_RD, &qpair->num_intr_handler_calls,
+ "Number of times interrupt handler was invoked (will typically be "
+ "less than number of actual interrupts generated due to "
+ "coalescing)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
+ "dump_debug", CTLTYPE_UINT | CTLFLAG_RW, qpair, 0,
+ nvme_sysctl_dump_debug, "IU", "Dump debug data");
+}
+
+void
+nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
+{
+ struct sysctl_ctx_list *ctrlr_ctx;
+ struct sysctl_oid *ctrlr_tree, *que_tree;
+ struct sysctl_oid_list *ctrlr_list;
+#define QUEUE_NAME_LENGTH 16
+ char queue_name[QUEUE_NAME_LENGTH];
+ int i;
+
+ ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
+ ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
+ ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "int_coal_time", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_int_coal_time, "IU",
+ "Interrupt coalescing timeout (in microseconds)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "int_coal_threshold", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_int_coal_threshold, "IU",
+ "Interrupt coalescing threshold");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "timeout_period", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_timeout_period, "IU",
+ "Timeout period (in seconds)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_cmds", CTLTYPE_S64 | CTLFLAG_RD,
+ ctrlr, 0, nvme_sysctl_num_cmds, "IU",
+ "Number of commands submitted");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD,
+ ctrlr, 0, nvme_sysctl_num_intr_handler_calls, "IU",
+ "Number of times interrupt handler was invoked (will "
+ "typically be less than number of actual interrupts "
+ "generated due to coalescing)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "reset_stats", CTLTYPE_UINT | CTLFLAG_RW, ctrlr, 0,
+ nvme_sysctl_reset_stats, "IU", "Reset statistics to zero");
+
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
+ CTLFLAG_RD, NULL, "Admin Queue");
+
+ nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ queue_name, CTLFLAG_RD, NULL, "IO Queue");
+ nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
+ que_tree);
+ }
+}
Property changes on: trunk/sys/dev/nvme/nvme_sysctl.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
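
The sysctl handlers above share one pattern: copy the tunable in or out with
sysctl_handle_int()/sysctl_handle_64(), then either push the new value to the
controller (the interrupt-coalescing pair) or validate it and roll back on
failure (timeout_period).  Because the nodes are attached to the controller's
device sysctl tree, they should surface under the per-device tree (dev.nvme.<unit>
on FreeBSD-derived systems, path assumed here) along with the "adminq" and
"ioqN" subtrees created at the bottom of the file.  A standalone sketch of the
validate-and-roll-back rule, with placeholder bounds standing in for
NVME_MIN_TIMEOUT_PERIOD/NVME_MAX_TIMEOUT_PERIOD:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bounds; the driver uses NVME_MIN/MAX_TIMEOUT_PERIOD. */
#define MIN_TIMEOUT	5
#define MAX_TIMEOUT	120

/*
 * Same update rule as nvme_sysctl_timeout_period(): accept the new value
 * only if it is in range, otherwise restore the old one and return EINVAL.
 */
static int
set_timeout_period(uint32_t *period, uint32_t newval)
{
	uint32_t oldval = *period;

	*period = newval;
	if (*period > MAX_TIMEOUT || *period < MIN_TIMEOUT) {
		*period = oldval;
		return (EINVAL);
	}
	return (0);
}

int
main(void)
{
	uint32_t timeout_period = 30;
	int error;

	error = set_timeout_period(&timeout_period, 60);
	printf("set 60 -> %d, timeout_period = %u\n", error,
	    (unsigned)timeout_period);
	error = set_timeout_period(&timeout_period, 999);
	printf("set 999 -> %d, timeout_period = %u\n", error,
	    (unsigned)timeout_period);
	return (0);
}
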
Added: trunk/sys/dev/nvme/nvme_test.c
===================================================================
--- trunk/sys/dev/nvme/nvme_test.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_test.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,306 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2012-2013 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_test.c 253296 2013-07-12 22:07:33Z jimharris $");
+
+#include <sys/param.h>
+#include <sys/bio.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/kthread.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysctl.h>
+#include <sys/sysproto.h>
+#include <sys/systm.h>
+#include <sys/unistd.h>
+
+#include <geom/geom.h>
+
+#include "nvme_private.h"
+
+struct nvme_io_test_thread {
+
+ uint32_t idx;
+ struct nvme_namespace *ns;
+ enum nvme_nvm_opcode opc;
+ struct timeval start;
+ void *buf;
+ uint32_t size;
+ uint32_t time;
+ uint32_t io_completed;
+};
+
+struct nvme_io_test_internal {
+
+ struct nvme_namespace *ns;
+ enum nvme_nvm_opcode opc;
+ struct timeval start;
+ uint32_t time;
+ uint32_t size;
+ uint32_t td_active;
+ uint32_t td_idx;
+ uint32_t flags;
+ uint32_t io_completed[NVME_TEST_MAX_THREADS];
+};
+
+static void
+nvme_ns_bio_test_cb(struct bio *bio)
+{
+ struct mtx *mtx;
+
+ mtx = mtx_pool_find(mtxpool_sleep, bio);
+ mtx_lock(mtx);
+ wakeup(bio);
+ mtx_unlock(mtx);
+}
+
+static void
+nvme_ns_bio_test(void *arg)
+{
+ struct nvme_io_test_internal *io_test = arg;
+ struct cdevsw *csw;
+ struct mtx *mtx;
+ struct bio *bio;
+ struct cdev *dev;
+ void *buf;
+ struct timeval t;
+ uint64_t offset;
+ uint32_t idx, io_completed = 0;
+#if __FreeBSD_version >= 900017
+ int ref;
+#endif
+
+ buf = malloc(io_test->size, M_NVME, M_WAITOK);
+ idx = atomic_fetchadd_int(&io_test->td_idx, 1);
+ dev = io_test->ns->cdev;
+
+ offset = idx * 2048 * nvme_ns_get_sector_size(io_test->ns);
+
+ while (1) {
+
+ bio = g_alloc_bio();
+
+ memset(bio, 0, sizeof(*bio));
+ bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
+ BIO_READ : BIO_WRITE;
+ bio->bio_done = nvme_ns_bio_test_cb;
+ bio->bio_dev = dev;
+ bio->bio_offset = offset;
+ bio->bio_data = buf;
+ bio->bio_bcount = io_test->size;
+
+ if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
+#if __FreeBSD_version >= 900017
+ csw = dev_refthread(dev, &ref);
+#else
+ csw = dev_refthread(dev);
+#endif
+ } else
+ csw = dev->si_devsw;
+
+ mtx = mtx_pool_find(mtxpool_sleep, bio);
+ mtx_lock(mtx);
+ (*csw->d_strategy)(bio);
+ msleep(bio, mtx, PRIBIO, "biotestwait", 0);
+ mtx_unlock(mtx);
+
+ if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
+#if __FreeBSD_version >= 900017
+ dev_relthread(dev, ref);
+#else
+ dev_relthread(dev);
+#endif
+ }
+
+ if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
+ break;
+
+ g_destroy_bio(bio);
+
+ io_completed++;
+
+ getmicrouptime(&t);
+ timevalsub(&t, &io_test->start);
+
+ if (t.tv_sec >= io_test->time)
+ break;
+
+ offset += io_test->size;
+ if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
+ offset = 0;
+ }
+
+ io_test->io_completed[idx] = io_completed;
+ wakeup_one(io_test);
+
+ free(buf, M_NVME);
+
+ atomic_subtract_int(&io_test->td_active, 1);
+ mb();
+
+#if __FreeBSD_version >= 800000
+ kthread_exit();
+#else
+ kthread_exit(0);
+#endif
+}
+
+static void
+nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_io_test_thread *tth = arg;
+ struct timeval t;
+
+ tth->io_completed++;
+
+ if (nvme_completion_is_error(cpl)) {
+ printf("%s: error occurred\n", __func__);
+ wakeup_one(tth);
+ return;
+ }
+
+ getmicrouptime(&t);
+ timevalsub(&t, &tth->start);
+
+ if (t.tv_sec >= tth->time) {
+ wakeup_one(tth);
+ return;
+ }
+
+ switch (tth->opc) {
+ case NVME_OPC_WRITE:
+ nvme_ns_cmd_write(tth->ns, tth->buf, tth->idx * 2048,
+ tth->size/nvme_ns_get_sector_size(tth->ns),
+ nvme_ns_io_test_cb, tth);
+ break;
+ case NVME_OPC_READ:
+ nvme_ns_cmd_read(tth->ns, tth->buf, tth->idx * 2048,
+ tth->size/nvme_ns_get_sector_size(tth->ns),
+ nvme_ns_io_test_cb, tth);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+nvme_ns_io_test(void *arg)
+{
+ struct nvme_io_test_internal *io_test = arg;
+ struct nvme_io_test_thread *tth;
+ struct nvme_completion cpl;
+ int error;
+
+ tth = malloc(sizeof(*tth), M_NVME, M_WAITOK | M_ZERO);
+ tth->ns = io_test->ns;
+ tth->opc = io_test->opc;
+ memcpy(&tth->start, &io_test->start, sizeof(tth->start));
+ tth->buf = malloc(io_test->size, M_NVME, M_WAITOK);
+ tth->size = io_test->size;
+ tth->time = io_test->time;
+ tth->idx = atomic_fetchadd_int(&io_test->td_idx, 1);
+
+ memset(&cpl, 0, sizeof(cpl));
+
+ nvme_ns_io_test_cb(tth, &cpl);
+
+ error = tsleep(tth, 0, "test_wait", tth->time*hz*2);
+
+ if (error)
+ printf("%s: error = %d\n", __func__, error);
+
+ io_test->io_completed[tth->idx] = tth->io_completed;
+ wakeup_one(io_test);
+
+ free(tth->buf, M_NVME);
+ free(tth, M_NVME);
+
+ atomic_subtract_int(&io_test->td_active, 1);
+ mb();
+
+#if __FreeBSD_version >= 800004
+ kthread_exit();
+#else
+ kthread_exit(0);
+#endif
+}
+
+void
+nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
+{
+ struct nvme_io_test *io_test;
+ struct nvme_io_test_internal *io_test_internal;
+ void (*fn)(void *);
+ int i;
+
+ io_test = (struct nvme_io_test *)arg;
+
+ if ((io_test->opc != NVME_OPC_READ) &&
+ (io_test->opc != NVME_OPC_WRITE))
+ return;
+
+ if (io_test->size % nvme_ns_get_sector_size(ns))
+ return;
+
+ io_test_internal = malloc(sizeof(*io_test_internal), M_NVME,
+ M_WAITOK | M_ZERO);
+ io_test_internal->opc = io_test->opc;
+ io_test_internal->ns = ns;
+ io_test_internal->td_active = io_test->num_threads;
+ io_test_internal->time = io_test->time;
+ io_test_internal->size = io_test->size;
+ io_test_internal->flags = io_test->flags;
+
+ if (cmd == NVME_IO_TEST)
+ fn = nvme_ns_io_test;
+ else
+ fn = nvme_ns_bio_test;
+
+ getmicrouptime(&io_test_internal->start);
+
+ for (i = 0; i < io_test->num_threads; i++)
+#if __FreeBSD_version >= 800004
+ kthread_add(fn, io_test_internal,
+ NULL, NULL, 0, 0, "nvme_io_test[%d]", i);
+#else
+ kthread_create(fn, io_test_internal,
+ NULL, 0, 0, "nvme_io_test[%d]", i);
+#endif
+
+ tsleep(io_test_internal, 0, "nvme_test", io_test->time * 2 * hz);
+
+ while (io_test_internal->td_active > 0)
+ DELAY(10);
+
+ memcpy(io_test->io_completed, io_test_internal->io_completed,
+ sizeof(io_test->io_completed));
+
+ free(io_test_internal, M_NVME);
+}
Property changes on: trunk/sys/dev/nvme/nvme_test.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
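
In the test harness above, each worker thread starts 2048 sectors past the
previous one (offset = idx * 2048 * sector size), advances by the configured
transfer size after each I/O, and wraps back to offset 0 once the next I/O
would run past the end of the namespace.  A small arithmetic-only sketch of
that layout, with the geometry values assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Geometry assumed for illustration only. */
	const uint64_t sector_size = 512;
	const uint64_t ns_size = 2ULL * 1024 * 1024;	/* 2 MB namespace */
	const uint64_t io_size = 128 * 1024;		/* per-I/O transfer size */
	uint64_t offset;
	uint32_t idx;
	int i;

	/* Per-thread starting offsets, as computed in nvme_ns_bio_test(). */
	for (idx = 0; idx < 2; idx++)
		printf("thread %u starts at byte offset %ju\n", (unsigned)idx,
		    (uintmax_t)(idx * 2048 * sector_size));

	/* One thread then advances by io_size, wrapping to offset 0 at the end. */
	offset = 1 * 2048 * sector_size;
	for (i = 0; i < 8; i++) {
		offset += io_size;
		if (offset + io_size > ns_size)
			offset = 0;
		printf("next offset: %ju\n", (uintmax_t)offset);
	}
	return (0);
}
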
Added: trunk/sys/dev/nvme/nvme_util.c
===================================================================
--- trunk/sys/dev/nvme/nvme_util.c (rev 0)
+++ trunk/sys/dev/nvme/nvme_util.c 2017-09-19 01:19:26 UTC (rev 9565)
@@ -0,0 +1,62 @@
+/* $MidnightBSD$ */
+/*-
+ * Copyright (C) 2013 Intel Corporation
+ * Copyright (C) 1997 Justin T. Gibbs
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: release/9.2.0/sys/dev/nvme/nvme_util.c 253631 2013-07-24 22:48:29Z jimharris $");
+
+#include <sys/param.h>
+#include <dev/nvme/nvme.h>
+
+void
+nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen)
+{
+ uint8_t *cur_pos;
+
+ /* Trim leading/trailing spaces, nulls. */
+ while (srclen > 0 && src[0] == ' ')
+ src++, srclen--;
+ while (srclen > 0
+ && (src[srclen - 1] == ' ' || src[srclen - 1] == '\0'))
+ srclen--;
+
+ while (srclen > 0 && dstlen > 1) {
+ cur_pos = dst;
+
+ /* Show '?' for non-printable characters. */
+ if (*src < 0x20 || *src >= 0x7F)
+ *cur_pos++ = '?';
+ else
+ *cur_pos++ = *src;
+ src++;
+ srclen--;
+ dstlen -= cur_pos - dst;
+ dst = cur_pos;
+ }
+ *dst = '\0';
+}
+
Property changes on: trunk/sys/dev/nvme/nvme_util.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+MidnightBSD=%H
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
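
nvme_strvis() above trims the leading/trailing padding and replaces
non-printable bytes with '?', which is how fixed-width, space-padded identify
fields (such as model or serial strings) can be rendered as NUL-terminated C
strings.  Since the routine has no kernel dependencies, a verbatim userspace
copy shows the behavior directly; the sample input below is made up:

#include <stdint.h>
#include <stdio.h>

/* Verbatim logic of nvme_strvis() from nvme_util.c above. */
static void
strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen)
{
	uint8_t *cur_pos;

	/* Trim leading/trailing spaces, nulls. */
	while (srclen > 0 && src[0] == ' ')
		src++, srclen--;
	while (srclen > 0
	    && (src[srclen - 1] == ' ' || src[srclen - 1] == '\0'))
		srclen--;

	while (srclen > 0 && dstlen > 1) {
		cur_pos = dst;

		/* Show '?' for non-printable characters. */
		if (*src < 0x20 || *src >= 0x7F)
			*cur_pos++ = '?';
		else
			*cur_pos++ = *src;
		src++;
		srclen--;
		dstlen -= cur_pos - dst;
		dst = cur_pos;
	}
	*dst = '\0';
}

int
main(void)
{
	/* A made-up, space-padded field containing one stray control byte. */
	const uint8_t model[16] = "  ACME NVMe\x01   ";
	uint8_t out[17];

	strvis(out, model, sizeof(out), sizeof(model));
	printf("'%s'\n", (char *)out);	/* prints 'ACME NVMe?' */
	return (0);
}
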